v0.2.1 - migrate to axum v0.7

This commit is contained in:
minish 2024-05-27 14:28:14 -04:00
parent 2e92ab4bf0
commit 76701113c5
Signed by: min
GPG Key ID: FEECFF24EF0CE9E9
7 changed files with 502 additions and 391 deletions

Cargo.lock — generated file, 822 lines changed

File diff suppressed because it is too large. (Load Diff)

View File

@ -1,11 +1,11 @@
[package]
name = "breeze"
version = "0.2.0"
version = "0.2.1"
edition = "2021"
[dependencies]
axum = { version = "0.6.1", features = ["macros"] }
hyper = { version = "0.14", features = ["full"] }
axum = { version = "0.7.5", features = ["macros", "http2"] }
http = "1.1.0"
tokio = { version = "1", features = ["full"] }
tokio-util = { version = "0.7.4", features = ["full"] }
tokio-stream = "0.1"
@ -16,11 +16,11 @@ async-recursion = "1.0.0"
walkdir = "2"
tracing = "0.1"
tracing-subscriber = "0.3"
serde = { version = "1.0.189", features = ["derive"] }
serde = { version = "1.0", features = ["derive"] }
serde_with = "3.4.0"
toml = "0.8.2"
clap = { version = "4.4.6", features = ["derive"] }
serde_with = "3.4.0"
anyhow = "1.0.79"
anyhow = "1.0"
dashmap = { version = "5.5.3", features = ["rayon", "inline"] }
rayon = "1.8"
atomic-time = "0.1.4"

View File

@ -33,7 +33,7 @@ impl Disk {
/// Formats the path on disk for a `saved_name`.
fn path_for(&self, saved_name: &str) -> PathBuf {
let mut p = self.cfg.save_path.clone();
p.push(&saved_name);
p.push(saved_name);
p
}

View File

@ -6,7 +6,7 @@ use std::{
time::Duration,
};
use axum::extract::BodyStream;
use axum::body::BodyDataStream;
use bytes::{BufMut, Bytes, BytesMut};
use img_parts::{DynImage, ImageEXIF};
use rand::distributions::{Alphanumeric, DistString};
@ -167,7 +167,7 @@ impl Engine {
saved_name: &str,
provided_len: usize,
mut use_cache: bool,
mut stream: BodyStream,
mut stream: BodyDataStream,
lifetime: Option<Duration>,
keep_exif: bool,
) -> Result<(), axum::Error> {
@ -206,8 +206,8 @@ impl Engine {
// if we have an i/o task, send it off
// also cloning this is okay because it's a Bytes
if !coalesce_and_strip {
debug!("sending chunk to i/o task");
if let Some(ref tx) = tx {
debug!("sending chunk to i/o task");
let _ = tx.send(chunk.clone()).await;
}
}
@ -248,8 +248,8 @@ impl Engine {
};
// send what we did over to the i/o task, all in one chunk
debug!("sending filled buffer to i/o task");
if let Some(ref tx) = tx {
debug!("sending filled buffer to i/o task");
let _ = tx.send(data.clone()).await;
}
@ -281,7 +281,7 @@ impl Engine {
&self,
ext: &str,
provided_len: usize,
stream: BodyStream,
stream: BodyDataStream,
lifetime: Option<Duration>,
keep_exif: bool,
) -> Result<ProcessOutcome, axum::Error> {

View File

@ -7,7 +7,7 @@ use axum::{
routing::{get, post},
Router,
};
use tokio::{fs, signal};
use tokio::{fs, net::TcpListener, signal};
use tracing::{info, warn};
mod cache;
@ -64,13 +64,11 @@ async fn main() {
.with_state(Arc::new(engine));
// start web server
axum::Server::bind(
&cfg.http
.listen_on
.parse()
.expect("failed to parse listen_on address"),
)
.serve(app.into_make_service())
let listener = TcpListener::bind(&cfg.http.listen_on)
.await
.expect("failed to bind to given `http.listen_on` address! make sure it's valid, and the port isn't already bound");
axum::serve(listener, app)
.with_graceful_shutdown(shutdown_signal())
.await
.expect("failed to start server");

View File

@ -1,10 +1,10 @@
use std::{ffi::OsStr, path::PathBuf, sync::Arc, time::Duration};
use axum::{
extract::{BodyStream, Query, State},
http::HeaderValue,
body::Body,
extract::{Query, State},
};
use hyper::{header, HeaderMap, StatusCode};
use http::{header, HeaderMap, HeaderValue, StatusCode};
use serde::Deserialize;
use serde_with::{serde_as, DurationSeconds};
@ -35,7 +35,7 @@ pub async fn new(
State(engine): State<Arc<crate::engine::Engine>>,
Query(req): Query<NewRequest>,
headers: HeaderMap,
stream: BodyStream,
body: Body,
) -> Result<String, StatusCode> {
// check upload key, if i need to
if !engine.cfg.upload_key.is_empty() && req.key.unwrap_or_default() != engine.cfg.upload_key {
@ -62,6 +62,9 @@ pub async fn new(
.unwrap()
.unwrap_or(usize::MAX);
// turn body into stream
let stream = Body::into_data_stream(body);
// pass it off to the engine to be processed!
match engine
.process(
@ -78,9 +81,7 @@ pub async fn new(
ProcessOutcome::Success(url) => Ok(url),
// 413 Payload Too Large
ProcessOutcome::TemporaryUploadTooLarge => {
Err(StatusCode::PAYLOAD_TOO_LARGE)
}
ProcessOutcome::TemporaryUploadTooLarge => Err(StatusCode::PAYLOAD_TOO_LARGE),
// 400 Bad Request
ProcessOutcome::TemporaryUploadLifetimeTooLong => Err(StatusCode::BAD_REQUEST),

View File

@ -3,12 +3,12 @@ use std::{
};
use axum::{
body::StreamBody,
body::Body,
extract::{Path, State},
response::{IntoResponse, Response},
};
use hyper::{http::HeaderValue, StatusCode};
use http::{HeaderValue, StatusCode};
use tokio_util::io::ReaderStream;
use crate::engine::UploadData;
@ -31,11 +31,11 @@ impl IntoResponse for UploadData {
let content_length = HeaderValue::from_str(&len_str).unwrap();
// create a streamed body response (we want to stream larger files)
let reader = ReaderStream::new(file);
let stream = StreamBody::new(reader);
let stream = ReaderStream::new(file);
let body = Body::from_stream(stream);
// extract mutable headers from the response
let mut res = stream.into_response();
let mut res = body.into_response();
let headers = res.headers_mut();
// clear headers, browser can imply content type