mostly just auth
commit 2662128bbb
parent a3c69ef914
Cargo.lock
@@ -129,7 +129,7 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
 
 [[package]]
 name = "breeze"
-version = "0.1.1"
+version = "0.1.2"
 dependencies = [
  "archived",
  "async-recursion",
Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "breeze"
-version = "0.1.1"
+version = "0.1.2"
 edition = "2021"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
src/engine.rs
@@ -20,16 +20,17 @@ use tokio::{
 use tokio_stream::StreamExt;
 use walkdir::WalkDir;
 
-use crate::view::{ViewSuccess, ViewError};
+use crate::view::{ViewError, ViewSuccess};
 
 pub struct Engine {
     // state
-    cache: RwLock<Archive>, // in-memory cache. note/ i plan to lock the cache specifically only when needed rather than locking the whole struct
+    cache: RwLock<Archive>, // in-memory cache
     pub upl_count: AtomicUsize, // cached count of uploaded files
 
     // config
     pub base_url: String, // base url for formatting upload urls
     save_path: PathBuf, // where uploads are saved to disk
+    pub upload_key: String, // authorisation key for uploading new files
 
     cache_max_length: usize, // if an upload is bigger than this size, it won't be cached
 }
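Aside: the comment removed above records a design choice the struct still relies on. Only the mutable `cache` sits behind a `RwLock`, so config fields like `base_url` can be read with no locking at all. A minimal sketch of that pattern, with hypothetical names rather than the actual breeze types:

use std::collections::HashMap;
use tokio::sync::RwLock;

// only the mutable cache is behind a lock; the config field is set
// once at startup and can be read freely afterwards
struct SharedState {
    cache: RwLock<HashMap<String, Vec<u8>>>,
    base_url: String,
}

impl SharedState {
    async fn put(&self, key: String, value: Vec<u8>) {
        // the write lock is held only for the duration of the insert
        self.cache.write().await.insert(key, value);
    }
}

#[tokio::main]
async fn main() {
    let state = SharedState {
        cache: RwLock::new(HashMap::new()),
        base_url: "http://127.0.0.1:8000".into(),
    };
    state.put("abcdef.png".into(), vec![0u8; 4]).await;
    println!("base_url read without locking: {}", state.base_url);
}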
@@ -39,6 +40,7 @@ impl Engine {
     pub fn new(
         base_url: String,
         save_path: PathBuf,
+        upload_key: String,
         cache_max_length: usize,
         cache_lifetime: Duration,
         cache_full_scan_freq: Duration, // how often the cache will be scanned for expired items
@@ -54,6 +56,7 @@ impl Engine {
 
             base_url,
             save_path,
+            upload_key,
 
             cache_max_length,
         }
@@ -141,11 +144,11 @@ impl Engine {
         // create file to save upload to
         let mut file = File::create(path)
             .await
-            .expect("could not open file! make sure your upload path exists");
+            .expect("could not open file! make sure your upload path is valid");
 
         // receive chunks and save them to file
         while let Some(chunk) = rx.recv().await {
             debug!("writing chunk to disk (length: {})", chunk.len());
-            debug!(target: "process_upload", "writing chunk to disk (length: {})", chunk.len());
             file.write_all(&chunk)
                 .await
                 .expect("error while writing file to disk");
@@ -157,15 +160,15 @@ impl Engine {
             let chunk = chunk.unwrap();
 
             // send chunk to io task
-            debug!(target: "process_upload", "sending data to io task");
+            debug!("sending data to io task");
             tx.send(chunk.clone())
                 .await
                 .expect("failed to send data to io task");
 
             if use_cache {
-                debug!(target: "process_upload", "receiving data into buffer");
+                debug!("receiving data into buffer");
                 if data.len() + chunk.len() > data.capacity() {
-                    error!(target: "process_upload", "the amount of data sent exceeds the content-length provided by the client! caching will be cancelled for this upload.");
+                    error!("the amount of data sent exceeds the content-length provided by the client! caching will be cancelled for this upload.");
 
                     // if we receive too much data, drop the buffer and stop using cache (it is still okay to use disk, probably)
                     data = BytesMut::new();
@@ -180,10 +183,12 @@ impl Engine {
         if use_cache {
             let mut cache = self.cache.write().await;
 
-            info!(target: "process_upload", "caching upload!");
+            info!("caching upload!");
             cache.insert(name, data.freeze());
         }
 
+        info!("finished processing upload!!");
+
         // if all goes well, increment the cached upload counter
         self.upl_count.fetch_add(1, Ordering::Relaxed);
     }
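`upl_count` is only a statistics counter, so `Ordering::Relaxed` is enough here: the increment itself must be atomic, but no other memory access is ordered through it. A self-contained sketch of the same pattern:

use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let upl_count = AtomicUsize::new(0);

    // atomic increment with no lock; Relaxed is fine because nothing
    // else is synchronized through this counter's value
    upl_count.fetch_add(1, Ordering::Relaxed);

    assert_eq!(upl_count.load(Ordering::Relaxed), 1);
}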
@@ -228,7 +233,7 @@ impl Engine {
         let cached_data = self.read_cached_upload(&name).await;
 
         if let Some(data) = cached_data {
-            info!(target: "get_upload", "got upload from cache!!");
+            info!("got upload from cache!!");
 
             return Ok(ViewSuccess::FromCache(data));
         } else {
@@ -241,7 +246,7 @@ impl Engine {
                 .expect("failed to read upload file metadata")
                 .len() as usize;
 
-            debug!(target: "get_upload", "read upload from disk, size = {}", length);
+            debug!("read upload from disk, size = {}", length);
 
             // if the upload is okay to cache, recache it and send a fromcache response
             if self.will_use_cache(length) {
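`will_use_cache(length)` is called here but its body is outside the diff. Given the `cache_max_length` field comment ("if an upload is bigger than this size, it won't be cached"), it is presumably a plain size comparison. The following is an assumed sketch, not the actual implementation:

// assumed shape of the cache gate, based only on the field comment
struct Engine {
    cache_max_length: usize,
}

impl Engine {
    fn will_use_cache(&self, length: usize) -> bool {
        length <= self.cache_max_length
    }
}

fn main() {
    let engine = Engine { cache_max_length: 80_000_000 };
    assert!(engine.will_use_cache(1_024));
    assert!(!engine.will_use_cache(100_000_000));
}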
@@ -268,12 +273,12 @@ impl Engine {
                 let mut cache = self.cache.write().await;
                 cache.insert(name, data.clone());
 
-                info!(target: "get_upload", "recached upload from disk!");
+                info!("recached upload from disk!");
 
                 return Ok(ViewSuccess::FromCache(data));
             }
 
-            info!(target: "get_upload", "got upload from disk!");
+            info!("got upload from disk!");
 
             return Ok(ViewSuccess::FromDisk(file));
         }
src/main.rs
@@ -26,6 +26,7 @@ async fn main() {
     // read env vars
     let base_url = env::var("BRZ_BASE_URL").expect("missing BRZ_BASE_URL! base url for upload urls (ex: http://127.0.0.1:8000 for http://127.0.0.1:8000/p/abcdef.png, http://picture.wtf for http://picture.wtf/p/abcdef.png)");
     let save_path = env::var("BRZ_SAVE_PATH").expect("missing BRZ_SAVE_PATH! this should be a path where uploads are saved to disk (ex: /srv/uploads, C:\\brzuploads)");
+    let upload_key = env::var("BRZ_UPLOAD_KEY").unwrap_or_default();
     let cache_max_length = env::var("BRZ_CACHE_UPL_MAX_LENGTH").expect("missing BRZ_CACHE_UPL_MAX_LENGTH! this is the max length an upload can be in bytes before it won't be cached (ex: 80000000 for 80MB)");
     let cache_upl_lifetime = env::var("BRZ_CACHE_UPL_LIFETIME").expect("missing BRZ_CACHE_UPL_LIFETIME! this indicates how long an upload will stay in cache (ex: 1800 for 30 minutes, 60 for 1 minute)");
     let cache_scan_freq = env::var("BRZ_CACHE_SCAN_FREQ").expect("missing BRZ_CACHE_SCAN_FREQ! this is the frequency of full cache scans, which scan for and remove expired uploads (ex: 60 for 1 minute)");
@@ -39,13 +40,19 @@ async fn main() {
     let cache_mem_capacity = usize::from_str_radix(&cache_mem_capacity, 10).expect("failed parsing BRZ_CACHE_MEM_CAPACITY! it should be a positive number without any separators");
 
     if !save_path.exists() || !save_path.is_dir() {
-        panic!("the save path does not exist or is not a directory. this is invalid");
+        panic!("the save path does not exist or is not a directory! this is invalid");
     }
 
+    if upload_key.is_empty() {
+        // i would prefer this to be a warning but the default log level hides those
+        error!("upload key is empty! no key will be required for uploading new files");
+    }
+
     // create engine
     let engine = Engine::new(
         base_url,
         save_path,
+        upload_key,
         cache_max_length,
         cache_upl_lifetime,
         cache_scan_freq,
@@ -71,7 +78,7 @@ async fn shutdown_signal() {
     let ctrl_c = async {
         signal::ctrl_c()
             .await
-            .expect("failed to add ctrl-c handler");
+            .expect("failed to add SIGINT handler");
     };
 
     #[cfg(unix)]
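The trailing `#[cfg(unix)]` suggests the function also waits for a unix signal, but that code is outside the diff. The usual shape of such a shutdown future (as in the axum graceful-shutdown examples) is sketched below; treat everything past the `ctrl_c` arm as an assumption about the elided part:

use tokio::signal;

async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to add SIGINT handler");
    };

    // assumed continuation: on unix, also wait for SIGTERM
    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to add SIGTERM handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    // resolve when either signal arrives
    tokio::select! {
        _ = ctrl_c => {},
        _ = terminate => {},
    }
}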
src/new.rs
@@ -4,7 +4,7 @@ use axum::{
     extract::{BodyStream, Query, State},
     http::HeaderValue,
 };
-use hyper::{HeaderMap, StatusCode, header};
+use hyper::{header, HeaderMap, StatusCode};
 
 #[axum::debug_handler]
 pub async fn new(
@@ -13,12 +13,21 @@ pub async fn new(
     Query(params): Query<HashMap<String, String>>,
     stream: BodyStream,
 ) -> Result<String, StatusCode> {
-    if !params.contains_key("name") {
+    let original_name = params.get("name");
+
+    // the original file name wasn't given, so i can't work out what the extension should be
+    if original_name.is_none() {
         return Err(StatusCode::BAD_REQUEST);
     }
 
-    let original_name = params.get("name").unwrap();
-    let original_path = PathBuf::from(original_name);
+    let key = params.get("key");
+
+    // check upload key, if i need to
+    if !engine.upload_key.is_empty() && key.unwrap_or(&String::new()) != &engine.upload_key {
+        return Err(StatusCode::FORBIDDEN);
+    }
+
+    let original_path = PathBuf::from(original_name.unwrap());
 
     let path = engine.gen_path(&original_path).await;
     let name = path
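The new gate reads: auth is skipped entirely when no key is configured, and otherwise the `key` query parameter must match exactly, checked before any path generation or disk I/O happens. The same logic extracted into a standalone function (hypothetical name, for illustration):

// empty configured key disables auth; otherwise the request's `key`
// query parameter must be present and match exactly
fn is_authorized(upload_key: &str, key_param: Option<&str>) -> bool {
    upload_key.is_empty() || key_param == Some(upload_key)
}

fn main() {
    assert!(is_authorized("", None)); // no key configured, everything allowed
    assert!(is_authorized("hunter2", Some("hunter2")));
    assert!(!is_authorized("hunter2", Some("wrong")));
    assert!(!is_authorized("hunter2", None));
}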
@@ -29,6 +38,7 @@ pub async fn new(
 
     let url = format!("{}/p/{}", engine.base_url, name);
 
+    // read and parse content-length, and if it fails just assume it's really high so it doesn't cache
     let content_length = headers
         .get(header::CONTENT_LENGTH)
         .unwrap_or(&HeaderValue::from_static(""))
@@ -37,6 +47,7 @@ pub async fn new(
         .unwrap()
         .unwrap_or(usize::MAX);
 
+    // pass it off to the engine to be processed!
     engine
         .process_upload(path, name, content_length, stream)
         .await;
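These two hunks show only the ends of the `content_length` expression; the middle of the chain is elided by the diff. From the visible fragments (`.unwrap_or(&HeaderValue::from_static(""))`, then `.unwrap()` and `.unwrap_or(usize::MAX)`), the chain plausibly looks like the sketch below, where a missing or unparsable header falls back to `usize::MAX` so the upload is treated as too big to cache. This is a reconstruction under that assumption:

use hyper::header::{self, HeaderValue};
use hyper::HeaderMap;

fn content_length(headers: &HeaderMap) -> usize {
    headers
        .get(header::CONTENT_LENGTH)
        // missing header: substitute an empty value that will fail to parse
        .unwrap_or(&HeaderValue::from_static(""))
        .to_str()
        .map(|s| s.parse::<usize>())
        // to_str() only fails on non-ASCII header bytes; this unwrap
        // matches the diff fragments and would panic on such input
        .unwrap()
        // "" (and any other unparsable value) ends up as usize::MAX
        .unwrap_or(usize::MAX)
}

fn main() {
    assert_eq!(content_length(&HeaderMap::new()), usize::MAX);
}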
src/view.rs
@@ -20,8 +20,8 @@ pub enum ViewSuccess {
 }
 
 pub enum ViewError {
-    NotFound,
-    InternalServerError,
+    NotFound, // 404
+    InternalServerError, // 500
 }
 
 impl IntoResponse for ViewSuccess {
@@ -80,9 +80,10 @@ pub async fn view(
         .into_iter()
         .any(|x| !matches!(x, Component::Normal(_)))
     {
-        warn!(target: "view", "a request attempted path traversal");
+        warn!("a request attempted path traversal");
         return Err(ViewError::NotFound);
     }
 
+    // get result from the engine!
     engine.get_upload(&original_path).await
 }
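The condition above treats a path as a traversal attempt if any component is not a plain file name: `..`, a root `/`, or a Windows prefix all fail the `Component::Normal` test, so the handler answers 404 without touching the disk. The same check as a standalone function:

use std::path::{Component, Path};

// a path is suspicious if any component is not Normal
// (e.g. `..`, a root `/`, or a Windows prefix like `C:`)
fn is_traversal_attempt(path: &Path) -> bool {
    path.components().any(|c| !matches!(c, Component::Normal(_)))
}

fn main() {
    assert!(!is_traversal_attempt(Path::new("abcdef.png")));
    assert!(is_traversal_attempt(Path::new("../etc/passwd")));
    assert!(is_traversal_attempt(Path::new("/etc/passwd")));
}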