Commit 1d146bf

feat: implement access control

zensh committed Jul 11, 2024
1 parent ec5ad1f commit 1d146bf
Showing 17 changed files with 1,420 additions and 251 deletions.
185 changes: 142 additions & 43 deletions Cargo.lock

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions Cargo.toml
@@ -29,6 +29,7 @@ base64 = "0.21"
 candid = "0.10"
 ciborium = "0.2"
 ciborium-io = "0.2"
+coset = { git = "https://github.com/ldclabs/coset.git", rev = "5cab9381043000adc52b85f34822b8e446a7066e" }
 futures = "0.3"
 futures-util = "0.3"
 log = "0.4"
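
Note: the new coset dependency (pinned to an ldclabs fork revision) supplies the COSE/CWT primitives behind the signed access tokens introduced in this commit. As a minimal sketch of decoding such a token with coset — the helper name and error handling are illustrative assumptions, not code from this commit:

    use coset::{CborSerializable, CoseSign1};

    // Parse raw access-token bytes (the `opt blob` arguments in the bucket
    // API) as a COSE Sign1 envelope; signature verification is a separate step.
    fn parse_access_token(bytes: &[u8]) -> Result<CoseSign1, String> {
        CoseSign1::from_slice(bytes).map_err(|err| format!("invalid COSE token: {err:?}"))
    }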
15 changes: 14 additions & 1 deletion src/ic_oss_bucket/README.md
@@ -30,7 +30,20 @@ ic-oss-cli -i debug/uploader.pem identity
 
 dfx canister call ic_oss_bucket admin_set_managers "(vec {principal \"$MYID\"; principal \"nprym-ylvyz-ig3fr-lgcmn-zzzt4-tyuix-3v6bm-fsel7-6lq6x-zh2w7-zqe\"})"
 
-dfx canister call ic_oss_bucket list_files '(0, null, null, null)'
+dfx canister call ic_oss_bucket admin_update_bucket "(record {
+  name = null;
+  max_file_size = null;
+  max_folder_depth = null;
+  max_children = null;
+  max_custom_data_size = null;
+  enable_hash_index = null;
+  status = null;
+  visibility = null;
+  trusted_ecdsa_pub_keys = null;
+  trusted_eddsa_pub_keys = opt vec {vec {19; 152; 246; 44; 109; 26; 69; 124; 81; 186; 106; 75; 95; 61; 189; 47; 105; 252; 169; 50; 22; 33; 141; 200; 153; 126; 65; 107; 209; 125; 147; 202}};
+}, null)"
+
+dfx canister call ic_oss_bucket list_files '(2, null, null, opt vec{132; 67; 161; 1; 39; 160; 88; 142; 166; 2; 120; 63; 122; 55; 119; 106; 112; 45; 118; 54; 102; 101; 51; 45; 107; 107; 115; 117; 53; 45; 50; 54; 102; 54; 52; 45; 100; 101; 100; 116; 119; 45; 106; 55; 110; 100; 106; 45; 53; 55; 111; 110; 120; 45; 113; 103; 97; 54; 99; 45; 101; 116; 53; 101; 51; 45; 110; 106; 120; 53; 51; 45; 116; 97; 101; 3; 120; 27; 109; 109; 114; 120; 117; 45; 102; 113; 97; 97; 97; 45; 97; 97; 97; 97; 112; 45; 97; 104; 104; 110; 97; 45; 99; 97; 105; 4; 26; 102; 143; 124; 240; 5; 26; 102; 143; 110; 224; 6; 26; 102; 143; 110; 224; 9; 120; 24; 70; 111; 108; 100; 101; 114; 46; 42; 58; 49; 32; 66; 117; 99; 107; 101; 116; 46; 82; 101; 97; 100; 46; 42; 88; 64; 210; 38; 140; 40; 73; 180; 152; 145; 49; 12; 114; 27; 202; 202; 177; 163; 235; 140; 234; 54; 118; 79; 125; 78; 80; 204; 34; 220; 129; 8; 77; 2; 199; 210; 196; 189; 235; 130; 159; 138; 88; 162; 111; 191; 48; 61; 174; 99; 187; 110; 150; 149; 191; 43; 253; 25; 38; 53; 226; 80; 52; 158; 193; 7})'
 dfx canister call ic_oss_bucket list_folders '(0, null)'
 
 ic-oss-cli -i debug/uploader.pem upload -b mmrxu-fqaaa-aaaap-ahhna-cai --file README.md
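
Note: the long byte vector passed to list_files above is a CBOR-encoded COSE Sign1 access token (alg = EdDSA, matching the trusted_eddsa_pub_keys registered just before). Its CWT claims are readable from the bytes: the caller principal as subject, bucket mmrxu-fqaaa-aaaap-ahhna-cai as audience, expiry/issued-at timestamps, and the scope string `Folder.*:1 Bucket.Read.*`. A minimal off-chain sketch of inspecting such a token with the coset crate (helper name and panic-style error handling are assumptions):

    use coset::{cwt::ClaimsSet, CborSerializable, CoseSign1};

    fn inspect_token(bytes: &[u8]) {
        let sign1 = CoseSign1::from_slice(bytes).expect("not a COSE Sign1 token");
        let payload = sign1.payload.expect("token has no payload");
        let claims = ClaimsSet::from_slice(&payload).expect("payload is not a CWT claims set");
        println!("subject:  {:?}", claims.subject); // caller principal
        println!("audience: {:?}", claims.audience); // bucket canister id
        println!("expires:  {:?}", claims.expiration_time);
        // The scope string sits in claims.rest under CWT claim key 9.
    }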
63 changes: 39 additions & 24 deletions src/ic_oss_bucket/ic_oss_bucket.did
@@ -10,7 +10,7 @@ type BTreeMap = vec record {
     Array : vec Value;
   };
 };
-type Bucket = record {
+type BucketInfo = record {
   status : int8;
   trusted_eddsa_pub_keys : vec blob;
   managers : vec principal;
@@ -94,14 +94,17 @@ type InitArgs = record {
 type MoveInput = record { id : nat32; to : nat32; from : nat32 };
 type Result = variant { Ok; Err : text };
 type Result_1 = variant { Ok : vec nat32; Err : text };
+type Result_10 = variant { Ok : vec FolderInfo; Err : text };
+type Result_11 = variant { Ok : UpdateFileOutput; Err : text };
+type Result_12 = variant { Ok : UpdateFileChunkOutput; Err : text };
 type Result_2 = variant { Ok : CreateFileOutput; Err : text };
 type Result_3 = variant { Ok : bool; Err : text };
-type Result_4 = variant { Ok : Bucket; Err };
-type Result_5 = variant { Ok : vec record { nat32; blob }; Err : text };
-type Result_6 = variant { Ok : FileInfo; Err : text };
-type Result_7 = variant { Ok : FolderInfo; Err : text };
-type Result_8 = variant { Ok : UpdateFileOutput; Err : text };
-type Result_9 = variant { Ok : UpdateFileChunkOutput; Err : text };
+type Result_4 = variant { Ok : BucketInfo; Err : text };
+type Result_5 = variant { Ok : vec FolderName; Err : text };
+type Result_6 = variant { Ok : vec record { nat32; blob }; Err : text };
+type Result_7 = variant { Ok : FileInfo; Err : text };
+type Result_8 = variant { Ok : FolderInfo; Err : text };
+type Result_9 = variant { Ok : vec FileInfo; Err : text };
 type StreamingCallbackHttpResponse = record {
   token : opt StreamingCallbackToken;
   body : blob;
@@ -120,6 +123,18 @@ type StreamingStrategy = variant {
     ) query;
   };
 };
+type UpdateBucketInput = record {
+  status : opt int8;
+  trusted_eddsa_pub_keys : opt vec blob;
+  name : opt text;
+  max_custom_data_size : opt nat16;
+  max_children : opt nat16;
+  enable_hash_index : opt bool;
+  max_file_size : opt nat64;
+  visibility : opt nat8;
+  max_folder_depth : opt nat8;
+  trusted_ecdsa_pub_keys : opt vec blob;
+};
 type UpdateFileChunkInput = record {
   id : nat32;
   chunk_index : nat32;
@@ -142,12 +157,10 @@ type UpdateFolderInput = record {
   name : opt text;
 };
 type UpgradeArgs = record {
-  name : opt text;
   max_custom_data_size : opt nat16;
   max_children : opt nat16;
   enable_hash_index : opt bool;
   max_file_size : opt nat64;
-  visibility : opt nat8;
   max_folder_depth : opt nat8;
 };
 type Value = variant {
@@ -162,30 +175,32 @@ type Value = variant {
 service : (opt CanisterArgs) -> {
   admin_set_auditors : (vec principal) -> (Result);
   admin_set_managers : (vec principal) -> (Result);
+  admin_update_bucket : (UpdateBucketInput) -> (Result);
   api_version : () -> (nat16) query;
   batch_delete_subfiles : (nat32, vec nat32, opt blob) -> (Result_1);
   create_file : (CreateFileInput, opt blob) -> (Result_2);
   create_folder : (CreateFolderInput, opt blob) -> (Result_2);
   delete_file : (nat32, opt blob) -> (Result_3);
   delete_folder : (nat32, opt blob) -> (Result_3);
   get_bucket_info : (opt blob) -> (Result_4) query;
-  get_file_ancestors : (nat32, opt blob) -> (vec FolderName) query;
-  get_file_chunks : (nat32, nat32, opt nat32, opt blob) -> (Result_5) query;
-  get_file_info : (nat32, opt blob) -> (Result_6) query;
-  get_file_info_by_hash : (blob, opt blob) -> (Result_6) query;
-  get_folder_ancestors : (nat32, opt blob) -> (vec FolderName) query;
-  get_folder_info : (nat32, opt blob) -> (Result_7) query;
+  get_file_ancestors : (nat32, opt blob) -> (Result_5) query;
+  get_file_chunks : (nat32, nat32, opt nat32, opt blob) -> (Result_6) query;
+  get_file_info : (nat32, opt blob) -> (Result_7) query;
+  get_file_info_by_hash : (blob, opt blob) -> (Result_7) query;
+  get_folder_ancestors : (nat32, opt blob) -> (Result_5) query;
+  get_folder_info : (nat32, opt blob) -> (Result_8) query;
   http_request : (HttpRequest) -> (HttpStreamingResponse) query;
   http_request_streaming_callback : (StreamingCallbackToken) -> (
     StreamingCallbackHttpResponse,
   ) query;
-  list_files : (nat32, opt nat32, opt nat32, opt blob) -> (vec FileInfo) query;
-  list_folders : (nat32, opt blob) -> (vec FolderInfo) query;
-  move_file : (MoveInput, opt blob) -> (Result_8);
-  move_folder : (MoveInput, opt blob) -> (Result_8);
-  update_file_chunk : (UpdateFileChunkInput, opt blob) -> (Result_9);
-  update_file_info : (UpdateFileInput, opt blob) -> (Result_8);
-  update_folder_info : (UpdateFolderInput, opt blob) -> (Result_8);
-  validate_admin_set_auditors : (vec principal) -> (Result);
-  validate_admin_set_managers : (vec principal) -> (Result);
+  list_files : (nat32, opt nat32, opt nat32, opt blob) -> (Result_9) query;
+  list_folders : (nat32, opt blob) -> (Result_10) query;
+  move_file : (MoveInput, opt blob) -> (Result_11);
+  move_folder : (MoveInput, opt blob) -> (Result_11);
+  update_file_chunk : (UpdateFileChunkInput, opt blob) -> (Result_12);
+  update_file_info : (UpdateFileInput, opt blob) -> (Result_11);
+  update_folder_info : (UpdateFolderInput, opt blob) -> (Result_11);
+  validate_admin_set_auditors : (vec principal) -> (Result) query;
+  validate_admin_set_managers : (vec principal) -> (Result) query;
+  validate_admin_update_bucket : (UpdateBucketInput) -> (Result) query;
 }
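
Note: a minimal off-chain sketch of building the admin_update_bucket argument with the candid crate. The Rust struct below mirrors the UpdateBucketInput record above; the struct definition and the visibility encoding are assumptions for illustration, not the ic_oss_types source:

    use candid::{CandidType, Encode};
    use serde::Deserialize;
    use serde_bytes::ByteBuf;

    #[derive(CandidType, Deserialize, Default)]
    struct UpdateBucketInput {
        status: Option<i8>,
        trusted_eddsa_pub_keys: Option<Vec<ByteBuf>>,
        name: Option<String>,
        max_custom_data_size: Option<u16>,
        max_children: Option<u16>,
        enable_hash_index: Option<bool>,
        max_file_size: Option<u64>,
        visibility: Option<u8>,
        max_folder_depth: Option<u8>,
        trusted_ecdsa_pub_keys: Option<Vec<ByteBuf>>,
    }

    fn main() -> Result<(), candid::Error> {
        // Only set what should change; `None` leaves a bucket setting untouched.
        let args = UpdateBucketInput {
            name: Some("my-bucket".to_string()),
            visibility: Some(1), // assumption: 1 = public, 0 = private
            ..Default::default()
        };
        let bytes = Encode!(&args)?; // candid-encoded argument for admin_update_bucket
        println!("encoded {} bytes", bytes.len());
        Ok(())
    }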
50 changes: 48 additions & 2 deletions src/ic_oss_bucket/src/api_admin.rs
@@ -1,17 +1,19 @@
 use candid::Principal;
+use ic_oss_types::bucket::UpdateBucketInput;
 use std::collections::BTreeSet;
 
 use crate::{is_controller, store, ANONYMOUS};
 
 #[ic_cdk::update(guard = "is_controller")]
 fn admin_set_managers(args: BTreeSet<Principal>) -> Result<(), String> {
+    validate_admin_set_managers(args.clone())?;
     store::state::with_mut(|r| {
         r.managers = args;
     });
     Ok(())
 }
 
-#[ic_cdk::update]
+#[ic_cdk::query]
 fn validate_admin_set_managers(args: BTreeSet<Principal>) -> Result<(), String> {
     if args.is_empty() {
         return Err("managers cannot be empty".to_string());
@@ -24,13 +26,14 @@ fn validate_admin_set_managers(args: BTreeSet<Principal>) -> Result<(), String>
 
 #[ic_cdk::update(guard = "is_controller")]
 fn admin_set_auditors(args: BTreeSet<Principal>) -> Result<(), String> {
+    validate_admin_set_auditors(args.clone())?;
     store::state::with_mut(|r| {
         r.auditors = args;
     });
     Ok(())
 }
 
-#[ic_cdk::update]
+#[ic_cdk::query]
 fn validate_admin_set_auditors(args: BTreeSet<Principal>) -> Result<(), String> {
     if args.is_empty() {
         return Err("auditors cannot be empty".to_string());
@@ -40,3 +43,46 @@ fn validate_admin_set_auditors(args: BTreeSet<Principal>) -> Result<(), String>
     }
     Ok(())
 }
+
+#[ic_cdk::update(guard = "is_controller")]
+fn admin_update_bucket(args: UpdateBucketInput) -> Result<(), String> {
+    args.validate()?;
+    store::state::with_mut(|s| {
+        if let Some(name) = args.name {
+            s.name = name;
+        }
+        if let Some(max_file_size) = args.max_file_size {
+            s.max_file_size = max_file_size;
+        }
+        if let Some(max_folder_depth) = args.max_folder_depth {
+            s.max_folder_depth = max_folder_depth;
+        }
+        if let Some(max_children) = args.max_children {
+            s.max_children = max_children;
+        }
+        if let Some(max_custom_data_size) = args.max_custom_data_size {
+            s.max_custom_data_size = max_custom_data_size;
+        }
+        if let Some(enable_hash_index) = args.enable_hash_index {
+            s.enable_hash_index = enable_hash_index;
+        }
+        if let Some(status) = args.status {
+            s.status = status;
+        }
+        if let Some(visibility) = args.visibility {
+            s.visibility = visibility;
+        }
+        if let Some(trusted_ecdsa_pub_keys) = args.trusted_ecdsa_pub_keys {
+            s.trusted_ecdsa_pub_keys = trusted_ecdsa_pub_keys;
+        }
+        if let Some(trusted_eddsa_pub_keys) = args.trusted_eddsa_pub_keys {
+            s.trusted_eddsa_pub_keys = trusted_eddsa_pub_keys;
+        }
+    });
+    Ok(())
+}
+
+#[ic_cdk::query]
+fn validate_admin_update_bucket(args: UpdateBucketInput) -> Result<(), String> {
+    args.validate()
+}
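
Note: each guarded update now runs its validate_* counterpart first, and the validate_* functions are exposed as queries so governance tooling can dry-run proposal arguments. A hypothetical stand-in for UpdateBucketInput::validate — the real checks live in ic_oss_types and may differ:

    use ic_oss_types::bucket::UpdateBucketInput;

    // Illustrative only: reject obviously invalid settings before applying them.
    fn validate_update_bucket(args: &UpdateBucketInput) -> Result<(), String> {
        if let Some(name) = &args.name {
            if name.is_empty() {
                return Err("name cannot be empty".to_string());
            }
        }
        if let Some(visibility) = args.visibility {
            if visibility > 1 {
                return Err("visibility should be 0 (private) or 1 (public)".to_string());
            }
        }
        if let Some(status) = args.status {
            if !(-1..=1).contains(&status) {
                return Err("status should be -1, 0 or 1".to_string());
            }
        }
        Ok(())
    }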
55 changes: 42 additions & 13 deletions src/ic_oss_bucket/src/api_http.rs
@@ -14,7 +14,7 @@ use serde_bytes::ByteBuf;
 use std::path::Path;
 use std::str::FromStr;
 
-use crate::store;
+use crate::{permission, store, SECONDS};
 
 #[derive(CandidType, Deserialize, Clone, Default)]
 pub struct HttpStreamingResponse {
@@ -124,15 +124,44 @@ fn http_request(request: HttpRequest) -> HttpStreamingResponse {
         param.file
     };
 
+    let canister = ic_cdk::id();
+    let ps = match store::state::with(|s| {
+        s.read_permission(
+            &ic_cdk::caller(),
+            &canister,
+            param.token,
+            ic_cdk::api::time() / SECONDS,
+        )
+    }) {
+        Ok(ps) => ps,
+        Err((status_code, err)) => {
+            return HttpStreamingResponse {
+                status_code,
+                headers,
+                body: ByteBuf::from(err.as_bytes()),
+                ..Default::default()
+            };
+        }
+    };
+
     match store::fs::get_file(id) {
         None => HttpStreamingResponse {
             status_code: 404,
             headers,
             body: ByteBuf::from("file not found".as_bytes()),
             ..Default::default()
         },
-        Some(metadata) => {
-            if metadata.size != metadata.filled {
+        Some(file) => {
+            if !permission::check_file_read(&ps, &canister, id, file.parent) {
+                return HttpStreamingResponse {
+                    status_code: 403,
+                    headers,
+                    body: ByteBuf::from("permission denied".as_bytes()),
+                    ..Default::default()
+                };
+            }
+
+            if file.size != file.filled {
                 return HttpStreamingResponse {
                     status_code: 422,
                     headers,
@@ -141,14 +170,14 @@ fn http_request(request: HttpRequest) -> HttpStreamingResponse {
                 };
             }
 
-            let etag = metadata
+            let etag = file
                 .hash
                 .as_ref()
                 .map(|hash| BASE64.encode(hash.as_ref()))
                 .unwrap_or_default();
 
             headers.push(("accept-ranges".to_string(), "bytes".to_string()));
-            if let Some(range_req) = detect_range(&request.headers, metadata.size, &etag) {
+            if let Some(range_req) = detect_range(&request.headers, file.size, &etag) {
                 match range_req {
                     Err(err) => {
                         return HttpStreamingResponse {
@@ -162,26 +191,26 @@ fn http_request(request: HttpRequest) -> HttpStreamingResponse {
                         if !etag.is_empty() {
                             headers.push(("etag".to_string(), etag));
                         }
-                        return range_response(headers, id, metadata, range);
+                        return range_response(headers, id, file, range);
                     }
                 }
             }
             if !etag.is_empty() {
                 headers.push(("etag".to_string(), etag));
             }
 
-            headers[0].1 = if metadata.content_type.is_empty() {
+            headers[0].1 = if file.content_type.is_empty() {
                 OCTET_STREAM.to_string()
             } else {
-                metadata.content_type.clone()
+                file.content_type.clone()
             };
 
             let filename = if param.inline {
                 ""
             } else if let Some(ref name) = param.name {
                 name
             } else {
-                &metadata.name
+                &file.name
             };
 
             headers.push((
@@ -190,9 +219,9 @@ fn http_request(request: HttpRequest) -> HttpStreamingResponse {
             ));
 
             // return all chunks for small file
-            let (chunk_index, body) = if metadata.size <= MAX_FILE_SIZE_PER_CALL {
+            let (chunk_index, body) = if file.size <= MAX_FILE_SIZE_PER_CALL {
                 (
-                    metadata.chunks.saturating_sub(1),
+                    file.chunks.saturating_sub(1),
                     store::fs::get_full_chunks(id)
                         .map(ByteBuf::from)
                         .unwrap_or_default(),
@@ -210,8 +239,8 @@ fn http_request(request: HttpRequest) -> HttpStreamingResponse {
             let streaming_strategy = create_strategy(StreamingCallbackToken {
                 id,
                 chunk_index,
-                chunks: metadata.chunks,
-                token: param.token,
+                chunks: file.chunks,
+                token: None, // TODO: access token for callback
             });
 
             // small file
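
Note: http_request now resolves the caller's permissions from the access token (read_permission) before serving file bytes, returning 403 when the token does not grant read access. A deliberately simplified, hypothetical sketch of the shape of a check like permission::check_file_read, matching scope strings such as `Bucket.Read.*` from the token — the commit's actual permission module is more structured:

    use candid::Principal;

    // ps: permission strings granted by the verified token, e.g.
    // ["Folder.*:1", "Bucket.Read.*"]. All names here are illustrative.
    fn check_file_read(ps: &[String], _canister: &Principal, _file_id: u32, _parent: u32) -> bool {
        ps.iter()
            .any(|p| p == "Bucket.Read.*" || p.starts_with("File.Read"))
    }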
(diffs for the remaining 11 changed files not shown)