diff --git a/Cargo.lock b/Cargo.lock index cf00a57..4bd510b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -111,6 +111,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +[[package]] +name = "arrayvec" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" + [[package]] name = "async-lock" version = "3.4.0" @@ -314,14 +320,14 @@ dependencies = [ "lazy_static", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.70", ] [[package]] name = "cc" -version = "1.0.104" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74b6a57f98764a267ff415d50a25e6e166f3831a5071af4995296ea97d210490" +checksum = "eaff6f8ce506b9773fa786672d63fc7a191ffea1be33f72bbd4aeacefca9ffc8" [[package]] name = "cfb" @@ -384,9 +390,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.8" +version = "4.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b3edb18336f4df585bc9aa31dd99c036dfa5dc5e9a2939a722a188f3a8970d" +checksum = "64acc1846d54c1fe936a78dc189c34e28d3f5afc348403f28ecf53660b9b8462" dependencies = [ "clap_builder", "clap_derive", @@ -394,9 +400,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.8" +version = "4.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1c09dd5ada6c6c78075d6fd0da3f90d8080651e2d6cc8eb2f1aaa4034ced708" +checksum = "6fb8393d67ba2e7bfaf28a23458e4e2b543cc73a99595511eb207fdb8aede942" dependencies = [ "anstream", "anstyle", @@ -413,7 +419,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.70", ] [[package]] @@ -459,6 +465,15 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +[[package]] +name = "coset" +version = "0.3.7" +source = "git+https://github.com/ldclabs/coset.git?rev=5cab9381043000adc52b85f34822b8e446a7066e#5cab9381043000adc52b85f34822b8e446a7066e" +dependencies = [ + "ciborium", + "ciborium-io", +] + [[package]] name = "cpufeatures" version = "0.2.12" @@ -511,6 +526,33 @@ dependencies = [ "typenum", ] +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest 0.10.7", + "fiat-crypto", + "rustc_version", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.70", +] + [[package]] name = "curve25519-dalek-ng" version = "4.1.1" @@ -585,6 +627,16 @@ dependencies = [ "spki", ] +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8", + "signature", +] + [[package]] name = "ed25519-consensus" version = "2.1.0" @@ -600,6 +652,20 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ed25519-dalek" +version = "2.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" +dependencies = [ + "curve25519-dalek", + "ed25519", + "serde", + "sha2 0.10.8", + "subtle", + "zeroize", +] + [[package]] name = "either" version = "1.13.0" @@ -698,6 +764,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + [[package]] name = "fnv" version = "1.0.7" @@ -784,7 +856,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.70", ] [[package]] @@ -930,6 +1002,15 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hex-conservative" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd" +dependencies = [ + "arrayvec 0.7.4", +] + [[package]] name = "hmac" version = "0.12.1" @@ -1009,9 +1090,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.29" +version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ "bytes", "futures-channel", @@ -1033,9 +1114,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4fe55fb7a772d59a5ff1dfbff4fe0258d19b89fec4b233e75d35d5d2316badc" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" dependencies = [ "bytes", "futures-channel", @@ -1058,7 +1139,7 @@ checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.4.0", + "hyper 1.4.1", "hyper-util", "rustls", "rustls-pki-types", @@ -1075,7 +1156,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.29", + "hyper 0.14.30", "native-tls", "tokio", "tokio-native-tls", @@ -1092,7 +1173,7 @@ dependencies = [ "futures-util", "http 1.1.0", "http-body 1.0.0", - "hyper 1.4.0", + "hyper 1.4.1", "pin-project-lite", "socket2", "tokio", @@ -1204,7 +1285,7 @@ dependencies = [ "quote", "serde", "serde_tokenstream", - "syn 2.0.68", + "syn 2.0.70", ] [[package]] @@ -1301,8 +1382,11 @@ dependencies = [ "base64 0.21.7", "candid", "ciborium", + "coset", "crc32fast", + "ed25519-dalek", "hex", + "hex-conservative", "icrc-ledger-types", "num-traits", "serde", @@ -1715,7 +1799,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.70", ] [[package]] @@ -1844,7 +1928,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.70", ] [[package]] @@ -1893,7 +1977,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b55c4d17d994b637e2f4daf6e5dc5d660d209d5642377d675d7a1c3ab69fa579" dependencies = [ - "arrayvec", + "arrayvec 0.5.2", "typed-arena", "unicode-width", ] @@ -2040,7 +2124,7 @@ dependencies = [ "h2", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.29", + "hyper 0.14.30", "hyper-tls", "ipnet", "js-sys", @@ -2080,7 +2164,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "hyper 1.4.0", + "hyper 1.4.1", "hyper-rustls", "hyper-util", "ipnet", @@ -2148,6 +2232,15 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + [[package]] name = "rustix" version = "0.38.34" @@ -2163,9 +2256,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.10" +version = "0.23.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +checksum = "4828ea528154ae444e5a642dbb7d5623354030dc9822b83fd9bb79683c7399d0" dependencies = [ "once_cell", "ring", @@ -2285,11 +2378,17 @@ dependencies = [ "libc", ] +[[package]] +name = "semver" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" + [[package]] name = "serde" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" dependencies = [ "serde_derive", ] @@ -2315,13 +2414,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.70", ] [[package]] @@ -2344,7 +2443,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.70", ] [[package]] @@ -2356,7 +2455,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.68", + "syn 2.0.70", ] [[package]] @@ -2530,9 +2629,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.68" +version = "2.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "901fa70d88b9d6c98022e23b4136f9f3e54e4662c3bc1bd1d84a42a9a0f0c1e9" +checksum = "2f0209b68b3613b093e0ec905354eccaedcfe83b8cb37cbdeae64026c3064c16" dependencies = [ "proc-macro2", "quote", @@ -2601,7 +2700,7 @@ checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.70", ] [[package]] @@ -2637,9 +2736,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c55115c6fbe2d2bef26eb09ad74bde02d8255476fc0c7b515ef09fbb35742d82" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] @@ -2677,7 +2776,7 @@ 
checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.70", ] [[package]] @@ -2771,7 +2870,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.70", ] [[package]] @@ -2868,9 +2967,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.9.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" [[package]] name = "vcpkg" @@ -2920,7 +3019,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.70", "wasm-bindgen-shared", ] @@ -2954,7 +3053,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.70", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3204,7 +3303,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.70", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 3d1fbde..155f9f6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,6 +29,7 @@ base64 = "0.21" candid = "0.10" ciborium = "0.2" ciborium-io = "0.2" +coset = { git = "https://github.com/ldclabs/coset.git", rev = "5cab9381043000adc52b85f34822b8e446a7066e" } futures = "0.3" futures-util = "0.3" log = "0.4" diff --git a/src/ic_oss_bucket/README.md b/src/ic_oss_bucket/README.md index 71f9de6..c32300d 100644 --- a/src/ic_oss_bucket/README.md +++ b/src/ic_oss_bucket/README.md @@ -30,7 +30,20 @@ ic-oss-cli -i debug/uploader.pem identity dfx canister call ic_oss_bucket admin_set_managers "(vec {principal \"$MYID\"; principal \"nprym-ylvyz-ig3fr-lgcmn-zzzt4-tyuix-3v6bm-fsel7-6lq6x-zh2w7-zqe\"})" -dfx canister call ic_oss_bucket list_files '(0, null, null, null)' +dfx canister call ic_oss_bucket admin_update_bucket "(record { + name = null; + max_file_size = null; + max_folder_depth = null; + max_children = null; + max_custom_data_size = null; + enable_hash_index = null; + status = null; + visibility = null; + trusted_ecdsa_pub_keys = null; + trusted_eddsa_pub_keys = opt vec {vec {19; 152; 246; 44; 109; 26; 69; 124; 81; 186; 106; 75; 95; 61; 189; 47; 105; 252; 169; 50; 22; 33; 141; 200; 153; 126; 65; 107; 209; 125; 147; 202}}; +}, null)" + +dfx canister call ic_oss_bucket list_files '(2, null, null, opt vec{132; 67; 161; 1; 39; 160; 88; 142; 166; 2; 120; 63; 122; 55; 119; 106; 112; 45; 118; 54; 102; 101; 51; 45; 107; 107; 115; 117; 53; 45; 50; 54; 102; 54; 52; 45; 100; 101; 100; 116; 119; 45; 106; 55; 110; 100; 106; 45; 53; 55; 111; 110; 120; 45; 113; 103; 97; 54; 99; 45; 101; 116; 53; 101; 51; 45; 110; 106; 120; 53; 51; 45; 116; 97; 101; 3; 120; 27; 109; 109; 114; 120; 117; 45; 102; 113; 97; 97; 97; 45; 97; 97; 97; 97; 112; 45; 97; 104; 104; 110; 97; 45; 99; 97; 105; 4; 26; 102; 143; 124; 240; 5; 26; 102; 143; 110; 224; 6; 26; 102; 143; 110; 224; 9; 120; 24; 70; 111; 108; 100; 101; 114; 46; 42; 58; 49; 32; 66; 117; 99; 107; 101; 116; 46; 82; 101; 97; 100; 46; 42; 88; 64; 210; 38; 140; 40; 73; 180; 152; 145; 49; 12; 114; 27; 202; 202; 177; 163; 235; 140; 234; 54; 118; 79; 125; 78; 80; 204; 34; 220; 129; 8; 77; 2; 199; 210; 196; 189; 235; 130; 159; 138; 88; 162; 111; 191; 48; 
61; 174; 99; 187; 110; 150; 149; 191; 43; 253; 25; 38; 53; 226; 80; 52; 158; 193; 7})' dfx canister call ic_oss_bucket list_folders '(0, null)' ic-oss-cli -i debug/uploader.pem upload -b mmrxu-fqaaa-aaaap-ahhna-cai --file README.md diff --git a/src/ic_oss_bucket/ic_oss_bucket.did b/src/ic_oss_bucket/ic_oss_bucket.did index ce1c7d5..09287b9 100644 --- a/src/ic_oss_bucket/ic_oss_bucket.did +++ b/src/ic_oss_bucket/ic_oss_bucket.did @@ -10,7 +10,7 @@ type BTreeMap = vec record { Array : vec Value; }; }; -type Bucket = record { +type BucketInfo = record { status : int8; trusted_eddsa_pub_keys : vec blob; managers : vec principal; @@ -94,14 +94,17 @@ type InitArgs = record { type MoveInput = record { id : nat32; to : nat32; from : nat32 }; type Result = variant { Ok; Err : text }; type Result_1 = variant { Ok : vec nat32; Err : text }; +type Result_10 = variant { Ok : vec FolderInfo; Err : text }; +type Result_11 = variant { Ok : UpdateFileOutput; Err : text }; +type Result_12 = variant { Ok : UpdateFileChunkOutput; Err : text }; type Result_2 = variant { Ok : CreateFileOutput; Err : text }; type Result_3 = variant { Ok : bool; Err : text }; -type Result_4 = variant { Ok : Bucket; Err }; -type Result_5 = variant { Ok : vec record { nat32; blob }; Err : text }; -type Result_6 = variant { Ok : FileInfo; Err : text }; -type Result_7 = variant { Ok : FolderInfo; Err : text }; -type Result_8 = variant { Ok : UpdateFileOutput; Err : text }; -type Result_9 = variant { Ok : UpdateFileChunkOutput; Err : text }; +type Result_4 = variant { Ok : BucketInfo; Err : text }; +type Result_5 = variant { Ok : vec FolderName; Err : text }; +type Result_6 = variant { Ok : vec record { nat32; blob }; Err : text }; +type Result_7 = variant { Ok : FileInfo; Err : text }; +type Result_8 = variant { Ok : FolderInfo; Err : text }; +type Result_9 = variant { Ok : vec FileInfo; Err : text }; type StreamingCallbackHttpResponse = record { token : opt StreamingCallbackToken; body : blob; @@ -120,6 +123,18 @@ type StreamingStrategy = variant { ) query; }; }; +type UpdateBucketInput = record { + status : opt int8; + trusted_eddsa_pub_keys : opt vec blob; + name : opt text; + max_custom_data_size : opt nat16; + max_children : opt nat16; + enable_hash_index : opt bool; + max_file_size : opt nat64; + visibility : opt nat8; + max_folder_depth : opt nat8; + trusted_ecdsa_pub_keys : opt vec blob; +}; type UpdateFileChunkInput = record { id : nat32; chunk_index : nat32; @@ -142,12 +157,10 @@ type UpdateFolderInput = record { name : opt text; }; type UpgradeArgs = record { - name : opt text; max_custom_data_size : opt nat16; max_children : opt nat16; enable_hash_index : opt bool; max_file_size : opt nat64; - visibility : opt nat8; max_folder_depth : opt nat8; }; type Value = variant { @@ -162,6 +175,7 @@ type Value = variant { service : (opt CanisterArgs) -> { admin_set_auditors : (vec principal) -> (Result); admin_set_managers : (vec principal) -> (Result); + admin_update_bucket : (UpdateBucketInput) -> (Result); api_version : () -> (nat16) query; batch_delete_subfiles : (nat32, vec nat32, opt blob) -> (Result_1); create_file : (CreateFileInput, opt blob) -> (Result_2); @@ -169,23 +183,24 @@ service : (opt CanisterArgs) -> { delete_file : (nat32, opt blob) -> (Result_3); delete_folder : (nat32, opt blob) -> (Result_3); get_bucket_info : (opt blob) -> (Result_4) query; - get_file_ancestors : (nat32, opt blob) -> (vec FolderName) query; - get_file_chunks : (nat32, nat32, opt nat32, opt blob) -> (Result_5) query; - get_file_info : (nat32, 
opt blob) -> (Result_6) query; - get_file_info_by_hash : (blob, opt blob) -> (Result_6) query; - get_folder_ancestors : (nat32, opt blob) -> (vec FolderName) query; - get_folder_info : (nat32, opt blob) -> (Result_7) query; + get_file_ancestors : (nat32, opt blob) -> (Result_5) query; + get_file_chunks : (nat32, nat32, opt nat32, opt blob) -> (Result_6) query; + get_file_info : (nat32, opt blob) -> (Result_7) query; + get_file_info_by_hash : (blob, opt blob) -> (Result_7) query; + get_folder_ancestors : (nat32, opt blob) -> (Result_5) query; + get_folder_info : (nat32, opt blob) -> (Result_8) query; http_request : (HttpRequest) -> (HttpStreamingResponse) query; http_request_streaming_callback : (StreamingCallbackToken) -> ( StreamingCallbackHttpResponse, ) query; - list_files : (nat32, opt nat32, opt nat32, opt blob) -> (vec FileInfo) query; - list_folders : (nat32, opt blob) -> (vec FolderInfo) query; - move_file : (MoveInput, opt blob) -> (Result_8); - move_folder : (MoveInput, opt blob) -> (Result_8); - update_file_chunk : (UpdateFileChunkInput, opt blob) -> (Result_9); - update_file_info : (UpdateFileInput, opt blob) -> (Result_8); - update_folder_info : (UpdateFolderInput, opt blob) -> (Result_8); - validate_admin_set_auditors : (vec principal) -> (Result); - validate_admin_set_managers : (vec principal) -> (Result); + list_files : (nat32, opt nat32, opt nat32, opt blob) -> (Result_9) query; + list_folders : (nat32, opt blob) -> (Result_10) query; + move_file : (MoveInput, opt blob) -> (Result_11); + move_folder : (MoveInput, opt blob) -> (Result_11); + update_file_chunk : (UpdateFileChunkInput, opt blob) -> (Result_12); + update_file_info : (UpdateFileInput, opt blob) -> (Result_11); + update_folder_info : (UpdateFolderInput, opt blob) -> (Result_11); + validate_admin_set_auditors : (vec principal) -> (Result) query; + validate_admin_set_managers : (vec principal) -> (Result) query; + validate_admin_update_bucket : (UpdateBucketInput) -> (Result) query; } diff --git a/src/ic_oss_bucket/src/api_admin.rs b/src/ic_oss_bucket/src/api_admin.rs index 9ad79ca..d563370 100644 --- a/src/ic_oss_bucket/src/api_admin.rs +++ b/src/ic_oss_bucket/src/api_admin.rs @@ -1,17 +1,19 @@ use candid::Principal; +use ic_oss_types::bucket::UpdateBucketInput; use std::collections::BTreeSet; use crate::{is_controller, store, ANONYMOUS}; #[ic_cdk::update(guard = "is_controller")] fn admin_set_managers(args: BTreeSet<Principal>) -> Result<(), String> { + validate_admin_set_managers(args.clone())?; store::state::with_mut(|r| { r.managers = args; }); Ok(()) } -#[ic_cdk::update] +#[ic_cdk::query] fn validate_admin_set_managers(args: BTreeSet<Principal>) -> Result<(), String> { if args.is_empty() { return Err("managers cannot be empty".to_string()); } @@ -24,13 +26,14 @@ fn validate_admin_set_managers(args: BTreeSet<Principal>) -> Result<(), String> #[ic_cdk::update(guard = "is_controller")] fn admin_set_auditors(args: BTreeSet<Principal>) -> Result<(), String> { + validate_admin_set_auditors(args.clone())?; store::state::with_mut(|r| { r.auditors = args; }); Ok(()) } -#[ic_cdk::update] +#[ic_cdk::query] fn validate_admin_set_auditors(args: BTreeSet<Principal>) -> Result<(), String> { if args.is_empty() { return Err("auditors cannot be empty".to_string()); } if args.contains(&ANONYMOUS) { return Err("anonymous user is not allowed".to_string()); } Ok(()) } @@ -40,3 +43,46 @@ fn validate_admin_set_auditors(args: BTreeSet<Principal>) -> Result<(), String> } +#[ic_cdk::update(guard = "is_controller")] +fn admin_update_bucket(args: UpdateBucketInput) -> Result<(), String> { + args.validate()?; + store::state::with_mut(|s| { + if let Some(name) = args.name { + s.name = name;
} + if let Some(max_file_size) = args.max_file_size { + s.max_file_size = max_file_size; + } + if let Some(max_folder_depth) = args.max_folder_depth { + s.max_folder_depth = max_folder_depth; + } + if let Some(max_children) = args.max_children { + s.max_children = max_children; + } + if let Some(max_custom_data_size) = args.max_custom_data_size { + s.max_custom_data_size = max_custom_data_size; + } + if let Some(enable_hash_index) = args.enable_hash_index { + s.enable_hash_index = enable_hash_index; + } + if let Some(status) = args.status { + s.status = status; + } + if let Some(visibility) = args.visibility { + s.visibility = visibility; + } + if let Some(trusted_ecdsa_pub_keys) = args.trusted_ecdsa_pub_keys { + s.trusted_ecdsa_pub_keys = trusted_ecdsa_pub_keys; + } + if let Some(trusted_eddsa_pub_keys) = args.trusted_eddsa_pub_keys { + s.trusted_eddsa_pub_keys = trusted_eddsa_pub_keys; + } + }); + Ok(()) +} + +#[ic_cdk::query] +fn validate_admin_update_bucket(args: UpdateBucketInput) -> Result<(), String> { + args.validate() +} diff --git a/src/ic_oss_bucket/src/api_http.rs b/src/ic_oss_bucket/src/api_http.rs index d772e27..59f1d50 100644 --- a/src/ic_oss_bucket/src/api_http.rs +++ b/src/ic_oss_bucket/src/api_http.rs @@ -14,7 +14,7 @@ use serde_bytes::ByteBuf; use std::path::Path; use std::str::FromStr; -use crate::store; +use crate::{permission, store, SECONDS}; #[derive(CandidType, Deserialize, Clone, Default)] pub struct HttpStreamingResponse { @@ -124,6 +124,26 @@ fn http_request(request: HttpRequest) -> HttpStreamingResponse { param.file }; + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.read_permission( + &ic_cdk::caller(), + &canister, + param.token, + ic_cdk::api::time() / SECONDS, + ) + }) { + Ok(ps) => ps, + Err((status_code, err)) => { + return HttpStreamingResponse { + status_code, + headers, + body: ByteBuf::from(err.as_bytes()), + ..Default::default() + }; + } + }; + match store::fs::get_file(id) { None => HttpStreamingResponse { status_code: 404, @@ -131,8 +151,17 @@ fn http_request(request: HttpRequest) -> HttpStreamingResponse { body: ByteBuf::from("file not found".as_bytes()), ..Default::default() }, - Some(metadata) => { - if metadata.size != metadata.filled { + Some(file) => { + if !permission::check_file_read(&ps, &canister, id, file.parent) { + return HttpStreamingResponse { + status_code: 403, + headers, + body: ByteBuf::from("permission denied".as_bytes()), + ..Default::default() + }; + } + + if file.size != file.filled { return HttpStreamingResponse { status_code: 422, headers, @@ -141,14 +170,14 @@ fn http_request(request: HttpRequest) -> HttpStreamingResponse { }; } - let etag = metadata + let etag = file .hash .as_ref() .map(|hash| BASE64.encode(hash.as_ref())) .unwrap_or_default(); headers.push(("accept-ranges".to_string(), "bytes".to_string())); - if let Some(range_req) = detect_range(&request.headers, metadata.size, &etag) { + if let Some(range_req) = detect_range(&request.headers, file.size, &etag) { match range_req { Err(err) => { return HttpStreamingResponse { @@ -162,7 +191,7 @@ fn http_request(request: HttpRequest) -> HttpStreamingResponse { if !etag.is_empty() { headers.push(("etag".to_string(), etag)); } - return range_response(headers, id, metadata, range); + return range_response(headers, id, file, range); } } } @@ -170,10 +199,10 @@ fn http_request(request: HttpRequest) -> HttpStreamingResponse { headers.push(("etag".to_string(), etag)); } - headers[0].1 = if metadata.content_type.is_empty() { + headers[0].1 = if 
file.content_type.is_empty() { OCTET_STREAM.to_string() } else { - metadata.content_type.clone() + file.content_type.clone() }; let filename = if param.inline { "" } else if let Some(ref name) = param.name { name } else { - &metadata.name + &file.name }; headers.push(( "content-disposition".to_string(), content_disposition(filename), )); // return all chunks for small file - let (chunk_index, body) = if metadata.size <= MAX_FILE_SIZE_PER_CALL { + let (chunk_index, body) = if file.size <= MAX_FILE_SIZE_PER_CALL { ( - metadata.chunks.saturating_sub(1), + file.chunks.saturating_sub(1), store::fs::get_full_chunks(id) .map(ByteBuf::from) .unwrap_or_default(), ) } else { @@ -210,8 +239,8 @@ fn http_request(request: HttpRequest) -> HttpStreamingResponse { let streaming_strategy = create_strategy(StreamingCallbackToken { id, chunk_index, - chunks: metadata.chunks, - token: param.token, + chunks: file.chunks, + token: None, // TODO: access token for callback }); // small file diff --git a/src/ic_oss_bucket/src/api_init.rs b/src/ic_oss_bucket/src/api_init.rs index 0d5bd08..97d6a2a 100644 --- a/src/ic_oss_bucket/src/api_init.rs +++ b/src/ic_oss_bucket/src/api_init.rs @@ -17,29 +17,22 @@ pub struct InitArgs { max_file_size: u64, max_folder_depth: u8, max_children: u16, - visibility: u8, // 0: private; 1: public max_custom_data_size: u16, enable_hash_index: bool, + visibility: u8, // 0: private; 1: public } #[derive(Clone, Debug, CandidType, Deserialize)] pub struct UpgradeArgs { - name: Option<String>, max_file_size: Option<u64>, max_folder_depth: Option<u8>, max_children: Option<u16>, - visibility: Option<u8>, // 0: private; 1: public max_custom_data_size: Option<u16>, enable_hash_index: Option<bool>, } impl UpgradeArgs { fn validate(&self) -> Result<(), String> { - if let Some(name) = &self.name { - if name.is_empty() { - return Err("name cannot be empty".to_string()); - } - } if let Some(max_file_size) = self.max_file_size { if max_file_size == 0 { return Err("max_file_size should be greater than 0".to_string()); } @@ -61,11 +54,7 @@ impl UpgradeArgs { return Err("max_children should be greater than 0".to_string()); } } - if let Some(visibility) = self.visibility { - if visibility != 0 && visibility != 1 { - return Err("visibility should be 0 or 1".to_string()); - } - } + if let Some(max_custom_data_size) = self.max_custom_data_size { if max_custom_data_size == 0 { return Err("max_custom_data_size should be greater than 0".to_string()); } @@ -136,9 +125,6 @@ fn post_upgrade(args: Option<CanisterArgs>) { } store::state::with_mut(|s| { - if let Some(name) = args.name { - s.name = name; - } if let Some(max_file_size) = args.max_file_size { s.max_file_size = max_file_size; } @@ -148,9 +134,7 @@ fn post_upgrade(args: Option<CanisterArgs>) { if let Some(max_children) = args.max_children { s.max_children = max_children; } - if let Some(visibility) = args.visibility { - s.visibility = visibility; - } + if let Some(max_custom_data_size) = args.max_custom_data_size { s.max_custom_data_size = max_custom_data_size; } diff --git a/src/ic_oss_bucket/src/api_query.rs b/src/ic_oss_bucket/src/api_query.rs index 618f1f5..fd0baf7 100644 --- a/src/ic_oss_bucket/src/api_query.rs +++ b/src/ic_oss_bucket/src/api_query.rs @@ -1,10 +1,12 @@ use ic_oss_types::{ + bucket::BucketInfo, file::{FileChunk, FileInfo}, folder::{FolderInfo, FolderName}, + ByteN, }; use serde_bytes::ByteBuf; -use crate::store; +use crate::{permission, store, SECONDS}; #[ic_cdk::query] fn api_version() -> u16 { @@ -12,39 +14,109
@@ fn api_version() -> u16 { } #[ic_cdk::query] -fn get_bucket_info(_access_token: Option<ByteBuf>) -> Result<Bucket, String> { - Ok(store::state::with(|r| r.clone())) +fn get_bucket_info(access_token: Option<ByteBuf>) -> Result<BucketInfo, String> { + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.read_permission( + &ic_cdk::caller(), + &canister, + access_token, + ic_cdk::api::time() / SECONDS, + ) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; + + if !permission::check_bucket_read(&ps, &canister) { + return Err("permission denied".to_string()); + } + + Ok(store::state::with(|r| BucketInfo { + name: r.name.clone(), + file_count: r.file_count, + file_id: r.file_id, + folder_count: r.folder_count, + folder_id: r.folder_id, + max_file_size: r.max_file_size, + max_folder_depth: r.max_folder_depth, + max_children: r.max_children, + max_custom_data_size: r.max_custom_data_size, + enable_hash_index: r.enable_hash_index, + status: r.status, + visibility: r.visibility, + managers: r.managers.clone(), + auditors: r.auditors.clone(), + trusted_ecdsa_pub_keys: r.trusted_ecdsa_pub_keys.clone(), + trusted_eddsa_pub_keys: r.trusted_eddsa_pub_keys.clone(), + })) } #[ic_cdk::query] -fn get_file_info(id: u32, _access_token: Option<ByteBuf>) -> Result<FileInfo, String> { +fn get_file_info(id: u32, access_token: Option<ByteBuf>) -> Result<FileInfo, String> { match store::fs::get_file(id) { - Some(meta) => Ok(meta.into_info(id)), None => Err("file not found".to_string()), + Some(file) => { + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.read_permission( + &ic_cdk::caller(), + &canister, + access_token, + ic_cdk::api::time() / SECONDS, + ) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; + + if !permission::check_file_read(&ps, &canister, id, file.parent) { + Err("permission denied".to_string())?; + } + + Ok(file.into_info(id)) + } } } #[ic_cdk::query] fn get_file_info_by_hash( - hash: ByteBuf, - _access_token: Option<ByteBuf>, + hash: ByteN<32>, + access_token: Option<ByteBuf>, ) -> Result<FileInfo, String> { - if hash.len() != 32 { - return Err(format!("expected 32 bytes, got {}", hash.len())); - } - let mut result = [0u8; 32]; - result.copy_from_slice(&hash); - let id = store::fs::get_file_id(&result).ok_or("file not found")?; + let id = store::fs::get_file_id(&hash).ok_or("file not found")?; - match store::fs::get_file(id) { - Some(meta) => Ok(meta.into_info(id)), - None => Err("file not found".to_string()), - } + get_file_info(id, access_token) } #[ic_cdk::query] -fn get_file_ancestors(id: u32, _access_token: Option<ByteBuf>) -> Vec<FolderName> { - store::fs::get_file_ancestors(id) +fn get_file_ancestors(id: u32, access_token: Option<ByteBuf>) -> Result<Vec<FolderName>, String> { + let ancestors = store::fs::get_file_ancestors(id); + if let Some(parent) = ancestors.first() { + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.read_permission( + &ic_cdk::caller(), + &canister, + access_token, + ic_cdk::api::time() / SECONDS, + ) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; + + if !permission::check_file_read(&ps, &canister, id, parent.id) { + Err("permission denied".to_string())?; + } + } + Ok(ancestors) } #[ic_cdk::query] @@ -52,9 +124,33 @@ fn get_file_chunks( id: u32, index: u32, take: Option<u32>, - _access_token: Option<ByteBuf>, + access_token: Option<ByteBuf>, ) -> Result<Vec<FileChunk>, String> { - Ok(store::fs::get_chunks(id, index, take.unwrap_or(10).min(8))) + match store::fs::get_file(id) { + None => Err("file not found".to_string()), + Some(file) => { + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.read_permission(
&ic_cdk::caller(), + &canister, + access_token, + ic_cdk::api::time() / SECONDS, + ) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; + + if !permission::check_file_read(&ps, &canister, id, file.parent) { + Err("permission denied".to_string())?; + } + + Ok(store::fs::get_chunks(id, index, take.unwrap_or(10).min(8))) + } + } } #[ic_cdk::query] @@ -62,28 +158,106 @@ fn list_files( parent: u32, prev: Option<u32>, take: Option<u32>, - _access_token: Option<ByteBuf>, -) -> Vec<FileInfo> { + access_token: Option<ByteBuf>, +) -> Result<Vec<FileInfo>, String> { let max_prev = store::state::with(|s| s.file_id).saturating_add(1); let prev = prev.unwrap_or(max_prev).min(max_prev); let take = take.unwrap_or(10).min(100); - store::fs::list_files(parent, prev, take) + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.read_permission( + &ic_cdk::caller(), + &canister, + access_token, + ic_cdk::api::time() / SECONDS, + ) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; + + if !permission::check_file_list(&ps, &canister, parent) { + Err("permission denied".to_string())?; + } + Ok(store::fs::list_files(parent, prev, take)) } #[ic_cdk::query] -fn get_folder_info(id: u32, _access_token: Option<ByteBuf>) -> Result<FolderInfo, String> { +fn get_folder_info(id: u32, access_token: Option<ByteBuf>) -> Result<FolderInfo, String> { match store::fs::get_folder(id) { - Some(meta) => Ok(meta.into_info(id)), None => Err("folder not found".to_string()), + Some(meta) => { + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.read_permission( + &ic_cdk::caller(), + &canister, + access_token, + ic_cdk::api::time() / SECONDS, + ) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; + + if !permission::check_folder_read(&ps, &canister, id) { + Err("permission denied".to_string())?; + } + + Ok(meta.into_info(id)) + } } } #[ic_cdk::query] -fn get_folder_ancestors(id: u32, _access_token: Option<ByteBuf>) -> Vec<FolderName> { - store::fs::get_folder_ancestors(id) +fn get_folder_ancestors(id: u32, access_token: Option<ByteBuf>) -> Result<Vec<FolderName>, String> { + let ancestors = store::fs::get_folder_ancestors(id); + if !ancestors.is_empty() { + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.read_permission( + &ic_cdk::caller(), + &canister, + access_token, + ic_cdk::api::time() / SECONDS, + ) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; + + if !permission::check_folder_read(&ps, &canister, id) { + Err("permission denied".to_string())?; + } + } + Ok(ancestors) } #[ic_cdk::query] -fn list_folders(parent: u32, _access_token: Option<ByteBuf>) -> Vec<FolderInfo> { - store::fs::list_folders(parent) +fn list_folders(parent: u32, access_token: Option<ByteBuf>) -> Result<Vec<FolderInfo>, String> { + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.read_permission( + &ic_cdk::caller(), + &canister, + access_token, + ic_cdk::api::time() / SECONDS, + ) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; + + if !permission::check_folder_list(&ps, &canister, parent) { + Err("permission denied".to_string())?; + } + Ok(store::fs::list_folders(parent)) }
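Every query endpoint above repeats the same read-permission preamble before doing its real work. A minimal consolidation sketch (not part of this diff; `read_policies` is a hypothetical name, assumed to live in api_query.rs where `store` and `SECONDS` are in scope):

```rust
use ic_oss_types::permission::Policies;
use serde_bytes::ByteBuf;

// Hypothetical helper: resolve the caller's read policies once and flatten
// the (status_code, message) error from Bucket::read_permission into the
// plain String error that the query endpoints return.
fn read_policies(access_token: Option<ByteBuf>) -> Result<(candid::Principal, Policies), String> {
    let canister = ic_cdk::id();
    store::state::with(|s| {
        s.read_permission(
            &ic_cdk::caller(),
            &canister,
            access_token,
            ic_cdk::api::time() / SECONDS,
        )
    })
    .map(|ps| (canister, ps))
    .map_err(|(_, err)| err)
}
```

With such a helper, `list_folders` would reduce to one `read_policies` call, one `permission::check_folder_list` check, and `store::fs::list_folders(parent)`.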
"is_controller_or_manager")] +#[ic_cdk::update] fn create_file( input: CreateFileInput, - _access_token: Option, + access_token: Option, ) -> Result { input.validate()?; @@ -28,8 +28,22 @@ fn create_file( Ok(()) })?; + let now_ms = ic_cdk::api::time() / MILLISECONDS; + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.write_permission(&ic_cdk::caller(), &canister, access_token, now_ms / 1000) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; + + if !permission::check_file_create(&ps, &canister, input.parent) { + Err("permission denied".to_string())?; + } + let res: Result = { - let now_ms = ic_cdk::api::time() / MILLISECONDS; let id = store::fs::add_file(store::FileMetadata { parent: input.parent, name: input.name, @@ -53,7 +67,7 @@ fn create_file( } for (i, chunk) in content.chunks(MAX_CHUNK_SIZE as usize).enumerate() { - store::fs::update_chunk(id, i as u32, now_ms, chunk.to_vec())?; + store::fs::update_chunk(id, i as u32, now_ms, chunk.to_vec(), |_| Ok(()))?; } if input.status.is_some() { @@ -64,6 +78,7 @@ fn create_file( ..Default::default() }, now_ms, + |_| Ok(()), )?; } } @@ -83,10 +98,10 @@ fn create_file( } } -#[ic_cdk::update(guard = "is_controller_or_manager")] +#[ic_cdk::update] fn update_file_info( input: UpdateFileInput, - _access_token: Option, + access_token: Option, ) -> Result { input.validate()?; @@ -103,28 +118,64 @@ fn update_file_info( Ok(()) })?; - let updated_at = ic_cdk::api::time() / MILLISECONDS; - store::fs::update_file(input, updated_at)?; - Ok(UpdateFileOutput { updated_at }) + let now_ms = ic_cdk::api::time() / MILLISECONDS; + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.write_permission(&ic_cdk::caller(), &canister, access_token, now_ms / 1000) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; + + let id = input.id; + store::fs::update_file(input, now_ms, |file| { + match permission::check_file_update(&ps, &canister, id, file.parent) { + true => Ok(()), + false => Err("permission denied".to_string()), + } + })?; + Ok(UpdateFileOutput { updated_at: now_ms }) } -#[ic_cdk::update(guard = "is_controller_or_manager")] +#[ic_cdk::update] fn update_file_chunk( input: UpdateFileChunkInput, - _access_token: Option, + access_token: Option, ) -> Result { - let now_ms = ic_cdk::api::time() / MILLISECONDS; if let Some(checksum) = input.crc32 { if crc32(&input.content) != checksum { Err("crc32 checksum mismatch".to_string())?; } } + let now_ms = ic_cdk::api::time() / MILLISECONDS; + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.write_permission( + &ic_cdk::caller(), + &canister, + access_token, + ic_cdk::api::time() / SECONDS, + ) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; + + let id = input.id; let filled = store::fs::update_chunk( input.id, input.chunk_index, now_ms, input.content.into_vec(), + |file| match permission::check_file_update(&ps, &canister, id, file.parent) { + true => Ok(()), + false => Err("permission denied".to_string()), + }, )?; Ok(UpdateFileChunkOutput { @@ -133,36 +184,98 @@ fn update_file_chunk( }) } -#[ic_cdk::update(guard = "is_controller_or_manager")] -fn move_file(input: MoveInput, _access_token: Option) -> Result { - let updated_at = ic_cdk::api::time() / MILLISECONDS; - store::fs::move_file(input.id, input.from, input.to, updated_at)?; - Ok(UpdateFileOutput { updated_at }) +#[ic_cdk::update] +fn move_file(input: MoveInput, access_token: Option) -> Result { + let now_ms = ic_cdk::api::time() 
/ MILLISECONDS; + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.write_permission(&ic_cdk::caller(), &canister, access_token, now_ms / 1000) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; + + if !permission::check_file_delete(&ps, &canister, input.from) { + Err("permission denied".to_string())?; + } + + if !permission::check_file_create(&ps, &canister, input.to) { + Err("permission denied".to_string())?; + } + + store::fs::move_file(input.id, input.from, input.to, now_ms)?; + Ok(UpdateFileOutput { updated_at: now_ms }) } -#[ic_cdk::update(guard = "is_controller_or_manager")] -fn delete_file(id: u32, _access_token: Option<ByteBuf>) -> Result<bool, String> { - store::fs::delete_file(id, ic_cdk::api::time() / MILLISECONDS) +#[ic_cdk::update] +fn delete_file(id: u32, access_token: Option<ByteBuf>) -> Result<bool, String> { + let now_ms = ic_cdk::api::time() / MILLISECONDS; + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.write_permission(&ic_cdk::caller(), &canister, access_token, now_ms / 1000) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; + + store::fs::delete_file(id, now_ms, |file| { + match permission::check_file_delete(&ps, &canister, file.parent) { + true => Ok(()), + false => Err("permission denied".to_string()), + } + }) } -#[ic_cdk::update(guard = "is_controller_or_manager")] +#[ic_cdk::update] fn batch_delete_subfiles( parent: u32, ids: BTreeSet<u32>, - _access_token: Option<ByteBuf>, + access_token: Option<ByteBuf>, ) -> Result<Vec<u32>, String> { - store::fs::batch_delete_subfiles(parent, ids, ic_cdk::api::time() / MILLISECONDS) + let now_ms = ic_cdk::api::time() / MILLISECONDS; + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.write_permission(&ic_cdk::caller(), &canister, access_token, now_ms / 1000) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; + + if !permission::check_file_delete(&ps, &canister, parent) { + Err("permission denied".to_string())?; + } + + store::fs::batch_delete_subfiles(parent, ids, now_ms) } -#[ic_cdk::update(guard = "is_controller_or_manager")] +#[ic_cdk::update] fn create_folder( input: CreateFolderInput, - _access_token: Option<ByteBuf>, + access_token: Option<ByteBuf>, ) -> Result<CreateFolderOutput, String> { input.validate()?; + let now_ms = ic_cdk::api::time() / MILLISECONDS; + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.write_permission(&ic_cdk::caller(), &canister, access_token, now_ms / 1000) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; + + if !permission::check_folder_create(&ps, &canister, input.parent) { + Err("permission denied".to_string())?; + } let res: Result<CreateFolderOutput, String> = { - let now_ms = ic_cdk::api::time() / MILLISECONDS; let id = store::fs::add_folder(store::FolderMetadata { parent: input.parent, name: input.name, @@ -186,30 +299,82 @@ } } -#[ic_cdk::update(guard = "is_controller_or_manager")] +#[ic_cdk::update] fn update_folder_info( input: UpdateFolderInput, - _access_token: Option<ByteBuf>, + access_token: Option<ByteBuf>, ) -> Result<UpdateFolderOutput, String> { input.validate()?; - let updated_at = ic_cdk::api::time() / MILLISECONDS; - store::fs::update_folder(input, updated_at)?; + let now_ms = ic_cdk::api::time() / MILLISECONDS; + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.write_permission(&ic_cdk::caller(), &canister, access_token, now_ms / 1000) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; - Ok(UpdateFolderOutput { updated_at }) + let id = input.id; + store::fs::update_folder( + input, + now_ms, + |folder| match
permission::check_folder_update(&ps, &canister, id, folder.parent) { + true => Ok(()), + false => Err("permission denied".to_string()), + }, + )?; + + Ok(UpdateFolderOutput { updated_at: now_ms }) } -#[ic_cdk::update(guard = "is_controller_or_manager")] +#[ic_cdk::update] fn move_folder( input: MoveInput, - _access_token: Option<ByteBuf>, + access_token: Option<ByteBuf>, ) -> Result<UpdateFolderOutput, String> { - let updated_at = ic_cdk::api::time() / MILLISECONDS; - store::fs::move_folder(input.id, input.from, input.to, updated_at)?; - Ok(UpdateFolderOutput { updated_at }) + let now_ms = ic_cdk::api::time() / MILLISECONDS; + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.write_permission(&ic_cdk::caller(), &canister, access_token, now_ms / 1000) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; + + if !permission::check_folder_delete(&ps, &canister, input.from) { + Err("permission denied".to_string())?; + } + + if !permission::check_folder_create(&ps, &canister, input.to) { + Err("permission denied".to_string())?; + } + + store::fs::move_folder(input.id, input.from, input.to, now_ms)?; + Ok(UpdateFolderOutput { updated_at: now_ms }) } -#[ic_cdk::update(guard = "is_controller_or_manager")] -fn delete_folder(id: u32, _access_token: Option<ByteBuf>) -> Result<bool, String> { - store::fs::delete_folder(id, ic_cdk::api::time() / MILLISECONDS) +#[ic_cdk::update] +fn delete_folder(id: u32, access_token: Option<ByteBuf>) -> Result<bool, String> { + let now_ms = ic_cdk::api::time() / MILLISECONDS; + let canister = ic_cdk::id(); + let ps = match store::state::with(|s| { + s.write_permission(&ic_cdk::caller(), &canister, access_token, now_ms / 1000) + }) { + Ok(ps) => ps, + Err((_, err)) => { + return Err(err); + } + }; + + store::fs::delete_folder(id, now_ms, |folder| { + match permission::check_folder_delete(&ps, &canister, folder.parent) { + true => Ok(()), + false => Err("permission denied".to_string()), + } + }) } diff --git a/src/ic_oss_bucket/src/lib.rs b/src/ic_oss_bucket/src/lib.rs index f001908..7d1cb54 100644 --- a/src/ic_oss_bucket/src/lib.rs +++ b/src/ic_oss_bucket/src/lib.rs @@ -8,23 +8,18 @@ mod api_http; mod api_init; mod api_query; mod api_update; +mod permission; mod store; use api_http::*; use api_init::CanisterArgs; -use ic_oss_types::{file::*, folder::*}; +use ic_oss_types::{bucket::*, file::*, folder::*, ByteN}; const MILLISECONDS: u64 = 1_000_000; +const SECONDS: u64 = 1_000_000_000; static ANONYMOUS: Principal = Principal::anonymous(); -// pub fn unwrap_trap<T, E: std::fmt::Debug>(res: Result<T, E>, msg: &str) -> T { -// match res { -// Ok(v) => v, -// Err(err) => ic_cdk::trap(&format!("{}, {:?}", msg, err)), -// } -// } - fn is_controller() -> Result<(), String> { let caller = ic_cdk::caller(); if ic_cdk::api::is_controller(&caller) { @@ -34,21 +29,4 @@ fn is_controller() -> Result<(), String> { } } -fn is_controller_or_manager() -> Result<(), String> { - let caller = ic_cdk::caller(); - if ic_cdk::api::is_controller(&caller) || store::state::is_manager(&caller) { - Ok(()) - } else { - Err("user is not a controller or manager".to_string()) - } -} - -fn is_authenticated() -> Result<(), String> { - if ic_cdk::caller() == ANONYMOUS { - Err("anonymous user is not allowed".to_string()) - } else { - Ok(()) - } -} - ic_cdk::export_candid!(); diff --git a/src/ic_oss_bucket/src/permission.rs b/src/ic_oss_bucket/src/permission.rs new file mode 100644 index 0000000..65043c9 --- /dev/null +++ b/src/ic_oss_bucket/src/permission.rs @@ -0,0 +1,274 @@ +use candid::Principal; +use ic_oss_types::permission::{Operation, Permission, PermissionChecker, Policies,
Resource}; + +use crate::store::fs; + +pub fn check_bucket_read(ps: &Policies, bucket: &Principal) -> bool { + ps.has_permission( + &Permission { + resource: Resource::Bucket, + operation: Operation::Read, + constraint: Some(Resource::Other("Info".to_string())), + }, + bucket.to_string().as_str(), + ) +} + +pub fn check_folder_list(ps: &Policies, bucket: &Principal, parent: u32) -> bool { + if !ps.has_permission( + &Permission { + resource: Resource::Bucket, + operation: Operation::List, + constraint: Some(Resource::Folder), + }, + bucket.to_string().as_str(), + ) { + let ancestors: Vec<String> = fs::get_ancestors(parent) + .into_iter() + .map(|f| f.id.to_string()) + .collect(); + let rs: Vec<&str> = ancestors.iter().map(|id| id.as_str()).collect(); + if !ps.has_permission( + &Permission { + resource: Resource::Folder, + operation: Operation::List, + constraint: None, + }, + rs.as_slice(), + ) { + return false; + } + } + true +} + +pub fn check_folder_read(ps: &Policies, bucket: &Principal, id: u32) -> bool { + if !ps.has_permission( + &Permission { + resource: Resource::Bucket, + operation: Operation::Read, + constraint: Some(Resource::Folder), + }, + bucket.to_string().as_str(), + ) { + let ancestors: Vec<String> = fs::get_ancestors(id) + .into_iter() + .map(|f| f.id.to_string()) + .collect(); + let rs: Vec<&str> = ancestors.iter().map(|id| id.as_str()).collect(); + if !ps.has_permission( + &Permission { + resource: Resource::Folder, + operation: Operation::Read, + constraint: Some(Resource::Folder), + }, + rs.as_slice(), + ) { + return false; + } + } + true +} + +pub fn check_file_list(ps: &Policies, bucket: &Principal, parent: u32) -> bool { + if !ps.has_permission( + &Permission { + resource: Resource::Bucket, + operation: Operation::List, + constraint: Some(Resource::File), + }, + bucket.to_string().as_str(), + ) { + let ancestors: Vec<String> = fs::get_ancestors(parent) + .into_iter() + .map(|f| f.id.to_string()) + .collect(); + let rs: Vec<&str> = ancestors.iter().map(|id| id.as_str()).collect(); + if !ps.has_permission( + &Permission { + resource: Resource::Folder, + operation: Operation::List, + constraint: Some(Resource::File), + }, + rs.as_slice(), + ) { + return false; + } + } + true +} + +pub fn check_file_read(ps: &Policies, bucket: &Principal, id: u32, parent: u32) -> bool { + if !ps.has_permission( + &Permission { + resource: Resource::File, + operation: Operation::Read, + constraint: None, + }, + id.to_string().as_str(), + ) && !ps.has_permission( + &Permission { + resource: Resource::Bucket, + operation: Operation::Read, + constraint: Some(Resource::File), + }, + bucket.to_string().as_str(), + ) { + let ancestors: Vec<String> = fs::get_ancestors(parent) + .into_iter() + .map(|f| f.id.to_string()) + .collect(); + let rs: Vec<&str> = ancestors.iter().map(|id| id.as_str()).collect(); + if !ps.has_permission( + &Permission { + resource: Resource::Folder, + operation: Operation::Read, + constraint: Some(Resource::File), + }, + rs.as_slice(), + ) { + return false; + } + } + true +} + +pub fn check_file_create(ps: &Policies, bucket: &Principal, parent: u32) -> bool { + if !ps.has_permission( + &Permission { + resource: Resource::Bucket, + operation: Operation::Write, + constraint: Some(Resource::File), + }, + bucket.to_string().as_str(), + ) { + let ancestors: Vec<String> = fs::get_ancestors(parent) + .into_iter() + .map(|f| f.id.to_string()) + .collect(); + let rs: Vec<&str> = ancestors.iter().map(|id| id.as_str()).collect(); + if !ps.has_permission( + &Permission { + resource: Resource::Folder, + operation:
Operation::Write, + constraint: Some(Resource::File), + }, + rs.as_slice(), + ) { + return false; + } + } + true +} + +pub fn check_file_delete(ps: &Policies, bucket: &Principal, parent: u32) -> bool { + if !ps.has_permission( + &Permission { + resource: Resource::Bucket, + operation: Operation::Delete, + constraint: Some(Resource::File), + }, + bucket.to_string().as_str(), + ) { + let ancestors: Vec<String> = fs::get_ancestors(parent) + .into_iter() + .map(|f| f.id.to_string()) + .collect(); + let rs: Vec<&str> = ancestors.iter().map(|id| id.as_str()).collect(); + if !ps.has_permission( + &Permission { + resource: Resource::Folder, + operation: Operation::Delete, + constraint: Some(Resource::File), + }, + rs.as_slice(), + ) { + return false; + } + } + true +} + +pub fn check_file_update(ps: &Policies, bucket: &Principal, id: u32, parent: u32) -> bool { + if !ps.has_permission( + &Permission { + resource: Resource::File, + operation: Operation::Write, + constraint: None, + }, + id.to_string().as_str(), + ) { + return check_file_create(ps, bucket, parent); + } + true +} + +pub fn check_folder_create(ps: &Policies, bucket: &Principal, parent: u32) -> bool { + if !ps.has_permission( + &Permission { + resource: Resource::Bucket, + operation: Operation::Write, + constraint: Some(Resource::Folder), + }, + bucket.to_string().as_str(), + ) { + let ancestors: Vec<String> = fs::get_ancestors(parent) + .into_iter() + .map(|f| f.id.to_string()) + .collect(); + let rs: Vec<&str> = ancestors.iter().map(|id| id.as_str()).collect(); + if !ps.has_permission( + &Permission { + resource: Resource::Folder, + operation: Operation::Write, + constraint: Some(Resource::Folder), + }, + rs.as_slice(), + ) { + return false; + } + } + true +} + +pub fn check_folder_delete(ps: &Policies, bucket: &Principal, parent: u32) -> bool { + if !ps.has_permission( + &Permission { + resource: Resource::Bucket, + operation: Operation::Delete, + constraint: Some(Resource::Folder), + }, + bucket.to_string().as_str(), + ) { + let ancestors: Vec<String> = fs::get_ancestors(parent) + .into_iter() + .map(|f| f.id.to_string()) + .collect(); + let rs: Vec<&str> = ancestors.iter().map(|id| id.as_str()).collect(); + if !ps.has_permission( + &Permission { + resource: Resource::Folder, + operation: Operation::Delete, + constraint: Some(Resource::Folder), + }, + rs.as_slice(), + ) { + return false; + } + } + true +} + +pub fn check_folder_update(ps: &Policies, bucket: &Principal, id: u32, parent: u32) -> bool { + if !ps.has_permission( + &Permission { + resource: Resource::Folder, + operation: Operation::Write, + constraint: None, + }, + id.to_string().as_str(), + ) { + return check_folder_create(ps, bucket, parent); + } + true +}
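The checks above share one fallback chain: an explicit per-resource grant, then a bucket-wide grant, then grants on the folder ancestors. A hypothetical unit test (not in this diff) pinning down the manager fast path, assuming `Policies::all()` grants every permission, which is what `Bucket::read_permission`/`write_permission` in store.rs hand to managers:

```rust
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn policies_all_passes_every_check() {
        // Managers and auditors receive Policies::all(), so every helper in
        // this module must short-circuit to true without consulting the
        // folder tree in store::fs.
        let ps = Policies::all();
        let bucket = Principal::from_text("mmrxu-fqaaa-aaaap-ahhna-cai").unwrap();
        assert!(check_bucket_read(&ps, &bucket));
        assert!(check_file_read(&ps, &bucket, 1, 0));
        assert!(check_file_create(&ps, &bucket, 0));
        assert!(check_folder_delete(&ps, &bucket, 0));
    }
}
```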
diff --git a/src/ic_oss_bucket/src/store.rs b/src/ic_oss_bucket/src/store.rs index a9a9eb3..7379b0b 100644 --- a/src/ic_oss_bucket/src/store.rs +++ b/src/ic_oss_bucket/src/store.rs @@ -1,14 +1,16 @@ -use candid::{CandidType, Principal}; +use candid::Principal; use ciborium::{from_reader, into_writer}; use ic_http_certification::{ cel::{create_cel_expr, DefaultCelBuilder}, HttpCertification, HttpCertificationPath, HttpCertificationTree, HttpCertificationTreeEntry, }; use ic_oss_types::{ + cwt::{Token, BUCKET_TOKEN_AAD}, file::{ FileChunk, FileInfo, UpdateFileInput, MAX_CHUNK_SIZE, MAX_FILE_SIZE, MAX_FILE_SIZE_PER_CALL, }, folder::{FolderInfo, FolderName, UpdateFolderInput}, + permission::Policies, ByteN, MapValue, }; use ic_stable_structures::{ @@ -29,7 +31,7 @@ use std::{ type Memory = VirtualMemory<DefaultMemoryImpl>; -#[derive(CandidType, Clone, Default, Deserialize, Serialize)] +#[derive(Clone, Default, Deserialize, Serialize)] pub struct Bucket { pub name: String, pub file_count: u64, @@ -39,17 +41,85 @@ pub struct Bucket { pub max_file_size: u64, pub max_folder_depth: u8, pub max_children: u16, - pub status: i8, // -1: archived; 0: readable and writable; 1: readonly - pub visibility: u8, // 0: private; 1: public pub max_custom_data_size: u16, pub enable_hash_index: bool, + pub status: i8, // -1: archived; 0: readable and writable; 1: readonly + pub visibility: u8, // 0: private; 1: public pub managers: BTreeSet<Principal>, // managers can read and write // auditors can read and list even if the bucket is private pub auditors: BTreeSet<Principal>, // used to verify the request token signed with SECP256K1 pub trusted_ecdsa_pub_keys: Vec<ByteBuf>, // used to verify the request token signed with ED25519 - pub trusted_eddsa_pub_keys: Vec<ByteBuf>, + pub trusted_eddsa_pub_keys: Vec<ByteN<32>>, +} + +impl Bucket { + pub fn read_permission( + &self, + caller: &Principal, + canister: &Principal, + sign1_token: Option<ByteBuf>, + now_sec: u64, + ) -> Result<Policies, (u16, String)> { + if self.status < 0 { + if self.managers.contains(caller) || self.auditors.contains(caller) { + return Ok(Policies::all()); + } + + Err((403, "bucket is archived".to_string()))?; + } + + if self.visibility > 0 || self.managers.contains(caller) || self.auditors.contains(caller) { + return Ok(Policies::all()); + } + + if let Some(token) = sign1_token { + let token = Token::from_ed25519_sign1( + &token, + &self.trusted_eddsa_pub_keys, + BUCKET_TOKEN_AAD, + now_sec as i64, + ) + .map_err(|err| (401, err))?; + if &token.subject == caller && &token.audience == canister { + return Ok(token.scope); + } + } + + Err((401, "Unauthorized".to_string())) + } + + pub fn write_permission( + &self, + caller: &Principal, + canister: &Principal, + sign1_token: Option<ByteBuf>, + now_sec: u64, + ) -> Result<Policies, (u16, String)> { + if self.status != 0 { + Err((403, "bucket is not writeable".to_string()))?; + } + + if self.managers.contains(caller) { + return Ok(Policies::all()); + } + + if let Some(token) = sign1_token { + let token = Token::from_ed25519_sign1( + &token, + &self.trusted_eddsa_pub_keys, + BUCKET_TOKEN_AAD, + now_sec as i64, + ) + .map_err(|err| (401, err))?; + if &token.subject == caller && &token.audience == canister { + return Ok(token.scope); + } + } + + Err((401, "Unauthorized".to_string())) + } } impl Storable for Bucket {
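For context, `Token::from_ed25519_sign1` (added in cwt.rs at the end of this diff) verifies a COSE Sign1 envelope over CWT claims against `trusted_eddsa_pub_keys`. A client-side sketch of minting such a token with the same `coset` and `ed25519-dalek` crates this PR adds; the claim layout here (subject/audience as principal text, expiry, and a scope text claim such as "Folder.*:1 Bucket.Read.*" from the README example) is an assumption, since the diff truncates before the claim-parsing code:

```rust
use coset::{
    cbor::value::Value,
    cwt::{ClaimsSetBuilder, Timestamp},
    iana::{Algorithm, CwtClaimName},
    CborSerializable, CoseSign1Builder, HeaderBuilder,
};
use ed25519_dalek::{Signer, SigningKey};

// Hypothetical client-side counterpart to Token::from_ed25519_sign1. The
// signing key must correspond to one of the bucket's trusted_eddsa_pub_keys.
fn sign_bucket_token(
    key: &SigningKey,
    subject: &str,        // caller principal, text form
    audience: &str,       // bucket canister id, text form
    scope: &str,          // e.g. "Folder.*:1 Bucket.Read.*"
    expires_at_sec: i64,
) -> Result<Vec<u8>, String> {
    let claims = ClaimsSetBuilder::new()
        .subject(subject.to_string())
        .audience(audience.to_string())
        .expiration_time(Timestamp::WholeSeconds(expires_at_sec))
        .claim(CwtClaimName::Scope, Value::Text(scope.to_string()))
        .build();
    let payload = claims.to_vec().map_err(|err| format!("{err:?}"))?;
    let sign1 = CoseSign1Builder::new()
        .protected(HeaderBuilder::new().algorithm(Algorithm::EdDSA).build())
        .payload(payload)
        // BUCKET_TOKEN_AAD binds the signature to the bucket token format.
        .create_signature(ic_oss_types::cwt::BUCKET_TOKEN_AAD, |data| {
            key.sign(data).to_bytes().to_vec()
        })
        .build();
    sign1.to_vec().map_err(|err| format!("{err:?}"))
}
```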
@@ -498,7 +568,12 @@ impl FoldersTree { }); } - fn delete_folder(&mut self, id: u32, now_ms: u64) -> Result<bool, String> { + fn delete_folder( + &mut self, + id: u32, + now_ms: u64, + checker: impl FnOnce(&FolderMetadata) -> Result<(), String>, + ) -> Result<bool, String> { if id == 0 { Err("root folder cannot be deleted".to_string())?; } @@ -506,6 +581,7 @@ impl FoldersTree { let parent_id = match self.get(&id) { None => return Ok(false), Some(folder) => { + checker(folder)?; if folder.status > 0 { Err("folder is readonly".to_string())?; } @@ -595,10 +671,6 @@ pub mod state { pub static DEFAULT_CERT_ENTRY: Lazy<HttpCertificationTreeEntry<'static>> = Lazy::new(|| HttpCertificationTreeEntry::new(&*DEFAULT_EXPR_PATH, *DEFAULT_CERTIFICATION)); - pub fn is_manager(caller: &Principal) -> bool { - BUCKET.with(|r| r.borrow().managers.contains(caller)) - } - pub fn with<R>(f: impl FnOnce(&Bucket) -> R) -> R { BUCKET.with(|r| f(&r.borrow())) } @@ -686,6 +758,13 @@ pub mod fs { FS_METADATA.with(|r| r.borrow().get(&id)) } + pub fn get_ancestors(start: u32) -> Vec<FolderName> { + FOLDERS.with(|r| { + let m = r.borrow(); + m.ancestors(start) + }) + } + pub fn get_folder_ancestors(id: u32) -> Vec<FolderName> { FOLDERS.with(|r| { let m = r.borrow(); @@ -823,7 +902,11 @@ pub mod fs { }) } - pub fn update_folder(change: UpdateFolderInput, now_ms: u64) -> Result<(), String> { + pub fn update_folder( + change: UpdateFolderInput, + now_ms: u64, + checker: impl FnOnce(&FolderMetadata) -> Result<(), String>, + ) -> Result<(), String> { if change.id == 0 { Err("root folder cannot be updated".to_string())?; } @@ -833,6 +916,8 @@ pub mod fs { match m.get_mut(&change.id) { None => Err(format!("folder not found: {}", change.id)), Some(folder) => { + checker(folder)?; + let status = change.status.unwrap_or(folder.status); if folder.status > 0 && status > 0 { Err("folder is readonly".to_string())?; } @@ -848,12 +933,17 @@ pub mod fs { }) } - pub fn update_file(change: UpdateFileInput, now_ms: u64) -> Result<(), String> { + pub fn update_file( + change: UpdateFileInput, + now_ms: u64, + checker: impl FnOnce(&FileMetadata) -> Result<(), String>, + ) -> Result<(), String> { FS_METADATA.with(|r| { let mut m = r.borrow_mut(); match m.get(&change.id) { None => Err(format!("file not found: {}", change.id)), Some(mut file) => { + checker(&file)?; let prev_hash = file.hash; let status = change.status.unwrap_or(file.status); if file.status > 0 && status > 0 { @@ -982,6 +1072,7 @@ pub mod fs { chunk_index: u32, now_ms: u64, chunk: Vec<u8>, + checker: impl FnOnce(&FileMetadata) -> Result<(), String>, ) -> Result<u64, String> { if chunk.is_empty() { Err("empty chunk".to_string())?; } @@ -1004,6 +1095,8 @@ pub mod fs { Err(format!("file {} is not writeable", file_id))?; } + checker(&file)?; + file.updated_at = now_ms; file.filled += chunk.len() as u64; if file.filled > max { @@ -1036,11 +1129,19 @@ pub mod fs { }) } - pub fn delete_folder(id: u32, now_ms: u64) -> Result<bool, String> { - FOLDERS.with(|r| r.borrow_mut().delete_folder(id, now_ms)) + pub fn delete_folder( + id: u32, + now_ms: u64, + checker: impl FnOnce(&FolderMetadata) -> Result<(), String>, + ) -> Result<bool, String> { + FOLDERS.with(|r| r.borrow_mut().delete_folder(id, now_ms, checker)) } - pub fn delete_file(id: u32, now_ms: u64) -> Result<bool, String> { + pub fn delete_file( + id: u32, + now_ms: u64, + checker: impl FnOnce(&FileMetadata) -> Result<(), String>, + ) -> Result<bool, String> { FS_METADATA.with(|r| { let mut m = r.borrow_mut(); match m.get(&id) { @@ -1049,6 +1150,8 @@ Err("file is readonly".to_string())?; } + checker(&file)?; + FOLDERS.with(|r| { let mut m = r.borrow_mut(); let parent = m.parent_to_update(file.parent)?; @@ -1166,9 +1269,9 @@ mod test { let f1_meta = fs::get_file(f1).unwrap(); assert_eq!(f1_meta.name, "f1.bin"); - assert!(fs::update_chunk(0, 0, 999, [0u8; 32].to_vec()).is_err()); - let _ = fs::update_chunk(f1, 0, 999, [0u8; 32].to_vec()).unwrap(); - let _ = fs::update_chunk(f1, 1, 1000, [0u8; 32].to_vec()).unwrap(); + assert!(fs::update_chunk(0, 0, 999, [0u8; 32].to_vec(), |_| Ok(())).is_err()); + let _ = fs::update_chunk(f1, 0, 999, [0u8; 32].to_vec(), |_| Ok(())).unwrap(); + let _ = fs::update_chunk(f1, 1, 1000, [0u8; 32].to_vec(), |_| Ok(())).unwrap(); let f1_data = fs::get_full_chunks(f1).unwrap(); assert_eq!(f1_data, [0u8; 64]); @@ -1192,11 +1295,11 @@ mod test { }) .unwrap(); assert_eq!(f2, 2); - fs::update_chunk(f2, 0, 999, [0u8; 16].to_vec()).unwrap(); - fs::update_chunk(f2, 1, 1000, [1u8; 16].to_vec()).unwrap(); - fs::update_chunk(f1, 3, 1000, [1u8; 16].to_vec()).unwrap(); - fs::update_chunk(f2, 2, 1000, [2u8; 16].to_vec()).unwrap(); - fs::update_chunk(f1, 2, 1000, [2u8; 16].to_vec()).unwrap(); + fs::update_chunk(f2, 0, 999, [0u8; 16].to_vec(), |_| Ok(())).unwrap(); + fs::update_chunk(f2, 1, 1000, [1u8; 16].to_vec(), |_|
Ok(())).unwrap(); + fs::update_chunk(f1, 3, 1000, [1u8; 16].to_vec(), |_| Ok(())).unwrap(); + fs::update_chunk(f2, 2, 1000, [2u8; 16].to_vec(), |_| Ok(())).unwrap(); + fs::update_chunk(f1, 2, 1000, [2u8; 16].to_vec(), |_| Ok(())).unwrap(); let f1_data = fs::get_full_chunks(f1).unwrap(); assert_eq!(&f1_data[0..64], &[0u8; 64]); @@ -1343,10 +1446,10 @@ mod test { fs::batch_delete_subfiles(0, BTreeSet::from([2, 1]), 999).unwrap(), vec![1, 2] ); - assert!(fs::delete_folder(1, 999).is_err()); - assert!(fs::delete_folder(2, 999).unwrap()); - assert!(fs::delete_folder(1, 999).unwrap()); - assert!(fs::delete_folder(0, 999).is_err()); + assert!(fs::delete_folder(1, 999, |_| Ok(())).is_err()); + assert!(fs::delete_folder(2, 999, |_| Ok(())).unwrap()); + assert!(fs::delete_folder(1, 999, |_| Ok(())).unwrap()); + assert!(fs::delete_folder(0, 999, |_| Ok(())).is_err()); assert_eq!(FOLDERS.with(|r| r.borrow().len()), 1); assert_eq!(HASHS.with(|r| r.borrow().len()), 0); @@ -1762,11 +1865,11 @@ mod test { fn test_folders_delete_folder() { let mut tree = FoldersTree::new(); assert!(tree - .delete_folder(0, 99) + .delete_folder(0, 99, |_| Ok(())) .err() .unwrap() .contains("root folder cannot be deleted")); - assert!(!tree.delete_folder(1, 99).unwrap()); + assert!(!tree.delete_folder(1, 99, |_| Ok(())).unwrap()); tree.add_folder( FolderMetadata { parent: 0, @@ -1781,25 +1884,25 @@ mod test { ) .unwrap(); assert!(tree - .delete_folder(1, 99) + .delete_folder(1, 99, |_| Ok(())) .err() .unwrap() .contains("folder is readonly")); tree.get_mut(&1).unwrap().status = 0; assert!(tree - .delete_folder(1, 99) + .delete_folder(1, 99, |_| Ok(())) .err() .unwrap() .contains("folder is not empty")); tree.get_mut(&1).unwrap().files.clear(); tree.get_mut(&0).unwrap().status = 1; assert!(tree - .delete_folder(1, 99) + .delete_folder(1, 99, |_| Ok(())) .err() .unwrap() .contains("parent folder is not writeable")); tree.get_mut(&0).unwrap().status = 0; - assert!(tree.delete_folder(1, 99).unwrap()); + assert!(tree.delete_folder(1, 99, |_| Ok(())).unwrap()); assert_eq!(tree.len(), 1); assert_eq!(tree.get_mut(&0).unwrap().folders, BTreeSet::new()); assert_eq!(tree.get_mut(&0).unwrap().updated_at, 99); diff --git a/src/ic_oss_types/Cargo.toml b/src/ic_oss_types/Cargo.toml index 9737d1b..6d723f5 100644 --- a/src/ic_oss_types/Cargo.toml +++ b/src/ic_oss_types/Cargo.toml @@ -19,4 +19,11 @@ crc32fast = { workspace = true } num-traits = { workspace = true } url = { workspace = true } ciborium = { workspace = true } +coset = { workspace = true } icrc-ledger-types = { workspace = true } +ed25519-dalek = { workspace = true } + +[dev-dependencies] +hex = { package = "hex-conservative", version = "0.2", default-features = false, features = [ + "alloc", +] } diff --git a/src/ic_oss_types/src/bucket.rs b/src/ic_oss_types/src/bucket.rs index e69de29..f8c52e1 100644 --- a/src/ic_oss_types/src/bucket.rs +++ b/src/ic_oss_types/src/bucket.rs @@ -0,0 +1,95 @@ +use candid::{CandidType, Principal}; +use serde::{Deserialize, Serialize}; +use serde_bytes::ByteBuf; +use std::collections::BTreeSet; + +use crate::{file::MAX_FILE_SIZE, ByteN}; + +#[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)] +pub struct BucketInfo { + pub name: String, + pub file_count: u64, + pub file_id: u32, + pub folder_count: u64, + pub folder_id: u32, + pub max_file_size: u64, + pub max_folder_depth: u8, + pub max_children: u16, + pub max_custom_data_size: u16, + pub enable_hash_index: bool, + pub status: i8, // -1: archived; 0: readable and 
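Note on the checker parameter threaded through fs::update_folder, update_file, update_chunk, delete_folder and delete_file above: the closure runs against the target's metadata before any state is mutated, which lets endpoint code inject caller-specific authorization without the fs module knowing about principals or tokens. A minimal sketch of the intended call shape; it assumes the bucket canister exposes the module as crate::store::fs and that a Policies value was already resolved via Bucket::write_permission. The guarded_update_chunk wrapper and the permission literals are illustrative, not part of this change:

use ic_oss_types::permission::{Operation, Permission, PermissionChecker, Policies, Resource};

// Hypothetical endpoint wiring (not in this diff): `policies` would come from
// Bucket::write_permission(...) after COSE token verification.
fn guarded_update_chunk(
    policies: &Policies,
    file_id: u32,
    chunk_index: u32,
    now_ms: u64,
    chunk: Vec<u8>,
) -> Result<u64, String> {
    let want = Permission {
        resource: Resource::File,
        operation: Operation::Write,
        constraint: None,
    };
    crate::store::fs::update_chunk(file_id, chunk_index, now_ms, chunk, |file| {
        // The checker sees the target file's metadata before any mutation,
        // so a token scope such as "File.Write:<parent folder id>" can be
        // enforced per parent folder.
        let parent = file.parent.to_string();
        let candidates: &[&str] = &[parent.as_str()];
        if policies.iter().any(|p| p.has_permission(&want, candidates)) {
            Ok(())
        } else {
            Err("permission denied".to_string())
        }
    })
}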
diff --git a/src/ic_oss_types/src/bucket.rs b/src/ic_oss_types/src/bucket.rs
index e69de29..f8c52e1 100644
--- a/src/ic_oss_types/src/bucket.rs
+++ b/src/ic_oss_types/src/bucket.rs
@@ -0,0 +1,95 @@
+use candid::{CandidType, Principal};
+use serde::{Deserialize, Serialize};
+use serde_bytes::ByteBuf;
+use std::collections::BTreeSet;
+
+use crate::{file::MAX_FILE_SIZE, ByteN};
+
+#[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize, PartialEq, Eq)]
+pub struct BucketInfo {
+    pub name: String,
+    pub file_count: u64,
+    pub file_id: u32,
+    pub folder_count: u64,
+    pub folder_id: u32,
+    pub max_file_size: u64,
+    pub max_folder_depth: u8,
+    pub max_children: u16,
+    pub max_custom_data_size: u16,
+    pub enable_hash_index: bool,
+    pub status: i8,     // -1: archived; 0: readable and writable; 1: readonly
+    pub visibility: u8, // 0: private; 1: public
+    pub managers: BTreeSet<Principal>, // managers can read and write
+    // auditors can read and list even if the bucket is private
+    pub auditors: BTreeSet<Principal>,
+    // used to verify the request token signed with SECP256K1
+    pub trusted_ecdsa_pub_keys: Vec<ByteBuf>,
+    // used to verify the request token signed with ED25519
+    pub trusted_eddsa_pub_keys: Vec<ByteN<32>>,
+}
+
+#[derive(CandidType, Clone, Debug, Default, Deserialize, Serialize)]
+pub struct UpdateBucketInput {
+    pub name: Option<String>,
+    pub max_file_size: Option<u64>,
+    pub max_folder_depth: Option<u8>,
+    pub max_children: Option<u16>,
+    pub max_custom_data_size: Option<u16>,
+    pub enable_hash_index: Option<bool>,
+    pub status: Option<i8>,     // -1: archived; 0: readable and writable; 1: readonly
+    pub visibility: Option<u8>, // 0: private; 1: public
+    pub trusted_ecdsa_pub_keys: Option<Vec<ByteBuf>>,
+    pub trusted_eddsa_pub_keys: Option<Vec<ByteN<32>>>,
+}
+
+impl UpdateBucketInput {
+    pub fn validate(&self) -> Result<(), String> {
+        if let Some(name) = &self.name {
+            if name.trim().is_empty() {
+                return Err("invalid bucket name".to_string());
+            }
+        }
+        if let Some(max_file_size) = self.max_file_size {
+            if max_file_size == 0 {
+                return Err("max_file_size should be greater than 0".to_string());
+            }
+            if max_file_size < MAX_FILE_SIZE {
+                return Err(format!(
+                    "max_file_size should be greater than or equal to {}",
+                    MAX_FILE_SIZE
+                ));
+            }
+        }
+
+        if let Some(max_folder_depth) = self.max_folder_depth {
+            if max_folder_depth == 0 {
+                return Err("max_folder_depth should be greater than 0".to_string());
+            }
+        }
+
+        if let Some(max_children) = self.max_children {
+            if max_children == 0 {
+                return Err("max_children should be greater than 0".to_string());
+            }
+        }
+
+        if let Some(max_custom_data_size) = self.max_custom_data_size {
+            if max_custom_data_size == 0 {
+                return Err("max_custom_data_size should be greater than 0".to_string());
+            }
+        }
+
+        if let Some(status) = self.status {
+            if !(-1i8..=1i8).contains(&status) {
+                return Err("status should be -1, 0 or 1".to_string());
+            }
+        }
+
+        if let Some(visibility) = self.visibility {
+            if visibility != 0 && visibility != 1 {
+                return Err("visibility should be 0 or 1".to_string());
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/src/ic_oss_types/src/cwt.rs b/src/ic_oss_types/src/cwt.rs
new file mode 100644
index 0000000..bb03e28
--- /dev/null
+++ b/src/ic_oss_types/src/cwt.rs
@@ -0,0 +1,174 @@
+use candid::Principal;
+use coset::{
+    cwt::{ClaimName, ClaimsSet, Timestamp},
+    iana::{Algorithm, CwtClaimName},
+    CborSerializable, CoseSign1, CoseSign1Builder, HeaderBuilder,
+};
+use ed25519_dalek::{Signature, VerifyingKey};
+use num_traits::ToPrimitive;
+
+use crate::{bytes::ByteN, permission::Policies};
+
+static SCOPE_NAME: ClaimName = ClaimName::Assigned(CwtClaimName::Scope);
+const CLOCK_SKEW: i64 = 5 * 60; // 5 minutes
+
+pub static BUCKET_TOKEN_AAD: &[u8] = b"ic_oss_bucket";
+pub static CLUSTER_TOKEN_AAD: &[u8] = b"ic_oss_cluster";
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct Token {
+    pub subject: Principal,
+    pub audience: Principal,
+    pub scope: Policies,
+}
+
+impl Token {
+    pub fn from_ed25519_sign1(
+        sign1_token: &[u8],
+        pub_keys: &[ByteN<32>],
+        aad: &[u8],
+        now_sec: i64,
+    ) -> Result<Self, String> {
+        let cs1 = CoseSign1::from_slice(sign1_token)
+            .map_err(|err| format!("invalid COSE sign1 token: {}", err))?;
+        let keys: Vec<VerifyingKey> = pub_keys
+            .iter()
+            .map(|key| {
+                VerifyingKey::from_bytes(key).map_err(|_| "invalid verifying key".to_string())
+            })
+            .collect::<Result<_, _>>()?;
+        let tbs_data = cs1.tbs_data(aad);
+        let sig = Signature::from_slice(&cs1.signature).map_err(|_| "invalid signature")?;
+        if !keys
+            .iter()
+            .any(|key| key.verify_strict(&tbs_data, &sig).is_ok())
+        {
+            Err("signature verification failed".to_string())?;
+        }
+
+        let claims = ClaimsSet::from_slice(&cs1.payload.unwrap_or_default())
+            .map_err(|err| format!("invalid claims: {}", err))?;
+        if let Some(ref exp) = claims.expiration_time {
+            let exp = match exp {
+                Timestamp::WholeSeconds(v) => *v,
+                Timestamp::FractionalSeconds(v) => (*v).to_i64().unwrap_or_default(),
+            };
+            if exp < now_sec - CLOCK_SKEW {
+                return Err("token expired".to_string());
+            }
+        }
+        if let Some(ref nbf) = claims.not_before {
+            let nbf = match nbf {
+                Timestamp::WholeSeconds(v) => *v,
+                Timestamp::FractionalSeconds(v) => (*v).to_i64().unwrap_or_default(),
+            };
+            if nbf > now_sec + CLOCK_SKEW {
+                return Err("token not yet valid".to_string());
+            }
+        }
+        Self::try_from(claims)
+    }
+
+    pub fn to_claims_set(&self, now_sec: i64, expiration_sec: i64) -> ClaimsSet {
+        ClaimsSet {
+            issuer: None,
+            subject: Some(self.subject.to_text()),
+            audience: Some(self.audience.to_text()),
+            expiration_time: Some(Timestamp::WholeSeconds(now_sec + expiration_sec)),
+            not_before: Some(Timestamp::WholeSeconds(now_sec)),
+            issued_at: Some(Timestamp::WholeSeconds(now_sec)),
+            cwt_id: None,
+            rest: vec![(SCOPE_NAME.clone(), self.scope.to_string().into())],
+        }
+    }
+}
+
+pub fn ed25519_sign1(cs: ClaimsSet, key_id: Option<Vec<u8>>) -> Result<CoseSign1, String> {
+    let payload = cs.to_vec().map_err(|err| err.to_string())?;
+    let mut protected = HeaderBuilder::new().algorithm(Algorithm::EdDSA);
+    if let Some(key_id) = key_id {
+        protected = protected.key_id(key_id);
+    }
+
+    Ok(CoseSign1Builder::new()
+        .protected(protected.build())
+        .payload(payload)
+        .build())
+}
+
+impl TryFrom<ClaimsSet> for Token {
+    type Error = String;
+
+    fn try_from(claims: ClaimsSet) -> Result<Self, Self::Error> {
+        let scope = claims
+            .rest
+            .iter()
+            .find(|(key, _)| key == &SCOPE_NAME)
+            .ok_or("missing scope")?;
+        let scope = scope.1.as_text().ok_or("invalid scope text")?;
+
+        Ok(Token {
+            subject: Principal::from_text(claims.subject.as_ref().ok_or("missing subject")?)
+                .map_err(|err| format!("invalid subject: {}", err))?,
+            audience: Principal::from_text(claims.audience.as_ref().ok_or("missing audience")?)
+                .map_err(|err| format!("invalid audience: {}", err))?,
+            scope: Policies::try_from(scope)?,
+        })
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use crate::permission::{Operation, Permission, Policies, Policy, Resource, Resources};
+    use ed25519_dalek::Signer;
+
+    #[test]
+    fn test_ed25519_token() {
+        let secret_key = [8u8; 32];
+        let signing_key = ed25519_dalek::SigningKey::from_bytes(&secret_key);
+        let pub_key: &VerifyingKey = signing_key.as_ref();
+        let pub_key = pub_key.to_bytes();
+        let ps = Policies::from([
+            Policy {
+                permission: Permission {
+                    resource: Resource::Bucket,
+                    operation: Operation::Read,
+                    constraint: Some(Resource::All),
+                },
+                resources: Resources::from([]),
+            },
+            Policy {
+                permission: Permission {
+                    resource: Resource::Folder,
+                    operation: Operation::All,
+                    constraint: None,
+                },
+                resources: Resources::from(["1".to_string()]),
+            },
+        ]);
+        let token = Token {
+            subject: Principal::from_text(
+                "z7wjp-v6fe3-kksu5-26f64-dedtw-j7ndj-57onx-qga6c-et5e3-njx53-tae",
+            )
+            .unwrap(),
+            audience: Principal::from_text("mmrxu-fqaaa-aaaap-ahhna-cai").unwrap(),
+            scope: ps,
+        };
+        let now_sec = 1720676064;
+        let claims = token.to_claims_set(now_sec, 3600);
+        let mut sign1 = ed25519_sign1(claims, None).unwrap();
+        let tbs_data = sign1.tbs_data(BUCKET_TOKEN_AAD);
+        let sig = signing_key.sign(&tbs_data).to_bytes();
+        sign1.signature = sig.to_vec();
+        let sign1_token = sign1.to_vec().unwrap();
+        println!("principal: {:?}", &Principal::anonymous().to_text());
+        println!("pub_key: {:?}", &pub_key);
+        println!("sign1_token: {:?}", &sign1_token);
+
+        let token2 =
+            Token::from_ed25519_sign1(&sign1_token, &[pub_key.into()], BUCKET_TOKEN_AAD, now_sec)
+                .unwrap();
+        assert_eq!(token, token2);
+    }
+}
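Note on CLOCK_SKEW in cwt.rs above: both exp and nbf are checked with a five-minute allowance, so a token remains acceptable for up to 300 seconds past its expiration. A small sketch pinning that boundary down with the API added in this file; the mint helper and the timestamps are illustrative assumptions, not part of this diff:

use candid::Principal;
use coset::CborSerializable;
use ed25519_dalek::{Signer, SigningKey};
use ic_oss_types::cwt::{ed25519_sign1, Token, BUCKET_TOKEN_AAD};
use ic_oss_types::permission::Policies;

// Illustrative helper: sign a Token's claims and emit the serialized
// COSE_Sign1 bytes that a bucket canister expects.
fn mint(sk: &SigningKey, token: &Token, now_sec: i64, ttl_sec: i64) -> Vec<u8> {
    let mut sign1 = ed25519_sign1(token.to_claims_set(now_sec, ttl_sec), None).unwrap();
    let sig = sk.sign(&sign1.tbs_data(BUCKET_TOKEN_AAD));
    sign1.signature = sig.to_bytes().to_vec();
    sign1.to_vec().unwrap()
}

#[test]
fn expiry_is_skew_tolerant() {
    let sk = SigningKey::from_bytes(&[8u8; 32]);
    let pk = sk.verifying_key().to_bytes();
    let token = Token {
        subject: Principal::anonymous(),
        audience: Principal::from_text("mmrxu-fqaaa-aaaap-ahhna-cai").unwrap(),
        scope: Policies::all(),
    };
    let now = 1_720_676_064;
    let bytes = mint(&sk, &token, now, 3600);
    // Still accepted 299s after exp, inside the 300s skew window...
    let t = now + 3600 + 299;
    assert!(Token::from_ed25519_sign1(&bytes, &[pk.into()], BUCKET_TOKEN_AAD, t).is_ok());
    // ...rejected once past exp + CLOCK_SKEW.
    let t = now + 3600 + 301;
    assert!(Token::from_ed25519_sign1(&bytes, &[pk.into()], BUCKET_TOKEN_AAD, t).is_err());
}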
diff --git a/src/ic_oss_types/src/lib.rs b/src/ic_oss_types/src/lib.rs
index 9b659f6..0d86eff 100644
--- a/src/ic_oss_types/src/lib.rs
+++ b/src/ic_oss_types/src/lib.rs
@@ -7,6 +7,7 @@ use serde::Serialize;
 
 pub mod bucket;
 pub mod cluster;
+pub mod cwt;
 pub mod file;
 pub mod folder;
 pub mod permission;
diff --git a/src/ic_oss_types/src/permission.rs b/src/ic_oss_types/src/permission.rs
index 3ccf103..f337609 100644
--- a/src/ic_oss_types/src/permission.rs
+++ b/src/ic_oss_types/src/permission.rs
@@ -29,7 +29,7 @@ pub enum Resource {
 }
 
 impl Resource {
-    fn check(&self, value: &Resource) -> bool {
+    pub fn check(&self, value: &Resource) -> bool {
         match self {
             Self::All => true,
             other => value == other,
@@ -80,7 +80,7 @@ pub enum Operation {
 }
 
 impl Operation {
-    fn check(&self, value: &Operation) -> bool {
+    pub fn check(&self, value: &Operation) -> bool {
         match self {
             Self::All => true,
             other => value == other,
@@ -120,7 +120,7 @@
 }
 
 /// Permission string format: Resource.Operation[.Constraint]
-/// e.g. File.Read Folder.Write Bucket.Read Bucket.Read.BasicInfo
+/// e.g. File.Read Folder.Write Bucket.Read Bucket.Read.Info
 #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
 pub struct Permission {
     pub resource: Resource,
@@ -135,10 +135,17 @@ impl Permission {
         && self.constraint.is_none()
     }
 
-    fn check(&self, value: &Permission) -> bool {
+    pub fn check(&self, value: &Permission) -> bool {
         self.resource.check(&value.resource)
             && self.operation.check(&value.operation)
-            && (self.constraint.is_none() || self.constraint == value.constraint)
+            && self.check_constraint(&value.constraint)
+    }
+
+    pub fn check_constraint(&self, value: &Option<Resource>) -> bool {
+        match self.constraint {
+            None | Some(Resource::All) => true,
+            Some(ref c) => value.as_ref().map_or(false, |v| c == v),
+        }
     }
 }
 
@@ -304,7 +311,8 @@ impl PermissionChecker<[&str; N]> for Policy {
 
 impl PermissionChecker<&[&str]> for Policy {
     fn has_permission(&self, permission: &Permission, resources_any: &[&str]) -> bool {
-        self.permission.check(permission) && resources_any.iter().any(|r| self.resources.check(r))
+        self.permission.check(permission)
+            && (self.resources.is_all() || resources_any.iter().any(|r| self.resources.check(r)))
     }
 }
 
@@ -353,6 +361,12 @@ impl TryFrom<&str> for Policy {
 
 #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
 pub struct Policies(pub BTreeSet<Policy>);
 
+impl Policies {
+    pub fn all() -> Self {
+        Self(BTreeSet::from([Policy::default()]))
+    }
+}
+
 impl Deref for Policies {
     type Target = BTreeSet<Policy>;
@@ -439,8 +453,8 @@ mod tests {
         assert!(validate_name(" ").is_err());
         assert!(validate_name(".").is_err());
         assert!(validate_name(",").is_err());
-        assert!(validate_name(".BasicInfo").is_err());
-        assert!(validate_name("BasicInfo").is_ok());
+        assert!(validate_name(".Info").is_err());
+        assert!(validate_name("Info").is_ok());
         assert!(validate_name("123").is_ok());
         assert!(validate_name("Level_1").is_ok());
         assert!(validate_name("mmrxu-fqaaa-aaaap-ahhna-cai").is_ok());
@@ -450,11 +464,11 @@ mod tests {
     fn test_permission() {
         for (s, p) in [
             (
-                "Bucket.Read.BasicInfo",
+                "Bucket.Read.Info",
                 Permission {
                     resource: Resource::Bucket,
                     operation: Operation::Read,
-                    constraint: Some(Resource::Other("BasicInfo".to_string())),
+                    constraint: Some(Resource::Other("Info".to_string())),
                 },
             ),
             (
@@ -513,7 +527,7 @@ mod tests {
         assert!(Permission::try_from(".File").is_err());
         assert!(Permission::try_from("File").is_err());
         assert!(Permission::try_from("File.").is_err());
-        assert!(Permission::try_from("File.Read.BasicInfo.BasicInfo").is_err());
+        assert!(Permission::try_from("File.Read.Info.Info").is_err());
 
         assert!(Permission::default().check(&Permission::default()));
         assert!(Permission::default().check(&Permission {
@@ -534,13 +548,13 @@ mod tests {
         .check(&Permission {
             resource: Resource::Bucket,
             operation: Operation::Read,
-            constraint: Some(Resource::Other("BasicInfo".to_string())),
+            constraint: Some(Resource::Other("Info".to_string())),
         }));
 
         assert!(!Permission {
             resource: Resource::Bucket,
             operation: Operation::Read,
-            constraint: Some(Resource::Other("BasicInfo".to_string())),
+            constraint: Some(Resource::Other("Info".to_string())),
         }
         .check(&Permission {
             resource: Resource::Bucket,
@@ -705,7 +719,7 @@ mod tests {
             ""
         ));
 
-        let ps = Policies::from([Policy::default()]);
+        let ps = Policies::all();
         assert_eq!(Policies::try_from("*").unwrap(), ps);
         assert_eq!(Policies::try_from("*:*").unwrap(), ps);
 
@@ -740,7 +754,7 @@
                 permission: Permission {
                     resource: Resource::Bucket,
                     operation: Operation::Read,
-                    constraint: Some(Resource::Other("BasicInfo".to_string())),
+                    constraint: Some(Resource::All),
                 },
                 resources: Resources::from([]),
             },
@@ -762,14 +776,11 @@
             },
         ]);
 
-        println!("{}", ps.to_string());
-        assert_eq!(
-            ps.to_string(),
-            "File.*:1 Folder.Read:* Bucket.Read.BasicInfo"
-        );
+        // println!("{}", ps.to_string());
+        assert_eq!(ps.to_string(), "File.*:1 Folder.Read:* Bucket.Read.Info");
 
         assert_eq!(
-            Policies::try_from("File.*:1 Folder.Read:* Bucket.Read.BasicInfo").unwrap(),
+            Policies::try_from("File.*:1 Folder.Read:* Bucket.Read.Info").unwrap(),
             ps
         );
    }
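Note on the widened wildcard semantics in permission.rs above: check_constraint now treats a Some(Resource::All) constraint as matching any requested constraint (previously an exact match was required), and the is_all() short-circuit means a policy covering all resources no longer denies a request that arrives with an empty candidate list. A quick sketch against the crate's public API; it assumes Resources::from([]) counts as is_all(), which the Policy::default() / Policies::all() round-trip in the tests above implies:

use ic_oss_types::permission::{
    Operation, Permission, PermissionChecker, Policy, Resource, Resources,
};

#[test]
fn widened_wildcards() {
    let no_resources: &[&str] = &[];

    // 1. A Some(Resource::All) constraint now satisfies any requested
    //    constraint, e.g. a Bucket.Read.Info request.
    let granted = Policy {
        permission: Permission {
            resource: Resource::Bucket,
            operation: Operation::Read,
            constraint: Some(Resource::All),
        },
        resources: Resources::from([]),
    };
    let wanted = Permission::try_from("Bucket.Read.Info").unwrap();
    assert!(granted.has_permission(&wanted, no_resources));

    // 2. With a "*" resource set, an empty candidate list is no longer an
    //    automatic deny (the old `.iter().any(..)` over [] returned false).
    let granted = Policy::try_from("Folder.Read:*").unwrap();
    let wanted = Permission::try_from("Folder.Read").unwrap();
    assert!(granted.has_permission(&wanted, no_resources));
}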