From d7fb60958d87f1133cb57001602beb043ed0066d Mon Sep 17 00:00:00 2001 From: qti3e Date: Tue, 1 Feb 2022 17:50:04 +0330 Subject: [PATCH 01/10] implement bucket serde support --- candid/root.did | 2 +- canisters/root/src/lib.rs | 11 +-- canisters/root/src/upgrade.rs | 21 ++++- common/src/bucket.rs | 141 +++++++++++++++++++--------------- 4 files changed, 103 insertions(+), 72 deletions(-) diff --git a/candid/root.did b/candid/root.did index 608ff2e..244a3a5 100644 --- a/candid/root.did +++ b/candid/root.did @@ -44,6 +44,7 @@ type WithIdArg = record { id : nat64; witness : bool }; type WithWitnessArg = record { witness : bool }; type Witness = record { certificate : vec nat8; tree : vec nat8 }; service : { + balance : () -> (nat64) query; contract_id : () -> (principal) query; get_bucket_for : (WithIdArg) -> (GetBucketResponse) query; get_next_canisters : (WithWitnessArg) -> (GetNextCanistersResponse) query; @@ -56,7 +57,6 @@ service : { ) query; insert : (IndefiniteEvent) -> (nat64); migrate : (vec Event) -> (); - balance : () -> (nat64) query; size : () -> (nat64) query; time : () -> (nat64) query; } \ No newline at end of file diff --git a/canisters/root/src/lib.rs b/canisters/root/src/lib.rs index 8d9e6f3..f61a30e 100644 --- a/canisters/root/src/lib.rs +++ b/canisters/root/src/lib.rs @@ -5,7 +5,7 @@ use cap_common::Bucket; use ic_certified_map::{fork, fork_hash, AsHashTree, HashTree}; use ic_kit::candid::{candid_method, export_service}; use ic_kit::{ic, Principal}; -use serde::Serialize; +use serde::{Deserialize, Serialize}; use std::collections::BTreeSet; use cap_common::did::*; @@ -23,7 +23,7 @@ mod upgrade; /// / \ /// / \ 2 /// 0 1 -#[derive(Serialize)] +#[derive(Serialize, Deserialize)] struct Data { bucket: Bucket, buckets: BucketLookupTable, @@ -39,7 +39,7 @@ struct Data { impl Default for Data { fn default() -> Self { Self { - bucket: Bucket::new(0), + bucket: Bucket::new(Principal::management_canister(), 0), buckets: { let mut table = BucketLookupTable::default(); table.insert(0, ic::id()); @@ -61,6 +61,7 @@ fn init(contract: Principal, writers: BTreeSet) { data.cap_id = ic::caller(); data.contract = contract; data.writers = writers; + data.bucket = Bucket::new(contract, 0); } #[query] @@ -246,7 +247,7 @@ fn insert(event: IndefiniteEvent) -> TransactionId { new_users, )); - let id = data.bucket.insert(&data.contract, event); + let id = data.bucket.insert(event); data.allow_migration = false; @@ -281,7 +282,7 @@ fn migrate(events: Vec) { } } - data.bucket.insert(&data.contract, event); + data.bucket.insert(event); } ic_cdk::block_on(write_new_users_to_cap( diff --git a/canisters/root/src/upgrade.rs b/canisters/root/src/upgrade.rs index 3c9ddfb..c1eb9f4 100644 --- a/canisters/root/src/upgrade.rs +++ b/canisters/root/src/upgrade.rs @@ -30,6 +30,19 @@ fn pre_upgrade() { serde_cbor::to_writer(writer, &data).expect("Failed to serialize data."); } +fn next_post_upgrade() { + let reader = StableReader::default(); + let data: Data = match serde_cbor::from_reader(reader) { + Ok(t) => t, + Err(err) => { + let limit = err.offset() - 1; + let reader = StableReader::default().take(limit); + serde_cbor::from_reader(reader).expect("Failed to deserialize.") + } + }; + ic::store(data); +} + #[post_upgrade] fn post_upgrade() { let reader = StableReader::default(); @@ -44,9 +57,9 @@ fn post_upgrade() { let contract = data.contract; - let mut bucket = Bucket::new(0); + let mut bucket = Bucket::new(contract, 0); for event in data.bucket { - bucket.insert(&contract, event); + bucket.insert(event); } 
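    // Re-inserting every legacy event rebuilds the bucket's certified state
    // from scratch: the event-hash tree plus the user and contract indexers.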
ic::store(Data { @@ -90,11 +103,11 @@ mod tests { operation: "mint".to_string(), details: vec![("amount".into(), DetailValue::U64(i as u64))], }; - data.bucket.insert(&contract_id, e); + data.bucket.insert(e); } let serialized = serde_cbor::to_vec(data).expect("Failed to serialize."); - let actual: DataDe = serde_cbor::from_slice(&serialized).expect("Failed to deserialize."); + let actual: Data = serde_cbor::from_slice(&serialized).expect("Failed to deserialize."); assert_eq!(actual.bucket.len(), 100); } diff --git a/common/src/bucket.rs b/common/src/bucket.rs index 3022eba..c0b03ae 100644 --- a/common/src/bucket.rs +++ b/common/src/bucket.rs @@ -3,8 +3,8 @@ use crate::transaction::Event; use ic_certified_map::HashTree::Pruned; use ic_certified_map::{fork, fork_hash, leaf_hash, AsHashTree, Hash, HashTree, RbTree}; use ic_kit::Principal; -use serde::ser::SerializeSeq; -use serde::{Serialize, Serializer}; +use serde::ser::{SerializeSeq, SerializeTuple}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::alloc::{dealloc, Layout}; use std::ptr; use std::ptr::NonNull; @@ -29,6 +29,8 @@ use std::ptr::NonNull; pub struct Bucket { /// Map each local Transaction ID to its hash. event_hashes: RbTree, + /// ID of the current contract. + contract: Principal, /// The offset of this bucket, i.e the actual id of the first event in the bucket. global_offset: u64, /// Same as `global_offset` but is the encoded big endian, this struct should own this data @@ -37,7 +39,7 @@ pub struct Bucket { global_offset_be: [u8; 8], /// Maps each user principal id to the vector of events they have. user_indexer: Index, - /// Maps each token contract principal id to the vector of events inserted by that token. + /// Maps contract id to each transaction page. contract_indexer: Index, /// All of the events in this bucket, we store a pointer to an allocated memory. Which is used /// only internally in this struct. And this Vec should be considered the actual owner of this @@ -65,9 +67,10 @@ impl AsRef<[u8]> for EventKey { impl Bucket { /// Create a new bucket with the given global offset. #[inline] - pub fn new(offset: u64) -> Self { + pub fn new(contract: Principal, offset: u64) -> Self { Bucket { events: vec![], + contract, event_hashes: RbTree::new(), global_offset: offset, global_offset_be: offset.to_be_bytes(), @@ -95,14 +98,14 @@ impl Bucket { } /// Try to insert an event into the bucket. - pub fn insert(&mut self, contract: &Principal, event: Event) -> u64 { + pub fn insert(&mut self, event: Event) -> u64 { let local_index = self.events.len() as u32; let hash = event.hash(); let event: NonNull = Box::leak(Box::new(event)).into(); let eve = unsafe { event.as_ref() }; // Update the indexers for the transaction. 
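        // (the contract indexer is keyed by this bucket's contract id, and the
        // user indexer gets one entry per principal referenced by the event)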
- self.contract_indexer.insert(contract, event, &hash); + self.contract_indexer.insert(&self.contract, event, &hash); for user in eve.extract_principal_ids() { self.user_indexer.insert(user, event, &hash); } @@ -261,14 +264,47 @@ impl Serialize for Bucket { where S: Serializer, { - let mut s = serializer.serialize_seq(Some(self.events.len()))?; - for ev in &self.events { - s.serialize_element(unsafe { ev.as_ref() })?; + struct Events<'a>(&'a Vec>); + impl<'a> Serialize for Events<'a> { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut s = serializer.serialize_seq(Some(self.0.len()))?; + for ev in self.0 { + s.serialize_element(unsafe { ev.as_ref() })?; + } + s.end() + } } + + let mut s = serializer.serialize_tuple(3)?; + s.serialize_element(&self.global_offset)?; + s.serialize_element(&self.contract)?; + s.serialize_element(&Events(&self.events))?; s.end() } } +impl<'de> Deserialize<'de> for Bucket { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + struct BucketDe(u64, Principal, Vec); + + let data = BucketDe::deserialize(deserializer)?; + let mut bucket = Bucket::new(data.1, data.0); + + for event in data.2 { + bucket.insert(event); + } + + Ok(bucket) + } +} + #[cfg(test)] mod tests { use super::*; @@ -286,11 +322,11 @@ mod tests { /// root_hash and as_hash_tree should use the same tree layout. #[test] fn test_hash_tree() { - let mut bucket = Bucket::new(0); - bucket.insert(&mock_principals::xtc(), e(0, mock_principals::alice())); - bucket.insert(&mock_principals::xtc(), e(1, mock_principals::alice())); - bucket.insert(&mock_principals::xtc(), e(2, mock_principals::alice())); - bucket.insert(&mock_principals::xtc(), e(3, mock_principals::alice())); + let mut bucket = Bucket::new(mock_principals::xtc(), 0); + bucket.insert(e(0, mock_principals::alice())); + bucket.insert(e(1, mock_principals::alice())); + bucket.insert(e(2, mock_principals::alice())); + bucket.insert(e(3, mock_principals::alice())); assert_eq!(bucket.as_hash_tree().reconstruct(), bucket.root_hash()); } @@ -298,11 +334,11 @@ mod tests { /// and reconstructs to the root_hash. 
#[test] fn test_witness_transaction() { - let mut bucket = Bucket::new(0); - bucket.insert(&mock_principals::xtc(), e(0, mock_principals::alice())); - bucket.insert(&mock_principals::xtc(), e(1, mock_principals::alice())); - bucket.insert(&mock_principals::xtc(), e(2, mock_principals::alice())); - bucket.insert(&mock_principals::xtc(), e(3, mock_principals::alice())); + let mut bucket = Bucket::new(mock_principals::xtc(), 0); + bucket.insert(e(0, mock_principals::alice())); + bucket.insert(e(1, mock_principals::alice())); + bucket.insert(e(2, mock_principals::alice())); + bucket.insert(e(3, mock_principals::alice())); let event = bucket.get_transaction(1).unwrap(); let witness = bucket.witness_transaction(1); @@ -312,11 +348,11 @@ mod tests { #[test] fn test_witness_transaction_large() { - let mut bucket = Bucket::new(0); - bucket.insert(&mock_principals::xtc(), e(0, mock_principals::alice())); - bucket.insert(&mock_principals::xtc(), e(1, mock_principals::alice())); - bucket.insert(&mock_principals::xtc(), e(2, mock_principals::alice())); - bucket.insert(&mock_principals::xtc(), e(3, mock_principals::alice())); + let mut bucket = Bucket::new(mock_principals::xtc(), 0); + bucket.insert(e(0, mock_principals::alice())); + bucket.insert(e(1, mock_principals::alice())); + bucket.insert(e(2, mock_principals::alice())); + bucket.insert(e(3, mock_principals::alice())); assert_eq!(bucket.get_transaction(4).is_none(), true); @@ -326,11 +362,11 @@ mod tests { #[test] fn test_witness_transaction_below_offset() { - let mut bucket = Bucket::new(10); - bucket.insert(&mock_principals::xtc(), e(10, mock_principals::alice())); - bucket.insert(&mock_principals::xtc(), e(11, mock_principals::alice())); - bucket.insert(&mock_principals::xtc(), e(12, mock_principals::alice())); - bucket.insert(&mock_principals::xtc(), e(13, mock_principals::alice())); + let mut bucket = Bucket::new(mock_principals::xtc(), 10); + bucket.insert(e(10, mock_principals::alice())); + bucket.insert(e(11, mock_principals::alice())); + bucket.insert(e(12, mock_principals::alice())); + bucket.insert(e(13, mock_principals::alice())); assert_eq!(bucket.get_transaction(5).is_none(), true); let witness = bucket.witness_transaction(5); @@ -339,13 +375,13 @@ mod tests { #[test] fn test_witness_user_transactions() { - let mut bucket = Bucket::new(0); + let mut bucket = Bucket::new(mock_principals::xtc(), 0); for i in 0..5000 { if i % 27 == 0 { - bucket.insert(&mock_principals::xtc(), e(i, mock_principals::bob())); + bucket.insert(e(i, mock_principals::bob())); } else { - bucket.insert(&mock_principals::xtc(), e(i, mock_principals::alice())); + bucket.insert(e(i, mock_principals::alice())); } } @@ -371,35 +407,16 @@ mod tests { } #[test] - fn test_witness_token_transactions() { - let mut bucket = Bucket::new(0); - - for i in 0..2500 { - if i % 13 == 0 { - bucket.insert(&mock_principals::bob(), e(i, mock_principals::xtc())); - } else { - bucket.insert(&mock_principals::xtc(), e(i, mock_principals::alice())); - } - } - - let mut count = 0; - - for page in 0.. 
{ - let principal = mock_principals::bob(); - let data = bucket.get_transactions_for_contract(&principal, page); - let witness = bucket.witness_transactions_for_contract(&principal, page); - let len = data.len(); - - assert_eq!(witness.reconstruct(), bucket.root_hash()); - - count += len; - - if len == 0 { - break; - } - } - - // floor(2500 / 13) + 1 = 193 - assert_eq!(count, 193); + fn serde() { + let mut bucket = Bucket::new(mock_principals::xtc(), 0); + bucket.insert(e(0, mock_principals::alice())); + bucket.insert(e(1, mock_principals::alice())); + bucket.insert(e(2, mock_principals::alice())); + bucket.insert(e(3, mock_principals::alice())); + let expected = bucket.root_hash(); + + let data: Vec = serde_cbor::to_vec(&bucket).unwrap(); + let bucket: Bucket = serde_cbor::from_slice(&data).unwrap(); + assert_eq!(bucket.root_hash(), expected); } } From f18c9b48287f489ed8c4bac6f0a285b2251a7f4e Mon Sep 17 00:00:00 2001 From: qti3e Date: Tue, 8 Feb 2022 19:30:28 +0330 Subject: [PATCH 02/10] migrate to certified-vars --- Cargo.lock | 32 +++- canisters/root/Cargo.toml | 2 +- canisters/root/src/lib.rs | 26 +-- canisters/root/src/upgrade.rs | 31 ++-- canisters/router/Cargo.toml | 2 +- canisters/router/src/installer.rs | 5 +- canisters/router/src/lib.rs | 44 +++-- canisters/router/src/upgrade.rs | 96 +++++------ common/Cargo.toml | 1 + common/src/bucket.rs | 115 +++---------- common/src/bucket_lookup_table.rs | 268 ------------------------------ common/src/canister_list.rs | 76 --------- common/src/canister_map.rs | 117 ------------- common/src/did.rs | 2 +- common/src/index.rs | 154 ----------------- common/src/lib.rs | 5 - common/src/transaction.rs | 45 +++++ common/src/user_canisters.rs | 141 ---------------- 18 files changed, 206 insertions(+), 956 deletions(-) delete mode 100644 common/src/bucket_lookup_table.rs delete mode 100644 common/src/canister_list.rs delete mode 100644 common/src/canister_map.rs delete mode 100644 common/src/index.rs delete mode 100644 common/src/user_canisters.rs diff --git a/Cargo.lock b/Cargo.lock index a1d9a7d..fdf9223 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -306,9 +306,9 @@ checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" [[package]] name = "candid" -version = "0.7.8" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7577605c33073dcafc17a5ed6373aa0cb7005e7d4e4b7cd40ca01cb2385533" +checksum = "12970d8d0620d2bdb7e81a5b13ed11e41fcdfeba53d61e45b5853afcbf9611fd" dependencies = [ "anyhow", "binread", @@ -348,6 +348,7 @@ name = "cap-common" version = "0.1.0" dependencies = [ "async-std", + "certified-vars", "ic-cdk", "ic-certified-map", "ic-kit", @@ -401,6 +402,21 @@ version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" +[[package]] +name = "certified-vars" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c5355bc98273ac7230620fe5b23eb5c3714666c8c353f0e22ad8e0bcb0dc9be" +dependencies = [ + "candid", + "hex", + "ic-cdk", + "serde", + "serde_bytes", + "serde_cbor", + "sha2", +] + [[package]] name = "cfg-if" version = "1.0.0" @@ -437,9 +453,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.2.2" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3825b1e8580894917dc4468cb634a1b4e9745fddc854edad72d9c04644c0319f" +checksum = 
"a2209c310e29876f7f0b2721e7e26b84aff178aa3da5d091f9bfbf47669e60e3" dependencies = [ "cfg-if", ] @@ -767,8 +783,8 @@ version = "0.1.0" dependencies = [ "async-std", "cap-common", + "certified-vars", "ic-cdk", - "ic-certified-map", "ic-kit", "serde", "serde_cbor", @@ -780,8 +796,8 @@ version = "0.1.0" dependencies = [ "async-std", "cap-common", + "certified-vars", "ic-cdk", - "ic-certified-map", "ic-kit", "lazy_static", "serde", @@ -805,9 +821,9 @@ dependencies = [ [[package]] name = "ic-types" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2c021c11ae1d716f45d783f5764f418a11f12aea1fdc4fc8a2b2242e0dae708" +checksum = "0e78ec6f58886cdc252d6f912dc794211bd6bbc39ddc9dcda434b2dc16c335b3" dependencies = [ "base32", "crc32fast", diff --git a/canisters/root/Cargo.toml b/canisters/root/Cargo.toml index 3a73918..10f50bd 100644 --- a/canisters/root/Cargo.toml +++ b/canisters/root/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] cap-common = { path= "../../common" } -ic-certified-map = "0.1.0" +certified-vars = "0.1.2" ic-kit = "0.4.3" ic-cdk = "0.3.1" serde = "1.0.116" diff --git a/canisters/root/src/lib.rs b/canisters/root/src/lib.rs index f61a30e..8c9ceaf 100644 --- a/canisters/root/src/lib.rs +++ b/canisters/root/src/lib.rs @@ -1,8 +1,9 @@ -use cap_common::bucket_lookup_table::BucketLookupTable; -use cap_common::canister_list::CanisterList; use cap_common::transaction::{Event, IndefiniteEvent}; use cap_common::Bucket; -use ic_certified_map::{fork, fork_hash, AsHashTree, HashTree}; +use certified_vars::{ + hashtree::{fork, fork_hash}, + AsHashTree, HashTree, Map, Seq, +}; use ic_kit::candid::{candid_method, export_service}; use ic_kit::{ic, Principal}; use serde::{Deserialize, Serialize}; @@ -26,8 +27,8 @@ mod upgrade; #[derive(Serialize, Deserialize)] struct Data { bucket: Bucket, - buckets: BucketLookupTable, - next_canisters: CanisterList, + buckets: Map, + next_canisters: Seq, /// List of all the users in this token contract. 
users: BTreeSet, cap_id: Principal, @@ -41,11 +42,11 @@ impl Default for Data { Self { bucket: Bucket::new(Principal::management_canister(), 0), buckets: { - let mut table = BucketLookupTable::default(); + let mut table = Map::new(); table.insert(0, ic::id()); table }, - next_canisters: CanisterList::new(), + next_canisters: Seq::new(), users: BTreeSet::new(), cap_id: Principal::management_canister(), contract: Principal::management_canister(), @@ -83,7 +84,7 @@ fn get_next_canisters(arg: WithWitnessArg) -> GetNextCanistersResponse { ), }; - let canisters = data.next_canisters.to_vec(); + let canisters = data.next_canisters.as_vec().clone(); GetNextCanistersResponse { canisters, witness } } @@ -191,7 +192,7 @@ fn get_bucket_for(arg: WithIdArg) -> GetBucketResponse { fork( fork( HashTree::Pruned(data.bucket.root_hash()), - data.buckets.gen_witness(arg.id), + data.buckets.witness(&arg.id), ), HashTree::Pruned(data.next_canisters.root_hash()), ) @@ -199,9 +200,10 @@ fn get_bucket_for(arg: WithIdArg) -> GetBucketResponse { ), }; - let canister = *data.buckets.get_bucket_for(arg.id); - - GetBucketResponse { canister, witness } + GetBucketResponse { + canister: ic::id(), + witness, + } } #[query] diff --git a/canisters/root/src/upgrade.rs b/canisters/root/src/upgrade.rs index c1eb9f4..489cdf8 100644 --- a/canisters/root/src/upgrade.rs +++ b/canisters/root/src/upgrade.rs @@ -1,20 +1,19 @@ use crate::Data; -use cap_common::bucket_lookup_table::BucketLookupTable; -use cap_common::canister_list::CanisterList; use cap_common::transaction::Event; -use cap_common::{Bucket, TokenContractId}; +use cap_common::{Bucket, TokenContractId, TransactionId}; +use certified_vars::{Hash, Map, Seq}; use ic_cdk::api::stable::{StableReader, StableWriter}; use ic_kit::macros::{post_upgrade, pre_upgrade}; use ic_kit::{ic, Principal}; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; use std::collections::BTreeSet; use std::io::Read; #[derive(Deserialize)] -struct DataDe { +struct DataV0 { bucket: Vec, - buckets: BucketLookupTable, - next_canisters: CanisterList, + buckets: Vec<(TransactionId, Principal)>, + next_canisters: CanisterListV0, /// List of all the users in this token contract. users: BTreeSet, cap_id: Principal, @@ -23,6 +22,12 @@ struct DataDe { allow_migration: bool, } +#[derive(Deserialize)] +pub struct CanisterListV0 { + data: Vec, + hash: Hash, +} + #[pre_upgrade] fn pre_upgrade() { let data = ic::get::(); @@ -46,7 +51,7 @@ fn next_post_upgrade() { #[post_upgrade] fn post_upgrade() { let reader = StableReader::default(); - let data: DataDe = match serde_cbor::from_reader(reader) { + let data: DataV0 = match serde_cbor::from_reader(reader) { Ok(t) => t, Err(err) => { let limit = err.offset() - 1; @@ -64,8 +69,14 @@ fn post_upgrade() { ic::store(Data { bucket, - buckets: data.buckets, - next_canisters: data.next_canisters, + buckets: { + let mut table = Map::new(); + table.insert(0, ic::id()); + table + }, + // For now we never had next_canisters, + // so this is safe. 
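        // (had a follow-up bucket ever been spawned, starting from an empty
        // Seq here would drop it from the certified state)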
+ next_canisters: Seq::new(), users: data.users, cap_id: data.cap_id, contract, diff --git a/canisters/router/Cargo.toml b/canisters/router/Cargo.toml index ca9434e..f459ad7 100644 --- a/canisters/router/Cargo.toml +++ b/canisters/router/Cargo.toml @@ -8,7 +8,7 @@ authors = ["Parsa Ghadimi "] [dependencies] cap-common = { path= "../../common" } -ic-certified-map = "0.1.0" +certified-vars = "0.1.2" ic-kit = "0.4.3" ic-cdk = "0.3.1" serde = "1.0.116" diff --git a/canisters/router/src/installer.rs b/canisters/router/src/installer.rs index 2bf00b1..74e0d36 100644 --- a/canisters/router/src/installer.rs +++ b/canisters/router/src/installer.rs @@ -1,4 +1,5 @@ use crate::Data; +use certified_vars::Seq; use ic_kit::candid::candid_method; use ic_kit::candid::encode_args; use ic_kit::candid::CandidType; @@ -86,5 +87,7 @@ pub async fn install_code(canister_id: Principal, contract_id: Principal, writer data.root_buckets.insert(contract_id, canister_id); data.user_canisters - .insert(Principal::management_canister(), canister_id); + .entry(Principal::management_canister()) + .or_insert(Seq::new()) + .append(canister_id); } diff --git a/canisters/router/src/lib.rs b/canisters/router/src/lib.rs index 5331394..41f4a12 100644 --- a/canisters/router/src/lib.rs +++ b/canisters/router/src/lib.rs @@ -1,9 +1,9 @@ -use cap_common::canister_list::CanisterList; -use cap_common::canister_map::CanisterMap; -use cap_common::user_canisters::UserCanisters; -use ic_certified_map::{fork, HashTree}; -use ic_certified_map::{fork_hash, AsHashTree}; -use ic_kit::candid::{candid_method, export_service}; +use certified_vars::Map; +use certified_vars::{ + hashtree::{fork, fork_hash}, + AsHashTree, HashTree, Seq, +}; +use ic_kit::candid::{candid_method, export_service, CandidType}; use ic_kit::ic; use serde::{Deserialize, Serialize}; @@ -26,24 +26,24 @@ mod upgrade; /// / \ /// / \ 2 /// 0 1 -#[derive(Serialize, Deserialize)] +#[derive(CandidType, Serialize, Deserialize)] pub struct Data { /// Map: TokenContractId -> RootBucketId - pub root_buckets: CanisterMap, + pub root_buckets: Map, /// Map each user to RootBucketId - pub user_canisters: UserCanisters, + pub user_canisters: Map>, /// List of the index canisters. 
- pub index_canisters: CanisterList, + pub index_canisters: Seq, } impl Default for Data { fn default() -> Self { Data { - root_buckets: CanisterMap::default(), - user_canisters: UserCanisters::default(), + root_buckets: Map::new(), + user_canisters: Map::new(), index_canisters: { - let mut list = CanisterList::new(); - list.push(ic::id()); + let mut list = Seq::new(); + list.append(ic::id()); list }, } @@ -62,7 +62,7 @@ fn get_token_contract_root_bucket( true => Some( fork( fork( - data.root_buckets.gen_witness(&arg.canister), + data.root_buckets.witness(&arg.canister), HashTree::Pruned(data.user_canisters.root_hash()), ), HashTree::Pruned(data.index_canisters.root_hash()), @@ -95,7 +95,12 @@ fn get_user_root_buckets(arg: GetUserRootBucketsArg) -> GetUserRootBucketsRespon ), }; - let contracts = data.user_canisters.get(&arg.user).to_vec(); + let contracts = data + .user_canisters + .get(&arg.user) + .unwrap_or(&Seq::new()) + .as_vec() + .clone(); GetUserRootBucketsResponse { contracts, witness } } @@ -119,7 +124,7 @@ fn get_index_canisters(arg: WithWitnessArg) -> GetIndexCanistersResponse { ), }; - let canisters = data.index_canisters.to_vec(); + let canisters = data.index_canisters.as_vec().clone(); GetIndexCanistersResponse { canisters, witness } } @@ -137,7 +142,10 @@ fn insert_new_users(contract_id: Principal, users: Vec) { ); for user in users { - data.user_canisters.insert(user, root_bucket); + data.user_canisters + .entry(user) + .or_insert(Seq::new()) + .append(root_bucket); } } diff --git a/canisters/router/src/upgrade.rs b/canisters/router/src/upgrade.rs index 4ba14d7..6cef45e 100644 --- a/canisters/router/src/upgrade.rs +++ b/canisters/router/src/upgrade.rs @@ -1,26 +1,42 @@ use crate::{get_user_root_buckets, Data}; use cap_common::{GetUserRootBucketsArg, RootBucketId}; +use certified_vars::{Hash, Seq}; use ic_cdk::api::stable::{StableReader, StableWriter}; use ic_kit::candid::{candid_method, encode_args}; use ic_kit::ic; use ic_kit::macros::{post_upgrade, pre_upgrade, update}; use ic_kit::Principal; +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; use std::io::Read; #[derive(Default)] struct RootBucketsToUpgrade(Vec); +#[derive(Serialize, Deserialize)] +pub struct DataV0 { + pub root_buckets: BTreeMap, Vec>, + /// Map each user to RootBucketId + pub user_canisters: BTreeMap, CanisterListV0>, + /// List of the index canisters. 
+ pub index_canisters: CanisterListV0, +} + +#[derive(Default, Deserialize, Serialize)] +pub struct CanisterListV0 { + data: Vec, + hash: Hash, +} + #[pre_upgrade] fn pre_upgrade() { - let data = ic::get::(); - let writer = StableWriter::default(); - serde_cbor::to_writer(writer, &data).expect("Failed to serialize data."); + ic::stable_store((ic::get::(),)).expect("Failed to serialize data."); } #[post_upgrade] fn post_upgrade() { let reader = StableReader::default(); - let data: Data = match serde_cbor::from_reader(reader) { + let data: DataV0 = match serde_cbor::from_reader(reader) { Ok(t) => t, Err(err) => { let limit = err.offset() - 1; @@ -29,7 +45,30 @@ fn post_upgrade() { } }; - ic::store::(data); + let mut deserialized = Data::default(); + + for (key, value) in data.root_buckets { + let key = Principal::from_slice(&key); + let value = Principal::from_slice(&value); + deserialized.root_buckets.insert(key, value); + } + + for (key, value) in data.user_canisters { + let key = Principal::from_slice(&key); + let value = { + let mut r = Seq::new(); + for v in value.data { + r.append(v); + } + r + }; + + deserialized.user_canisters.insert(key, value); + } + + deserialized.index_canisters = data.index_canisters.data.into_iter().collect(); + + ic::store::(deserialized); let root_buckets = get_user_root_buckets(GetUserRootBucketsArg { user: Principal::management_canister(), @@ -88,50 +127,3 @@ async fn upgrade_root_bucket(canister_id: Principal) { trigger_upgrade(); } - -#[cfg(test)] -mod tests { - use super::*; - use ic_certified_map::AsHashTree; - use ic_kit::MockContext; - - // TODO(qti3e) Move this to ic-kit. - const fn p(id: u8) -> Principal { - Principal::from_slice(&[id, 0x00]) - } - - #[test] - fn test() { - let contract_1 = p(0); - let rb_1 = p(1); - let contract_2 = p(2); - let rb_2 = p(3); - let alice = p(4); - let bob = p(5); - - MockContext::new().with_id(p(17)).inject(); - - let mut data = Data::default(); - data.root_buckets.insert(contract_1, rb_1); - data.root_buckets.insert(contract_2, rb_2); - data.user_canisters.insert(alice, rb_1); - data.user_canisters.insert(alice, rb_2); - data.user_canisters.insert(bob, rb_2); - - let serialized: Vec = serde_cbor::to_vec(&data).expect("Failed to serialize."); - let actual: Data = serde_cbor::from_slice(&serialized).expect("Failed to deserialize."); - - assert_eq!( - actual.user_canisters.root_hash(), - data.user_canisters.root_hash() - ); - assert_eq!( - actual.root_buckets.root_hash(), - data.root_buckets.root_hash() - ); - assert_eq!( - actual.index_canisters.root_hash(), - data.index_canisters.root_hash() - ); - } -} diff --git a/common/Cargo.toml b/common/Cargo.toml index 40032a2..1530ea2 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -10,6 +10,7 @@ repository = "https://github.com/Psychedelic/cap" [dependencies] ic-certified-map = "0.1.0" +certified-vars = "0.1.2" ic-kit = "0.4.2" ic-cdk = "0.3.1" sha2 = "0.9" diff --git a/common/src/bucket.rs b/common/src/bucket.rs index c0b03ae..c60d02e 100644 --- a/common/src/bucket.rs +++ b/common/src/bucket.rs @@ -1,7 +1,6 @@ -use crate::index::Index; use crate::transaction::Event; -use ic_certified_map::HashTree::Pruned; -use ic_certified_map::{fork, fork_hash, leaf_hash, AsHashTree, Hash, HashTree, RbTree}; +use certified_vars::Paged; +use certified_vars::{rbtree::RbTree, AsHashTree, Hash, HashTree}; use ic_kit::Principal; use serde::ser::{SerializeSeq, SerializeTuple}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -28,7 +27,7 @@ use std::ptr::NonNull; /// 
``` pub struct Bucket { /// Map each local Transaction ID to its hash. - event_hashes: RbTree, + event_hashes: RbTree, /// ID of the current contract. contract: Principal, /// The offset of this bucket, i.e the actual id of the first event in the bucket. @@ -38,9 +37,9 @@ pub struct Bucket { /// value of the `global_offset` we can use this slice. global_offset_be: [u8; 8], /// Maps each user principal id to the vector of events they have. - user_indexer: Index, + user_indexer: Paged, 64>, /// Maps contract id to each transaction page. - contract_indexer: Index, + contract_indexer: Paged, 64>, /// All of the events in this bucket, we store a pointer to an allocated memory. Which is used /// only internally in this struct. And this Vec should be considered the actual owner of this /// pointers. @@ -48,22 +47,6 @@ pub struct Bucket { events: Vec>, } -pub struct EventKey([u8; 4]); - -impl From for EventKey { - #[inline(always)] - fn from(n: u32) -> Self { - EventKey(n.to_be_bytes()) - } -} - -impl AsRef<[u8]> for EventKey { - #[inline(always)] - fn as_ref(&self) -> &[u8] { - &self.0 - } -} - impl Bucket { /// Create a new bucket with the given global offset. #[inline] @@ -74,8 +57,8 @@ impl Bucket { event_hashes: RbTree::new(), global_offset: offset, global_offset_be: offset.to_be_bytes(), - user_indexer: Index::default(), - contract_indexer: Index::default(), + user_indexer: Paged::new(), + contract_indexer: Paged::new(), } } @@ -105,38 +88,22 @@ impl Bucket { let eve = unsafe { event.as_ref() }; // Update the indexers for the transaction. - self.contract_indexer.insert(&self.contract, event, &hash); + self.contract_indexer.insert(self.contract, event); for user in eve.extract_principal_ids() { - self.user_indexer.insert(user, event, &hash); + self.user_indexer.insert(*user, event); } // Insert the event itself. - self.event_hashes.insert(local_index.into(), hash); + self.event_hashes.insert(local_index, hash); self.events.push(event); self.global_offset + (local_index as u64) } - /// Create the hash of the left virtual node. - #[inline] - fn left_v_hash(&self) -> Hash { - let offset_hash = leaf_hash(&self.global_offset_be); - fork_hash(&self.event_hashes.root_hash(), &offset_hash) - } - - /// Create the hash of the right virtual node. - #[inline] - fn right_v_hash(&self) -> Hash { - fork_hash( - &self.user_indexer.root_hash(), - &self.contract_indexer.root_hash(), - ) - } - /// Return the transactions associated with a user's principal id at the given page. #[inline] pub fn get_transactions_for_user(&self, principal: &Principal, page: u32) -> Vec<&Event> { - if let Some(data) = self.user_indexer.get(principal, page) { + if let Some(data) = self.user_indexer.get(principal, page as usize) { data.iter().map(|v| unsafe { v.as_ref() }).collect() } else { vec![] @@ -146,47 +113,39 @@ impl Bucket { /// Return the last page number associated with the given user. #[inline] pub fn last_page_for_user(&self, principal: &Principal) -> u32 { - self.user_indexer.last_page(principal) + self.user_indexer + .get_last_page_number(principal) + .unwrap_or(0) as u32 } /// Return the transactions associated with a token's principal id at the given page. 
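    /// ("token" here is the token contract; the lookup is keyed by the
    /// contract's principal id, mirroring `get_transactions_for_user`)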
#[inline] pub fn get_transactions_for_contract(&self, principal: &Principal, page: u32) -> Vec<&Event> { - if let Some(data) = self.contract_indexer.get(principal, page) { + if let Some(data) = self.contract_indexer.get(principal, page as usize) { data.iter().map(|v| unsafe { v.as_ref() }).collect() } else { vec![] } } - /// Return the last page number associated with the given token. + /// Return the last page number associated with the given token contract. #[inline] pub fn last_page_for_contract(&self, principal: &Principal) -> u32 { - self.contract_indexer.last_page(principal) + self.contract_indexer + .get_last_page_number(principal) + .unwrap_or(0) as u32 } /// Return the witness that can be used to prove the response from get_transactions_for_user. #[inline] pub fn witness_transactions_for_user(&self, principal: &Principal, page: u32) -> HashTree { - fork( - Pruned(self.left_v_hash()), - fork( - self.user_indexer.witness(principal, page), - Pruned(self.contract_indexer.root_hash()), - ), - ) + todo!() } /// Return the witness that can be used to prove the response from get_transactions_for_token. #[inline] pub fn witness_transactions_for_contract(&self, principal: &Principal, page: u32) -> HashTree { - fork( - Pruned(self.left_v_hash()), - fork( - Pruned(self.user_indexer.root_hash()), - self.contract_indexer.witness(principal, page), - ), - ) + todo!() } /// Return a transaction by its global id. @@ -207,43 +166,17 @@ impl Bucket { /// Return a witness which proves the response returned by get_transaction. #[inline] pub fn witness_transaction(&self, id: u64) -> HashTree { - if id < self.global_offset { - fork( - fork( - Pruned(self.event_hashes.root_hash()), - HashTree::Leaf(&self.global_offset_be), - ), - Pruned(self.right_v_hash()), - ) - } else { - let local = (id - self.global_offset) as u32; - fork( - fork( - self.event_hashes.witness(&local.to_be_bytes()), - HashTree::Leaf(&self.global_offset_be), - ), - Pruned(self.right_v_hash()), - ) - } + todo!() } } impl AsHashTree for Bucket { fn root_hash(&self) -> Hash { - fork_hash(&self.left_v_hash(), &self.right_v_hash()) + todo!() } fn as_hash_tree(&self) -> HashTree<'_> { - fork( - fork( - self.event_hashes.as_hash_tree(), - HashTree::Leaf(&self.global_offset_be), - ), - fork( - self.user_indexer.as_hash_tree(), - self.contract_indexer.as_hash_tree(), - ), - ) + todo!() } } diff --git a/common/src/bucket_lookup_table.rs b/common/src/bucket_lookup_table.rs deleted file mode 100644 index d664845..0000000 --- a/common/src/bucket_lookup_table.rs +++ /dev/null @@ -1,268 +0,0 @@ -use crate::did::TransactionId; -use ic_certified_map::{AsHashTree, Hash, HashTree, RbTree}; -use ic_kit::Principal; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use sha2::{Digest, Sha256}; - -/// A data structure to store a linear list of buckets, each bucket has a starting offset which -/// is a transaction id and determine the starting range of transactions that this bucket contains, -/// this data structure can be used to answer the question: "Which bucket contains transaction N?" -/// and also issue a witness proving the result. 
-#[derive(Default)] -pub struct BucketLookupTable { - data: Vec<(TransactionId, Principal)>, - certified_map: RbTree, -} - -struct TransactionIdKey([u8; 8]); - -impl From for TransactionIdKey { - #[inline(always)] - fn from(n: TransactionId) -> Self { - TransactionIdKey(n.to_be_bytes()) - } -} - -impl AsRef<[u8]> for TransactionIdKey { - #[inline(always)] - fn as_ref(&self) -> &[u8] { - &self.0 - } -} - -impl BucketLookupTable { - /// Insert a new bucket to the list of buckets. - /// - /// # Panics - /// If the provided transaction id is not larger than the previous starting offset. - #[inline(always)] - pub fn insert(&mut self, starting_offset: TransactionId, canister: Principal) { - if !self.data.is_empty() { - let ending_offset = self.data[self.data.len() - 1].0; - assert!(starting_offset > ending_offset, "Invalid starting offset."); - } - - let mut h = Sha256::new(); - h.update(canister.as_slice()); - let hash = h.finalize().into(); - self.data.push((starting_offset, canister)); - self.certified_map.insert(starting_offset.into(), hash); - } - - /// Remove the last bucket from the list, and return the data that was associated with it. - pub fn pop(&mut self) -> Option<(TransactionId, Principal)> { - let data = self.data.pop(); - - if let Some((id, _)) = &data { - let id = TransactionIdKey::from(*id); - self.certified_map.delete(id.as_ref()); - } - - data - } - - /// Return the bucket that should contain the given offset. - /// - /// # Panics - /// If the offset provided is smaller than the smallest offset in the buckets. This implies - /// that this method will also panic if there are no buckets inserted yet. - #[inline] - pub fn get_bucket_for(&self, offset: TransactionId) -> &Principal { - match self.data.binary_search_by(|probe| probe.0.cmp(&offset)) { - Ok(index) => &self.data[index].1, - Err(0) => panic!("Given offset is smaller than the starting offset of the chain."), - Err(index) => &self.data[index - 1].1, - } - } - - /// Generate the HashTree witness for that proves the result returned from `get_bucket_for` - /// method. 
- #[inline] - pub fn gen_witness(&self, offset: TransactionId) -> HashTree { - self.certified_map - .witness(TransactionIdKey::from(offset).as_ref()) - } -} - -impl AsHashTree for BucketLookupTable { - #[inline(always)] - fn root_hash(&self) -> Hash { - self.certified_map.root_hash() - } - - #[inline(always)] - fn as_hash_tree(&self) -> HashTree<'_> { - self.certified_map.as_hash_tree() - } -} - -impl Serialize for BucketLookupTable { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - self.data.serialize(serializer) - } -} - -impl<'de> Deserialize<'de> for BucketLookupTable { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - type T = Vec<(TransactionId, Principal)>; - let data = T::deserialize(deserializer)?; - let mut certified_map = RbTree::new(); - - for (id, principal) in &data { - let mut h = Sha256::new(); - h.update(principal.as_slice()); - let hash = h.finalize().into(); - - certified_map.insert(TransactionIdKey::from(*id), hash); - } - - Ok(Self { - data, - certified_map, - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use ic_kit::mock_principals; - - #[test] - fn lookup() { - let mut table = BucketLookupTable::default(); - table.insert(0, mock_principals::bob()); - table.insert(500, mock_principals::alice()); - table.insert(750, mock_principals::john()); - - assert_eq!(table.get_bucket_for(0), &mock_principals::bob()); - assert_eq!(table.get_bucket_for(50), &mock_principals::bob()); - assert_eq!(table.get_bucket_for(150), &mock_principals::bob()); - assert_eq!(table.get_bucket_for(499), &mock_principals::bob()); - assert_eq!(table.get_bucket_for(500), &mock_principals::alice()); - assert_eq!(table.get_bucket_for(600), &mock_principals::alice()); - assert_eq!(table.get_bucket_for(749), &mock_principals::alice()); - assert_eq!(table.get_bucket_for(750), &mock_principals::john()); - assert_eq!(table.get_bucket_for(751), &mock_principals::john()); - assert_eq!(table.get_bucket_for(10000), &mock_principals::john()); - } - - #[test] - #[should_panic] - fn lookup_before() { - let mut table = BucketLookupTable::default(); - table.insert(100, mock_principals::bob()); - table.insert(500, mock_principals::alice()); - table.insert(750, mock_principals::john()); - - table.get_bucket_for(10); - } - - #[test] - #[should_panic] - fn lookup_empty() { - let table = BucketLookupTable::default(); - table.get_bucket_for(0); - } - - #[test] - #[should_panic] - fn invalid_start_position() { - let mut table = BucketLookupTable::default(); - table.insert(100, mock_principals::bob()); - table.insert(50, mock_principals::alice()); - } - - #[test] - fn pop() { - let mut table = BucketLookupTable::default(); - table.insert(0, mock_principals::bob()); - table.insert(500, mock_principals::alice()); - table.insert(750, mock_principals::john()); - - assert_eq!(table.pop(), Some((750, mock_principals::john()))); - - let id = TransactionIdKey::from(750); - assert_eq!(table.certified_map.get(id.as_ref()), None); - - table.insert(600, mock_principals::xtc()); - assert_eq!(table.get_bucket_for(599), &mock_principals::alice()); - assert_eq!(table.get_bucket_for(600), &mock_principals::xtc()); - } - - #[test] - fn witness() { - let mut table = BucketLookupTable::default(); - - table.insert(0, mock_principals::bob()); - let hash_0 = table.root_hash(); - assert_eq!(table.gen_witness(0).reconstruct(), hash_0); - assert_eq!(table.gen_witness(10).reconstruct(), hash_0); - - table.insert(500, mock_principals::alice()); - let hash_500 = table.root_hash(); - 
assert_ne!(hash_0, hash_500, "Hash of the table should change."); - - assert_eq!(table.gen_witness(0).reconstruct(), hash_500); - assert_eq!(table.gen_witness(10).reconstruct(), hash_500); - assert_eq!(table.gen_witness(499).reconstruct(), hash_500); - assert_eq!(table.gen_witness(500).reconstruct(), hash_500); - assert_eq!(table.gen_witness(501).reconstruct(), hash_500); - - // The same table should have the same hash. - table.pop(); - assert_eq!(hash_0, table.root_hash()); - table.insert(500, mock_principals::alice()); - - table.insert(750, mock_principals::john()); - let hash_750 = table.root_hash(); - assert_ne!(hash_0, hash_750, "Hash of the table should change."); - assert_ne!(hash_500, hash_750, "Hash of the table should change."); - - assert_eq!(table.gen_witness(0).reconstruct(), hash_750); - assert_eq!(table.gen_witness(10).reconstruct(), hash_750); - assert_eq!(table.gen_witness(499).reconstruct(), hash_750); - assert_eq!(table.gen_witness(500).reconstruct(), hash_750); - assert_eq!(table.gen_witness(501).reconstruct(), hash_750); - assert_eq!(table.gen_witness(749).reconstruct(), hash_750); - assert_eq!(table.gen_witness(750).reconstruct(), hash_750); - assert_eq!(table.gen_witness(751).reconstruct(), hash_750); - } - - #[test] - fn serde() { - let mut table = BucketLookupTable::default(); - table.insert(0, mock_principals::bob()); - table.insert(500, mock_principals::alice()); - table.insert(750, mock_principals::john()); - - let expected_hashtree = table.gen_witness(730); - - let encoded = serde_cbor::to_vec(&table).expect("Failed to serialize."); - let table = - serde_cbor::from_slice::(&encoded).expect("Failed to deserialize."); - - assert_eq!(table.get_bucket_for(0), &mock_principals::bob()); - assert_eq!(table.get_bucket_for(50), &mock_principals::bob()); - assert_eq!(table.get_bucket_for(150), &mock_principals::bob()); - assert_eq!(table.get_bucket_for(499), &mock_principals::bob()); - assert_eq!(table.get_bucket_for(500), &mock_principals::alice()); - assert_eq!(table.get_bucket_for(600), &mock_principals::alice()); - assert_eq!(table.get_bucket_for(749), &mock_principals::alice()); - assert_eq!(table.get_bucket_for(750), &mock_principals::john()); - assert_eq!(table.get_bucket_for(751), &mock_principals::john()); - assert_eq!(table.get_bucket_for(10000), &mock_principals::john()); - - let actual_hashtree = table.gen_witness(730); - assert_eq!( - format!("{:?}", actual_hashtree), - format!("{:?}", expected_hashtree) - ); - } -} diff --git a/common/src/canister_list.rs b/common/src/canister_list.rs deleted file mode 100644 index 739c080..0000000 --- a/common/src/canister_list.rs +++ /dev/null @@ -1,76 +0,0 @@ -use ic_certified_map::{AsHashTree, Hash, HashTree}; -use ic_kit::Principal; -use serde::{Deserialize, Serialize}; -use sha2::{Digest, Sha256}; - -/// An array of Canister IDs with incremental hashing, this can be used as a leaf node in a -/// certified RbTree. -#[derive(Default, Deserialize, Serialize)] -pub struct CanisterList { - data: Vec, - hash: Hash, -} - -impl CanisterList { - pub fn new() -> Self { - Self::default() - } - - /// Insert the given principal id to the list, and update the hash. - #[inline] - pub fn push(&mut self, id: Principal) { - let mut h = Sha256::new(); - h.update(&self.hash); - h.update(id.as_slice()); - self.hash = h.finalize().into(); - self.data.push(id); - } - - /// Return the list as slice. - #[inline(always)] - pub fn as_slice(&self) -> &[Principal] { - self.data.as_slice() - } - - /// Return the list as a vector. 
- #[inline(always)] - pub fn to_vec(&self) -> Vec { - self.data.clone() - } -} - -impl AsHashTree for CanisterList { - #[inline(always)] - fn root_hash(&self) -> Hash { - self.hash.root_hash() - } - - #[inline(always)] - fn as_hash_tree(&self) -> HashTree<'_> { - self.hash.as_hash_tree() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use ic_kit::mock_principals; - - #[test] - fn push() { - let mut list = CanisterList::new(); - assert_eq!(list.hash, [0; 32]); - - list.push(mock_principals::alice()); - let hash1 = list.hash; - - list.push(mock_principals::bob()); - let hash2 = list.hash; - - assert_ne!(hash1, hash2); - assert_eq!( - list.to_vec(), - vec![mock_principals::alice(), mock_principals::bob()] - ); - } -} diff --git a/common/src/canister_map.rs b/common/src/canister_map.rs deleted file mode 100644 index 5b7fe44..0000000 --- a/common/src/canister_map.rs +++ /dev/null @@ -1,117 +0,0 @@ -use ic_certified_map::HashTree::Leaf; -use ic_certified_map::{leaf_hash, AsHashTree, Hash, HashTree, RbTree}; -use ic_kit::Principal; -use serde::de::{MapAccess, Visitor}; -use serde::ser::SerializeMap; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use std::fmt::Formatter; - -/// A data structure that maps a canister id to another canister id and -#[derive(Default)] -pub struct CanisterMap { - data: RbTree, -} - -struct PrincipalBytes(Principal); - -impl From for PrincipalBytes { - #[inline] - fn from(p: Principal) -> Self { - Self(p) - } -} - -impl AsHashTree for PrincipalBytes { - #[inline] - fn root_hash(&self) -> Hash { - leaf_hash(self.0.as_ref()) - } - - #[inline] - fn as_hash_tree(&self) -> HashTree<'_> { - Leaf(self.0.as_ref()) - } -} - -impl CanisterMap { - /// Insert the given relation into the map. - #[inline] - pub fn insert(&mut self, key: Principal, value: Principal) { - self.data.insert(key, value.into()); - } - - /// Return the principal id associated with the given principal id. - #[inline] - pub fn get(&self, key: &Principal) -> Option<&Principal> { - match self.data.get(key.as_ref()) { - Some(bytes) => Some(&bytes.0), - None => None, - } - } - - /// Create a HashTree witness for the value associated with the given key. - #[inline] - pub fn gen_witness(&self, key: &Principal) -> HashTree { - self.data.witness(key.as_ref()) - } -} - -impl AsHashTree for CanisterMap { - fn root_hash(&self) -> Hash { - self.data.root_hash() - } - - fn as_hash_tree(&self) -> HashTree<'_> { - self.data.as_hash_tree() - } -} - -impl Serialize for CanisterMap { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let mut s = serializer.serialize_map(None)?; - - self.data.for_each(|key, value| { - s.serialize_entry(key, value.0.as_ref()) - .expect("Serialization failed."); - }); - - s.end() - } -} - -impl<'de> Deserialize<'de> for CanisterMap { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_map(CanisterMapVisitor) - } -} - -struct CanisterMapVisitor; - -impl<'de> Visitor<'de> for CanisterMapVisitor { - type Value = CanisterMap; - - fn expecting(&self, formatter: &mut Formatter) -> std::fmt::Result { - write!(formatter, "a map of principal id to principal id") - } - - fn visit_map(self, mut map: A) -> Result - where - A: MapAccess<'de>, - { - let mut data = CanisterMap::default(); - - while let Some((key, value)) = map.next_entry::, Vec>()? 
{ - let key = Principal::from_slice(&key); - let value = Principal::from_slice(&value); - data.insert(key, value); - } - - Ok(data) - } -} diff --git a/common/src/did.rs b/common/src/did.rs index ebb4553..ab7e1b0 100644 --- a/common/src/did.rs +++ b/common/src/did.rs @@ -2,7 +2,7 @@ //! files across the different canisters and the services. use crate::transaction::Event; -use ic_certified_map::{Hash, HashTree}; +use certified_vars::{Hash, HashTree}; use ic_kit::candid::{CandidType, Deserialize}; use ic_kit::ic; use ic_kit::Principal; diff --git a/common/src/index.rs b/common/src/index.rs deleted file mode 100644 index c9950ac..0000000 --- a/common/src/index.rs +++ /dev/null @@ -1,154 +0,0 @@ -use crate::transaction::Event; -use ic_certified_map::{AsHashTree, Hash, HashTree, RbTree}; -use ic_kit::Principal; -use sha2::{Digest, Sha256}; -use std::collections::BTreeMap; -use std::ptr::NonNull; - -/// How many Transaction IDs per page. -pub const PAGE_CAPACITY: usize = 64; - -/// Type used for representing the page number. -type PageNumber = u32; - -#[derive(Default)] -pub struct Index { - pager: BTreeMap, - data: RbTree, -} - -/// The key in the indexer which points to a page for a principal id. -/// structure: -/// u8 Principal length -/// u8;29 Principal inner -/// u8;4 Page number, u32 as Big Endian -struct IndexKey([u8; 34]); - -#[derive(Default)] -struct IndexPage { - data: Vec>, - hash: Hash, -} - -impl Index { - /// Insert a new transaction into the lookup table of the given principal id. - /// The second parameter should be the hash of the passed event. - pub fn insert(&mut self, principal: &Principal, event: NonNull, hash: &Hash) { - let mut inserted = false; - - let next_page = if let Some(&page_no) = self.pager.get(principal) { - let key = IndexKey::new(principal, page_no); - - self.data.modify(key.as_ref(), |page| { - inserted = page.insert(event, hash); - }); - - page_no + 1 - } else { - 0 - }; - - // Create a new page. - if !inserted { - let mut page = IndexPage::default(); - page.insert(event, hash); - - let key = IndexKey::new(principal, next_page); - self.data.insert(key, page); - self.pager.insert(*principal, next_page); - } - } - - /// Create a witness proving the data returned by get. - #[inline] - pub fn witness(&self, principal: &Principal, page: u32) -> HashTree { - let key = IndexKey::new(principal, page); - self.data.witness(key.as_ref()) - } - - /// Return the data associated with the given page. - #[inline] - pub fn get(&self, principal: &Principal, page: u32) -> Option<&Vec>> { - let key = IndexKey::new(principal, page); - if let Some(page) = self.data.get(key.as_ref()) { - Some(&page.data) - } else { - None - } - } - - /// Returns the last page associated with the principal id. - #[inline] - pub fn last_page(&self, principal: &Principal) -> u32 { - self.pager.get(principal).cloned().unwrap_or(0) - } -} - -impl AsHashTree for Index { - #[inline(always)] - fn root_hash(&self) -> Hash { - self.data.root_hash() - } - - #[inline(always)] - fn as_hash_tree(&self) -> HashTree<'_> { - self.data.as_hash_tree() - } -} - -impl IndexKey { - /// Construct a new index-key from a principal id and a page number. 
- #[inline(always)] - pub fn new(principal: &Principal, page: PageNumber) -> Self { - let mut buffer = [0u8; 34]; - let principal_slice = principal.as_slice(); - let page_slice = page.to_be_bytes(); - - buffer[0] = principal_slice.len() as u8; - - buffer[1..(principal_slice.len() + 1)].clone_from_slice(principal_slice); - - buffer[30..(4 + 30)].clone_from_slice(&page_slice[..4]); - - IndexKey(buffer) - } -} - -impl AsRef<[u8]> for IndexKey { - #[inline(always)] - fn as_ref(&self) -> &[u8] { - &self.0 - } -} - -impl IndexPage { - /// Try to insert a local transaction id into the page, returns the success status. - #[inline] - pub fn insert(&mut self, event: NonNull, hash: &Hash) -> bool { - if self.data.len() == PAGE_CAPACITY { - return false; - } - - self.data.push(event); - - // Compute the new hash. - let mut h = Sha256::new(); - h.update(&self.hash); - h.update(hash); - self.hash = h.finalize().into(); - - true - } -} - -impl AsHashTree for IndexPage { - #[inline(always)] - fn root_hash(&self) -> Hash { - self.hash.root_hash() - } - - #[inline(always)] - fn as_hash_tree(&self) -> HashTree<'_> { - self.hash.as_hash_tree() - } -} diff --git a/common/src/lib.rs b/common/src/lib.rs index e15fd3f..de8130a 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -1,13 +1,8 @@ #![allow(clippy::from_over_into)] pub mod bucket; -pub mod bucket_lookup_table; -pub mod canister_list; -pub mod canister_map; pub mod did; -pub mod index; pub mod transaction; -pub mod user_canisters; pub use bucket::Bucket; pub use did::*; diff --git a/common/src/transaction.rs b/common/src/transaction.rs index e7936df..0e28d1b 100644 --- a/common/src/transaction.rs +++ b/common/src/transaction.rs @@ -1,4 +1,6 @@ use crate::did::EventHash; +use certified_vars::HashTree::Pruned; +use certified_vars::{AsHashTree, Hash, HashTree}; use ic_kit::candid::{CandidType, Deserialize, Nat}; use ic_kit::Principal; use serde::Serialize; @@ -40,6 +42,7 @@ pub enum DetailValue { #[serde(with = "serde_bytes")] Slice(Vec), Vec(Vec), + TokenIdU64(u64), } impl Event { @@ -71,6 +74,32 @@ impl Event { principals } + /// Return a set containing all of the token ids involved in an event. + #[inline] + pub fn extract_token_ids(&self) -> BTreeSet { + let mut tokens = BTreeSet::new(); + + fn visit(tokens: &mut BTreeSet, value: &DetailValue) { + match value { + DetailValue::TokenIdU64(id) => { + tokens.insert(*id); + } + DetailValue::Vec(items) => { + for item in items { + visit(tokens, item); + } + } + _ => {} + } + } + + for (_, value) in &self.details { + visit(&mut tokens, value); + } + + tokens + } + /// Compute the hash for the given event. 
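    /// The digest is domain-separated by the operation name; the new
    /// `TokenIdU64` detail value is folded in as a tag byte followed by its
    /// length-prefixed big-endian bytes.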
pub fn hash(&self) -> EventHash { let mut h = domain_sep(&self.operation); @@ -131,6 +160,12 @@ impl Event { hash_value(h, item); } } + DetailValue::TokenIdU64(val) => { + let bytes = val.to_be_bytes(); + h.update(&[9]); + h.update(&bytes.len().to_be_bytes() as &[u8]); + h.update(bytes); + } } } @@ -291,4 +326,14 @@ fn domain_sep(s: &str) -> sha2::Sha256 { h } +impl AsHashTree for Event { + fn root_hash(&self) -> Hash { + self.hash() + } + + fn as_hash_tree(&self) -> HashTree<'_> { + Pruned(self.hash()) + } +} + // TODO(qti3e) Test diff --git a/common/src/user_canisters.rs b/common/src/user_canisters.rs deleted file mode 100644 index f5b893f..0000000 --- a/common/src/user_canisters.rs +++ /dev/null @@ -1,141 +0,0 @@ -use crate::canister_list::CanisterList; -use crate::{RootBucketId, UserId}; -use ic_certified_map::{AsHashTree, Hash, HashTree, RbTree}; -use ic_kit::Principal; -use serde::de::{MapAccess, Visitor}; -use serde::ser::SerializeMap; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use std::fmt::Formatter; - -#[derive(Default)] -pub struct UserCanisters { - data: RbTree, - len: usize, -} - -impl UserCanisters { - /// Insert the RootBucketId of a token contract to a user's list. - pub fn insert(&mut self, user: UserId, canister: RootBucketId) { - let mut modified = false; - self.data.modify(user.as_ref(), |list| { - list.push(canister); - modified = true; - }); - if !modified { - let mut list = CanisterList::new(); - list.push(canister); - self.data.insert(user, list); - self.len += 1; - } - } - - /// Return the list of canisters associated with a user. - pub fn get(&self, user: &UserId) -> &[RootBucketId] { - self.data - .get(user.as_ref()) - .map(|l| l.as_slice()) - .unwrap_or_default() - } - - /// Generate the HashTree witness for the `get` call. - pub fn witness(&self, user: &UserId) -> HashTree { - self.data.witness(user.as_ref()) - } -} - -impl AsHashTree for UserCanisters { - #[inline(always)] - fn root_hash(&self) -> Hash { - self.data.root_hash() - } - - #[inline(always)] - fn as_hash_tree(&self) -> HashTree<'_> { - self.data.as_hash_tree() - } -} - -impl Serialize for UserCanisters { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let mut s = serializer.serialize_map(Some(self.len))?; - - self.data.for_each(|key, value| { - s.serialize_entry(key, value) - .expect("Serialization failed."); - }); - - s.end() - } -} - -impl<'de> Deserialize<'de> for UserCanisters { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - deserializer.deserialize_map(UserCanistersVisitor) - } -} - -struct UserCanistersVisitor; - -impl<'de> Visitor<'de> for UserCanistersVisitor { - type Value = UserCanisters; - - fn expecting(&self, formatter: &mut Formatter) -> std::fmt::Result { - write!(formatter, "expected a map") - } - - fn visit_map(self, mut map: A) -> Result - where - A: MapAccess<'de>, - { - let mut data = RbTree::default(); - let mut len = 0; - - loop { - if let Some((key, value)) = map.next_entry::, CanisterList>()? 
{ - let principal = Principal::from_slice(&key); - data.insert(principal, value); - len += 1; - continue; - } - - break; - } - - Ok(UserCanisters { data, len }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use ic_kit::mock_principals; - - #[test] - fn serde() { - let mut data = UserCanisters::default(); - data.insert(mock_principals::alice(), mock_principals::xtc()); - data.insert(mock_principals::alice(), mock_principals::bob()); - data.insert(mock_principals::john(), mock_principals::alice()); - data.insert(mock_principals::john(), mock_principals::xtc()); - data.insert(mock_principals::john(), mock_principals::bob()); - - let serialized = serde_cbor::to_vec(&data).expect("Failed to serialize"); - let actual = - serde_cbor::from_slice::(&serialized).expect("Failed to deserialize"); - - assert_eq!( - actual.get(&mock_principals::alice()), - data.get(&mock_principals::alice()) - ); - assert_eq!( - actual.get(&mock_principals::john()), - data.get(&mock_principals::john()) - ); - } -} From 9464566fac367feb5260b6e7b25e3d15967b6994 Mon Sep 17 00:00:00 2001 From: qti3e Date: Fri, 11 Feb 2022 18:28:49 +0330 Subject: [PATCH 03/10] implement bucket's hashtree witness generation and index the tokens by id --- common/src/bucket.rs | 136 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 123 insertions(+), 13 deletions(-) diff --git a/common/src/bucket.rs b/common/src/bucket.rs index c60d02e..fb7bb35 100644 --- a/common/src/bucket.rs +++ b/common/src/bucket.rs @@ -1,6 +1,7 @@ use crate::transaction::Event; -use certified_vars::Paged; +use certified_vars::hashtree::{fork, fork_hash}; use certified_vars::{rbtree::RbTree, AsHashTree, Hash, HashTree}; +use certified_vars::{GroupBuilder, Paged}; use ic_kit::Principal; use serde::ser::{SerializeSeq, SerializeTuple}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -15,7 +16,8 @@ use std::ptr::NonNull; /// 0: event_hashes /// 1: offset /// 3: user_indexer -/// 4: token_indexer +/// 4: contract_indexer +/// 5: token_indexer /// /// ```text /// ROOT @@ -23,7 +25,9 @@ use std::ptr::NonNull; /// / \ /// V V /// / \ / \ -/// 0 1 3 4 +/// 0 1 3 V +/// / \ +/// 4 5 /// ``` pub struct Bucket { /// Map each local Transaction ID to its hash. @@ -32,14 +36,12 @@ pub struct Bucket { contract: Principal, /// The offset of this bucket, i.e the actual id of the first event in the bucket. global_offset: u64, - /// Same as `global_offset` but is the encoded big endian, this struct should own this data - /// since it is used in the HashTree, so whenever we want to pass a reference to a BE encoded - /// value of the `global_offset` we can use this slice. - global_offset_be: [u8; 8], /// Maps each user principal id to the vector of events they have. user_indexer: Paged, 64>, /// Maps contract id to each transaction page. contract_indexer: Paged, 64>, + /// Map each token id to a map of transactions for that token. + token_indexer: Paged, 64>, /// All of the events in this bucket, we store a pointer to an allocated memory. Which is used /// only internally in this struct. And this Vec should be considered the actual owner of this /// pointers. 
@@ -56,9 +58,9 @@ impl Bucket { contract, event_hashes: RbTree::new(), global_offset: offset, - global_offset_be: offset.to_be_bytes(), user_indexer: Paged::new(), contract_indexer: Paged::new(), + token_indexer: Paged::new(), } } @@ -92,6 +94,9 @@ impl Bucket { for user in eve.extract_principal_ids() { self.user_indexer.insert(*user, event); } + for token_id in eve.extract_token_ids() { + self.token_indexer.insert(token_id, event); + } // Insert the event itself. self.event_hashes.insert(local_index, hash); @@ -136,16 +141,75 @@ impl Bucket { .unwrap_or(0) as u32 } + /// Return the transactions for a specific token. + #[inline] + pub fn get_transactions_for_token(&self, token_id: &u64, page: u32) -> Vec<&Event> { + if let Some(data) = self.token_indexer.get(token_id, page as usize) { + data.iter().map(|v| unsafe { v.as_ref() }).collect() + } else { + vec![] + } + } + + #[inline] + pub fn last_page_for_token(&self, token_id: &u64) -> u32 { + self.token_indexer + .get_last_page_number(token_id) + .unwrap_or(0) as u32 + } + /// Return the witness that can be used to prove the response from get_transactions_for_user. #[inline] pub fn witness_transactions_for_user(&self, principal: &Principal, page: u32) -> HashTree { - todo!() + fork( + HashTree::Pruned(fork_hash( + &self.event_hashes.root_hash(), + &self.global_offset.root_hash(), + )), + fork( + self.user_indexer.witness(principal, page as usize), + HashTree::Pruned(fork_hash( + &self.contract_indexer.root_hash(), + &self.token_indexer.root_hash(), + )), + ), + ) } /// Return the witness that can be used to prove the response from get_transactions_for_token. #[inline] pub fn witness_transactions_for_contract(&self, principal: &Principal, page: u32) -> HashTree { - todo!() + fork( + HashTree::Pruned(fork_hash( + &self.event_hashes.root_hash(), + &self.global_offset.root_hash(), + )), + fork( + HashTree::Pruned(self.user_indexer.root_hash()), + fork( + self.contract_indexer.witness(principal, page as usize), + HashTree::Pruned(self.token_indexer.root_hash()), + ), + ), + ) + } + + /// Return the witness that can be used to prove the response from get_transactions_for_token. + #[inline] + pub fn witness_transactions_for_token(&self, token_id: &u64, page: u32) -> HashTree { + fork( + HashTree::Pruned(fork_hash( + &self.event_hashes.root_hash(), + &self.global_offset.root_hash(), + )), + fork( + HashTree::Pruned(self.user_indexer.root_hash()), + fork( + HashTree::Pruned(self.contract_indexer.root_hash()), + self.token_indexer.witness(token_id, page as usize), + ), + ), + ) } /// Return a transaction by its global id. @@ -166,17 +230,63 @@ impl Bucket { /// Return a witness which proves the response returned by get_transaction. 
#[inline] pub fn witness_transaction(&self, id: u64) -> HashTree { - todo!() + let left = if id < self.global_offset { + fork( + HashTree::Pruned(self.event_hashes.root_hash()), + self.global_offset.as_hash_tree(), + ) + } else { + let local = (id - self.global_offset) as u32; + fork( + self.event_hashes.witness(&local), + self.global_offset.as_hash_tree(), + ) + }; + + fork( + left, + HashTree::Pruned(fork_hash( + &self.user_indexer.root_hash(), + &fork_hash( + &self.contract_indexer.root_hash(), + &self.token_indexer.root_hash(), + ), + )), + ) } } impl AsHashTree for Bucket { fn root_hash(&self) -> Hash { - todo!() + fork_hash( + &fork_hash( + &self.event_hashes.root_hash(), + &self.global_offset.root_hash(), + ), + &fork_hash( + &self.user_indexer.root_hash(), + &fork_hash( + &self.contract_indexer.root_hash(), + &self.token_indexer.root_hash(), + ), + ), + ) } fn as_hash_tree(&self) -> HashTree<'_> { - todo!() + fork( + fork( + self.event_hashes.as_hash_tree(), + self.global_offset.as_hash_tree(), + ), + fork( + self.user_indexer.as_hash_tree(), + fork( + self.contract_indexer.as_hash_tree(), + self.token_indexer.as_hash_tree(), + ), + ), + ) } } From 40148ce319f1c4ad7db926574eb2b09620fb54f6 Mon Sep 17 00:00:00 2001 From: qti3e Date: Mon, 14 Feb 2022 06:50:04 +0330 Subject: [PATCH 04/10] add get_token_transactions query on root --- candid/root.did | 9 +++++++++ candid/sdk_example.did | 1 + canisters/root/src/lib.rs | 35 +++++++++++++++++++++++++++++++++++ common/src/did.rs | 7 +++++++ 4 files changed, 52 insertions(+) diff --git a/candid/root.did b/candid/root.did index 244a3a5..fe8dc5d 100644 --- a/candid/root.did +++ b/candid/root.did @@ -3,6 +3,7 @@ type DetailValue = variant { U64 : nat64; Vec : vec DetailValue; Slice : vec nat8; + TokenIdU64 : nat64; Text : text; True; False; @@ -20,6 +21,11 @@ type GetNextCanistersResponse = record { witness : opt Witness; canisters : vec principal; }; +type GetTokenTransactionsArg = record { + token_id : nat64; + page : opt nat32; + witness : bool; +}; type GetTransactionResponse = variant { Delegate : record { principal; opt Witness }; Found : record { opt Event; opt Witness }; @@ -48,6 +54,9 @@ service : { contract_id : () -> (principal) query; get_bucket_for : (WithIdArg) -> (GetBucketResponse) query; get_next_canisters : (WithWitnessArg) -> (GetNextCanistersResponse) query; + get_token_transactions : (GetTokenTransactionsArg) -> ( + GetTransactionsResponseBorrowed, + ) query; get_transaction : (WithIdArg) -> (GetTransactionResponse) query; get_transactions : (GetTransactionsArg) -> ( GetTransactionsResponseBorrowed, diff --git a/candid/sdk_example.did b/candid/sdk_example.did index 9c60f9f..74ea5cb 100644 --- a/candid/sdk_example.did +++ b/candid/sdk_example.did @@ -3,6 +3,7 @@ type DetailValue = variant { U64 : nat64; Vec : vec DetailValue; Slice : vec nat8; + TokenIdU64 : nat64; Text : text; True; False; diff --git a/canisters/root/src/lib.rs b/canisters/root/src/lib.rs index 8c9ceaf..a48ca2b 100644 --- a/canisters/root/src/lib.rs +++ b/canisters/root/src/lib.rs @@ -181,6 +181,41 @@ fn get_user_transactions(arg: GetUserTransactionsArg) -> GetTransactionsResponse } } +#[query] +#[candid_method(query)] +fn get_token_transactions( + arg: GetTokenTransactionsArg, +) -> GetTransactionsResponseBorrowed<'static> { + let data = ic::get::(); + + let page = arg + .page + .unwrap_or_else(|| data.bucket.last_page_for_token(&arg.token_id)); + + let witness = match arg.witness { + false => None, + true => Some( + fork( + fork( + data.bucket + 
.witness_transactions_for_token(&arg.token_id, page), + HashTree::Pruned(data.buckets.root_hash()), + ), + HashTree::Pruned(data.next_canisters.root_hash()), + ) + .into(), + ), + }; + + let events = data.bucket.get_transactions_for_token(&arg.token_id, page); + + GetTransactionsResponseBorrowed { + data: events, + page, + witness, + } +} + #[query] #[candid_method(query)] fn get_bucket_for(arg: WithIdArg) -> GetBucketResponse { diff --git a/common/src/did.rs b/common/src/did.rs index ab7e1b0..a27df59 100644 --- a/common/src/did.rs +++ b/common/src/did.rs @@ -117,6 +117,13 @@ pub struct GetUserTransactionsArg { pub witness: bool, } +#[derive(Serialize, Deserialize, CandidType)] +pub struct GetTokenTransactionsArg { + pub token_id: u64, + pub page: Option, + pub witness: bool, +} + #[derive(Serialize, Deserialize, CandidType)] pub struct GetBucketResponse { pub canister: BucketId, From 1aa32c5daa56eaf309bdef119ebfbfdeaa1105d2 Mon Sep 17 00:00:00 2001 From: qti3e Date: Tue, 15 Feb 2022 18:33:03 +0330 Subject: [PATCH 05/10] clippy fixes --- canisters/root/src/upgrade.rs | 2 +- canisters/router/src/upgrade.rs | 2 +- common/src/bucket.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/canisters/root/src/upgrade.rs b/canisters/root/src/upgrade.rs index 489cdf8..eea3894 100644 --- a/canisters/root/src/upgrade.rs +++ b/canisters/root/src/upgrade.rs @@ -5,7 +5,7 @@ use certified_vars::{Hash, Map, Seq}; use ic_cdk::api::stable::{StableReader, StableWriter}; use ic_kit::macros::{post_upgrade, pre_upgrade}; use ic_kit::{ic, Principal}; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize}; use std::collections::BTreeSet; use std::io::Read; diff --git a/canisters/router/src/upgrade.rs b/canisters/router/src/upgrade.rs index 6cef45e..f0e00bc 100644 --- a/canisters/router/src/upgrade.rs +++ b/canisters/router/src/upgrade.rs @@ -1,7 +1,7 @@ use crate::{get_user_root_buckets, Data}; use cap_common::{GetUserRootBucketsArg, RootBucketId}; use certified_vars::{Hash, Seq}; -use ic_cdk::api::stable::{StableReader, StableWriter}; +use ic_cdk::api::stable::{StableReader}; use ic_kit::candid::{candid_method, encode_args}; use ic_kit::ic; use ic_kit::macros::{post_upgrade, pre_upgrade, update}; diff --git a/common/src/bucket.rs b/common/src/bucket.rs index fb7bb35..d63607e 100644 --- a/common/src/bucket.rs +++ b/common/src/bucket.rs @@ -1,7 +1,7 @@ use crate::transaction::Event; use certified_vars::hashtree::{fork, fork_hash}; use certified_vars::{rbtree::RbTree, AsHashTree, Hash, HashTree}; -use certified_vars::{GroupBuilder, Paged}; +use certified_vars::{Paged}; use ic_kit::Principal; use serde::ser::{SerializeSeq, SerializeTuple}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; From 668dbf4422bbf17ef49b61e2044b467d20c17598 Mon Sep 17 00:00:00 2001 From: qti3e Date: Tue, 15 Feb 2022 18:35:37 +0330 Subject: [PATCH 06/10] cargo fmt --- canisters/root/src/upgrade.rs | 2 +- canisters/router/src/upgrade.rs | 2 +- common/src/bucket.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/canisters/root/src/upgrade.rs b/canisters/root/src/upgrade.rs index eea3894..477100c 100644 --- a/canisters/root/src/upgrade.rs +++ b/canisters/root/src/upgrade.rs @@ -5,7 +5,7 @@ use certified_vars::{Hash, Map, Seq}; use ic_cdk::api::stable::{StableReader, StableWriter}; use ic_kit::macros::{post_upgrade, pre_upgrade}; use ic_kit::{ic, Principal}; -use serde::{Deserialize}; +use serde::Deserialize; use std::collections::BTreeSet; use std::io::Read; diff --git 
a/canisters/router/src/upgrade.rs b/canisters/router/src/upgrade.rs index f0e00bc..59939fb 100644 --- a/canisters/router/src/upgrade.rs +++ b/canisters/router/src/upgrade.rs @@ -1,7 +1,7 @@ use crate::{get_user_root_buckets, Data}; use cap_common::{GetUserRootBucketsArg, RootBucketId}; use certified_vars::{Hash, Seq}; -use ic_cdk::api::stable::{StableReader}; +use ic_cdk::api::stable::StableReader; use ic_kit::candid::{candid_method, encode_args}; use ic_kit::ic; use ic_kit::macros::{post_upgrade, pre_upgrade, update}; diff --git a/common/src/bucket.rs b/common/src/bucket.rs index d63607e..69244c1 100644 --- a/common/src/bucket.rs +++ b/common/src/bucket.rs @@ -1,7 +1,7 @@ use crate::transaction::Event; use certified_vars::hashtree::{fork, fork_hash}; +use certified_vars::Paged; use certified_vars::{rbtree::RbTree, AsHashTree, Hash, HashTree}; -use certified_vars::{Paged}; use ic_kit::Principal; use serde::ser::{SerializeSeq, SerializeTuple}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; From e521d6b2fa366c0c630e6700f02ec6baf2c85a04 Mon Sep 17 00:00:00 2001 From: qti3e Date: Mon, 21 Feb 2022 16:30:31 +0330 Subject: [PATCH 07/10] fix warnings --- canisters/root/src/lib.rs | 2 +- canisters/root/src/upgrade.rs | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/canisters/root/src/lib.rs b/canisters/root/src/lib.rs index a48ca2b..0358819 100644 --- a/canisters/root/src/lib.rs +++ b/canisters/root/src/lib.rs @@ -12,7 +12,7 @@ use std::collections::BTreeSet; use cap_common::did::*; use ic_kit::macros::*; -mod upgrade; +pub mod upgrade; /// Merkle tree of the canister. /// diff --git a/canisters/root/src/upgrade.rs b/canisters/root/src/upgrade.rs index 477100c..470af0f 100644 --- a/canisters/root/src/upgrade.rs +++ b/canisters/root/src/upgrade.rs @@ -12,8 +12,8 @@ use std::io::Read; #[derive(Deserialize)] struct DataV0 { bucket: Vec, - buckets: Vec<(TransactionId, Principal)>, - next_canisters: CanisterListV0, + _buckets: Vec<(TransactionId, Principal)>, + _next_canisters: CanisterListV0, /// List of all the users in this token contract. users: BTreeSet, cap_id: Principal, @@ -24,8 +24,8 @@ struct DataV0 { #[derive(Deserialize)] pub struct CanisterListV0 { - data: Vec, - hash: Hash, + _data: Vec, + _hash: Hash, } #[pre_upgrade] @@ -35,7 +35,7 @@ fn pre_upgrade() { serde_cbor::to_writer(writer, &data).expect("Failed to serialize data."); } -fn next_post_upgrade() { +pub fn next_post_upgrade() { let reader = StableReader::default(); let data: Data = match serde_cbor::from_reader(reader) { Ok(t) => t, From 2b82bd17a1b9602a3b509d6d50b1b9c5f119f713 Mon Sep 17 00:00:00 2001 From: qti3e Date: Mon, 21 Feb 2022 16:33:45 +0330 Subject: [PATCH 08/10] add doc for get_token_transactions --- spec/bucket-interface.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/spec/bucket-interface.md b/spec/bucket-interface.md index ee2764d..e0896d8 100644 --- a/spec/bucket-interface.md +++ b/spec/bucket-interface.md @@ -3,4 +3,9 @@ The bucket interface describes the interface for canisters that can be queried to return a certain portion of transactions for a token contract. This is a read-only interface. -// TODO(qti3e) \ No newline at end of file +// TODO(qti3e) + +## query get_token_transactions + +Returns the transactions for a certain token id. It will return +all the transactions that involve a `TokenIdU64(n)`. 
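As a rough illustration of the call shape (not part of this patch series): the argument and response types below are the ones added to `common/src/did.rs` and `candid/root.did` earlier in the series, while the import path of the `call` helper, the error tuple, and the placeholder bucket principal are assumptions on my part.

```rust
// Sketch only: querying a root bucket's get_token_transactions endpoint from
// another canister, mirroring the call pattern the Rust SDK uses later in
// this series. The import path of `call` is an assumption.
use cap_common::{GetTokenTransactionsArg, GetTransactionsResponse};
use ic_kit::ic::call;
use ic_kit::{Principal, RejectionCode};

/// Fetch the newest page of transactions that involve `TokenIdU64(token_id)`.
async fn token_history(
    bucket: Principal,
    token_id: u64,
) -> Result<GetTransactionsResponse, (RejectionCode, String)> {
    let arg = GetTokenTransactionsArg {
        token_id,
        // A `null` page makes the bucket answer with its last (newest) page,
        // per the unwrap_or_else in canisters/root/src/lib.rs.
        page: None,
        // Set to true to also receive a certification witness.
        witness: false,
    };

    let (response,): (GetTransactionsResponse,) =
        call(bucket, "get_token_transactions", (arg,)).await?;

    Ok(response)
}
```

If `witness` is set to `true`, the caller would also need to verify the returned witness against the canister's certified root hash, which is what the `witness_transactions_for_token` tree in `common/src/bucket.rs` is built for.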
From 6d021545a6b3ef448a56e8c4980d20a2c59f1df4 Mon Sep 17 00:00:00 2001 From: BotchM Date: Mon, 21 Feb 2022 16:15:55 +0200 Subject: [PATCH 09/10] add get_token_transactions func --- sdk/rust/core/src/bucket.rs | 22 +++++++++++++++++++++- sdk/rust/src/lib.rs | 3 +++ sdk/rust/src/token/mod.rs | 2 ++ sdk/rust/src/token/query.rs | 31 +++++++++++++++++++++++++++++++ sdk/rust/src/transactions/mod.rs | 4 ++-- 5 files changed, 59 insertions(+), 3 deletions(-) create mode 100644 sdk/rust/src/token/mod.rs create mode 100644 sdk/rust/src/token/query.rs diff --git a/sdk/rust/core/src/bucket.rs b/sdk/rust/core/src/bucket.rs index 40adbbf..12a6cef 100644 --- a/sdk/rust/core/src/bucket.rs +++ b/sdk/rust/core/src/bucket.rs @@ -4,7 +4,7 @@ use ic_kit::{Principal, RejectionCode}; use crate::root::RootBucket; use cap_common::{ GetIndexCanistersResponse, GetTransactionResponse, GetTransactionsArg, GetTransactionsResponse, - GetUserTransactionsArg, WithIdArg, WithWitnessArg, + GetUserTransactionsArg, WithIdArg, WithWitnessArg, GetTokenTransactionsArg, }; /// A contract-specific bucket canister. @@ -85,6 +85,26 @@ impl Bucket { Ok(result.0) } + + /// Returns paged transactions for a specific [`token_id`] + pub async fn get_token_transactions( + &self, + token_id: u64, + page: Option, + ) -> Result { + let result: (GetTransactionsResponse,) = call( + self.0, + "get_token_transactions", + (GetTokenTransactionsArg { + token_id, + page, + witness: false, + },), + ) + .await?; + + Ok(result.0) + } } impl From for Bucket { diff --git a/sdk/rust/src/lib.rs b/sdk/rust/src/lib.rs index 786c20e..37eaf1f 100644 --- a/sdk/rust/src/lib.rs +++ b/sdk/rust/src/lib.rs @@ -26,6 +26,9 @@ pub use env::*; mod env; +mod token; +pub use token::*; + mod transactions; pub use transactions::*; diff --git a/sdk/rust/src/token/mod.rs b/sdk/rust/src/token/mod.rs new file mode 100644 index 0000000..9b345df --- /dev/null +++ b/sdk/rust/src/token/mod.rs @@ -0,0 +1,2 @@ +mod query; +pub use query::get_token_transactions; \ No newline at end of file diff --git a/sdk/rust/src/token/query.rs b/sdk/rust/src/token/query.rs new file mode 100644 index 0000000..187b38f --- /dev/null +++ b/sdk/rust/src/token/query.rs @@ -0,0 +1,31 @@ +use cap_sdk_core::Bucket; + +use crate::{AsTransactionsPage, CapEnv, GetTransactionsError, GetTransactionsResponse}; + +/// Gets the transaction with the given token_id [`u64`] +/// and `page` accepts any [`Into`]. +/// +/// # Panics +/// Panics if cap is using a multi-canister system, as it +/// is currently unsupported. In this **alpha** release. +/// +/// # Examples +/// TODO +pub async fn get_token_transactions( + token_id: u64, + page: impl AsTransactionsPage, +) -> Result { + let context = CapEnv::get().await; + + let as_bucket: Bucket = context.root.into(); + + let transactions = as_bucket + .get_token_transactions(token_id, page.page()) + .await + .map_err(|(code, details)| GetTransactionsError::Unexpected(code, details))?; + + Ok(GetTransactionsResponse { + transactions: transactions.data, + next_page: transactions.page + 1, + }) +} diff --git a/sdk/rust/src/transactions/mod.rs b/sdk/rust/src/transactions/mod.rs index 6568980..3557e93 100644 --- a/sdk/rust/src/transactions/mod.rs +++ b/sdk/rust/src/transactions/mod.rs @@ -15,8 +15,8 @@ pub use user_query::get_user_transactions_page; /// The response given from a [`get_transactions`] call. 
#[derive(Debug, Clone)] pub struct GetTransactionsResponse { - transactions: Vec, - next_page: u32, + pub(crate) transactions: Vec, + pub(crate) next_page: u32, } impl GetTransactionsResponse { From e07cd04d6c92b66331ad301a9d5a28c69e7aacb6 Mon Sep 17 00:00:00 2001 From: BotchM Date: Mon, 21 Feb 2022 16:20:44 +0200 Subject: [PATCH 10/10] cargo fmt --- sdk/rust/core/src/bucket.rs | 4 ++-- sdk/rust/src/token/mod.rs | 2 +- sdk/rust/src/token/query.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sdk/rust/core/src/bucket.rs b/sdk/rust/core/src/bucket.rs index 12a6cef..42af8ae 100644 --- a/sdk/rust/core/src/bucket.rs +++ b/sdk/rust/core/src/bucket.rs @@ -3,8 +3,8 @@ use ic_kit::{Principal, RejectionCode}; use crate::root::RootBucket; use cap_common::{ - GetIndexCanistersResponse, GetTransactionResponse, GetTransactionsArg, GetTransactionsResponse, - GetUserTransactionsArg, WithIdArg, WithWitnessArg, GetTokenTransactionsArg, + GetIndexCanistersResponse, GetTokenTransactionsArg, GetTransactionResponse, GetTransactionsArg, + GetTransactionsResponse, GetUserTransactionsArg, WithIdArg, WithWitnessArg, }; /// A contract-specific bucket canister. diff --git a/sdk/rust/src/token/mod.rs b/sdk/rust/src/token/mod.rs index 9b345df..d7473c1 100644 --- a/sdk/rust/src/token/mod.rs +++ b/sdk/rust/src/token/mod.rs @@ -1,2 +1,2 @@ mod query; -pub use query::get_token_transactions; \ No newline at end of file +pub use query::get_token_transactions; diff --git a/sdk/rust/src/token/query.rs b/sdk/rust/src/token/query.rs index 187b38f..c28098b 100644 --- a/sdk/rust/src/token/query.rs +++ b/sdk/rust/src/token/query.rs @@ -2,7 +2,7 @@ use cap_sdk_core::Bucket; use crate::{AsTransactionsPage, CapEnv, GetTransactionsError, GetTransactionsResponse}; -/// Gets the transaction with the given token_id [`u64`] +/// Gets the transaction with the given token_id [`u64`] /// and `page` accepts any [`Into`]. /// /// # Panics
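The `# Examples` section of this new helper is still a TODO upstream. As a stopgap, here is a minimal, hedged sketch that goes through the lower-level `cap_sdk_core::Bucket::get_token_transactions` added in the same patch, since its page argument is a plain `Option<u32>`; how the `Bucket` handle is obtained and the exact error tuple follow the other bucket queries and are assumptions here.

```rust
// Sketch only: pulling the newest page of TokenIdU64(token_id) transactions
// through the core SDK. Obtaining the `Bucket` handle (e.g. from a root
// bucket lookup) is outside the scope of this example.
use cap_common::GetTransactionsResponse;
use cap_sdk_core::Bucket;
use ic_kit::RejectionCode;

async fn newest_token_page(
    bucket: Bucket,
    token_id: u64,
) -> Result<GetTransactionsResponse, (RejectionCode, String)> {
    // `None` asks the bucket for its last (newest) page for this token id;
    // pass `Some(page)` to walk older pages explicitly.
    bucket.get_token_transactions(token_id, None).await
}
```

Inside a canister that has the Cap environment set up, the higher-level `cap_sdk::get_token_transactions(token_id, page)` wrapper documented above does the same thing and additionally resolves the bucket from `CapEnv`.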