From c8a2d7f0ae2411b5e44a86837bc43dda5256f6be Mon Sep 17 00:00:00 2001 From: sifnoc Date: Thu, 7 Dec 2023 12:15:53 +0000 Subject: [PATCH] chore: update README; move csv folder to root --- .gitignore | 1 - bin/README.md | 2 + {src/orchestrator/csv => csv}/entry_16.csv | 0 {src/orchestrator/csv => csv}/entry_16_1.csv | 0 {src/orchestrator/csv => csv}/entry_16_2.csv | 0 {src/orchestrator/csv => csv}/entry_16_3.csv | 0 {src/orchestrator/csv => csv}/entry_16_4.csv | 0 .../csv => csv}/entry_16_no_overflow.csv | 0 {src/orchestrator/csv => csv}/entry_64.csv | 0 examples/aggregation_flow.rs | 6 +-- src/aggregation_merkle_sum_tree.rs | 37 +++++-------------- src/executor/cloud_spawner.rs | 14 +++---- src/executor/local_spawner.rs | 10 ++--- src/executor/mock_spawner.rs | 2 +- src/executor/test.rs | 9 ++--- src/json_mst.rs | 27 ++++++++++++++ src/mini_tree_generator.rs | 2 +- src/orchestrator/mod.rs | 10 +++++ src/orchestrator/test.rs | 22 +++++------ 19 files changed, 77 insertions(+), 65 deletions(-) rename {src/orchestrator/csv => csv}/entry_16.csv (100%) rename {src/orchestrator/csv => csv}/entry_16_1.csv (100%) rename {src/orchestrator/csv => csv}/entry_16_2.csv (100%) rename {src/orchestrator/csv => csv}/entry_16_3.csv (100%) rename {src/orchestrator/csv => csv}/entry_16_4.csv (100%) rename {src/orchestrator/csv => csv}/entry_16_no_overflow.csv (100%) rename {src/orchestrator/csv => csv}/entry_64.csv (100%) diff --git a/.gitignore b/.gitignore index 714de26..ea8c4bf 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1 @@ /target -/src/data diff --git a/bin/README.md b/bin/README.md index 3ee93be..e252863 100644 --- a/bin/README.md +++ b/bin/README.md @@ -24,6 +24,8 @@ Second, send two entries to the Mini Tree Server, execute the following script: bash ./scripts/test_sending_entry.sh ``` +Note: Execute this command from the project's root folder to ensure proper functioning of scripts. + Upon successful execution, you will receive a response similar to the following:
Click View Response diff --git a/src/orchestrator/csv/entry_16.csv b/csv/entry_16.csv similarity index 100% rename from src/orchestrator/csv/entry_16.csv rename to csv/entry_16.csv diff --git a/src/orchestrator/csv/entry_16_1.csv b/csv/entry_16_1.csv similarity index 100% rename from src/orchestrator/csv/entry_16_1.csv rename to csv/entry_16_1.csv diff --git a/src/orchestrator/csv/entry_16_2.csv b/csv/entry_16_2.csv similarity index 100% rename from src/orchestrator/csv/entry_16_2.csv rename to csv/entry_16_2.csv diff --git a/src/orchestrator/csv/entry_16_3.csv b/csv/entry_16_3.csv similarity index 100% rename from src/orchestrator/csv/entry_16_3.csv rename to csv/entry_16_3.csv diff --git a/src/orchestrator/csv/entry_16_4.csv b/csv/entry_16_4.csv similarity index 100% rename from src/orchestrator/csv/entry_16_4.csv rename to csv/entry_16_4.csv diff --git a/src/orchestrator/csv/entry_16_no_overflow.csv b/csv/entry_16_no_overflow.csv similarity index 100% rename from src/orchestrator/csv/entry_16_no_overflow.csv rename to csv/entry_16_no_overflow.csv diff --git a/src/orchestrator/csv/entry_64.csv b/csv/entry_64.csv similarity index 100% rename from src/orchestrator/csv/entry_64.csv rename to csv/entry_64.csv diff --git a/examples/aggregation_flow.rs b/examples/aggregation_flow.rs index 5e49ac8..7a3ec44 100644 --- a/examples/aggregation_flow.rs +++ b/examples/aggregation_flow.rs @@ -73,8 +73,8 @@ async fn main() -> Result<(), Box> { let orchestrator = Orchestrator::::new( Box::new(spawner), vec![ - "./src/orchestrator/csv/entry_16_1.csv".to_string(), - "./src/orchestrator/csv/entry_16_2.csv".to_string(), + "csv/entry_16_1.csv".to_string(), + "csv/entry_16_2.csv".to_string(), ], ); @@ -99,7 +99,7 @@ async fn main() -> Result<(), Box> { // // Here, we demonstrate generating the proof of inclusion for User 0. 
let inclusion_proof_of_user0 = round.get_proof_of_inclusion(0).unwrap(); - assert!(inclusion_proof_of_user0.get_public_inputs().len() > 0); // Check public input counts + assert!(!inclusion_proof_of_user0.get_public_inputs().is_empty()); // Check public input counts println!("Generated User 0 proof of inclusion"); Ok(()) diff --git a/src/aggregation_merkle_sum_tree.rs b/src/aggregation_merkle_sum_tree.rs index 586c290..9dec788 100644 --- a/src/aggregation_merkle_sum_tree.rs +++ b/src/aggregation_merkle_sum_tree.rs @@ -1,7 +1,6 @@ use halo2_proofs::halo2curves::bn256::Fr as Fp; -use num_bigint::BigUint; use std::error::Error; -use summa_backend::merkle_sum_tree::utils::{build_merkle_tree_from_leaves, fp_to_big_uint}; +use summa_backend::merkle_sum_tree::utils::build_merkle_tree_from_leaves; use summa_backend::merkle_sum_tree::{ Cryptocurrency, Entry, MerkleProof, MerkleSumTree, Node, Tree, }; @@ -162,19 +161,6 @@ impl } } - // Iterate through the balance accumulator and throw error if any balance is not in range 0, 2 ^ (8 * N_BYTES): - for balance in &balances_acc { - // transform the balance to a BigUint - let balance_big_uint = fp_to_big_uint(*balance); - - if balance_big_uint >= BigUint::from(2_usize).pow(8 * N_BYTES as u32) { - return Err( - "Accumulated balance is not in the expected range, proof generation will fail!" 
- .into(), - ); - } - } - let mut nodes = vec![]; let root = build_merkle_tree_from_leaves(&roots, depth, &mut nodes)?; @@ -218,12 +204,10 @@ mod test { fn test_aggregation_mst() { // create new mini merkle sum tree let mini_tree_1 = - MerkleSumTree::::from_csv("src/orchestrator/csv/entry_16_1.csv") - .unwrap(); + MerkleSumTree::::from_csv("csv/entry_16_1.csv").unwrap(); let mini_tree_2 = - MerkleSumTree::::from_csv("src/orchestrator/csv/entry_16_2.csv") - .unwrap(); + MerkleSumTree::::from_csv("csv/entry_16_2.csv").unwrap(); let aggregation_mst = AggregationMerkleSumTree::::new( vec![mini_tree_1.clone(), mini_tree_2.clone()], @@ -271,7 +255,7 @@ mod test { let mut mini_trees = Vec::new(); for i in 1..=4 { let mini_tree = MerkleSumTree::::from_csv(&format!( - "src/orchestrator/csv/entry_16_{}.csv", + "csv/entry_16_{}.csv", i )) .unwrap(); @@ -286,8 +270,7 @@ mod test { // The entry_64.csv file is the aggregation of entry_16_1, entry_16_2, entry_16_3, entry_16_4 let single_merkle_sum_tree = - MerkleSumTree::::from_csv("src/orchestrator/csv/entry_64.csv") - .unwrap(); + MerkleSumTree::::from_csv("csv/entry_64.csv").unwrap(); assert_eq!( aggregation_mst_root.hash, @@ -300,13 +283,11 @@ mod test { // create new mini merkle sum tree. 
The accumulated balance for each mini tree is in the expected range // note that the accumulated balance of the tree generated from entry_16_4 is just in the expected range for 1 unit let merkle_sum_tree_1 = - MerkleSumTree::::from_csv("src/orchestrator/csv/entry_16.csv") - .unwrap(); + MerkleSumTree::::from_csv("csv/entry_16.csv").unwrap(); - let merkle_sum_tree_2 = MerkleSumTree::::from_csv( - "src/orchestrator/csv/entry_16_no_overflow.csv", - ) - .unwrap(); + let merkle_sum_tree_2 = + MerkleSumTree::::from_csv("csv/entry_16_no_overflow.csv") + .unwrap(); // When creating the aggregation merkle sum tree, the accumulated balance of the two mini trees is not in the expected range, an error is thrown let result = AggregationMerkleSumTree::::new( diff --git a/src/executor/cloud_spawner.rs b/src/executor/cloud_spawner.rs index ccd8c60..d98d975 100644 --- a/src/executor/cloud_spawner.rs +++ b/src/executor/cloud_spawner.rs @@ -20,16 +20,16 @@ pub struct CloudSpawner { } /// CloudSpawner -/// -/// Designed for cloud-based resources and Docker Swarm, CloudSpawner is optimized for scalability and high availability. +/// +/// Designed for cloud-based resources and Docker Swarm, CloudSpawner is optimized for scalability and high availability. /// While functioning similarly to LocalSpawner, it extends its capabilities by initializing workers on remote machines, making it particularly suitable for Swarm network setups. -/// +/// /// CloudSpawner can be utilized in two ways: -/// -/// - Without `service_info`, CloudSpawner does not directly manage Worker instances. +/// +/// - Without `service_info`, CloudSpawner does not directly manage Worker instances. /// In this mode, it does not control or interact with the Docker API for worker management. -/// -/// - With `service_info`, CloudSpawner requires a `docker-compose` file. When provided with `service_info`, +/// +/// - With `service_info`, CloudSpawner requires a `docker-compose` file. 
When provided with `service_info`, /// it manages Docker services and networks, enabling dynamic scaling and orchestration of workers. impl CloudSpawner { pub fn new( diff --git a/src/executor/local_spawner.rs b/src/executor/local_spawner.rs index 736a084..7d12b72 100644 --- a/src/executor/local_spawner.rs +++ b/src/executor/local_spawner.rs @@ -17,9 +17,9 @@ use tokio::sync::oneshot; use crate::executor::{Executor, ExecutorSpawner}; /// LocalSpawner -/// -/// The LocalSpawner is tailored for use cases closer to actual deployment. It enables the initialization of Executors -/// and Workers within a local Docker environment. This spawner is ideal for development and testing phases, +/// +/// The LocalSpawner is tailored for use cases closer to actual deployment. It enables the initialization of Executors +/// and Workers within a local Docker environment. This spawner is ideal for development and testing phases, /// where simplicity and direct control over the containers are beneficial. pub struct LocalSpawner { docker: Docker, @@ -66,8 +66,6 @@ impl LocalSpawner { platform: None, }; - println!("docker-info: {:?}", docker.info().await?); - docker .create_container(Some(create_container_options), config.clone()) .await?; @@ -82,8 +80,6 @@ impl LocalSpawner { let container_info: ContainerInspectResponse = docker.inspect_container(&container_name, None).await?; - println!("container_info: {:?}", container_info); - Ok(container_info) } } diff --git a/src/executor/mock_spawner.rs b/src/executor/mock_spawner.rs index 7e78e76..d94f0a8 100644 --- a/src/executor/mock_spawner.rs +++ b/src/executor/mock_spawner.rs @@ -13,7 +13,7 @@ use crate::executor::{Executor, ExecutorSpawner}; use crate::mini_tree_generator::create_mst; /// MockSpawner -/// +/// /// Primarily used for testing purposes, the MockSpawner initializes Executors suitable for various test scenarios, /// including negative test cases. It runs the `mini-tree-server` locally, allowing for a controlled testing environment.
pub struct MockSpawner { diff --git a/src/executor/test.rs b/src/executor/test.rs index 1c89835..e415ff2 100644 --- a/src/executor/test.rs +++ b/src/executor/test.rs @@ -35,8 +35,7 @@ async fn test_executor() -> Result<(), Box> { let spawner = MockSpawner::new(None); let executor = spawner.spawn_executor().await; - let (_, entries) = - parse_csv_to_entries::<_, 2, 14>("./src/orchestrator/csv/entry_16.csv").unwrap(); + let (_, entries) = parse_csv_to_entries::<_, 2, 14>("csv/entry_16.csv").unwrap(); let json_entries = entries .iter() .map(JsonEntry::from_entry) @@ -55,10 +54,8 @@ async fn test_executor_block() -> Result<(), Box> { let executor = spawner.spawn_executor().await; // Parse two csv files - let (_, entries_1) = - parse_csv_to_entries::<_, 2, 14>("./src/orchestrator/csv/entry_16.csv").unwrap(); - let (_, entries_2) = - parse_csv_to_entries::<_, 2, 14>("./src/orchestrator/csv/entry_16.csv").unwrap(); + let (_, entries_1) = parse_csv_to_entries::<_, 2, 14>("csv/entry_16.csv").unwrap(); + let (_, entries_2) = parse_csv_to_entries::<_, 2, 14>("csv/entry_16.csv").unwrap(); // Convert entries to json_entries let json_entries_1 = entries_1 diff --git a/src/json_mst.rs b/src/json_mst.rs index 25e4ed9..82f4790 100644 --- a/src/json_mst.rs +++ b/src/json_mst.rs @@ -6,18 +6,27 @@ use halo2_proofs::halo2curves::{bn256::Fr as Fp, group::ff::PrimeField}; use summa_backend::merkle_sum_tree::{Cryptocurrency, Entry, MerkleSumTree, Node, Tree}; +/// JsonEntry +/// Represents an entry in the Merkle Sum Tree in JSON format. +/// The balance in the Merkle Sum Tree was presented in BigUint format, but in the JSON format, it is presented as a string. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct JsonEntry { pub username: String, pub balances: Vec, } +/// JsonNode +/// Represents a node in the Merkle Sum Tree in JSON format. +/// The balance in the Merkle Sum Tree was presented in BigUint format, but in the JSON format, it is presented as a string.
#[derive(Debug, Clone, Serialize, Deserialize)] pub struct JsonNode { pub hash: String, pub balances: Vec, } +/// JsonMerkleSumTree +/// Represents the entire Merkle Sum Tree in JSON format. +/// It is used for transmitting tree data between the executor and mini-tree-server. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct JsonMerkleSumTree { pub root: JsonNode, @@ -44,6 +53,10 @@ impl JsonEntry { JsonEntry { username, balances } } + /// Converts an `Entry` to a `JsonEntry`. + /// + /// This method translates an `Entry` into its JSON format. + /// It is used by the Executor to send Entry data to the mini-tree-server in JSON format. pub fn from_entry(entry: &Entry) -> Self { JsonEntry::new( entry.username().to_string(), @@ -55,6 +68,10 @@ impl JsonEntry { ) } + /// Converts a `JsonEntry` back to an `Entry`. + /// + /// This method is utilized by the mini-tree-server when processing data received from the executor in JSON format. + /// It converts `JsonEntry` objects back to the `Entry` struct, facilitating the construction of the Merkle Sum Tree. pub fn to_entry(&self) -> Entry { let mut balances: [BigUint; N_CURRENCIES] = std::array::from_fn(|_| BigUint::from(0u32)); self.balances.iter().enumerate().for_each(|(i, balance)| { @@ -65,6 +82,7 @@ impl JsonEntry { } } +/// Converts a `JsonNode` back to a `Node` for reconstructing the Merkle Sum Tree from JSON data. impl JsonNode { pub fn to_node(&self) -> Node { let hash = parse_fp_from_hex(&self.hash); @@ -81,6 +99,10 @@ impl JsonNode { } impl JsonMerkleSumTree { + /// Converts a MerkleSumTree to its JSON representation. + /// + /// This function is essential for the mini-tree-server to send the Merkle Sum Tree results back to the executor in JSON format, + /// facilitating the translation of the tree structure into a universally readable JSON form. pub fn from_tree( tree: MerkleSumTree, ) -> Self { @@ -110,6 +132,11 @@ impl JsonMerkleSumTree { } } + /// Converts a JsonMerkleSumTree back to a MerkleSumTree. 
+ /// + /// This function is crucial when handling data received in JSON format from the mini-tree-server. + /// It rebuilds the MerkleSumTree on the main machine using the `from_params` method. + /// This method is preferred over `from_entries` as the nodes are pre-computed by the mini-tree-server, thus the tree doesn't need to be recomputed from scratch. pub fn to_mst( &self, ) -> Result, Box> diff --git a/src/mini_tree_generator.rs b/src/mini_tree_generator.rs index 286f280..bfb84df 100644 --- a/src/mini_tree_generator.rs +++ b/src/mini_tree_generator.rs @@ -4,7 +4,7 @@ use const_env::from_env; use crate::json_mst::{JsonEntry, JsonMerkleSumTree}; use summa_backend::merkle_sum_tree::{Cryptocurrency, Entry, MerkleSumTree}; -/// Mini Tree Generator is designed to create Merkle Sum Tree using the Axum web framework. +/// Mini Tree Generator is designed to create Merkle Sum Tree using the Axum web framework. /// It primarily handles HTTP requests to generate tree based on provided JSON entries. 
/// /// Constants: diff --git a/src/orchestrator/mod.rs b/src/orchestrator/mod.rs index e10daa2..d2e9b05 100644 --- a/src/orchestrator/mod.rs +++ b/src/orchestrator/mod.rs @@ -37,7 +37,17 @@ impl Orchestrator::new( Box::new(spawner), vec![ - "./src/orchestrator/csv/entry_16_1.csv".to_string(), - "./src/orchestrator/csv/entry_16_2.csv".to_string(), + "csv/entry_16_1.csv".to_string(), + "csv/entry_16_2.csv".to_string(), ], ); let aggregation_merkle_sum_tree = orchestrator.create_aggregation_mst(1).await.unwrap(); @@ -26,8 +26,8 @@ async fn test_none_exist_csv() { let orchestrator = Orchestrator::<2, 14>::new( Box::new(spawner), vec![ - "./src/orchestrator/csv/entry_16.csv".to_string(), - "./src/orchestrator/csv/no_exist.csv".to_string(), + "csv/entry_16.csv".to_string(), + "csv/no_exist.csv".to_string(), ], ); match orchestrator.create_aggregation_mst(2).await { @@ -48,8 +48,8 @@ async fn test_none_exist_worker() { let orchestrator = Orchestrator::<2, 14>::new( Box::new(spawner), vec![ - "./src/orchestrator/csv/entry_16_1.csv".to_string(), - "./src/orchestrator/csv/entry_16_2.csv".to_string(), + "csv/entry_16_1.csv".to_string(), + "csv/entry_16_2.csv".to_string(), ], ); @@ -63,7 +63,7 @@ async fn test_none_exist_worker() { } } -#[cfg(feature = "docker")] +#[cfg(feature = "docker")] #[tokio::test] async fn test_with_containers() { let spawner = LocalSpawner::new( @@ -74,8 +74,8 @@ async fn test_with_containers() { let orchestrator = Orchestrator::<2, 14>::new( Box::new(spawner), vec![ - "./src/orchestrator/csv/entry_16_1.csv".to_string(), - "./src/orchestrator/csv/entry_16_2.csv".to_string(), + "csv/entry_16_1.csv".to_string(), + "csv/entry_16_2.csv".to_string(), ], ); let aggregation_merkle_sum_tree = orchestrator.create_aggregation_mst(2).await.unwrap(); @@ -96,8 +96,8 @@ async fn test_with_swarm_service() { let orchestrator = Orchestrator::<2, 14>::new( Box::new(spawner), vec![ - "./src/orchestrator/csv/entry_16_1.csv".to_string(), -
"./src/orchestrator/csv/entry_16_2.csv".to_string(), + "csv/entry_16_1.csv".to_string(), + "csv/entry_16_2.csv".to_string(), ], ); let aggregation_merkle_sum_tree = orchestrator.create_aggregation_mst(2).await.unwrap();