From 54a9c586bf2bd8ec1002f106edc30dfd0e8ddf53 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 18 Apr 2023 15:09:58 +0200 Subject: [PATCH 01/74] wallet: display `TopoHeight` --- xelis_wallet/src/main.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/xelis_wallet/src/main.rs b/xelis_wallet/src/main.rs index 986bcd13..011d45e1 100644 --- a/xelis_wallet/src/main.rs +++ b/xelis_wallet/src/main.rs @@ -97,9 +97,9 @@ async fn run_prompt(prompt: Arc, wallet: Arc, network: Network) }; let closure = || async { let storage = wallet.get_storage().read().await; - let height_str = format!( + let topoheight_str = format!( "{}: {}", - Prompt::colorize_str(Color::Yellow, "Height"), + Prompt::colorize_str(Color::Yellow, "TopoHeight"), Prompt::colorize_string(Color::Green, &format!("{}", storage.get_daemon_topoheight().unwrap_or(0))) ); let balance = format!( @@ -123,7 +123,7 @@ async fn run_prompt(prompt: Arc, wallet: Arc, network: Network) "{} | {} | {} | {} | {} {}{} ", Prompt::colorize_str(Color::Blue, "XELIS Wallet"), addr_str, - height_str, + topoheight_str, balance, status, network_str, From 414f897d3541c7a5637028632b3c9adb1ceb4f4b Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 18 Apr 2023 15:14:11 +0200 Subject: [PATCH 02/74] daemon: ping packet countdown verification --- xelis_daemon/src/p2p/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 9c421b69..1f6eedc1 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -873,11 +873,13 @@ impl P2pServer { trace!("Received a ping packet from {}", peer); let last_ping = peer.get_last_ping(); let current_time = get_current_time(); - peer.set_last_ping(current_time); // verify the respect of the coutdown to prevent massive packet incoming - if last_ping != 0 && current_time - last_ping < P2P_PING_DELAY { + // if he send 2x faster than rules, throw error (because of connection latency / packets 
being queued) + if last_ping != 0 && current_time - last_ping < P2P_PING_DELAY / 2 { return Err(P2pError::PeerInvalidPingCoutdown) } + // update the last ping only if he respect the protocol rules + peer.set_last_ping(current_time); // we verify the respect of the countdown of peer list updates to prevent any spam if ping.get_peers().len() > 0 { From e3091669fbb4b01a49891fd7562aab14d346c28a Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 18 Apr 2023 15:47:14 +0200 Subject: [PATCH 03/74] update README.md --- README.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index f8a26bb1..108140c8 100644 --- a/README.md +++ b/README.md @@ -47,7 +47,7 @@ Others objectives in mind are: ## BlockDAG -XELIS try to implement & use a blockDAG which the rules are the following: +XELIS use a blockDAG with following rules: - A block is considered `Sync Block` when the block height is less than `TOP_HEIGHT - STABLE_LIMIT` and it's the unique block at a specific height (or only ordered block at its height and don't have lower cumulative difficulty than previous blocks). - A block is considered `Side Block` when block height is less than or equal to height of past 8 topographical blocks. - A block is considered `Orphaned` when the block is not ordered in DAG (no topological height for it). @@ -62,6 +62,15 @@ XELIS try to implement & use a blockDAG which the rules are the following: - Supply is re-calculated each time the block is re-ordered because its based on topo order. - Transactions and miner rewards are re-computed when a new block is added and the block there linked to is not yet in stable topo height. +Topoheight represents how many unique blocks there is in the blockchain, and its ordered by DAG. + +## Homomorphic Encryption + +Homomorphic Encryption (HE) will allow to add privacy on transactions and accounts by doing computation while staying in encrypted form. 
+Each balances, transaction assets values are in encrypted form and nobody can determine the real value of it except involved parties. + +**NOTE**: This part is not yet deployed and is under heavy work. + ## Mining Mining capabilities of XELIS are a bit differents from others chains because of standards being not implemented. From ce0b50472ce5ae0a5e1d3ca76322d8a10ee443f9 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 18 Apr 2023 15:56:09 +0200 Subject: [PATCH 04/74] wallet: `FeeBuilder` allows to set direct fee value or a multiplier --- xelis_wallet/src/transaction_builder.rs | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/xelis_wallet/src/transaction_builder.rs b/xelis_wallet/src/transaction_builder.rs index c46fc1d6..859b17ba 100644 --- a/xelis_wallet/src/transaction_builder.rs +++ b/xelis_wallet/src/transaction_builder.rs @@ -9,20 +9,25 @@ use xelis_common::{ use crate::wallet::WalletError; +pub enum FeeBuilder { + Multiplier(f64), // calculate tx fees based on its size and multiply by this value + Value(u64) // set a direct value of how much fees you want to pay +} + pub struct TransactionBuilder { owner: PublicKey, data: TransactionType, nonce: u64, - fee_multiplier: f64, + fee_builder: FeeBuilder, } impl TransactionBuilder { - pub fn new(owner: PublicKey, data: TransactionType, nonce: u64, fee_multiplier: f64) -> Self { + pub fn new(owner: PublicKey, data: TransactionType, nonce: u64, fee_builder: FeeBuilder) -> Self { Self { owner, data, nonce, - fee_multiplier + fee_builder } } @@ -34,10 +39,14 @@ impl TransactionBuilder { } fn estimate_fees_internal(&self, writer: &Writer) -> u64 { - // 8 represent the field 'fee' in bytes size - let total_bytes = SIGNATURE_LENGTH + 8 + writer.total_write(); - let fee = (calculate_tx_fee(total_bytes) as f64 * self.fee_multiplier) as u64; - fee + match &self.fee_builder { + FeeBuilder::Multiplier(multiplier) => { + // 8 represent the field 'fee' in bytes size + let total_bytes = 
SIGNATURE_LENGTH + 8 + writer.total_write(); + (calculate_tx_fee(total_bytes) as f64 * multiplier) as u64 + }, + FeeBuilder::Value(value) => *value + } } pub fn total_spent(&self) -> HashMap<&Hash, u64> { From b47cf8ef9ffc6dc52ae68bb710b54574d70f7211 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 18 Apr 2023 16:29:07 +0200 Subject: [PATCH 05/74] daemon: clean up --- xelis_daemon/src/core/blockchain.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 2022fea4..c7b1a7cd 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -884,7 +884,7 @@ impl Blockchain { } if tips_count > 1 { - let best_tip = blockdag::find_best_tip_by_cumulative_difficulty(&*storage, block.get_tips()).await?; + let best_tip = blockdag::find_best_tip_by_cumulative_difficulty(storage, block.get_tips()).await?; debug!("Best tip selected for this new block is {}", best_tip); for hash in block.get_tips() { if best_tip != hash { @@ -899,7 +899,7 @@ impl Blockchain { // verify PoW and get difficulty for this block based on tips let pow_hash = block.get_pow_hash(); debug!("POW hash: {}", pow_hash); - let difficulty = self.verify_proof_of_work(&*storage, &pow_hash, block.get_tips()).await?; + let difficulty = self.verify_proof_of_work(storage, &pow_hash, block.get_tips()).await?; debug!("PoW is valid for difficulty {}", difficulty); let mut total_tx_size: usize = 0; @@ -1215,7 +1215,7 @@ impl Blockchain { tips = HashSet::new(); debug!("find best tip by cumulative difficulty"); - let best_tip = blockdag::find_best_tip_by_cumulative_difficulty(&*storage, &new_tips).await?.clone(); + let best_tip = blockdag::find_best_tip_by_cumulative_difficulty(storage, &new_tips).await?.clone(); for hash in new_tips { if best_tip != hash { if !self.validate_tips(&storage, &best_tip, &hash).await? 
{ @@ -1264,7 +1264,7 @@ impl Blockchain { for hash in tips { tips_vec.push(hash); } - let difficulty = self.get_difficulty_at_tips(&*storage, &tips_vec).await?; + let difficulty = self.get_difficulty_at_tips(storage, &tips_vec).await?; self.difficulty.store(difficulty, Ordering::SeqCst); } From 6ceee37409aa7cf974eb6588cca4ac86f9db4a20 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 18 Apr 2023 20:06:25 +0200 Subject: [PATCH 06/74] wallet: use `FeeBuilder` to build transaction --- xelis_wallet/src/wallet.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index 5c723b77..3f887c73 100644 --- a/xelis_wallet/src/wallet.rs +++ b/xelis_wallet/src/wallet.rs @@ -17,7 +17,7 @@ use crate::mnemonics; use crate::network_handler::{NetworkHandler, SharedNetworkHandler}; use crate::rpc::WalletRpcServer; use crate::storage::{EncryptedStorage, Storage}; -use crate::transaction_builder::TransactionBuilder; +use crate::transaction_builder::{TransactionBuilder, FeeBuilder}; use chacha20poly1305::{aead::OsRng, Error as CryptoError}; use rand::RngCore; use thiserror::Error; @@ -269,7 +269,7 @@ impl Wallet { // also check that we have enough funds for the transaction pub fn create_transaction(&self, storage: &EncryptedStorage, transaction_type: TransactionType) -> Result { let nonce = storage.get_nonce().unwrap_or(0); - let builder = TransactionBuilder::new(self.keypair.get_public_key().clone(), transaction_type, nonce, 1f64); + let builder = TransactionBuilder::new(self.keypair.get_public_key().clone(), transaction_type, nonce, FeeBuilder::Multiplier(1f64)); let assets_spent: HashMap<&Hash, u64> = builder.total_spent(); // check that we have enough balance for every assets spent From fdc46a4e302021cc46b55daebf351106e9477a55 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 19 Apr 2023 11:26:55 +0200 Subject: [PATCH 07/74] wallet: build_transaction API --- xelis_common/src/api/daemon.rs | 7 +---- 
xelis_common/src/api/mod.rs | 13 +++++++-- xelis_common/src/api/wallet.rs | 25 +++++++++++++++- xelis_common/src/rpc_server/mod.rs | 1 + xelis_common/src/rpc_server/rpc_handler.rs | 5 ++++ xelis_daemon/src/core/blockchain.rs | 2 +- xelis_daemon/src/rpc/rpc.rs | 12 ++------ xelis_wallet/src/main.rs | 5 ++-- xelis_wallet/src/rpc/rpc.rs | 33 ++++++++++++++++++++-- xelis_wallet/src/transaction_builder.rs | 7 +---- xelis_wallet/src/wallet.rs | 7 +++-- 11 files changed, 83 insertions(+), 34 deletions(-) diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index 9b68d68b..725c935f 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -4,12 +4,7 @@ use serde::{Deserialize, Serialize}; use crate::{crypto::{hash::Hash, address::Address}, account::VersionedBalance, network::Network}; -#[derive(Serialize, Deserialize)] -pub struct DataHash<'a, T: Clone> { - pub hash: Cow<'a, Hash>, - #[serde(flatten)] - pub data: Cow<'a, T> -} +use super::DataHash; #[derive(Serialize, Deserialize)] pub enum BlockType { diff --git a/xelis_common/src/api/mod.rs b/xelis_common/src/api/mod.rs index 5122c70b..edf73199 100644 --- a/xelis_common/src/api/mod.rs +++ b/xelis_common/src/api/mod.rs @@ -1,7 +1,7 @@ -use std::{collections::HashMap, hash::Hash, borrow::Cow}; +use std::{collections::HashMap, borrow::Cow}; use serde::{Deserialize, Serialize}; use serde_json::Value; -use crate::serializer::{Serializer, Reader, ReaderError, Writer}; +use crate::{serializer::{Serializer, Reader, ReaderError, Writer}, crypto::hash::Hash}; pub mod wallet; pub mod daemon; @@ -140,4 +140,11 @@ pub struct EventResult<'a, E: Clone> { pub event: Cow<'a, E>, #[serde(flatten)] pub value: Value -} \ No newline at end of file +} + +#[derive(Serialize, Deserialize)] +pub struct DataHash<'a, T: Clone> { + pub hash: Cow<'a, Hash>, + #[serde(flatten)] + pub data: Cow<'a, T> +} diff --git a/xelis_common/src/api/wallet.rs b/xelis_common/src/api/wallet.rs index 
0ffdd02f..d7281342 100644 --- a/xelis_common/src/api/wallet.rs +++ b/xelis_common/src/api/wallet.rs @@ -1 +1,24 @@ -// TODO \ No newline at end of file +use serde::{Deserialize, Serialize}; + +use crate::transaction::{TransactionType, Transaction}; + +use super::DataHash; + + +#[derive(Serialize, Deserialize)] +pub enum FeeBuilder { + Multiplier(f64), // calculate tx fees based on its size and multiply by this value + Value(u64) // set a direct value of how much fees you want to pay +} + +#[derive(Serialize, Deserialize)] +pub struct BuildTransactionParams { + pub tx_type: TransactionType, + pub fee: Option, + pub broadcast: bool +} + +#[derive(Serialize, Deserialize)] +pub struct TransactionResponse<'a> { + pub tx: DataHash<'a, Transaction>, +} \ No newline at end of file diff --git a/xelis_common/src/rpc_server/mod.rs b/xelis_common/src/rpc_server/mod.rs index b8a114ac..1113651d 100644 --- a/xelis_common/src/rpc_server/mod.rs +++ b/xelis_common/src/rpc_server/mod.rs @@ -6,6 +6,7 @@ use std::borrow::Cow; pub use error::{RpcResponseError, InternalRpcError}; pub use rpc_handler::{RPCHandler, Handler}; +pub use rpc_handler::parse_params; use actix_web::{HttpResponse, web::{self, Data, Payload}, Responder, HttpRequest}; use serde::{Deserialize, Serialize}; diff --git a/xelis_common/src/rpc_server/rpc_handler.rs b/xelis_common/src/rpc_server/rpc_handler.rs index 76fd9f7b..118d96f1 100644 --- a/xelis_common/src/rpc_server/rpc_handler.rs +++ b/xelis_common/src/rpc_server/rpc_handler.rs @@ -1,4 +1,5 @@ use std::{collections::HashMap, pin::Pin, future::Future}; +use serde::de::DeserializeOwned; use serde_json::{Value, json}; use super::{InternalRpcError, RpcResponseError, RpcRequest, JSON_RPC_VERSION}; use log::{error, trace}; @@ -59,4 +60,8 @@ where pub fn get_data(&self) -> &T { &self.data } +} + +pub fn parse_params(value: Value) -> Result { + serde_json::from_value(value).map_err(|e| InternalRpcError::InvalidParams(e)) } \ No newline at end of file diff --git 
a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index c7b1a7cd..35aa3edd 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -8,7 +8,7 @@ use xelis_common::{ globals::get_current_timestamp, block::{Block, BlockHeader, EXTRA_NONCE_SIZE}, immutable::Immutable, - serializer::Serializer, account::VersionedBalance, api::daemon::{NotifyEvent, DataHash, BlockOrderedEvent, TransactionExecutedEvent, BlockType}, network::Network + serializer::Serializer, account::VersionedBalance, api::{daemon::{NotifyEvent, BlockOrderedEvent, TransactionExecutedEvent, BlockType}, DataHash}, network::Network }; use crate::{p2p::P2pServer, rpc::{rpc::{get_block_response_for_hash, get_block_type_for_block}, DaemonRpcServer, SharedDaemonRpcServer}}; use super::storage::{Storage, DifficultyProvider}; diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index ee333f96..3752c166 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -1,13 +1,11 @@ use crate::core::{blockchain::Blockchain, storage::Storage, error::BlockchainError}; use super::{InternalRpcError, ApiError}; use anyhow::Context; -use serde::de::DeserializeOwned; use serde_json::{json, Value}; use xelis_common::{ - api::daemon::{ + api::{daemon::{ BlockType, BlockResponse, - DataHash, GetBlockAtTopoHeightParams, GetBlockByHashParams, GetBlockTemplateParams, @@ -20,20 +18,16 @@ use xelis_common::{ P2pStatusResult, GetBlocksAtHeightParams, GetRangeParams, GetBalanceAtTopoHeightParams, GetLastBalanceResult, GetInfoResult, GetTopBlockParams, GetTransactionsParams, TransactionResponse - }, + }, DataHash}, async_handler, serializer::Serializer, transaction::Transaction, crypto::hash::Hash, - block::{BlockHeader, Block}, config::{BLOCK_TIME_MILLIS, VERSION}, immutable::Immutable, rpc_server::RPCHandler, + block::{BlockHeader, Block}, config::{BLOCK_TIME_MILLIS, VERSION}, immutable::Immutable, rpc_server::{RPCHandler, parse_params}, 
}; use std::{sync::Arc, borrow::Cow}; use log::{info, debug}; -fn parse_params(value: Value) -> Result { - serde_json::from_value(value).map_err(|e| InternalRpcError::InvalidParams(e)) -} - pub async fn get_block_type_for_block(blockchain: &Blockchain, storage: &S, hash: &Hash) -> Result { Ok(if blockchain.is_block_orphaned_for_storage(storage, hash).await { BlockType::Orphaned diff --git a/xelis_wallet/src/main.rs b/xelis_wallet/src/main.rs index 011d45e1..0a7658c6 100644 --- a/xelis_wallet/src/main.rs +++ b/xelis_wallet/src/main.rs @@ -8,7 +8,7 @@ use clap::Parser; use xelis_common::{config::{ DEFAULT_DAEMON_ADDRESS, VERSION, XELIS_ASSET -}, prompt::{Prompt, command::{CommandManager, Command, CommandHandler, CommandError}, argument::{Arg, ArgType, ArgumentManager}, LogLevel}, async_handler, crypto::{address::{Address, AddressType}, hash::Hashable}, transaction::TransactionType, globals::{format_coin, set_network_to}, serializer::Serializer, network::Network}; +}, prompt::{Prompt, command::{CommandManager, Command, CommandHandler, CommandError}, argument::{Arg, ArgType, ArgumentManager}, LogLevel}, async_handler, crypto::{address::{Address, AddressType}, hash::Hashable}, transaction::TransactionType, globals::{format_coin, set_network_to}, serializer::Serializer, network::Network, api::wallet::FeeBuilder}; use xelis_wallet::wallet::Wallet; #[derive(Parser)] @@ -169,8 +169,9 @@ async fn transfer(manager: &CommandManager>, mut arguments: Argument let tx = { let storage = wallet.get_storage().read().await; let transfer = wallet.create_transfer(&storage, asset, key, extra_data, amount)?; - wallet.create_transaction(&storage, TransactionType::Transfer(vec![transfer]))? + wallet.create_transaction(&storage, TransactionType::Transfer(vec![transfer]), FeeBuilder::Multiplier(1f64))? 
}; + let tx_hash = tx.hash(); manager.message(format!("Transaction hash: {}", tx_hash)); diff --git a/xelis_wallet/src/rpc/rpc.rs b/xelis_wallet/src/rpc/rpc.rs index a90e9961..fc4fbc00 100644 --- a/xelis_wallet/src/rpc/rpc.rs +++ b/xelis_wallet/src/rpc/rpc.rs @@ -1,13 +1,15 @@ -use std::sync::Arc; +use std::{sync::Arc, borrow::Cow}; +use anyhow::Context; use log::info; -use xelis_common::{rpc_server::{RPCHandler, InternalRpcError}, config::VERSION, async_handler}; +use xelis_common::{rpc_server::{RPCHandler, InternalRpcError, parse_params}, config::VERSION, async_handler, api::{wallet::{BuildTransactionParams, FeeBuilder, TransactionResponse}, DataHash}, crypto::hash::Hashable}; use serde_json::{Value, json}; -use crate::wallet::Wallet; +use crate::wallet::{Wallet, WalletError}; pub fn register_methods(handler: &mut RPCHandler>) { info!("Registering RPC methods..."); handler.register_method("version", async_handler!(version)); + handler.register_method("build_transaction", async_handler!(build_transaction)); } async fn version(_: Arc, body: Value) -> Result { @@ -15,4 +17,29 @@ async fn version(_: Arc, body: Value) -> Result return Err(InternalRpcError::UnexpectedParams) } Ok(json!(VERSION)) +} + +async fn build_transaction(wallet: Arc, body: Value) -> Result { + let params: BuildTransactionParams = parse_params(body)?; + // request ask to broadcast the TX but wallet is not connected to any daemon + if !wallet.is_online().await && params.broadcast { + return Err(WalletError::NotOnlineMode).context("Cannot broadcast TX")? 
+ } + + // create the TX + let storage = wallet.get_storage().read().await; + let tx = wallet.create_transaction(&storage, params.tx_type, params.fee.unwrap_or(FeeBuilder::Multiplier(1f64)))?; + + // if requested, broadcast the TX ourself + if params.broadcast { + wallet.submit_transaction(&tx).await.context("Couldn't broadcast transaction")?; + } + + // returns the created TX and its hash + Ok(json!(TransactionResponse { + tx: DataHash { + hash: Cow::Owned(tx.hash()), + data: Cow::Owned(tx) + } + })) } \ No newline at end of file diff --git a/xelis_wallet/src/transaction_builder.rs b/xelis_wallet/src/transaction_builder.rs index 859b17ba..0218d69c 100644 --- a/xelis_wallet/src/transaction_builder.rs +++ b/xelis_wallet/src/transaction_builder.rs @@ -4,16 +4,11 @@ use xelis_common::{ transaction::{Transaction, TransactionType, EXTRA_DATA_LIMIT_SIZE}, globals::calculate_tx_fee, serializer::{Writer, Serializer}, - crypto::{key::{SIGNATURE_LENGTH, PublicKey, KeyPair}, hash::Hash} + crypto::{key::{SIGNATURE_LENGTH, PublicKey, KeyPair}, hash::Hash}, api::wallet::FeeBuilder }; use crate::wallet::WalletError; -pub enum FeeBuilder { - Multiplier(f64), // calculate tx fees based on its size and multiply by this value - Value(u64) // set a direct value of how much fees you want to pay -} - pub struct TransactionBuilder { owner: PublicKey, data: TransactionType, diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index 3f887c73..3f2e4108 100644 --- a/xelis_wallet/src/wallet.rs +++ b/xelis_wallet/src/wallet.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use anyhow::{Error, Context}; use tokio::sync::{Mutex, RwLock}; use xelis_common::api::DataType; +use xelis_common::api::wallet::FeeBuilder; use xelis_common::config::XELIS_ASSET; use xelis_common::crypto::address::Address; use xelis_common::crypto::hash::Hash; @@ -17,7 +18,7 @@ use crate::mnemonics; use crate::network_handler::{NetworkHandler, SharedNetworkHandler}; use crate::rpc::WalletRpcServer; use 
crate::storage::{EncryptedStorage, Storage}; -use crate::transaction_builder::{TransactionBuilder, FeeBuilder}; +use crate::transaction_builder::TransactionBuilder; use chacha20poly1305::{aead::OsRng, Error as CryptoError}; use rand::RngCore; use thiserror::Error; @@ -267,9 +268,9 @@ impl Wallet { // create the final transaction with calculated fees and signature // also check that we have enough funds for the transaction - pub fn create_transaction(&self, storage: &EncryptedStorage, transaction_type: TransactionType) -> Result { + pub fn create_transaction(&self, storage: &EncryptedStorage, transaction_type: TransactionType, fee: FeeBuilder) -> Result { let nonce = storage.get_nonce().unwrap_or(0); - let builder = TransactionBuilder::new(self.keypair.get_public_key().clone(), transaction_type, nonce, FeeBuilder::Multiplier(1f64)); + let builder = TransactionBuilder::new(self.keypair.get_public_key().clone(), transaction_type, nonce, fee); let assets_spent: HashMap<&Hash, u64> = builder.total_spent(); // check that we have enough balance for every assets spent From 5cae9635261d89629fb777cecf1209235f34e809 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 19 Apr 2023 12:16:12 +0200 Subject: [PATCH 08/74] wallet: feature rpc_server --- xelis_wallet/Cargo.toml | 7 +++++-- xelis_wallet/src/lib.rs | 1 + xelis_wallet/src/wallet.rs | 6 +++++- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/xelis_wallet/Cargo.toml b/xelis_wallet/Cargo.toml index 95aa3c4e..976453e4 100644 --- a/xelis_wallet/Cargo.toml +++ b/xelis_wallet/Cargo.toml @@ -7,7 +7,7 @@ authors = ["Slixe "] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -xelis_common = { path = "../xelis_common", features = ["json_rpc", "prompt", "clap", "rpc_server"] } +xelis_common = { path = "../xelis_common", features = ["json_rpc", "prompt", "clap"] } chacha20poly1305 = "0.10.1" sled = "0.34.7" clap = { version = "3.1.18", features = ["derive"] } @@ 
-25,4 +25,7 @@ tokio = { version = "1", features = ["rt-multi-thread"]} anyhow = "1" fern = { version = "0.6", features = ["colored"] } serde = { version = "1", features = ["derive", "rc"] } -serde_json = "1" \ No newline at end of file +serde_json = "1" + +[features] +rpc_server = ["xelis_common/rpc_server"] \ No newline at end of file diff --git a/xelis_wallet/src/lib.rs b/xelis_wallet/src/lib.rs index c2a5ed2d..e05f05c3 100644 --- a/xelis_wallet/src/lib.rs +++ b/xelis_wallet/src/lib.rs @@ -7,4 +7,5 @@ pub mod api; pub mod network_handler; pub mod entry; pub mod mnemonics; +#[cfg(feature = "rpc_server")] pub mod rpc; \ No newline at end of file diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index 3f2e4108..8c860925 100644 --- a/xelis_wallet/src/wallet.rs +++ b/xelis_wallet/src/wallet.rs @@ -16,7 +16,6 @@ use crate::cipher::Cipher; use crate::config::{PASSWORD_ALGORITHM, PASSWORD_HASH_SIZE, SALT_SIZE}; use crate::mnemonics; use crate::network_handler::{NetworkHandler, SharedNetworkHandler}; -use crate::rpc::WalletRpcServer; use crate::storage::{EncryptedStorage, Storage}; use crate::transaction_builder::TransactionBuilder; use chacha20poly1305::{aead::OsRng, Error as CryptoError}; @@ -24,6 +23,9 @@ use rand::RngCore; use thiserror::Error; use log::{error, debug}; +#[cfg(feature = "rpc_server")] +use crate::rpc::WalletRpcServer; + #[derive(Error, Debug)] pub enum WalletError { #[error("Invalid key pair")] @@ -78,6 +80,7 @@ pub struct Wallet { // network on which we are connected network: Network, // RPC Server + #[cfg(feature = "rpc_server")] rpc_server: Option> } @@ -94,6 +97,7 @@ impl Wallet { keypair, network_handler: Mutex::new(None), network, + #[cfg(feature = "rpc_server")] rpc_server: None }; From 5bcf69c43c6aed7fa5d0dc74b18e83d0f761d3a1 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 19 Apr 2023 13:19:37 +0200 Subject: [PATCH 09/74] wallet: enable RPC Server --- xelis_wallet/Cargo.toml | 1 + xelis_wallet/src/main.rs | 13 ++++++++++++- 
xelis_wallet/src/rpc/mod.rs | 12 ++++++++++++ xelis_wallet/src/wallet.rs | 26 +++++++++++++++++++++----- 4 files changed, 46 insertions(+), 6 deletions(-) diff --git a/xelis_wallet/Cargo.toml b/xelis_wallet/Cargo.toml index 976453e4..a43e1665 100644 --- a/xelis_wallet/Cargo.toml +++ b/xelis_wallet/Cargo.toml @@ -28,4 +28,5 @@ serde = { version = "1", features = ["derive", "rc"] } serde_json = "1" [features] +default = ["rpc_server"] rpc_server = ["xelis_common/rpc_server"] \ No newline at end of file diff --git a/xelis_wallet/src/main.rs b/xelis_wallet/src/main.rs index 0a7658c6..e0a9ccf3 100644 --- a/xelis_wallet/src/main.rs +++ b/xelis_wallet/src/main.rs @@ -40,7 +40,10 @@ pub struct Config { seed: Option, /// Network selected for chain #[clap(long, arg_enum, default_value_t = Network::Mainnet)] - network: Network + network: Network, + #[cfg(feature = "rpc_server")] + #[clap(long)] + rpc_bind_address: Option } #[tokio::main] @@ -69,6 +72,14 @@ async fn main() -> Result<()> { } } + #[cfg(feature = "rpc_server")] + if let Some(address) = config.rpc_bind_address { + info!("Enabling RPC Server on {}", address); + if let Err(e) = wallet.enable_rpc_server(address).await { + error!("Error while enabling RPC Server: {}", e); + } + } + if let Err(e) = run_prompt(prompt, wallet, config.network).await { error!("Error while running prompt: {}", e); } diff --git a/xelis_wallet/src/rpc/mod.rs b/xelis_wallet/src/rpc/mod.rs index 0c93f68a..f5094f01 100644 --- a/xelis_wallet/src/rpc/mod.rs +++ b/xelis_wallet/src/rpc/mod.rs @@ -3,6 +3,7 @@ mod rpc; use std::sync::Arc; use anyhow::Result; +use log::{info, warn}; use tokio::sync::Mutex; use xelis_common::{config, rpc_server::{RPCHandler, RPCServerHandler, json_rpc}}; use actix_web::{get, HttpResponse, Responder, HttpServer, web::{Data, self}, App, dev::ServerHandle}; @@ -47,6 +48,17 @@ impl WalletRpcServer { Ok(server) } + + pub async fn stop(&self) { + info!("Stopping RPC Server..."); + let mut handle = self.handle.lock().await; + 
if let Some(handle) = handle.take() { + handle.stop(false).await; + info!("RPC Server is now stopped!"); + } else { + warn!("RPC Server is not running!"); + } + } } impl RPCServerHandler> for WalletRpcServer { diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index 8c860925..77fe625f 100644 --- a/xelis_wallet/src/wallet.rs +++ b/xelis_wallet/src/wallet.rs @@ -67,7 +67,9 @@ pub enum WalletError { #[error("Topoheight is too high to rescan")] RescanTopoheightTooHigh, #[error(transparent)] - Any(#[from] Error) + Any(#[from] Error), + #[error("RPC Server is not running")] + RPCServerNotRunning } pub struct Wallet { @@ -81,7 +83,7 @@ pub struct Wallet { network: Network, // RPC Server #[cfg(feature = "rpc_server")] - rpc_server: Option> + rpc_server: Mutex>> } pub fn hash_password(password: String, salt: &[u8]) -> Result<[u8; PASSWORD_HASH_SIZE], WalletError> { @@ -98,11 +100,10 @@ impl Wallet { network_handler: Mutex::new(None), network, #[cfg(feature = "rpc_server")] - rpc_server: None + rpc_server: Mutex::new(None) }; - let zelf = Arc::new(zelf); - zelf + Arc::new(zelf) } pub fn create(name: String, password: String, seed: Option, network: Network) -> Result, Error> { @@ -193,6 +194,21 @@ impl Wallet { Ok(Self::new(storage, keypair, network)) } + #[cfg(feature = "rpc_server")] + pub async fn enable_rpc_server(self: &Arc, bind_address: String) -> Result<(), Error> { + let rpc_server = WalletRpcServer::new(bind_address, Arc::clone(self)).await?; + *self.rpc_server.lock().await = Some(rpc_server); + Ok(()) + } + + #[cfg(feature = "rpc_server")] + pub async fn stop_rpc_server(&self) -> Result<(), Error> { + let mut lock = self.rpc_server.lock().await; + let rpc_server = lock.take().ok_or(WalletError::RPCServerNotRunning)?; + rpc_server.stop().await; + Ok(()) + } + pub async fn set_password(&self, old_password: String, password: String) -> Result<(), Error> { let mut encrypted_storage = self.storage.write().await; let storage = 
encrypted_storage.get_mutable_public_storage(); From 80f889331aa088205bc559084811f679ef839efc Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 19 Apr 2023 18:56:36 +0200 Subject: [PATCH 10/74] wallet: RPC Server authentication --- Cargo.lock | 16 +++++++++++++ xelis_wallet/Cargo.toml | 1 + xelis_wallet/src/main.rs | 44 ++++++++++++++++++++++++++++++------ xelis_wallet/src/rpc/mod.rs | 45 ++++++++++++++++++++++++++++++++----- xelis_wallet/src/wallet.rs | 16 ++++++++----- 5 files changed, 105 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0e57f6e6..882233de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -226,6 +226,21 @@ dependencies = [ "syn 1.0.108", ] +[[package]] +name = "actix-web-httpauth" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dda62cf04bc3a9ad2ea8f314f721951cfdb4cdacec4e984d20e77c7bb170991" +dependencies = [ + "actix-utils", + "actix-web", + "base64 0.13.1", + "futures-core", + "futures-util", + "log", + "pin-project-lite", +] + [[package]] name = "actix-ws" version = "0.2.5" @@ -2546,6 +2561,7 @@ version = "1.3.0" dependencies = [ "actix", "actix-web", + "actix-web-httpauth", "anyhow", "argon2", "chacha20poly1305", diff --git a/xelis_wallet/Cargo.toml b/xelis_wallet/Cargo.toml index a43e1665..621a981a 100644 --- a/xelis_wallet/Cargo.toml +++ b/xelis_wallet/Cargo.toml @@ -26,6 +26,7 @@ anyhow = "1" fern = { version = "0.6", features = ["colored"] } serde = { version = "1", features = ["derive", "rc"] } serde_json = "1" +actix-web-httpauth = "0.8.0" [features] default = ["rpc_server"] diff --git a/xelis_wallet/src/main.rs b/xelis_wallet/src/main.rs index e0a9ccf3..4e3afb36 100644 --- a/xelis_wallet/src/main.rs +++ b/xelis_wallet/src/main.rs @@ -1,7 +1,7 @@ use std::{sync::Arc, time::Duration, path::Path}; use anyhow::{Result, Context}; -use xelis_wallet::config::DIR_PATH; +use xelis_wallet::{config::DIR_PATH}; use fern::colors::Color; use log::{error, info}; use clap::Parser; @@ 
-11,6 +11,23 @@ use xelis_common::{config::{ }, prompt::{Prompt, command::{CommandManager, Command, CommandHandler, CommandError}, argument::{Arg, ArgType, ArgumentManager}, LogLevel}, async_handler, crypto::{address::{Address, AddressType}, hash::Hashable}, transaction::TransactionType, globals::{format_coin, set_network_to}, serializer::Serializer, network::Network, api::wallet::FeeBuilder}; use xelis_wallet::wallet::Wallet; +#[cfg(feature = "rpc_server")] +use xelis_wallet::rpc::AuthConfig; + +#[cfg(feature = "rpc_server")] +#[derive(Debug, clap::StructOpt)] +pub struct RPCConfig { + /// RPC Server bind address + #[clap(long)] + rpc_bind_address: Option, + /// username for RPC authentication + #[clap(long)] + rpc_username: Option, + /// password for RPC authentication + #[clap(long)] + rpc_password: Option +} + #[derive(Parser)] #[clap(version = VERSION, about = "XELIS Wallet")] pub struct Config { @@ -42,8 +59,8 @@ pub struct Config { #[clap(long, arg_enum, default_value_t = Network::Mainnet)] network: Network, #[cfg(feature = "rpc_server")] - #[clap(long)] - rpc_bind_address: Option + #[structopt(flatten)] + rpc: RPCConfig } #[tokio::main] @@ -73,10 +90,23 @@ async fn main() -> Result<()> { } #[cfg(feature = "rpc_server")] - if let Some(address) = config.rpc_bind_address { - info!("Enabling RPC Server on {}", address); - if let Err(e) = wallet.enable_rpc_server(address).await { - error!("Error while enabling RPC Server: {}", e); + if let Some(address) = config.rpc.rpc_bind_address { + if config.rpc.rpc_password.is_some() != config.rpc.rpc_username.is_some() { + error!("Invalid parameters configuration: usernamd AND password must be provided"); + } else { + let auth_config = if let (Some(username), Some(password)) = (config.rpc.rpc_username, config.rpc.rpc_password) { + Some(AuthConfig { + username, + password + }) + } else { + None + }; + + info!("Enabling RPC Server on {} {}", address, if auth_config.is_some() { "with authentication" } else { "without 
authentication" }); + if let Err(e) = wallet.enable_rpc_server(address, auth_config).await { + error!("Error while enabling RPC Server: {}", e); + } } } diff --git a/xelis_wallet/src/rpc/mod.rs b/xelis_wallet/src/rpc/mod.rs index f5094f01..78d4c1e8 100644 --- a/xelis_wallet/src/rpc/mod.rs +++ b/xelis_wallet/src/rpc/mod.rs @@ -2,34 +2,45 @@ mod rpc; use std::sync::Arc; +use actix_web_httpauth::{middleware::HttpAuthentication, extractors::basic::BasicAuth}; use anyhow::Result; use log::{info, warn}; use tokio::sync::Mutex; use xelis_common::{config, rpc_server::{RPCHandler, RPCServerHandler, json_rpc}}; -use actix_web::{get, HttpResponse, Responder, HttpServer, web::{Data, self}, App, dev::ServerHandle}; -use crate::wallet::Wallet; +use actix_web::{get, HttpResponse, Responder, HttpServer, web::{Data, self}, App, dev::{ServerHandle, ServiceRequest}, Error, error::{ErrorUnauthorized, ErrorBadGateway, ErrorBadRequest}}; +use crate::wallet::{Wallet}; + +pub struct AuthConfig { + pub username: String, + pub password: String +} pub struct WalletRpcServer { handle: Mutex>, rpc_handler: Arc>>, + auth_config: Option } impl WalletRpcServer { - pub async fn new(bind_address: String, wallet: Arc) -> Result> { + pub async fn new(bind_address: String, wallet: Arc, auth_config: Option) -> Result> { let mut rpc_handler = RPCHandler::new(wallet); rpc::register_methods(&mut rpc_handler); let rpc_handler = Arc::new(rpc_handler); let server = Arc::new(Self { handle: Mutex::new(None), - rpc_handler + rpc_handler, + auth_config }); { let clone = Arc::clone(&server); let http_server = HttpServer::new(move || { let server = Arc::clone(&clone); - App::new().app_data(Data::from(server)) + let auth = HttpAuthentication::basic(auth); + App::new() + .app_data(Data::from(server)) + .wrap(auth) .route("/json_rpc", web::post().to(json_rpc::, WalletRpcServer>)) .service(index) }) @@ -49,6 +60,19 @@ impl WalletRpcServer { Ok(server) } + async fn authenticate(&self, credentials: BasicAuth) -> 
Result<(), Error> { + if let Some(config) = &self.auth_config { + let user = credentials.user_id(); + let password = credentials.password().ok_or(ErrorBadRequest("Missing password"))?; + + if *config.username != *user || *config.password != *password { + return Err(ErrorUnauthorized("Username/password are invalid")) + } + } + + Ok(()) + } + pub async fn stop(&self) { info!("Stopping RPC Server..."); let mut handle = self.handle.lock().await; @@ -67,6 +91,17 @@ impl RPCServerHandler> for WalletRpcServer { } } +async fn auth(request: ServiceRequest, credentials: BasicAuth) -> Result { + let data: Option<&Arc> = request.app_data(); + match data { + Some(server) => match server.authenticate(credentials).await { + Ok(_) => Ok(request), + Err(e) => Err((e, request)) + }, + None => Err((ErrorBadGateway("RPC Server was not found"), request)) + } +} + #[get("/")] async fn index() -> impl Responder { HttpResponse::Ok().body(format!("Hello, world!\nRunning on: {}", config::VERSION)) diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index 77fe625f..45d39c8d 100644 --- a/xelis_wallet/src/wallet.rs +++ b/xelis_wallet/src/wallet.rs @@ -24,7 +24,7 @@ use thiserror::Error; use log::{error, debug}; #[cfg(feature = "rpc_server")] -use crate::rpc::WalletRpcServer; +use crate::rpc::{AuthConfig, WalletRpcServer}; #[derive(Error, Debug)] pub enum WalletError { @@ -69,7 +69,9 @@ pub enum WalletError { #[error(transparent)] Any(#[from] Error), #[error("RPC Server is not running")] - RPCServerNotRunning + RPCServerNotRunning, + #[error("RPC Server is already running")] + RPCServerAlreadyRunning } pub struct Wallet { @@ -195,9 +197,13 @@ impl Wallet { } #[cfg(feature = "rpc_server")] - pub async fn enable_rpc_server(self: &Arc, bind_address: String) -> Result<(), Error> { - let rpc_server = WalletRpcServer::new(bind_address, Arc::clone(self)).await?; - *self.rpc_server.lock().await = Some(rpc_server); + pub async fn enable_rpc_server(self: &Arc, bind_address: String, 
config: Option) -> Result<(), Error> { + let mut lock = self.rpc_server.lock().await; + if lock.is_some() { + return Err(WalletError::RPCServerAlreadyRunning.into()) + } + let rpc_server = WalletRpcServer::new(bind_address, Arc::clone(self), config).await?; + *lock = Some(rpc_server); Ok(()) } From 4a5043f0f233fa3c0f4387c0416ac3b852b36de7 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 20 Apr 2023 11:02:04 +0200 Subject: [PATCH 11/74] wallet: add `list_transactions` rpc method --- xelis_common/src/api/wallet.rs | 23 ++++++++++++++++- xelis_wallet/src/entry.rs | 9 +++++++ xelis_wallet/src/rpc/mod.rs | 2 +- xelis_wallet/src/rpc/rpc.rs | 45 ++++++++++++++++++++++++++++++++-- 4 files changed, 75 insertions(+), 4 deletions(-) diff --git a/xelis_common/src/api/wallet.rs b/xelis_common/src/api/wallet.rs index d7281342..93c381fd 100644 --- a/xelis_common/src/api/wallet.rs +++ b/xelis_common/src/api/wallet.rs @@ -1,6 +1,6 @@ use serde::{Deserialize, Serialize}; -use crate::transaction::{TransactionType, Transaction}; +use crate::{transaction::{TransactionType, Transaction}, crypto::key::PublicKey}; use super::DataHash; @@ -18,6 +18,27 @@ pub struct BuildTransactionParams { pub broadcast: bool } +// :( +fn default_filter_value() -> bool { + true +} + +#[derive(Serialize, Deserialize)] +pub struct ListTransactionsParams { + pub min_topoheight: Option, + pub max_topoheight: Option, + /// Receiver address for outgoing txs, and owner/sender for incoming + pub address: Option, + #[serde(default = "default_filter_value")] + pub accept_incoming: bool, + #[serde(default = "default_filter_value")] + pub accept_outgoing: bool, + #[serde(default = "default_filter_value")] + pub accept_coinbase: bool, + #[serde(default = "default_filter_value")] + pub accept_burn: bool, +} + #[derive(Serialize, Deserialize)] pub struct TransactionResponse<'a> { pub tx: DataHash<'a, Transaction>, diff --git a/xelis_wallet/src/entry.rs b/xelis_wallet/src/entry.rs index e76c3f3f..db6088a8 100644 --- 
a/xelis_wallet/src/entry.rs +++ b/xelis_wallet/src/entry.rs @@ -1,7 +1,9 @@ use std::fmt::{self, Display, Formatter}; +use serde::Serialize; use xelis_common::{crypto::{hash::Hash, key::PublicKey}, serializer::{Serializer, ReaderError, Reader, Writer}, transaction::EXTRA_DATA_LIMIT_SIZE, globals::format_coin}; +#[derive(Serialize, Clone)] pub struct Transfer { key: PublicKey, asset: Hash, @@ -76,13 +78,18 @@ impl Serializer for Transfer { } // TODO support SC call / SC Deploy +#[derive(Serialize, Clone)] pub enum EntryData { + #[serde(rename = "coinbase")] Coinbase(u64), // Coinbase is only XELIS_ASSET + #[serde(rename = "burn")] Burn { asset: Hash, amount: u64 }, + #[serde(rename = "incoming")] Incoming(PublicKey, Vec), + #[serde(rename = "outgoing")] Outgoing(Vec) } @@ -148,11 +155,13 @@ impl Serializer for EntryData { } } +#[derive(Serialize, Clone)] pub struct TransactionEntry { hash: Hash, topoheight: u64, fee: Option, nonce: Option, + #[serde(flatten)] entry: EntryData } diff --git a/xelis_wallet/src/rpc/mod.rs b/xelis_wallet/src/rpc/mod.rs index 78d4c1e8..b4bc741b 100644 --- a/xelis_wallet/src/rpc/mod.rs +++ b/xelis_wallet/src/rpc/mod.rs @@ -92,7 +92,7 @@ impl RPCServerHandler> for WalletRpcServer { } async fn auth(request: ServiceRequest, credentials: BasicAuth) -> Result { - let data: Option<&Arc> = request.app_data(); + let data: Option<&Data> = request.app_data(); match data { Some(server) => match server.authenticate(credentials).await { Ok(_) => Ok(request), diff --git a/xelis_wallet/src/rpc/rpc.rs b/xelis_wallet/src/rpc/rpc.rs index fc4fbc00..a6fdf80a 100644 --- a/xelis_wallet/src/rpc/rpc.rs +++ b/xelis_wallet/src/rpc/rpc.rs @@ -2,14 +2,15 @@ use std::{sync::Arc, borrow::Cow}; use anyhow::Context; use log::info; -use xelis_common::{rpc_server::{RPCHandler, InternalRpcError, parse_params}, config::VERSION, async_handler, api::{wallet::{BuildTransactionParams, FeeBuilder, TransactionResponse}, DataHash}, crypto::hash::Hashable}; +use 
xelis_common::{rpc_server::{RPCHandler, InternalRpcError, parse_params}, config::VERSION, async_handler, api::{wallet::{BuildTransactionParams, FeeBuilder, TransactionResponse, ListTransactionsParams}, DataHash}, crypto::hash::Hashable}; use serde_json::{Value, json}; -use crate::wallet::{Wallet, WalletError}; +use crate::{wallet::{Wallet, WalletError}, entry::{EntryData, TransactionEntry}}; pub fn register_methods(handler: &mut RPCHandler>) { info!("Registering RPC methods..."); handler.register_method("version", async_handler!(version)); handler.register_method("build_transaction", async_handler!(build_transaction)); + handler.register_method("list_transactions", async_handler!(list_transactions)); } async fn version(_: Arc, body: Value) -> Result { @@ -42,4 +43,44 @@ async fn build_transaction(wallet: Arc, body: Value) -> Result, body: Value) -> Result { + let params: ListTransactionsParams = parse_params(body)?; + let wallet = wallet.get_storage().read().await; + let txs = wallet.get_transactions()?; + let response: Vec> = txs.iter().filter(|e| { + if let Some(topoheight) = ¶ms.min_topoheight { + if e.get_topoheight() < *topoheight { + return false + } + } + + if let Some(topoheight) = ¶ms.max_topoheight { + if e.get_topoheight() > *topoheight { + return false + } + } + + match e.get_entry() { + EntryData::Coinbase(_) if params.accept_coinbase => true, + EntryData::Burn { .. 
} if params.accept_burn => true, + EntryData::Incoming(sender, _) if params.accept_incoming => match ¶ms.address { + Some(key) => *key == *sender, + None => true + }, + EntryData::Outgoing(txs) if params.accept_outgoing => match ¶ms.address { + Some(filter_key) => txs.iter().find(|tx| { + *tx.get_key() == *filter_key + }).is_some(), + None => true, + }, + _ => false + } + }).map(|e| { + let hash = e.get_hash(); + DataHash { hash: Cow::Borrowed(hash), data: Cow::Borrowed(e) } + }).collect(); + + Ok(json!(response)) } \ No newline at end of file From 8d6557da2bc92dd6a7b54a3a1e9bb2908ef46a55 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 20 Apr 2023 11:08:27 +0200 Subject: [PATCH 12/74] update dependencies --- Cargo.lock | 470 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 264 insertions(+), 206 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 882233de..6207e8fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -45,15 +45,15 @@ dependencies = [ [[package]] name = "actix-http" -version = "3.3.0" +version = "3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0070905b2c4a98d184c4e81025253cb192aa8a73827553f38e9410801ceb35bb" +checksum = "c2079246596c18b4a33e274ae10c0e50613f4d32a4198e09c7b93771013fed74" dependencies = [ "actix-codec", "actix-rt", "actix-service", "actix-utils", - "ahash", + "ahash 0.8.3", "base64 0.21.0", "bitflags", "brotli", @@ -89,7 +89,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "465a6172cf69b960917811022d8f29bc0b7fa1398bc4f78b3c466673db1213b6" dependencies = [ "quote", - "syn 1.0.108", + "syn 1.0.109", ] [[package]] @@ -157,9 +157,9 @@ dependencies = [ [[package]] name = "actix-web" -version = "4.3.0" +version = "4.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "464e0fddc668ede5f26ec1f9557a8d44eda948732f40c6b0ad79126930eb775f" +checksum = "cd3cb42f9566ab176e1ef0b8b3a896529062b4efc6be0123046095914c4c1c96" dependencies = [ 
"actix-codec", "actix-http", @@ -170,7 +170,7 @@ dependencies = [ "actix-service", "actix-utils", "actix-web-codegen", - "ahash", + "ahash 0.7.6", "bytes", "bytestring", "cfg-if", @@ -192,7 +192,7 @@ dependencies = [ "serde_urlencoded", "smallvec", "socket2", - "time 0.3.19", + "time 0.3.20", "url", ] @@ -216,14 +216,14 @@ dependencies = [ [[package]] name = "actix-web-codegen" -version = "4.1.0" +version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fa9362663c8643d67b2d5eafba49e4cb2c8a053a29ed00a0bea121f17c76b13" +checksum = "2262160a7ae29e3415554a3f1fc04c764b1540c116aa524683208078b7a75bc9" dependencies = [ "actix-router", "proc-macro2", "quote", - "syn 1.0.108", + "syn 1.0.109", ] [[package]] @@ -262,7 +262,7 @@ checksum = "6d44b8fee1ced9671ba043476deddef739dd0959bf77030b26b738cc591737a7" dependencies = [ "proc-macro2", "quote", - "syn 1.0.108", + "syn 1.0.109", ] [[package]] @@ -273,9 +273,9 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aead" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c192eb8f11fc081b0fe4259ba5af04217d4e0faddd02417310a927911abd7c8" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ "crypto-common", "generic-array", @@ -287,7 +287,19 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", + "once_cell", + "version_check", +] + +[[package]] +name = "ahash" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +dependencies = [ + "cfg-if", + "getrandom 0.2.9", "once_cell", "version_check", ] @@ -327,9 +339,9 @@ dependencies = [ [[package]] name = "anyhow" -version 
= "1.0.69" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" +checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" [[package]] name = "argon2" @@ -344,24 +356,24 @@ dependencies = [ [[package]] name = "async-recursion" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b015a331cc64ebd1774ba119538573603427eaace0a1950c423ab971f903796" +checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ "proc-macro2", "quote", - "syn 1.0.108", + "syn 2.0.15", ] [[package]] name = "async-trait" -version = "0.1.64" +version = "0.1.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2" +checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn 1.0.108", + "syn 2.0.15", ] [[package]] @@ -395,9 +407,9 @@ checksum = "a4a4ddaa51a5bc52a6948f74c06d20aaaddb71924eab79b8c97a8c556e942d6a" [[package]] name = "base64ct" -version = "1.5.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bitflags" @@ -425,9 +437,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] @@ -473,9 +485,9 @@ checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" [[package]] name = "bytestring" 
-version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7f83e57d9154148e355404702e2694463241880b939570d7c97c014da7a69a1" +checksum = "238e4886760d98c4f899360c834fa93e62cf7f721ac3c2da375cbdf4b8679aae" dependencies = [ "bytes", ] @@ -497,9 +509,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chacha20" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fc89c7c5b9e7a02dfe45cd2367bae382f9ed31c61ca8debe5f827c420a2f08" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ "cfg-if", "cipher", @@ -521,9 +533,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ "iana-time-zone", "js-sys", @@ -536,9 +548,9 @@ dependencies = [ [[package]] name = "cipher" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1873270f8f7942c191139cb8a40fd228da6c3fd2fc376d7e92d47aa14aeb59e" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", @@ -572,7 +584,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 1.0.108", + "syn 1.0.109", ] [[package]] @@ -618,7 +630,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e859cd57d0710d9e06c381b550c06e76992472a8c6d527aecd2fc673dcc231fb" dependencies = [ "percent-encoding", - "time 0.3.19", + "time 0.3.20", "version_check", ] @@ -634,15 +646,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.3" +version = "0.8.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" +checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +checksum = "280a9f2d8b3a38871a3c8a46fb80db65e5e5ed97da80c4d08bf27fb63e35e181" dependencies = [ "libc", ] @@ -658,9 +670,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if", "crossbeam-utils", @@ -668,9 +680,9 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.13" +version = "0.9.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" +checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ "autocfg", "cfg-if", @@ -681,9 +693,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ "cfg-if", ] @@ -714,9 +726,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.91" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d3488e7665a7a483b57e25bdd90d0aeb2bc7608c8d0346acf2ad3f1caf1d62" +checksum = "f61f1b6389c3fe1c316bf8a4dccc90a38208354b330925bce1f74a6c4756eb93" dependencies = 
[ "cc", "cxxbridge-flags", @@ -726,9 +738,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.91" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48fcaf066a053a41a81dfb14d57d99738b767febb8b735c3016e469fac5da690" +checksum = "12cee708e8962df2aeb38f594aae5d827c022b6460ac71a7a3e2c3c2aae5a07b" dependencies = [ "cc", "codespan-reporting", @@ -736,24 +748,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 1.0.108", + "syn 2.0.15", ] [[package]] name = "cxxbridge-flags" -version = "1.0.91" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ef98b8b717a829ca5603af80e1f9e2e48013ab227b68ef37872ef84ee479bf" +checksum = "7944172ae7e4068c533afbb984114a56c46e9ccddda550499caa222902c7f7bb" [[package]] name = "cxxbridge-macro" -version = "1.0.91" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "086c685979a698443656e5cf7856c95c642295a38599f12fb1ff76fb28d19892" +checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" dependencies = [ "proc-macro2", "quote", - "syn 1.0.108", + "syn 2.0.15", ] [[package]] @@ -766,7 +778,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 1.0.108", + "syn 1.0.109", ] [[package]] @@ -784,7 +796,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ - "block-buffer 0.10.3", + "block-buffer 0.10.4", "crypto-common", "subtle", ] @@ -825,9 +837,9 @@ dependencies = [ [[package]] name = "fern" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bdd7b0849075e79ee9a1836df22c717d1eba30451796fdc631b04565dd11e2a" +checksum = "d9f0c14694cbd524c8720dd69b0e3179344f04ebb5f90f2e4a440c6ea3b2f1ee" dependencies = [ "colored", "log", @@ -870,9 +882,9 @@ dependencies = [ 
[[package]] name = "futures-channel" -version = "0.3.26" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e5317663a9089767a1ec00a487df42e0ca174b61b4483213ac24448e4664df5" +checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" dependencies = [ "futures-core", ] @@ -891,7 +903,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" dependencies = [ "proc-macro2", "quote", - "syn 2.0.12", + "syn 2.0.15", ] [[package]] @@ -932,9 +944,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.6" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", @@ -953,9 +965,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ "cfg-if", "libc", @@ -964,9 +976,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.15" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +checksum = "17f8a914c2987b688368b5138aa05321db91f4090cf26118185672ad588bce21" dependencies = [ "bytes", "fnv", @@ -987,7 +999,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash", + "ahash 0.7.6", ] [[package]] @@ -1056,9 +1068,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = 
"0.14.24" +version = "0.14.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e011372fa0b68db8350aa7a248930ecc7839bf46d8485577d69f117a75f164c" +checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4" dependencies = [ "bytes", "futures-channel", @@ -1080,16 +1092,16 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.53" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "winapi", + "windows", ] [[package]] @@ -1114,9 +1126,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.2" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown", @@ -1142,21 +1154,21 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" +checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f" [[package]] name = "itoa" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" +checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6" [[package]] name = "jobserver" -version = "0.1.25" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" 
+checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" dependencies = [ "libc", ] @@ -1193,9 +1205,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.139" +version = "0.2.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" +checksum = "3304a64d199bb964be99741b7a14d26972741915b3649639149b2479bb46f4b5" [[package]] name = "link-cplusplus" @@ -1260,18 +1272,18 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1" dependencies = [ "autocfg", ] [[package]] name = "mime" -version = "0.3.16" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "miniz_oxide" @@ -1354,9 +1366,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "os_str_bytes" -version = "6.4.1" +version = "6.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" +checksum = "ceedf44fb00f2d1984b0bc98102627ce622e083e49a5bacdb3e514fa4238e267" [[package]] name = "parking_lot" @@ -1419,9 +1431,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" +checksum = 
"9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79" [[package]] name = "percent-encoding" @@ -1473,7 +1485,7 @@ dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", - "syn 1.0.108", + "syn 1.0.109", "version_check", ] @@ -1490,9 +1502,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.54" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e472a104799c74b514a57226160104aa483546de37e839ec50e3c2e41dd87534" +checksum = "2b63bdb0cd06f1f4dedf69b254734f9b45af66e4a031e42a7480257d9898b435" dependencies = [ "unicode-ident", ] @@ -1565,7 +1577,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.8", + "getrandom 0.2.9", ] [[package]] @@ -1588,9 +1600,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.7.1" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" +checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d" dependencies = [ "aho-corasick", "memchr", @@ -1599,15 +1611,15 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.28" +version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "reqwest" -version = "0.11.14" +version = "0.11.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21eed90ec8570952d53b772ecf8f206aa1ec9a3d76b2521c56c42973f2d91ee9" +checksum = "27b71749df584b7f4cac2c426c127a7c785a5106cc98f7a8feb044115f0fa254" dependencies = [ "base64 0.21.0", "bytes", @@ -1697,9 +1709,9 @@ dependencies = [ [[package]] name = "ryu" -version = 
"1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" +checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041" [[package]] name = "schannel" @@ -1718,9 +1730,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scratch" -version = "1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" +checksum = "1792db035ce95be60c3f8853017b3999209281c24e2ba5bc8e59bf97a0c590c1" [[package]] name = "sct" @@ -1757,15 +1769,15 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" +checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed" [[package]] name = "serde" -version = "1.0.152" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c" dependencies = [ "serde_derive", ] @@ -1781,20 +1793,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" dependencies = [ "proc-macro2", "quote", - "syn 1.0.108", + "syn 2.0.15", ] [[package]] name = "serde_json" -version = "1.0.93" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" +checksum = 
"057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1" dependencies = [ "itoa", "ryu", @@ -1839,9 +1851,9 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.6" +version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" +checksum = "54c2bb1a323307527314a36bfb73f24febb08ce2b8a554bf4ffd6f51ad15198c" dependencies = [ "digest 0.10.6", "keccak", @@ -1895,9 +1907,9 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" dependencies = [ "libc", "winapi", @@ -1923,9 +1935,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.108" +version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56e159d99e6c2b93995d171050271edb50ecc5288fbc7cc17de8fdce4e58c14" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", @@ -1934,27 +1946,15 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.12" +version = "2.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79d9531f94112cfc3e4c8f5f02cb2b58f72c97b7efd85f70203cc6d8efda5927" +checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] -[[package]] -name = "synstructure" -version = "0.12.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.108", - "unicode-xid", -] 
- [[package]] name = "termcolor" version = "1.2.0" @@ -1972,22 +1972,22 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" +checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" +checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" dependencies = [ "proc-macro2", "quote", - "syn 1.0.108", + "syn 2.0.15", ] [[package]] @@ -2003,9 +2003,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53250a3b3fed8ff8fd988587d8925d26a83ac3845d9e03b220b37f34c2b8d6c2" +checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890" dependencies = [ "itoa", "serde", @@ -2021,9 +2021,9 @@ checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" [[package]] name = "time-macros" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a460aeb8de6dcb0f381e1ee05f1cd56fcf5a5f6eb8187ff3d8f0b11078d38b7c" +checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36" dependencies = [ "time-core", ] @@ -2045,14 +2045,13 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.25.0" +version = "1.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e00990ebabbe4c14c08aca901caed183ecd5c09562a12c824bb53d3c3fd3af" +checksum = 
"d0de47a4eecbe11f498978a9b29d792f0d2692d1dd003650c24c76510e3bc001" dependencies = [ "autocfg", "bytes", "libc", - "memchr", "mio", "num_cpus", "parking_lot 0.12.1", @@ -2060,18 +2059,18 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.42.0", + "windows-sys 0.45.0", ] [[package]] name = "tokio-macros" -version = "1.8.2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +checksum = "61a573bdc87985e9d6ddeed1b3d864e8a302c847e40d647746df2f1de209d1ce" dependencies = [ "proc-macro2", "quote", - "syn 1.0.108", + "syn 2.0.15", ] [[package]] @@ -2177,15 +2176,15 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "unicode-bidi" -version = "0.3.10" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54675592c1dbefd78cbd98db9bacd89886e1ca50692a0692baefffdeb92dd58" +checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" +checksum = "e5464a87b239f13a63a501f2701565754bae92d243d4bb7eb12f6d57d2269bf4" [[package]] name = "unicode-normalization" @@ -2202,12 +2201,6 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" -[[package]] -name = "unicode-xid" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" - [[package]] name = "universal-hash" version = "0.5.0" @@ -2296,7 +2289,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 1.0.108", + "syn 1.0.109", 
"wasm-bindgen-shared", ] @@ -2330,7 +2323,7 @@ checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", - "syn 1.0.108", + "syn 1.0.109", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -2392,19 +2385,28 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +dependencies = [ + "windows-targets 0.48.0", +] + [[package]] name = "windows-sys" version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", ] [[package]] @@ -2413,65 +2415,122 @@ version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ - "windows-targets", + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + 
"windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows-targets" -version = "0.42.1" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.42.1" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.42.1" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" 
[[package]] name = "windows_i686_gnu" -version = "0.42.1" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.42.1" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = "0.42.1" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" -version = "0.42.1" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463" +checksum = 
"9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.42.1" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winreg" @@ -2581,23 +2640,22 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.3.3" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 1.0.108", - "synstructure", + "syn 2.0.15", ] [[package]] @@ -2611,9 +2669,9 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "6.0.4+zstd.1.5.4" +version = "6.0.5+zstd.1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7afb4b54b8910cf5447638cb54bf4e8a65cbedd783af98b98c62ffe91f185543" +checksum = "d56d9e60b4b1758206c238a10165fbcae3ca37b01744e394c463463f6529d23b" dependencies = [ "libc", "zstd-sys", @@ -2621,9 +2679,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.7+zstd.1.5.4" +version = "2.0.8+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94509c3ba2fe55294d752b79842c530ccfab760192521df74a081a78d2b3c7f5" +checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" dependencies = [ "cc", "libc", From e82a2edb48fc511c94126acb8f3c3bf6f61d119b Mon Sep 17 
00:00:00 2001 From: Slixe Date: Thu, 20 Apr 2023 20:06:34 +0200 Subject: [PATCH 13/74] hash: add inline --- xelis_common/src/block/miner.rs | 1 + xelis_common/src/crypto/hash.rs | 3 +++ 2 files changed, 4 insertions(+) diff --git a/xelis_common/src/block/miner.rs b/xelis_common/src/block/miner.rs index d68a0f6b..b4e36922 100644 --- a/xelis_common/src/block/miner.rs +++ b/xelis_common/src/block/miner.rs @@ -25,6 +25,7 @@ impl<'a> BlockMiner<'a> { } } + #[inline(always)] pub fn get_pow_hash(&self) -> Hash { // TODO replace with real POW algorithm hash(&self.to_bytes()) diff --git a/xelis_common/src/crypto/hash.rs b/xelis_common/src/crypto/hash.rs index e964cb40..8169737e 100644 --- a/xelis_common/src/crypto/hash.rs +++ b/xelis_common/src/crypto/hash.rs @@ -83,12 +83,14 @@ impl<'a> Deserialize<'a> for Hash { } pub trait Hashable: Serializer { + #[inline(always)] fn hash(&self) -> Hash { let bytes = self.to_bytes(); hash(&bytes) } } +#[inline(always)] pub fn hash(value: &[u8]) -> Hash { let mut hasher = get_hasher(); hasher.update(value); @@ -96,6 +98,7 @@ pub fn hash(value: &[u8]) -> Hash { Hash(result) } +#[inline(always)] pub fn get_hasher() -> impl Digest { Keccak256::new() } \ No newline at end of file From 52a5575b259456c0a6dfbd95e8d6d4901858dab8 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 20 Apr 2023 22:08:48 +0200 Subject: [PATCH 14/74] delete unused Cargo.lock --- xelis_wallet/Cargo.lock | 183 ---------------------------------------- 1 file changed, 183 deletions(-) delete mode 100644 xelis_wallet/Cargo.lock diff --git a/xelis_wallet/Cargo.lock b/xelis_wallet/Cargo.lock deleted file mode 100644 index 787a2e15..00000000 --- a/xelis_wallet/Cargo.lock +++ /dev/null @@ -1,183 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "aead" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c192eb8f11fc081b0fe4259ba5af04217d4e0faddd02417310a927911abd7c8" -dependencies = [ - "crypto-common", - "generic-array", -] - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "chacha20" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fc89c7c5b9e7a02dfe45cd2367bae382f9ed31c61ca8debe5f827c420a2f08" -dependencies = [ - "cfg-if", - "cipher", - "cpufeatures", -] - -[[package]] -name = "chacha20poly1305" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" -dependencies = [ - "aead", - "chacha20", - "cipher", - "poly1305", - "zeroize", -] - -[[package]] -name = "cipher" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1873270f8f7942c191139cb8a40fd228da6c3fd2fc376d7e92d47aa14aeb59e" -dependencies = [ - "crypto-common", - "inout", - "zeroize", -] - -[[package]] -name = "cpufeatures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" -dependencies = [ - "libc", -] - -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "rand_core", - "typenum", -] - -[[package]] -name = "generic-array" -version = "0.14.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" -dependencies = [ - 
"typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" -dependencies = [ - "cfg-if", - "libc", - "wasi", -] - -[[package]] -name = "inout" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" -dependencies = [ - "generic-array", -] - -[[package]] -name = "libc" -version = "0.2.139" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" - -[[package]] -name = "opaque-debug" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" - -[[package]] -name = "poly1305" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" -dependencies = [ - "cpufeatures", - "opaque-debug", - "universal-hash", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "subtle" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" - -[[package]] -name = "typenum" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" - -[[package]] -name = "universal-hash" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7d3160b73c9a19f7e2939a2fdad446c57c1bbbbf4d919d3213ff1267a580d8b5" -dependencies = [ - "crypto-common", - "subtle", -] - -[[package]] -name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "wallet" -version = "0.1.0" -dependencies = [ - "chacha20poly1305", -] - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "zeroize" -version = "1.5.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" From 7a31b0bf92167fdba283351bab61f4943484e19b Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 20 Apr 2023 22:10:25 +0200 Subject: [PATCH 15/74] downgrade to sha3 v0.10.6 due to performance issues --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6207e8fe..2d483e47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1851,9 +1851,9 @@ dependencies = [ [[package]] name = "sha3" -version = "0.10.7" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54c2bb1a323307527314a36bfb73f24febb08ce2b8a554bf4ffd6f51ad15198c" +checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" dependencies = [ "digest 0.10.6", "keccak", From 269c72f1ef7a07afbf5ea39750c8d4053bc44880 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 21 Apr 2023 12:02:11 +0200 Subject: [PATCH 16/74] common: use digest function instead of Hasher --- xelis_common/src/crypto/hash.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/xelis_common/src/crypto/hash.rs b/xelis_common/src/crypto/hash.rs index 8169737e..ba212946 100644 --- 
a/xelis_common/src/crypto/hash.rs +++ b/xelis_common/src/crypto/hash.rs @@ -92,13 +92,6 @@ pub trait Hashable: Serializer { #[inline(always)] pub fn hash(value: &[u8]) -> Hash { - let mut hasher = get_hasher(); - hasher.update(value); - let result: [u8; HASH_SIZE] = hasher.finalize()[..].try_into().unwrap(); + let result: [u8; HASH_SIZE] = Keccak256::digest(value)[..].try_into().unwrap(); Hash(result) -} - -#[inline(always)] -pub fn get_hasher() -> impl Digest { - Keccak256::new() } \ No newline at end of file From 7fc6d511f1b0470bb1243185b2bb424c8c6bd31e Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 21 Apr 2023 13:00:55 +0200 Subject: [PATCH 17/74] miner: display height from job --- xelis_miner/src/main.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/xelis_miner/src/main.rs b/xelis_miner/src/main.rs index dc5260a6..807f43f4 100644 --- a/xelis_miner/src/main.rs +++ b/xelis_miner/src/main.rs @@ -55,7 +55,7 @@ pub struct MinerConfig { #[derive(Clone)] enum ThreadNotification<'a> { - NewJob(BlockMiner<'a>, u64), + NewJob(BlockMiner<'a>, u64, u64), // block work, difficulty, height Exit } @@ -240,11 +240,11 @@ async fn handle_websocket_message(message: Result(text.as_bytes())? 
{ SocketMessage::NewJob(job) => { - info!("New job received from daemon: difficulty = {}", job.difficulty); + info!("New job received from daemon: difficulty = {} and height = {}", job.difficulty, job.height); let block = BlockMiner::from_hex(job.template).context("Error while decoding new job received from daemon")?; CURRENT_HEIGHT.store(job.height, Ordering::SeqCst); - if let Err(e) = job_sender.send(ThreadNotification::NewJob(block, job.difficulty)) { + if let Err(e) = job_sender.send(ThreadNotification::NewJob(block, job.difficulty, job.height)) { error!("Error while sending new job to threads: {}", e); } }, @@ -290,7 +290,7 @@ fn start_thread(id: u8, mut job_receiver: broadcast::Receiver { + ThreadNotification::NewJob(new_job, expected_difficulty, height) => { debug!("Mining Thread #{} received a new job", id); job = new_job; // set thread id in extra nonce for more work spread between threads @@ -319,7 +319,7 @@ fn start_thread(id: u8, mut job_receiver: broadcast::Receiver Date: Fri, 21 Apr 2023 21:52:17 +0200 Subject: [PATCH 18/74] daemon: add cache for tip base --- xelis_daemon/src/core/blockchain.rs | 30 +++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 35aa3edd..9e036b94 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1,4 +1,5 @@ use anyhow::Error; +use lru::LruCache; use serde_json::{Value, json}; use xelis_common::{ config::{DEFAULT_P2P_BIND_ADDRESS, P2P_DEFAULT_MAX_PEERS, DEFAULT_RPC_BIND_ADDRESS, DEFAULT_CACHE_SIZE, MAX_BLOCK_SIZE, EMISSION_SPEED_FACTOR, MAX_SUPPLY, DEV_FEE_PERCENT, GENESIS_BLOCK, TIPS_LIMIT, TIMESTAMP_IN_FUTURE_LIMIT, STABLE_LIMIT, GENESIS_BLOCK_HASH, MINIMUM_DIFFICULTY, GENESIS_BLOCK_DIFFICULTY, XELIS_ASSET, SIDE_BLOCK_REWARD_PERCENT, DEV_PUBLIC_KEY, BLOCK_TIME}, @@ -71,7 +72,10 @@ pub struct Blockchain { // used to skip PoW verification simulator: bool, // current 
network type on which one we're using/connected to - network: Network + network: Network, + // this cache is used to avoid to recompute the common base for each block and is mandatory + // key is (tip hash, tip height) while value is (base hash, base height) + tip_base_cache: Mutex>, } impl Blockchain { @@ -101,7 +105,8 @@ impl Blockchain { rpc: Mutex::new(None), difficulty: AtomicU64::new(GENESIS_BLOCK_DIFFICULTY), simulator: config.simulator, - network + network, + tip_base_cache: Mutex::new(LruCache::new(1024)) }; // include genesis block @@ -355,12 +360,21 @@ impl Blockchain { Ok(true) } - // TODO: cache based on height/hash #[async_recursion] async fn find_tip_base(&self, storage: &S, hash: &Hash, height: u64) -> Result<(Hash, u64), BlockchainError> { + // first, check if we have it in cache + { + if let Some((base_hash, base_height)) = self.tip_base_cache.lock().await.get(&(hash.clone(), height)) { + debug!("Tip Base for {} at height {} found: {} for height {}", hash, height, hash, height); + return Ok((base_hash.clone(), *base_height)) + } + } + let tips = storage.get_past_blocks_for_block_hash(hash).await?; let tips_count = tips.len(); if tips_count == 0 { // only genesis block can have 0 tips saved + // save in cache + self.tip_base_cache.lock().await.put((hash.clone(), height), (hash.clone(), 0)); return Ok((hash.clone(), 0)) } @@ -368,6 +382,8 @@ impl Blockchain { for hash in tips.iter() { if self.is_block_sync_at_height(storage, hash, height).await? 
{ let block_height = storage.get_height_for_block_hash(hash).await?; + // save in cache + self.tip_base_cache.lock().await.put((hash.clone(), height), (hash.clone(), block_height)); return Ok((hash.clone(), block_height)) } bases.push(self.find_tip_base(storage, hash, height).await?); @@ -381,7 +397,11 @@ impl Blockchain { bases.sort_by(|(_, a), (_, b)| b.cmp(a)); // assert!(bases[0].1 >= bases[bases.len() - 1].1); - Ok(bases.remove(bases.len() - 1)) + let (base_hash, base_height) = bases.remove(bases.len() - 1); + // save in cache + self.tip_base_cache.lock().await.put((hash.clone(), height), (base_hash.clone(), base_height)); + + Ok((base_hash, base_height)) } // find the common base (block hash and block height) of all tips @@ -413,6 +433,8 @@ impl Blockchain { // assert!(bases[0].1 >= bases[bases.len() - 1].1); // retrieve the first block hash with its height + // we delete the last element because we sorted it descending + // and we want the lowest height Ok(bases.remove(bases.len() - 1)) } From 15710d4a698f19906d09c2211757612e201093d9 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 21 Apr 2023 23:24:41 +0200 Subject: [PATCH 19/74] daemon: implement work tip score cache --- xelis_daemon/src/core/blockchain.rs | 94 ++++++++++++++++++----------- 1 file changed, 60 insertions(+), 34 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 9e036b94..389b0aeb 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -76,6 +76,8 @@ pub struct Blockchain { // this cache is used to avoid to recompute the common base for each block and is mandatory // key is (tip hash, tip height) while value is (base hash, base height) tip_base_cache: Mutex>, + // tip work score is used to determine the best tip based on a block, tip base ands a base height + tip_work_score_cache: Mutex, u64)>>, } impl Blockchain { @@ -106,7 +108,8 @@ impl Blockchain { difficulty: 
AtomicU64::new(GENESIS_BLOCK_DIFFICULTY), simulator: config.simulator, network, - tip_base_cache: Mutex::new(LruCache::new(1024)) + tip_base_cache: Mutex::new(LruCache::new(1024)), + tip_work_score_cache: Mutex::new(LruCache::new(1024)) }; // include genesis block @@ -362,34 +365,42 @@ impl Blockchain { #[async_recursion] async fn find_tip_base(&self, storage: &S, hash: &Hash, height: u64) -> Result<(Hash, u64), BlockchainError> { - // first, check if we have it in cache - { - if let Some((base_hash, base_height)) = self.tip_base_cache.lock().await.get(&(hash.clone(), height)) { - debug!("Tip Base for {} at height {} found: {} for height {}", hash, height, hash, height); + let (tips, tips_count) = { + // first, check if we have it in cache + let mut cache = self.tip_base_cache.lock().await; + if let Some((base_hash, base_height)) = cache.get(&(hash.clone(), height)) { + trace!("Tip Base for {} at height {} found in cache: {} for height {}", hash, height, base_hash, base_height); return Ok((base_hash.clone(), *base_height)) } - } - let tips = storage.get_past_blocks_for_block_hash(hash).await?; - let tips_count = tips.len(); - if tips_count == 0 { // only genesis block can have 0 tips saved - // save in cache - self.tip_base_cache.lock().await.put((hash.clone(), height), (hash.clone(), 0)); - return Ok((hash.clone(), 0)) - } + let tips = storage.get_past_blocks_for_block_hash(hash).await?; + let tips_count = tips.len(); + if tips_count == 0 { // only genesis block can have 0 tips saved + // save in cache + cache.put((hash.clone(), height), (hash.clone(), 0)); + return Ok((hash.clone(), 0)) + } + (tips, tips_count) + }; let mut bases = Vec::with_capacity(tips_count); for hash in tips.iter() { + // if block is sync, it is a tip base if self.is_block_sync_at_height(storage, hash, height).await? 
{ let block_height = storage.get_height_for_block_hash(hash).await?; - // save in cache - self.tip_base_cache.lock().await.put((hash.clone(), height), (hash.clone(), block_height)); + // save in cache (lock each time to avoid deadlocks) + let mut cache = self.tip_base_cache.lock().await; + cache.put((hash.clone(), height), (hash.clone(), block_height)); + return Ok((hash.clone(), block_height)) } + + // if block is not sync, we need to find its tip base too bases.push(self.find_tip_base(storage, hash, height).await?); } if bases.is_empty() { + error!("Tip base for {} at height {} not found", hash, height); return Err(BlockchainError::ExpectedTips) } @@ -399,13 +410,16 @@ impl Blockchain { let (base_hash, base_height) = bases.remove(bases.len() - 1); // save in cache - self.tip_base_cache.lock().await.put((hash.clone(), height), (base_hash.clone(), base_height)); + let mut cache = self.tip_base_cache.lock().await; + cache.put((hash.clone(), height), (base_hash.clone(), base_height)); + trace!("Tip Base for {} at height {} found: {} for height {}", hash, height, base_hash, base_height); Ok((base_hash, base_height)) } // find the common base (block hash and block height) of all tips async fn find_common_base<'a, I: IntoIterator + Copy>(&self, storage: &S, tips: I) -> Result<(Hash, u64), BlockchainError> { + debug!("Searching for common base for tips {}", tips.into_iter().map(|h| h.to_string()).collect::>().join(", ")); let mut best_height = 0; // first, we check the best (highest) height of all tips for hash in tips.into_iter() { @@ -435,7 +449,9 @@ impl Blockchain { // retrieve the first block hash with its height // we delete the last element because we sorted it descending // and we want the lowest height - Ok(bases.remove(bases.len() - 1)) + let (base_hash, base_height) = bases.remove(bases.len() - 1); + debug!("Common base {} with height {} on {}", base_hash, base_height, bases.len() + 1); + Ok((base_hash, base_height)) } #[async_recursion] // TODO no recursion 
@@ -533,9 +549,14 @@ impl Blockchain { Ok(()) } - // TODO cache // find the sum of work done - async fn find_tip_work_score(&self, storage: &S, hash: &Hash, base: &Hash, base_height: u64) -> Result<(HashMap, u64), BlockchainError> { + async fn find_tip_work_score(&self, storage: &S, hash: &Hash, base: &Hash, base_height: u64) -> Result<(HashSet, u64), BlockchainError> { + let mut cache = self.tip_work_score_cache.lock().await; + if let Some(value) = cache.get(&(hash.clone(), base.clone(), base_height)) { + trace!("Found tip work score in cache: set [{}], height: {}", value.0.iter().map(|h| h.to_string()).collect::>().join(", "), value.1); + return Ok(value.clone()) + } + let block = storage.get_block_header_by_hash(hash).await?; let mut map: HashMap = HashMap::new(); let base_topoheight = storage.get_topo_height_for_hash(base).await?; @@ -553,12 +574,17 @@ impl Blockchain { } map.insert(hash.clone(), storage.get_difficulty_for_block_hash(hash).await?); + let mut set = HashSet::with_capacity(map.len()); let mut score = 0; - for value in map.values() { + for (hash, value) in map { + set.insert(hash); score += value; } - Ok((map, score)) + // save this result in cache + cache.put((hash.clone(), base.clone(), base_height), (set.clone(), score)); + + Ok((set, score)) } async fn find_best_tip<'a>(&self, storage: &S, tips: &'a HashSet, base: &Hash, base_height: u64) -> Result<&'a Hash, BlockchainError> { @@ -1057,12 +1083,12 @@ impl Blockchain { let (base_hash, base_height) = self.find_common_base(storage, &tips).await?; let best_tip = self.find_best_tip(storage, &tips, &base_hash, base_height).await?; - trace!("Best tip selected: {}", best_tip); + debug!("Best tip selected: {}", best_tip); let base_topo_height = storage.get_topo_height_for_hash(&base_hash).await?; // generate a full order until base_topo_height let mut full_order = self.generate_full_order(storage, &best_tip, &base_hash, base_topo_height).await?; - trace!("Generated full order size: {}, with base ({}) 
topo height: {}", full_order.len(), base_hash, base_topo_height); + debug!("Generated full order size: {}, with base ({}) topo height: {}", full_order.len(), base_hash, base_topo_height); // rpc server lock let rpc_server = self.rpc.lock().await; @@ -1079,15 +1105,16 @@ impl Blockchain { let mut is_written = base_topo_height == 0; let mut skipped = 0; // detect which part of DAG reorg stay, for other part, undo all executed txs + debug!("Detecting stable point of DAG and cleaning txs above it"); { let mut topoheight = base_topo_height; while topoheight <= current_topoheight { let hash_at_topo = storage.get_hash_at_topo_height(topoheight).await?; - debug!("Cleaning txs at topoheight {} ({})", topoheight, hash_at_topo); + trace!("Cleaning txs at topoheight {} ({})", topoheight, hash_at_topo); if !is_written { if let Some(order) = full_order.get(0) { if storage.is_block_topological_ordered(order).await && *order == hash_at_topo { - debug!("Hash {} at topo {} stay the same, skipping cleaning", hash_at_topo, topoheight); + trace!("Hash {} at topo {} stay the same, skipping cleaning", hash_at_topo, topoheight); // remove the hash from the order because we don't need to recompute it full_order.remove(0); topoheight += 1; @@ -1098,13 +1125,13 @@ impl Blockchain { is_written = true; } - debug!("Cleaning transactions executions at topo height {} (block {})", topoheight, hash_at_topo); + trace!("Cleaning transactions executions at topo height {} (block {})", topoheight, hash_at_topo); let block = storage.get_block_header_by_hash(&hash_at_topo).await?; // mark txs as unexecuted for tx_hash in block.get_txs_hashes() { - debug!("Removing execution of {}", tx_hash); + trace!("Removing execution of {}", tx_hash); storage.remove_tx_executed(&tx_hash)?; } @@ -1113,6 +1140,7 @@ impl Blockchain { } // time to order the DAG that is moving + debug!("Ordering blocks based on generated DAG order ({} blocks)", full_order.len()); for (i, hash) in full_order.into_iter().enumerate() { 
highest_topo = base_topo_height + skipped + i as u64; @@ -1124,7 +1152,7 @@ impl Blockchain { } is_written = true; - debug!("Ordering block {} at topoheight {}", hash, highest_topo); + trace!("Ordering block {} at topoheight {}", hash, highest_topo); storage.set_topo_height_for_block(&hash, highest_topo).await?; let past_supply = if highest_topo == 0 { @@ -1134,7 +1162,7 @@ impl Blockchain { }; let block_reward = if self.is_side_block(storage, &hash).await? { - debug!("Block {} at topoheight {} is a side block", hash, highest_topo); + trace!("Block {} at topoheight {} is a side block", hash, highest_topo); let reward = get_block_reward(past_supply); reward * SIDE_BLOCK_REWARD_PERCENT / 100 } else { @@ -1156,12 +1184,12 @@ impl Blockchain { // TODO improve it (too much read/write that can be refactored) if !storage.has_block_linked_to_tx(&tx_hash, &hash)? { storage.add_block_for_tx(&tx_hash, hash.clone())?; - debug!("Block {} is now linked to tx {}", hash, tx_hash); + trace!("Block {} is now linked to tx {}", hash, tx_hash); } // check that the tx was not yet executed in another tip branch if storage.has_tx_executed_in_block(tx_hash)? 
{ - debug!("Tx {} was already executed in a previous block, skipping...", tx_hash); + trace!("Tx {} was already executed in a previous block, skipping...", tx_hash); } else { // tx was not executed, but lets check that it is not a potential double spending // check that the nonce is not lower than the one already executed @@ -1174,7 +1202,7 @@ impl Blockchain { } } // mark tx as executed - debug!("Executing tx {} in block {}", tx_hash, hash); + trace!("Executing tx {} in block {}", tx_hash, hash); storage.set_tx_executed_in_block(tx_hash, &hash)?; self.execute_transaction(storage, &tx, &mut nonces, &mut balances, highest_topo).await?; @@ -1231,7 +1259,6 @@ impl Blockchain { new_tips.push(hash); } else { warn!("Rusty TIP declared stale {} with best height: {}, tip base distance: {}", hash, best_height, tip_base_distance); - // TODO rewind stale TIP } } @@ -1242,7 +1269,6 @@ impl Blockchain { if best_tip != hash { if !self.validate_tips(&storage, &best_tip, &hash).await? { warn!("Rusty TIP {} declared stale", hash); - // TODO rewind stale TIP } else { debug!("Tip {} is valid, adding to final Tips list", hash); tips.insert(hash); From f396d8446759aee3bdf37df57d10225edaa9e2b3 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 21 Apr 2023 23:41:47 +0200 Subject: [PATCH 20/74] daemon: implement DAG full order cache --- xelis_daemon/src/core/blockchain.rs | 40 ++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 389b0aeb..3ce972d5 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -78,6 +78,7 @@ pub struct Blockchain { tip_base_cache: Mutex>, // tip work score is used to determine the best tip based on a block, tip base ands a base height tip_work_score_cache: Mutex, u64)>>, + full_order_cache: Mutex>>, } impl Blockchain { @@ -109,7 +110,8 @@ impl Blockchain { simulator: config.simulator, network, tip_base_cache: 
Mutex::new(LruCache::new(1024)), - tip_work_score_cache: Mutex::new(LruCache::new(1024)) + tip_work_score_cache: Mutex::new(LruCache::new(1024)), + full_order_cache: Mutex::new(LruCache::new(1024)) }; // include genesis block @@ -610,15 +612,31 @@ impl Blockchain { // first hash in order is the base hash #[async_recursion] async fn generate_full_order(&self, storage: &S, hash: &Hash, base: &Hash, base_topo_height: u64) -> Result, BlockchainError> { - let block_tips = storage.get_past_blocks_for_block_hash(hash).await?; - if block_tips.len() == 0 { - return Ok(vec![GENESIS_BLOCK_HASH.clone()]) - } + let block_tips = { + let mut cache = self.full_order_cache.lock().await; + // check if its present in the cache first + if let Some(value) = cache.get(&(hash.clone(), base.clone(), base_topo_height)) { + trace!("Found full order in cache: {}", value.iter().map(|h| h.to_string()).collect::>().join(", ")); + return Ok(value.clone()) + } - // if the block has been previously ordered, return it as base - if hash == base { - return Ok(vec![base.clone()]) - } + let block_tips = storage.get_past_blocks_for_block_hash(hash).await?; + // only the genesis block can have 0 tips, returns its hash + if block_tips.len() == 0 { + let result = vec![GENESIS_BLOCK_HASH.clone()]; + cache.put((hash.clone(), base.clone(), base_topo_height), result.clone()); + return Ok(result) + } + + // if the block has been previously ordered, return it as base + if hash == base { + let result = vec![base.clone()]; + cache.put((hash.clone(), base.clone(), base_topo_height), result.clone()); + return Ok(result) + } + + block_tips + }; let mut order: Vec = Vec::new(); let mut scores = Vec::new(); @@ -643,6 +661,10 @@ impl Blockchain { order.push(hash.clone()); + // save in cache final result + let mut cache = self.full_order_cache.lock().await; + cache.put((hash.clone(), base.clone(), base_topo_height), order.clone()); + Ok(order) } From c03604a2d24992957e04edc8d58ce1abc519b347 Mon Sep 17 00:00:00 2001 From: 
Slixe Date: Sun, 23 Apr 2023 21:42:53 +0200 Subject: [PATCH 21/74] daemon: is_sync_block --- xelis_daemon/src/core/blockchain.rs | 12 ++++++------ xelis_daemon/src/p2p/mod.rs | 2 +- xelis_daemon/src/p2p/packet/handshake.rs | 6 +++--- xelis_daemon/src/rpc/rpc.rs | 2 +- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 3ce972d5..5f3dc58e 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -306,12 +306,12 @@ impl Blockchain { storage.has_block(hash).await } - pub async fn is_block_sync(&self, storage: &S, hash: &Hash) -> Result { + pub async fn is_sync_block(&self, storage: &S, hash: &Hash) -> Result { let current_height = self.get_height(); - self.is_block_sync_at_height(storage, hash, current_height).await + self.is_sync_block_at_height(storage, hash, current_height).await } - async fn is_block_sync_at_height(&self, storage: &S, hash: &Hash, height: u64) -> Result { + async fn is_sync_block_at_height(&self, storage: &S, hash: &Hash, height: u64) -> Result { let block_height = storage.get_height_for_block_hash(hash).await?; if block_height == 0 { // genesis block is a sync block return Ok(true) @@ -388,7 +388,7 @@ impl Blockchain { let mut bases = Vec::with_capacity(tips_count); for hash in tips.iter() { // if block is sync, it is a tip base - if self.is_block_sync_at_height(storage, hash, height).await? { + if self.is_sync_block_at_height(storage, hash, height).await? 
{ let block_height = storage.get_height_for_block_hash(hash).await?; // save in cache (lock each time to avoid deadlocks) let mut cache = self.tip_base_cache.lock().await; @@ -408,7 +408,7 @@ impl Blockchain { // now we sort descending by height and return the last element deleted bases.sort_by(|(_, a), (_, b)| b.cmp(a)); - // assert!(bases[0].1 >= bases[bases.len() - 1].1); + debug_assert!(bases[0].1 >= bases[bases.len() - 1].1); let (base_hash, base_height) = bases.remove(bases.len() - 1); // save in cache @@ -446,7 +446,7 @@ impl Blockchain { // sort it descending by height // a = 5, b = 6, b.cmp(a) -> Ordering::Greater bases.sort_by(|(_, a), (_, b)| b.cmp(a)); - // assert!(bases[0].1 >= bases[bases.len() - 1].1); + debug_assert!(bases[0].1 >= bases[bases.len() - 1].1); // retrieve the first block hash with its height // we delete the last element because we sorted it descending diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 1f6eedc1..d8fe9bba 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -60,7 +60,7 @@ pub struct P2pServer { impl P2pServer { pub fn new(tag: Option, max_peers: usize, bind_address: String, blockchain: Arc>, maintains_seed_nodes: bool) -> Result, P2pError> { if let Some(tag) = &tag { - assert!(tag.len() > 0 && tag.len() <= 16); + debug_assert!(tag.len() > 0 && tag.len() <= 16); } // set channel to communicate with listener thread diff --git a/xelis_daemon/src/p2p/packet/handshake.rs b/xelis_daemon/src/p2p/packet/handshake.rs index e80d1354..4d498c9e 100644 --- a/xelis_daemon/src/p2p/packet/handshake.rs +++ b/xelis_daemon/src/p2p/packet/handshake.rs @@ -36,12 +36,12 @@ impl Handshake { pub const MAX_LEN: usize = 16; pub fn new(version: String, network: Network, node_tag: Option, network_id: [u8; 16], peer_id: u64, local_port: u16, utc_time: u64, topoheight: u64, height: u64, top_hash: Hash, genesis_hash: Hash, cumulative_difficulty: u64, peers: Vec) -> Self { - assert!(version.len() > 0 
&& version.len() <= Handshake::MAX_LEN); // version cannot be greater than 16 chars + debug_assert!(version.len() > 0 && version.len() <= Handshake::MAX_LEN); // version cannot be greater than 16 chars if let Some(node_tag) = &node_tag { - assert!(node_tag.len() > 0 && node_tag.len() <= Handshake::MAX_LEN); // node tag cannot be greater than 16 chars + debug_assert!(node_tag.len() > 0 && node_tag.len() <= Handshake::MAX_LEN); // node tag cannot be greater than 16 chars } - assert!(peers.len() <= Handshake::MAX_LEN); // maximum 16 peers allowed + debug_assert!(peers.len() <= Handshake::MAX_LEN); // maximum 16 peers allowed Self { version, diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 3752c166..96b092b7 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -31,7 +31,7 @@ use log::{info, debug}; pub async fn get_block_type_for_block(blockchain: &Blockchain, storage: &S, hash: &Hash) -> Result { Ok(if blockchain.is_block_orphaned_for_storage(storage, hash).await { BlockType::Orphaned - } else if blockchain.is_block_sync(storage, hash).await.context("Error while checking if block is sync")? { + } else if blockchain.is_sync_block(storage, hash).await.context("Error while checking if block is sync")? { BlockType::Sync } else if blockchain.is_side_block(storage, hash).await.context("Error while checking if block is side")? 
{ BlockType::Side From 4b36dbae3396ef8a0f00cd02c37d821570b504f0 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 23 Apr 2023 22:24:52 +0200 Subject: [PATCH 22/74] use Difficulty type, reduce the use of NETWORK static variable --- xelis_common/src/api/daemon.rs | 10 +++++----- xelis_common/src/config.rs | 6 +++--- xelis_common/src/crypto/key.rs | 12 ++++++------ xelis_common/src/difficulty.rs | 7 ++++--- xelis_common/src/globals.rs | 4 +++- xelis_daemon/src/core/blockchain.rs | 15 +++++++++------ xelis_daemon/src/core/blockdag.rs | 3 ++- xelis_daemon/src/core/storage/mod.rs | 8 ++++---- xelis_daemon/src/core/storage/sled.rs | 8 ++++---- xelis_daemon/src/p2p/chain_validator.rs | 10 +++++----- xelis_daemon/src/rpc/getwork_server.rs | 2 +- xelis_wallet/src/wallet.rs | 4 ++-- 12 files changed, 48 insertions(+), 41 deletions(-) diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index 725c935f..f5f9bc8b 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -2,7 +2,7 @@ use std::{borrow::Cow, collections::HashSet}; use serde::{Deserialize, Serialize}; -use crate::{crypto::{hash::Hash, address::Address}, account::VersionedBalance, network::Network}; +use crate::{crypto::{hash::Hash, address::Address}, account::VersionedBalance, network::Network, block::Difficulty}; use super::DataHash; @@ -18,10 +18,10 @@ pub enum BlockType { pub struct BlockResponse<'a, T: Clone> { pub topoheight: Option, pub block_type: BlockType, - pub difficulty: u64, + pub difficulty: Difficulty, pub supply: Option, pub reward: Option, - pub cumulative_difficulty: u64, + pub cumulative_difficulty: Difficulty, pub total_fees: u64, pub total_size_in_bytes: usize, #[serde(flatten)] @@ -64,7 +64,7 @@ pub struct GetBlockTemplateParams<'a> { pub struct GetBlockTemplateResult { pub template: String, // template is BlockMiner in hex format pub height: u64, // block height - pub difficulty: u64 // difficulty required for valid block + pub difficulty: 
Difficulty // difficulty required for valid block } #[derive(Serialize, Deserialize)] @@ -109,7 +109,7 @@ pub struct GetInfoResult { pub stableheight: u64, pub top_hash: Hash, pub native_supply: u64, - pub difficulty: u64, + pub difficulty: Difficulty, pub block_time_target: u64, // count how many transactions are present in mempool pub mempool_size: usize, diff --git a/xelis_common/src/config.rs b/xelis_common/src/config.rs index abfeec68..da142fe6 100644 --- a/xelis_common/src/config.rs +++ b/xelis_common/src/config.rs @@ -1,6 +1,6 @@ use lazy_static::lazy_static; -use crate::{crypto::{hash::{Hash, Hashable}, key::PublicKey, address::Address}, serializer::Serializer, block::BlockHeader}; +use crate::{crypto::{hash::{Hash, Hashable}, key::PublicKey, address::Address}, serializer::Serializer, block::{BlockHeader, Difficulty}}; pub const NETWORK_ID_SIZE: usize = 16; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); @@ -13,8 +13,8 @@ pub const XELIS_ASSET: Hash = Hash::zero(); pub const SIDE_BLOCK_REWARD_PERCENT: u64 = 30; // only 30% of reward for side block pub const BLOCK_TIME: u64 = 15; // Block Time in seconds pub const BLOCK_TIME_MILLIS: u64 = BLOCK_TIME * 1000; // Block Time in milliseconds -pub const MINIMUM_DIFFICULTY: u64 = BLOCK_TIME_MILLIS * 10; -pub const GENESIS_BLOCK_DIFFICULTY: u64 = 1; +pub const MINIMUM_DIFFICULTY: Difficulty = BLOCK_TIME_MILLIS * 10; +pub const GENESIS_BLOCK_DIFFICULTY: Difficulty = 1; pub const MAX_BLOCK_SIZE: usize = (1024 * 1024) + (256 * 1024); // 1.25 MB pub const FEE_PER_KB: u64 = 1000; // 0.01000 XLS per KB pub const DEV_FEE_PERCENT: u64 = 5; // 5% per block going to dev address diff --git a/xelis_common/src/crypto/key.rs b/xelis_common/src/crypto/key.rs index e18f0d69..1717c64c 100644 --- a/xelis_common/src/crypto/key.rs +++ b/xelis_common/src/crypto/key.rs @@ -33,12 +33,12 @@ impl PublicKey { self.0.as_bytes() } - pub fn to_address(&self) -> Address { - Address::new(get_network().is_mainnet(), AddressType::Normal, 
Cow::Borrowed(self)) + pub fn to_address(&self, mainnet: bool) -> Address { + Address::new(mainnet, AddressType::Normal, Cow::Borrowed(self)) } - pub fn to_address_with(&self, data: DataType) -> Address { - Address::new(get_network().is_mainnet(), AddressType::Data(data), Cow::Borrowed(self)) + pub fn to_address_with(&self, mainnet: bool, data: DataType) -> Address { + Address::new(mainnet, AddressType::Data(data), Cow::Borrowed(self)) } } @@ -78,7 +78,7 @@ impl serde::Serialize for PublicKey { where S: serde::Serializer, { - serializer.serialize_str(&self.to_address().to_string()) + serializer.serialize_str(&self.to_address(get_network().is_mainnet()).to_string()) } } @@ -92,7 +92,7 @@ impl<'de> serde::Deserialize<'de> for PublicKey { impl Display for PublicKey { fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { - write!(f, "{}", &self.to_address()) + write!(f, "{}", &self.to_address(get_network().is_mainnet())) } } diff --git a/xelis_common/src/difficulty.rs b/xelis_common/src/difficulty.rs index fa4268ef..5f1a69c4 100644 --- a/xelis_common/src/difficulty.rs +++ b/xelis_common/src/difficulty.rs @@ -1,3 +1,4 @@ +use crate::block::Difficulty; use crate::config::{MINIMUM_DIFFICULTY, BLOCK_TIME_MILLIS}; use crate::crypto::hash::Hash; use num_bigint::{BigUint, ToBigUint}; @@ -16,14 +17,14 @@ pub enum DifficultyError { ErrorOnConversionBigUint } -pub fn check_difficulty(hash: &Hash, difficulty: u64) -> Result { +pub fn check_difficulty(hash: &Hash, difficulty: Difficulty) -> Result { let big_diff = difficulty_to_big(difficulty)?; let big_hash = hash_to_big(hash); Ok(big_hash <= big_diff) } -pub fn difficulty_to_big(difficulty: u64) -> Result { +pub fn difficulty_to_big(difficulty: Difficulty) -> Result { if difficulty == 0 { return Err(DifficultyError::DifficultyCannotBeZero) } @@ -40,7 +41,7 @@ pub fn hash_to_big(hash: &Hash) -> BigUint { BigUint::from_bytes_be(hash.as_bytes()) } -pub fn calculate_difficulty(parent_timestamp: u128, new_timestamp: u128, 
previous_difficulty: u64) -> u64 { +pub fn calculate_difficulty(parent_timestamp: u128, new_timestamp: u128, previous_difficulty: Difficulty) -> Difficulty { let mut solve_time: u128 = new_timestamp - parent_timestamp; if solve_time > (BLOCK_TIME_MILLIS as u128 * 2) { solve_time = BLOCK_TIME_MILLIS as u128 * 2; diff --git a/xelis_common/src/globals.rs b/xelis_common/src/globals.rs index 53a361c4..eb9914f1 100644 --- a/xelis_common/src/globals.rs +++ b/xelis_common/src/globals.rs @@ -1,3 +1,4 @@ +use crate::block::Difficulty; use crate::network::Network; use crate::serializer::{Reader, ReaderError}; use crate::config::{COIN_VALUE, FEE_PER_KB}; @@ -98,7 +99,7 @@ pub fn format_hashrate(mut hashrate: f64) -> String { const DIFFICULTY_FORMATS: [&str; 6] = ["", "K", "M", "G", "T", "P"]; -pub fn format_difficulty(mut difficulty: u64) -> String { +pub fn format_difficulty(mut difficulty: Difficulty) -> String { let max = HASHRATE_FORMATS.len() - 1; let mut count = 0; while difficulty > 1000 && count < max { @@ -110,6 +111,7 @@ pub fn format_difficulty(mut difficulty: u64) -> String { } // by default it start in mainnet mode +// it is mainly used by fmt::Display to display & Serde for the correct format of addresses / keys static NETWORK: Mutex = Mutex::new(Network::Mainnet); pub fn get_network() -> Network { let network = NETWORK.lock().unwrap(); diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 5f3dc58e..fb3ff71c 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -7,7 +7,7 @@ use xelis_common::{ difficulty::{check_difficulty, calculate_difficulty}, transaction::{Transaction, TransactionType, EXTRA_DATA_LIMIT_SIZE}, globals::get_current_timestamp, - block::{Block, BlockHeader, EXTRA_NONCE_SIZE}, + block::{Block, BlockHeader, EXTRA_NONCE_SIZE, Difficulty}, immutable::Immutable, serializer::Serializer, account::VersionedBalance, api::{daemon::{NotifyEvent, BlockOrderedEvent, 
TransactionExecutedEvent, BlockType}, DataHash}, network::Network }; @@ -68,7 +68,9 @@ pub struct Blockchain { storage: RwLock, // storage to retrieve/add blocks p2p: Mutex>>>, // P2p module rpc: Mutex>>, // Rpc module - difficulty: AtomicU64, // current difficulty + // current difficulty at tips + // its used as cache to display current network hashrate + difficulty: AtomicU64, // used to skip PoW verification simulator: bool, // current network type on which one we're using/connected to @@ -676,7 +678,7 @@ impl Blockchain { Ok(best_difficulty * 91 / 100 < block_difficulty) } - pub async fn get_difficulty_at_tips(&self, provider: &D, tips: &Vec) -> Result { + pub async fn get_difficulty_at_tips(&self, provider: &D, tips: &Vec) -> Result { if tips.len() == 0 { // Genesis difficulty return Ok(GENESIS_BLOCK_DIFFICULTY) } @@ -698,7 +700,7 @@ impl Blockchain { Ok(difficulty) } - pub fn get_difficulty(&self) -> u64 { + pub fn get_difficulty(&self) -> Difficulty { self.difficulty.load(Ordering::SeqCst) } @@ -1074,7 +1076,7 @@ impl Blockchain { // Compute cumulative difficulty for block let cumulative_difficulty = { - let cumulative_difficulty: u64 = if tips_count == 0 { + let cumulative_difficulty: Difficulty = if tips_count == 0 { GENESIS_BLOCK_DIFFICULTY } else { let (base, base_height) = self.find_common_base(storage, block.get_tips()).await?; @@ -1321,7 +1323,8 @@ impl Blockchain { let topoheight = storage.get_topo_height_for_hash(&block_hash).await?; debug!("Adding new '{}' {} at topoheight {}", block_hash, block, topoheight); } else { - debug!("Adding new '{}' {} with no topoheight (not ordered)!", block_hash, block); + // this means the block is considered as orphaned yet + warn!("Adding new '{}' {} with no topoheight (not ordered)!", block_hash, block); } // update stable height and difficulty in cache diff --git a/xelis_daemon/src/core/blockdag.rs b/xelis_daemon/src/core/blockdag.rs index 03ac3b1b..e0041047 100644 --- a/xelis_daemon/src/core/blockdag.rs +++ 
b/xelis_daemon/src/core/blockdag.rs @@ -1,9 +1,10 @@ +use xelis_common::block::Difficulty; use xelis_common::crypto::hash::Hash; use super::storage::Storage; use super::{error::BlockchainError, storage::DifficultyProvider}; // sort the scores by cumulative difficulty and, if equals, by hash value -pub fn sort_descending_by_cumulative_difficulty(scores: &mut Vec<(&Hash, u64)>) { +pub fn sort_descending_by_cumulative_difficulty(scores: &mut Vec<(&Hash, Difficulty)>) { scores.sort_by(|(a_hash, a), (b_hash, b)| { if a != b { b.cmp(a) diff --git a/xelis_daemon/src/core/storage/mod.rs b/xelis_daemon/src/core/storage/mod.rs index fd576d74..f0f4b7d4 100644 --- a/xelis_daemon/src/core/storage/mod.rs +++ b/xelis_daemon/src/core/storage/mod.rs @@ -6,7 +6,7 @@ use async_trait::async_trait; use xelis_common::{ crypto::{key::PublicKey, hash::Hash}, transaction::Transaction, - block::{Block, BlockHeader}, account::VersionedBalance, immutable::Immutable, network::Network, + block::{Block, BlockHeader, Difficulty}, account::VersionedBalance, immutable::Immutable, network::Network, }; use crate::core::error::BlockchainError; @@ -18,8 +18,8 @@ pub type Tips = HashSet; pub trait DifficultyProvider { async fn get_height_for_block_hash(&self, hash: &Hash) -> Result; async fn get_timestamp_for_block_hash(&self, hash: &Hash) -> Result; - async fn get_difficulty_for_block_hash(&self, hash: &Hash) -> Result; - async fn get_cumulative_difficulty_for_block_hash(&self, hash: &Hash) -> Result; + async fn get_difficulty_for_block_hash(&self, hash: &Hash) -> Result; + async fn get_cumulative_difficulty_for_block_hash(&self, hash: &Hash) -> Result; async fn get_past_blocks_for_block_hash(&self, hash: &Hash) -> Result>, BlockchainError>; async fn get_block_header_by_hash(&self, hash: &Hash) -> Result, BlockchainError>; } @@ -69,7 +69,7 @@ pub trait Storage: DifficultyProvider + Sync + Send + 'static { // TODO delete t fn count_transactions(&self) -> usize; async fn has_transaction(&self, hash: 
&Hash) -> Result; - async fn add_new_block(&mut self, block: Arc, txs: &Vec>, difficulty: u64, hash: Hash) -> Result<(), BlockchainError>; + async fn add_new_block(&mut self, block: Arc, txs: &Vec>, difficulty: Difficulty, hash: Hash) -> Result<(), BlockchainError>; async fn pop_blocks(&mut self, mut height: u64, mut topoheight: u64, count: u64) -> Result<(u64, u64, Vec<(Hash, Arc)>, HashSet), BlockchainError>; fn has_blocks(&self) -> bool; fn count_blocks(&self) -> usize; diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index 9a634836..8685e63b 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -5,7 +5,7 @@ use xelis_common::{ crypto::{key::PublicKey, hash::{Hash, hash}}, immutable::Immutable, transaction::Transaction, - block::{BlockHeader, Block}, account::VersionedBalance, network::Network, + block::{BlockHeader, Block, Difficulty}, account::VersionedBalance, network::Network, }; use std::{ collections::HashSet, @@ -608,7 +608,7 @@ impl Storage for SledStorage { self.transactions.len() } - async fn add_new_block(&mut self, block: Arc, txs: &Vec>, difficulty: u64, hash: Hash) -> Result<(), BlockchainError> { + async fn add_new_block(&mut self, block: Arc, txs: &Vec>, difficulty: Difficulty, hash: Hash) -> Result<(), BlockchainError> { debug!("Storing new {} with hash: {}, difficulty: {}", block, hash, difficulty); // Store transactions @@ -697,8 +697,8 @@ impl Storage for SledStorage { miners.insert(block.get_miner().clone()); - let _: u64 = self.delete_data_no_arc(&self.supply, &None, &hash).await?; - let _: u64 = self.delete_data_no_arc(&self.difficulty, &None, &hash).await?; + let _: Difficulty = self.delete_data_no_arc(&self.supply, &None, &hash).await?; + let _: Difficulty = self.delete_data_no_arc(&self.difficulty, &None, &hash).await?; trace!("Deleting cumulative difficulty"); let cumulative_difficulty: u64 = self.delete_data_no_arc(&self.cumulative_difficulty, 
&self.cumulative_difficulty_cache, &hash).await?; diff --git a/xelis_daemon/src/p2p/chain_validator.rs b/xelis_daemon/src/p2p/chain_validator.rs index 55161ffd..5b8f9a08 100644 --- a/xelis_daemon/src/p2p/chain_validator.rs +++ b/xelis_daemon/src/p2p/chain_validator.rs @@ -1,13 +1,13 @@ use std::{collections::{HashMap, HashSet}, sync::Arc}; use async_trait::async_trait; -use xelis_common::{crypto::hash::Hash, block::BlockHeader, config::TIPS_LIMIT}; +use xelis_common::{crypto::hash::Hash, block::{BlockHeader, Difficulty}, config::TIPS_LIMIT}; use crate::core::{error::BlockchainError, blockchain::Blockchain, storage::{DifficultyProvider, Storage}}; use log::{error, debug}; struct Data { header: Arc, - difficulty: u64, - cumulative_difficulty: u64 + difficulty: Difficulty, + cumulative_difficulty: Difficulty } pub struct ChainValidator { @@ -94,11 +94,11 @@ impl DifficultyProvider for ChainValidator { Ok(self.get_data(hash)?.header.get_timestamp()) } - async fn get_difficulty_for_block_hash(&self, hash: &Hash) -> Result { + async fn get_difficulty_for_block_hash(&self, hash: &Hash) -> Result { Ok(self.get_data(hash)?.difficulty) } - async fn get_cumulative_difficulty_for_block_hash(&self, hash: &Hash) -> Result { + async fn get_cumulative_difficulty_for_block_hash(&self, hash: &Hash) -> Result { Ok(self.get_data(hash)?.cumulative_difficulty) } diff --git a/xelis_daemon/src/rpc/getwork_server.rs b/xelis_daemon/src/rpc/getwork_server.rs index 8f3a8fef..5f421b44 100644 --- a/xelis_daemon/src/rpc/getwork_server.rs +++ b/xelis_daemon/src/rpc/getwork_server.rs @@ -60,7 +60,7 @@ impl Miner { impl Display for Miner { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Miner[address={}, name={}]", self.key.to_address(), self.name) + write!(f, "Miner[address={}, name={}]", self.key, self.name) } } diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index 45d39c8d..66e4b149 100644 --- a/xelis_wallet/src/wallet.rs +++ 
b/xelis_wallet/src/wallet.rs @@ -413,11 +413,11 @@ impl Wallet { } pub fn get_address(&self) -> Address<'_> { - self.keypair.get_public_key().to_address() + self.keypair.get_public_key().to_address(self.get_network().is_mainnet()) } pub fn get_address_with(&self, data: DataType) -> Address<'_> { - self.keypair.get_public_key().to_address_with(data) + self.keypair.get_public_key().to_address_with(self.get_network().is_mainnet(), data) } pub fn get_seed(&self, language_index: usize) -> Result { From 562862704b41de673c514dae5f50b43063903295 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 23 Apr 2023 22:28:20 +0200 Subject: [PATCH 23/74] daemon: use Difficulty type in sled.rs --- xelis_daemon/src/core/storage/sled.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index 8685e63b..2d87aef8 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -47,7 +47,7 @@ pub struct SledStorage { past_blocks_cache: Option>>>>, // previous blocks saved at each new block topo_by_hash_cache: Option>>, hash_at_topo_cache: Option>>, - cumulative_difficulty_cache: Option>>, + cumulative_difficulty_cache: Option>>, assets_cache: Option>>, nonces_cache: Option>>, balances_trees_cache: Option>>, // versioned balances tree keep in cache to prevent hash recompute @@ -251,12 +251,12 @@ impl DifficultyProvider for SledStorage { Ok(block.get_timestamp()) } - async fn get_difficulty_for_block_hash(&self, hash: &Hash) -> Result { + async fn get_difficulty_for_block_hash(&self, hash: &Hash) -> Result { trace!("get difficulty for hash {}", hash); self.load_from_disk(&self.difficulty, hash.as_bytes()) } - async fn get_cumulative_difficulty_for_block_hash(&self, hash: &Hash) -> Result { + async fn get_cumulative_difficulty_for_block_hash(&self, hash: &Hash) -> Result { trace!("get cumulative difficulty for hash {}", hash); 
self.get_data(&self.cumulative_difficulty, &self.cumulative_difficulty_cache, hash).await } @@ -966,12 +966,12 @@ impl Storage for SledStorage { } fn set_supply_for_block_hash(&mut self, hash: &Hash, supply: u64) -> Result<(), BlockchainError> { - trace!("set difficulty for hash {}", hash); + trace!("set supply for hash {}", hash); self.supply.insert(hash.as_bytes(), &supply.to_be_bytes())?; Ok(()) } - async fn set_cumulative_difficulty_for_block_hash(&mut self, hash: &Hash, cumulative_difficulty: u64) -> Result<(), BlockchainError> { + async fn set_cumulative_difficulty_for_block_hash(&mut self, hash: &Hash, cumulative_difficulty: Difficulty) -> Result<(), BlockchainError> { trace!("set cumulative difficulty for hash {}", hash); self.cumulative_difficulty.insert(hash.as_bytes(), cumulative_difficulty.to_bytes())?; Ok(()) From 15301c827508eb9f7bf938ea64847c072d65a463 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 23 Apr 2023 22:51:15 +0200 Subject: [PATCH 24/74] daemon: use base_height for cache in generate_full_order --- xelis_daemon/src/core/blockchain.rs | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index fb3ff71c..99b50fe5 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -612,12 +612,13 @@ impl Blockchain { // base represents the block hash of a block already ordered and in stable height // the full order is re generated each time a new block is added based on new TIPS // first hash in order is the base hash + // base_height is only used for the cache key #[async_recursion] - async fn generate_full_order(&self, storage: &S, hash: &Hash, base: &Hash, base_topo_height: u64) -> Result, BlockchainError> { + async fn generate_full_order(&self, storage: &S, hash: &Hash, base: &Hash, base_height: u64, base_topo_height: u64) -> Result, BlockchainError> { let block_tips = { let mut cache = 
self.full_order_cache.lock().await; // check if its present in the cache first - if let Some(value) = cache.get(&(hash.clone(), base.clone(), base_topo_height)) { + if let Some(value) = cache.get(&(hash.clone(), base.clone(), base_height)) { trace!("Found full order in cache: {}", value.iter().map(|h| h.to_string()).collect::>().join(", ")); return Ok(value.clone()) } @@ -626,34 +627,37 @@ impl Blockchain { // only the genesis block can have 0 tips, returns its hash if block_tips.len() == 0 { let result = vec![GENESIS_BLOCK_HASH.clone()]; - cache.put((hash.clone(), base.clone(), base_topo_height), result.clone()); + cache.put((hash.clone(), base.clone(), base_height), result.clone()); return Ok(result) } // if the block has been previously ordered, return it as base if hash == base { let result = vec![base.clone()]; - cache.put((hash.clone(), base.clone(), base_topo_height), result.clone()); + cache.put((hash.clone(), base.clone(), base_height), result.clone()); return Ok(result) } block_tips }; - let mut order: Vec = Vec::new(); let mut scores = Vec::new(); for hash in block_tips.iter() { let is_ordered = storage.is_block_topological_ordered(hash).await; if !is_ordered || (is_ordered && storage.get_topo_height_for_hash(hash).await? 
>= base_topo_height) { let diff = storage.get_cumulative_difficulty_for_block_hash(hash).await?; scores.push((hash, diff)); + } else { + debug!("Block {} is skipped in generate_full_order, is ordered = {}, base topo height = {}", hash, is_ordered, base_topo_height); } } blockdag::sort_descending_by_cumulative_difficulty(&mut scores); + // let's build the right order now + let mut order: Vec = Vec::new(); for (hash, _) in scores { - let sub_order = self.generate_full_order(storage, hash, base, base_topo_height).await?; + let sub_order = self.generate_full_order(storage, hash, base, base_height, base_topo_height).await?; for order_hash in sub_order { if !order.contains(&order_hash) { order.push(order_hash); @@ -665,7 +669,7 @@ impl Blockchain { // save in cache final result let mut cache = self.full_order_cache.lock().await; - cache.put((hash.clone(), base.clone(), base_topo_height), order.clone()); + cache.put((hash.clone(), base.clone(), base_height), order.clone()); Ok(order) } @@ -1111,7 +1115,7 @@ impl Blockchain { let base_topo_height = storage.get_topo_height_for_hash(&base_hash).await?; // generate a full order until base_topo_height - let mut full_order = self.generate_full_order(storage, &best_tip, &base_hash, base_topo_height).await?; + let mut full_order = self.generate_full_order(storage, &best_tip, &base_hash, base_height, base_topo_height).await?; debug!("Generated full order size: {}, with base ({}) topo height: {}", full_order.len(), base_hash, base_topo_height); // rpc server lock From 19dcaafa0271e032c5d52542f24fa3c8b9cc292c Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 23 Apr 2023 23:01:04 +0200 Subject: [PATCH 25/74] daemon: processing time for a new block --- xelis_daemon/src/core/blockchain.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 99b50fe5..603eb8d9 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ 
b/xelis_daemon/src/core/blockchain.rs @@ -13,7 +13,7 @@ use xelis_common::{ }; use crate::{p2p::P2pServer, rpc::{rpc::{get_block_response_for_hash, get_block_type_for_block}, DaemonRpcServer, SharedDaemonRpcServer}}; use super::storage::{Storage, DifficultyProvider}; -use std::{sync::atomic::{Ordering, AtomicU64}, collections::hash_map::Entry, time::Duration, borrow::Cow}; +use std::{sync::atomic::{Ordering, AtomicU64}, collections::hash_map::Entry, time::{Duration, Instant}, borrow::Cow}; use std::collections::{HashMap, HashSet}; use async_recursion::async_recursion; use tokio::{time::interval, sync::{Mutex, RwLock}}; @@ -883,7 +883,9 @@ impl Blockchain { } pub async fn add_new_block_for_storage(&self, storage: &mut S, block: Block, broadcast: bool) -> Result<(), BlockchainError> { + let start = Instant::now(); let block_hash = block.hash(); + debug!("Add new block {}", block_hash); if storage.has_block(&block_hash).await? { error!("Block is already in chain!"); return Err(BlockchainError::AlreadyInChain) } @@ -1328,7 +1330,7 @@ impl Blockchain { debug!("Adding new '{}' {} at topoheight {}", block_hash, block, topoheight); } else { // this means the block is considered as orphaned yet - warn!("Adding new '{}' {} with no topoheight (not ordered)!", block_hash, block); + debug!("Adding new '{}' {} with no topoheight (not ordered)!", block_hash, block); } // update stable height and difficulty in cache @@ -1352,6 +1354,8 @@ impl Blockchain { } } + debug!("Processed block {} in {}ms", block_hash, start.elapsed().as_millis()); + // broadcast to websocket new block if let Some(rpc) = rpc_server.as_ref() { // if we have a getwork server, notify miners From 2facaca0249a613266fc080431035c766ca4a37a Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 23 Apr 2023 23:24:13 +0200 Subject: [PATCH 26/74] daemon: send BlockRejected response when error occurred while accepting miner block --- xelis_daemon/src/rpc/getwork_server.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3
deletions(-) diff --git a/xelis_daemon/src/rpc/getwork_server.rs b/xelis_daemon/src/rpc/getwork_server.rs index 5f421b44..5151914d 100644 --- a/xelis_daemon/src/rpc/getwork_server.rs +++ b/xelis_daemon/src/rpc/getwork_server.rs @@ -84,7 +84,7 @@ impl StreamHandler> for GetWorkWebSoc fn handle(&mut self, msg: Result, ctx: &mut Self::Context) { match msg { Ok(Message::Text(text)) => { - debug!("New message incoming from miner"); + debug!("New message incoming from miner: {}", text); let address = ctx.address(); let template: SubmitBlockParams = match serde_json::from_slice(text.as_bytes()) { Ok(template) => template, @@ -162,8 +162,8 @@ impl GetWorkServer { // then, send it async fn send_new_job(self: Arc, addr: Addr>, key: PublicKey) -> Result<(), InternalRpcError> { let (mut job, height, difficulty) = { - let mut mining_jobs = self.mining_jobs.lock().await; let mut hash = self.last_header_hash.lock().await; + let mut mining_jobs = self.mining_jobs.lock().await; let (job, height, difficulty); if let Some(hash) = hash.as_ref() { let (header, diff) = mining_jobs.peek(hash).ok_or_else(|| { @@ -261,7 +261,13 @@ impl GetWorkServer { // if its block is rejected, resend him the job pub async fn handle_block_for(self: Arc, addr: Addr>, template: SubmitBlockParams) -> Result<(), InternalRpcError> { let job = BlockMiner::from_hex(template.block_template)?; - let response = self.accept_miner_job(job).await?; + let response = match self.accept_miner_job(job).await { + Ok(response) => response, + Err(e) => { + debug!("Error while accepting miner job: {}", e); + Response::BlockRejected + } + }; tokio::spawn(async move { let resend_job = response == Response::BlockRejected; From c7c1207f61974abf8242885f2c3fade851f5cf14 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 24 Apr 2023 17:01:38 +0200 Subject: [PATCH 27/74] common: improve invalid prefix error bech32 --- xelis_common/src/crypto/address.rs | 9 +++++++-- xelis_common/src/crypto/bech32.rs | 4 ++-- 2 files changed, 9 
insertions(+), 4 deletions(-) diff --git a/xelis_common/src/crypto/address.rs b/xelis_common/src/crypto/address.rs index 45159ebb..053808bd 100644 --- a/xelis_common/src/crypto/address.rs +++ b/xelis_common/src/crypto/address.rs @@ -77,7 +77,7 @@ impl<'a> Address<'a> { let (hrp, decoded) = decode(address)?; // check that hrp is valid one if hrp != PREFIX_ADDRESS && hrp != TESTNET_PREFIX_ADDRESS { - return Err(Bech32Error::InvalidPrefix(hrp).into()) + return Err(Bech32Error::InvalidPrefix(hrp, format!("{} or {}", PREFIX_ADDRESS, TESTNET_PREFIX_ADDRESS)).into()) } let bits = convert_bits(&decoded, 5, 8, false)?; @@ -86,7 +86,12 @@ impl<'a> Address<'a> { // now check that the hrp decoded is the one for the network state if (addr.is_mainnet() && hrp != PREFIX_ADDRESS) || (!addr.is_mainnet() && hrp != TESTNET_PREFIX_ADDRESS) { - return Err(Bech32Error::InvalidPrefix(hrp).into()) + let expected = if addr.is_mainnet() { + PREFIX_ADDRESS + } else { + TESTNET_PREFIX_ADDRESS + }; + return Err(Bech32Error::InvalidPrefix(hrp, expected.to_owned()).into()) } Ok(addr) diff --git a/xelis_common/src/crypto/bech32.rs b/xelis_common/src/crypto/bech32.rs index 8c9d426f..89e716b9 100644 --- a/xelis_common/src/crypto/bech32.rs +++ b/xelis_common/src/crypto/bech32.rs @@ -28,8 +28,8 @@ pub enum Bech32Error { Separator1InvalidPosition(usize), // position #[error(transparent)] InvalidUTF8Sequence(#[from] FromUtf8Error), // error returned by 'String::from_utf8' - #[error("Invalid prefix, got: {}", _0)] - InvalidPrefix(String), + #[error("Invalid prefix, got: {}, expected: {}", _0, _1)] + InvalidPrefix(String, String), #[error("Invalid checksum")] InvalidChecksum, #[error("Invalid index '{}': not found", _0)] From f8574fb3611928799f637ddd4eb83689938134e3 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 24 Apr 2023 21:27:44 +0200 Subject: [PATCH 28/74] wallet: add burn command --- xelis_wallet/src/main.rs | 51 +++++++++++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 14 
deletions(-) diff --git a/xelis_wallet/src/main.rs b/xelis_wallet/src/main.rs index 4e3afb36..4c833d1a 100644 --- a/xelis_wallet/src/main.rs +++ b/xelis_wallet/src/main.rs @@ -8,7 +8,7 @@ use clap::Parser; use xelis_common::{config::{ DEFAULT_DAEMON_ADDRESS, VERSION, XELIS_ASSET -}, prompt::{Prompt, command::{CommandManager, Command, CommandHandler, CommandError}, argument::{Arg, ArgType, ArgumentManager}, LogLevel}, async_handler, crypto::{address::{Address, AddressType}, hash::Hashable}, transaction::TransactionType, globals::{format_coin, set_network_to}, serializer::Serializer, network::Network, api::wallet::FeeBuilder}; +}, prompt::{Prompt, command::{CommandManager, Command, CommandHandler, CommandError}, argument::{Arg, ArgType, ArgumentManager}, LogLevel}, async_handler, crypto::{address::{Address, AddressType}, hash::Hashable}, transaction::{TransactionType, Transaction}, globals::{format_coin, set_network_to}, serializer::Serializer, network::Network, api::wallet::FeeBuilder}; use xelis_wallet::wallet::Wallet; #[cfg(feature = "rpc_server")] @@ -120,7 +120,8 @@ async fn main() -> Result<()> { async fn run_prompt(prompt: Arc, wallet: Arc, network: Network) -> Result<()> { let mut command_manager: CommandManager> = CommandManager::default(); command_manager.add_command(Command::with_required_arguments("set_password", "Set a new password to open your wallet", vec![Arg::new("old_password", ArgType::String), Arg::new("password", ArgType::String)], None, CommandHandler::Async(async_handler!(set_password)))); - command_manager.add_command(Command::with_required_arguments("transfer", "Send asset to a specified address", vec![Arg::new("address", ArgType::String), Arg::new("amount", ArgType::Number)], Some(Arg::new("asset", ArgType::String)), CommandHandler::Async(async_handler!(transfer)))); + command_manager.add_command(Command::with_required_arguments("transfer", "Send asset to a specified address", vec![Arg::new("address", ArgType::String), Arg::new("amount", 
ArgType::Number)], Some(Arg::new("asset", ArgType::Hash)), CommandHandler::Async(async_handler!(transfer)))); + command_manager.add_command(Command::with_required_arguments("burn", "Burn amount of asset", vec![Arg::new("asset", ArgType::Hash), Arg::new("amount", ArgType::Number)], None, CommandHandler::Async(async_handler!(burn)))); command_manager.add_command(Command::new("display_address", "Show your wallet address", None, CommandHandler::Async(async_handler!(display_address)))); command_manager.add_command(Command::new("balance", "Show your current balance", Some(Arg::new("asset", ArgType::String)), CommandHandler::Async(async_handler!(balance)))); command_manager.add_command(Command::new("history", "Show all your transactions", Some(Arg::new("page", ArgType::Number)), CommandHandler::Async(async_handler!(history)))); @@ -199,8 +200,11 @@ async fn transfer(manager: &CommandManager>, mut arguments: Argument XELIS_ASSET // default asset selected is XELIS }; + manager.message(format!("Sending {} of {} to {}", format_coin(amount), asset, address.to_string())); + let wallet = manager.get_data()?; manager.message("Building transaction..."); + let (key, address_type) = address.split(); let extra_data = match address_type { AddressType::Normal => None, @@ -213,20 +217,21 @@ async fn transfer(manager: &CommandManager>, mut arguments: Argument wallet.create_transaction(&storage, TransactionType::Transfer(vec![transfer]), FeeBuilder::Multiplier(1f64))? }; - let tx_hash = tx.hash(); - manager.message(format!("Transaction hash: {}", tx_hash)); + broadcast_tx(wallet, manager, tx).await; + Ok(()) +} - if wallet.is_online().await { - if let Err(e) = wallet.submit_transaction(&tx).await { - manager.error(format!("Couldn't submit transaction: {}", e)); - } else { - manager.message("Transaction submitted successfully!"); - } - } else { - manager.warn("You are currently offline, transaction cannot be send automatically. 
Please send it manually to the network."); - manager.message(format!("Transaction Hex: {}", tx.to_hex())); - } +async fn burn(manager: &CommandManager>, mut arguments: ArgumentManager) -> Result<(), CommandError> { + let amount = arguments.get_value("amount")?.to_number()?; + let asset = arguments.get_value("asset")?.to_hash()?; + let wallet = manager.get_data()?; + manager.message(format!("Burning {} of {}", format_coin(amount), asset)); + let tx = { + let storage = wallet.get_storage().read().await; + wallet.create_transaction(&storage, TransactionType::Burn(asset, amount), FeeBuilder::Multiplier(1f64))? + }; + broadcast_tx(wallet, manager, tx).await; Ok(()) } @@ -357,4 +362,22 @@ async fn nonce(manager: &CommandManager>, _: ArgumentManager) -> Res let nonce = wallet.get_nonce().await; manager.message(format!("Nonce: {}", nonce)); Ok(()) +} + +// broadcast tx if possible +async fn broadcast_tx(wallet: &Wallet, manager: &CommandManager>, tx: Transaction) { + let tx_hash = tx.hash(); + manager.message(format!("Transaction hash: {}", tx_hash)); + + if wallet.is_online().await { + if let Err(e) = wallet.submit_transaction(&tx).await { + manager.error(format!("Couldn't submit transaction: {}", e)); + manager.error("You can try to rescan your balance with the command 'rescan'"); + } else { + manager.message("Transaction submitted successfully!"); + } + } else { + manager.warn("You are currently offline, transaction cannot be send automatically. 
Please send it manually to the network."); + manager.message(format!("Transaction in hex format: {}", tx.to_hex())); + } } \ No newline at end of file From 2bbe197aedf4f60fe35d60bf85e9a84029cc6dd5 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 24 Apr 2023 21:32:01 +0200 Subject: [PATCH 29/74] daemon: fix mistake in TX broadcast --- xelis_daemon/src/p2p/packet/object.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/xelis_daemon/src/p2p/packet/object.rs b/xelis_daemon/src/p2p/packet/object.rs index 6f465db3..1977b257 100644 --- a/xelis_daemon/src/p2p/packet/object.rs +++ b/xelis_daemon/src/p2p/packet/object.rs @@ -54,7 +54,8 @@ impl Serializer for ObjectRequest { let id = reader.read_u8()?; Ok(match id { 0 => ObjectRequest::Block(reader.read_hash()?), - 1 => ObjectRequest::Transaction(reader.read_hash()?), + 1 => ObjectRequest::BlockHeader(reader.read_hash()?), + 2 => ObjectRequest::Transaction(reader.read_hash()?), _ => return Err(ReaderError::InvalidValue) }) } From b88868382ed6277068d37c85bae7bec019349600 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 24 Apr 2023 21:58:09 +0200 Subject: [PATCH 30/74] pretty print of Burn transaction --- xelis_common/src/transaction/mod.rs | 10 +++++++--- xelis_daemon/src/core/blockchain.rs | 6 +++--- xelis_wallet/src/main.rs | 2 +- xelis_wallet/src/network_handler.rs | 2 +- xelis_wallet/src/transaction_builder.rs | 2 +- 5 files changed, 13 insertions(+), 9 deletions(-) diff --git a/xelis_common/src/transaction/mod.rs b/xelis_common/src/transaction/mod.rs index be25f2a4..356da108 100644 --- a/xelis_common/src/transaction/mod.rs +++ b/xelis_common/src/transaction/mod.rs @@ -26,9 +26,13 @@ pub struct SmartContractCall { // Smart Contract system is not yet available but types are already there #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub enum TransactionType { + #[serde(rename = "transfers")] Transfer(Vec), - Burn(Hash, u64), + #[serde(rename = "burn")] + Burn { asset: Hash, amount: u64 }, 
+ #[serde(rename = "call_contract")] CallContract(SmartContractCall), + #[serde(rename = "deploy_contract")] DeployContract(String), // represent the code to deploy } @@ -44,7 +48,7 @@ pub struct Transaction { impl Serializer for TransactionType { fn write(&self, writer: &mut Writer) { match self { - TransactionType::Burn(asset, amount) => { + TransactionType::Burn { asset, amount } => { writer.write_u8(0); writer.write_hash(asset); writer.write_u64(amount); @@ -92,7 +96,7 @@ impl Serializer for TransactionType { 0 => { let asset = reader.read_hash()?; let amount = reader.read_u64()?; - TransactionType::Burn(asset, amount) + TransactionType::Burn { asset, amount } }, 1 => { // Normal let txs_count = reader.read_u8()?; diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 603eb8d9..3c9cd8f1 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1637,7 +1637,7 @@ impl Blockchain { return Err(BlockchainError::InvalidTransactionExtraDataTooBig(EXTRA_DATA_LIMIT_SIZE, extra_data_size)) } } - TransactionType::Burn(asset, amount) => { + TransactionType::Burn { asset, amount } => { if *amount == 0 { error!("Burn Tx {} has no value to burn", hash); return Err(BlockchainError::NoValueForBurn) @@ -1747,7 +1747,7 @@ impl Blockchain { total_deducted.insert(&XELIS_ASSET, transaction.get_fee()); match transaction.get_data() { - TransactionType::Burn(asset, amount) => { + TransactionType::Burn { asset, amount } => { *total_deducted.entry(asset).or_insert(0) += amount; } TransactionType::Transfer(txs) => { @@ -1797,7 +1797,7 @@ impl Blockchain { } } }, - TransactionType::Burn(asset, _) => { + TransactionType::Burn { asset, amount: _ } => { if !assets.contains(asset) { assets.insert(asset); } diff --git a/xelis_wallet/src/main.rs b/xelis_wallet/src/main.rs index 4c833d1a..651ffa46 100644 --- a/xelis_wallet/src/main.rs +++ b/xelis_wallet/src/main.rs @@ -229,7 +229,7 @@ async fn burn(manager: 
&CommandManager>, mut arguments: ArgumentMana let tx = { let storage = wallet.get_storage().read().await; - wallet.create_transaction(&storage, TransactionType::Burn(asset, amount), FeeBuilder::Multiplier(1f64))? + wallet.create_transaction(&storage, TransactionType::Burn { asset, amount }, FeeBuilder::Multiplier(1f64))? }; broadcast_tx(wallet, manager, tx).await; Ok(()) diff --git a/xelis_wallet/src/network_handler.rs b/xelis_wallet/src/network_handler.rs index 60504af9..6c4c2ff4 100644 --- a/xelis_wallet/src/network_handler.rs +++ b/xelis_wallet/src/network_handler.rs @@ -139,7 +139,7 @@ impl NetworkHandler { let nonce = if is_owner { Some(tx.get_nonce()) } else { None }; let (owner, data) = tx.consume(); let entry: Option = match data { - TransactionType::Burn(asset, amount) => { + TransactionType::Burn { asset, amount } => { if is_owner { Some(EntryData::Burn { asset, amount }) } else { diff --git a/xelis_wallet/src/transaction_builder.rs b/xelis_wallet/src/transaction_builder.rs index 0218d69c..54a67b93 100644 --- a/xelis_wallet/src/transaction_builder.rs +++ b/xelis_wallet/src/transaction_builder.rs @@ -47,7 +47,7 @@ impl TransactionBuilder { pub fn total_spent(&self) -> HashMap<&Hash, u64> { let mut total_spent = HashMap::new(); match &self.data { - TransactionType::Burn(asset, amount) => { + TransactionType::Burn { asset, amount } => { total_spent.insert(asset, *amount); }, TransactionType::CallContract(call) => { From 2deac30e2f346c192d11bc33d219c4b2965f33ac Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 24 Apr 2023 22:26:16 +0200 Subject: [PATCH 31/74] daemon: use Difficulty type --- xelis_common/src/config.rs | 2 +- xelis_common/src/difficulty.rs | 2 +- xelis_daemon/src/core/blockchain.rs | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/xelis_common/src/config.rs b/xelis_common/src/config.rs index da142fe6..3a012f22 100644 --- a/xelis_common/src/config.rs +++ b/xelis_common/src/config.rs @@ -13,7 +13,7 @@ pub const XELIS_ASSET: 
Hash = Hash::zero(); pub const SIDE_BLOCK_REWARD_PERCENT: u64 = 30; // only 30% of reward for side block pub const BLOCK_TIME: u64 = 15; // Block Time in seconds pub const BLOCK_TIME_MILLIS: u64 = BLOCK_TIME * 1000; // Block Time in milliseconds -pub const MINIMUM_DIFFICULTY: Difficulty = BLOCK_TIME_MILLIS * 10; +pub const MINIMUM_DIFFICULTY: Difficulty = BLOCK_TIME_MILLIS as Difficulty * 10; pub const GENESIS_BLOCK_DIFFICULTY: Difficulty = 1; pub const MAX_BLOCK_SIZE: usize = (1024 * 1024) + (256 * 1024); // 1.25 MB pub const FEE_PER_KB: u64 = 1000; // 0.01000 XLS per KB diff --git a/xelis_common/src/difficulty.rs b/xelis_common/src/difficulty.rs index 5f1a69c4..a8abaf9e 100644 --- a/xelis_common/src/difficulty.rs +++ b/xelis_common/src/difficulty.rs @@ -48,7 +48,7 @@ pub fn calculate_difficulty(parent_timestamp: u128, new_timestamp: u128, previou } let easypart = (E.powf((1f64 - solve_time as f64 / BLOCK_TIME_MILLIS as f64) / M) * 10000f64) as i64; - let diff = ((previous_difficulty as i64 * easypart) / 10000) as u64; + let diff = ((previous_difficulty as i64 * easypart) / 10000) as Difficulty; trace!("Difficulty calculated, easypart: {}, previous diff: {}, diff: {}", easypart, previous_difficulty, diff); if diff < MINIMUM_DIFFICULTY { diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 3c9cd8f1..a33bcc12 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -79,7 +79,7 @@ pub struct Blockchain { // key is (tip hash, tip height) while value is (base hash, base height) tip_base_cache: Mutex>, // tip work score is used to determine the best tip based on a block, tip base ands a base height - tip_work_score_cache: Mutex, u64)>>, + tip_work_score_cache: Mutex, Difficulty)>>, full_order_cache: Mutex>>, } @@ -537,7 +537,7 @@ impl Blockchain { } #[async_recursion] // TODO no recursion - async fn find_tip_work_score_internal<'a>(&self, storage: &S, map: &mut HashMap, hash: &'a Hash, 
base_topoheight: u64, base_height: u64) -> Result<(), BlockchainError> { + async fn find_tip_work_score_internal<'a>(&self, storage: &S, map: &mut HashMap, hash: &'a Hash, base_topoheight: u64, base_height: u64) -> Result<(), BlockchainError> { let tips = storage.get_past_blocks_for_block_hash(hash).await?; for hash in tips.iter() { if !map.contains_key(hash) { @@ -554,7 +554,7 @@ impl Blockchain { } // find the sum of work done - async fn find_tip_work_score(&self, storage: &S, hash: &Hash, base: &Hash, base_height: u64) -> Result<(HashSet, u64), BlockchainError> { + async fn find_tip_work_score(&self, storage: &S, hash: &Hash, base: &Hash, base_height: u64) -> Result<(HashSet, Difficulty), BlockchainError> { let mut cache = self.tip_work_score_cache.lock().await; if let Some(value) = cache.get(&(hash.clone(), base.clone(), base_height)) { trace!("Found tip work score in cache: set [{}], height: {}", value.0.iter().map(|h| h.to_string()).collect::>().join(", "), value.1); @@ -562,7 +562,7 @@ impl Blockchain { } let block = storage.get_block_header_by_hash(hash).await?; - let mut map: HashMap = HashMap::new(); + let mut map: HashMap = HashMap::new(); let base_topoheight = storage.get_topo_height_for_hash(base).await?; for hash in block.get_tips() { if !map.contains_key(hash) { From 47608ac062f83db35e664e7d6430533690bf1ba1 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 25 Apr 2023 17:19:31 +0200 Subject: [PATCH 32/74] daemon: rework mempool --- xelis_daemon/src/core/blockchain.rs | 111 +++++++-------- xelis_daemon/src/core/mempool.rs | 211 ++++++++++++++++++++-------- xelis_daemon/src/rpc/rpc.rs | 5 +- 3 files changed, 203 insertions(+), 124 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index a33bcc12..97ff6d46 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -6,7 +6,7 @@ use xelis_common::{ crypto::{key::PublicKey, hash::{Hashable, Hash}}, 
difficulty::{check_difficulty, calculate_difficulty}, transaction::{Transaction, TransactionType, EXTRA_DATA_LIMIT_SIZE}, - globals::get_current_timestamp, + globals::{get_current_timestamp, format_coin}, block::{Block, BlockHeader, EXTRA_NONCE_SIZE, Difficulty}, immutable::Immutable, serializer::Serializer, account::VersionedBalance, api::{daemon::{NotifyEvent, BlockOrderedEvent, TransactionExecutedEvent, BlockType}, DataHash}, network::Network @@ -755,39 +755,31 @@ impl Blockchain { { // get the highest nonce for this owner let owner = tx.get_owner(); - let mut nonces = HashMap::new(); - let mut balances = HashMap::new(); - - // list of potential TXs from same owner - let mut owner_txs = Vec::new(); - let mempool_txs = mempool.get_txs(); - for (hash, tx) in mempool_txs { - if tx.get_owner() == owner { - let nonce = nonces.entry(tx.get_owner()).or_insert(0); - // if the tx is in mempool, then the nonce should be valid. - if *nonce < tx.get_nonce() { - *nonce = tx.get_nonce(); - } - owner_txs.push((hash, tx)); + // get the highest nonce available + // if presents, it means we have at least one tx from this owner in mempool + if let Some(nonce) = mempool.get_cached_nonce(owner) { + // check that the nonce is in the range + if !(tx.get_nonce() <= nonce.get_max() + 1 && tx.get_nonce() >= nonce.get_min()) { + return Err(BlockchainError::InvalidTxNonce) } - } - // if the nonce of tx is N + 1, we increment it to let it pass - // so we have multiple TXs from same owner in the same block - if let Some(nonce) = nonces.get_mut(owner) { - if *nonce + 1 == tx.get_nonce() { - *nonce += 1; - // compute balances of previous pending TXs - for (hash, tx) in owner_txs { - if tx.get_owner() == owner { - // we also need to pre-compute the balance of the owner - self.verify_transaction_with_hash(storage, tx, &hash, &mut balances, None, true).await?; - } - } + // compute balances of previous pending TXs + let txs_hashes = nonce.get_txs(); + let mut owner_txs = 
Vec::with_capacity(txs_hashes.len()); + for hash in txs_hashes { + let tx = mempool.get_tx(hash)?; + owner_txs.push(tx); } - } - self.verify_transaction_with_hash(&storage, &tx, &hash, &mut balances, Some(&mut nonces), false).await? + // we need to do it in two times because of the constraint of lifetime on &tx + let mut balances = HashMap::new(); + for tx in &owner_txs { + self.verify_transaction_with_hash(storage, tx, &hash, &mut balances, None, true).await?; + } + } else { + let mut balances = HashMap::new(); + self.verify_transaction_with_hash(&storage, &tx, &hash, &mut balances, None, false).await? + } } if broadcast { @@ -838,29 +830,36 @@ impl Blockchain { let mempool = self.mempool.read().await; let txs = mempool.get_sorted_txs(); - let mut tx_size = 0; + let mut total_txs_size = 0; let mut nonces: HashMap<&PublicKey, u64> = HashMap::new(); - for tx in txs { - if block.size() + tx_size + tx.get_size() > MAX_BLOCK_SIZE { - break; - } - let transaction = mempool.view_tx(tx.get_hash())?; - let account_nonce = if let Some(nonce) = nonces.get(transaction.get_owner()) { - *nonce - } else { - let nonce = storage.get_nonce(transaction.get_owner()).await?; - nonces.insert(transaction.get_owner(), nonce); - nonce - }; + // txs are sorted in descending order thanks to Reverse + 'main: for (fee, hashes) in txs { + for hash in hashes { + let sorted_tx = mempool.get_sorted_tx(hash)?; + if block.size() + total_txs_size + sorted_tx.get_size() > MAX_BLOCK_SIZE { + break 'main; + } - if account_nonce < transaction.get_nonce() { - debug!("Skipping {} with {} fees because another TX should be selected first due to nonce", tx.get_hash(), tx.get_fee()); - } else { - // TODO no clone - block.txs_hashes.push(tx.get_hash().clone()); - tx_size += tx.get_size(); - *nonces.get_mut(transaction.get_owner()).unwrap() += 1; + let transaction = sorted_tx.get_tx(); + let account_nonce = if let Some(nonce) = nonces.get(transaction.get_owner()) { + *nonce + } else { + let nonce = 
storage.get_nonce(transaction.get_owner()).await?; + nonces.insert(transaction.get_owner(), nonce); + nonce + }; + + if account_nonce < transaction.get_nonce() { + debug!("Skipping {} with {} fees because another TX should be selected first due to nonce", hash, format_coin(fee.0)); + } else { + debug!("Selected {} for mining", hash); + // TODO no clone + block.txs_hashes.push(hash.as_ref().clone()); + total_txs_size += sorted_tx.get_size(); + // we use unwrap because above we insert it + *nonces.get_mut(transaction.get_owner()).unwrap() += 1; + } } } Ok(block) @@ -1094,17 +1093,7 @@ impl Blockchain { cumulative_difficulty }; - // Delete all txs from mempool let mut mempool = self.mempool.write().await; - for hash in block.get_txs_hashes() { // remove all txs present in mempool - match mempool.remove_tx(hash) { - Ok(_) => { - debug!("Removing tx hash '{}' from mempool", hash); - }, - Err(_) => {} - }; - } - let mut tips = storage.get_tips().await?; tips.insert(block_hash.clone()); for hash in block.get_tips() { @@ -1393,7 +1382,7 @@ impl Blockchain { } // Clean all old txs - mempool.clean_up(storage, nonces).await; + mempool.clean_up(nonces).await; Ok(()) } diff --git a/xelis_daemon/src/core/mempool.rs b/xelis_daemon/src/core/mempool.rs index aa48788b..ad59f33c 100644 --- a/xelis_daemon/src/core/mempool.rs +++ b/xelis_daemon/src/core/mempool.rs @@ -1,8 +1,7 @@ -use super::storage::Storage; use super::error::BlockchainError; -use std::collections::HashMap; +use std::cmp::Reverse; +use std::collections::{HashMap, BTreeMap, HashSet}; use std::sync::Arc; -use log::warn; use xelis_common::{ crypto::{ hash::Hash, @@ -14,43 +13,69 @@ use xelis_common::{ #[derive(serde::Serialize)] pub struct SortedTx { - hash: Hash, + tx: Arc, fee: u64, size: usize } +#[derive(serde::Serialize)] +pub struct NonceCache { + min: u64, + max: u64, + txs: HashSet>, +} + #[derive(serde::Serialize)] pub struct Mempool { - txs: HashMap>, - txs_sorted: Vec, + // store all txs waiting to be included 
in a block + txs: HashMap, SortedTx>, + // store all sender's nonce for faster finding + nonces: HashMap, + // binary tree map for sorted txs hash by fees + // keys represents fees, while value represents all txs hash + sorted_txs: BTreeMap, HashSet>> } impl Mempool { pub fn new() -> Self { Mempool { txs: HashMap::new(), - txs_sorted: Vec::new() + nonces: HashMap::new(), + sorted_txs: BTreeMap::new() } } // All checks are made in Blockchain before calling this function pub fn add_tx(&mut self, hash: Hash, tx: Arc) -> Result<(), BlockchainError> { + let hash = Arc::new(hash); + // update the cache for this owner + if let Some(cache) = self.nonces.get_mut(tx.get_owner()) { + cache.update(hash.clone(), tx.get_nonce()); + } else { + let mut txs = HashSet::new(); + txs.insert(hash.clone()); + + let cache = NonceCache { + max: tx.get_nonce(), + min: tx.get_nonce(), + txs + }; + self.nonces.insert(tx.get_owner().clone(), cache); + } + let sorted_tx = SortedTx { - hash: hash.clone(), fee: tx.get_fee(), - size: tx.size() + size: tx.size(), + tx }; - let mut index = 0; - while index < self.txs_sorted.len() { // TODO Optimize - if self.txs_sorted[index].fee < sorted_tx.fee { - break; - } - index += 1; - } + let entry = self.sorted_txs.entry(Reverse(sorted_tx.get_fee())).or_insert_with(HashSet::new); + // add the tx hash in sorted txs + entry.insert(hash.clone()); + + // insert in map + self.txs.insert(hash, sorted_tx); - self.txs_sorted.insert(index, sorted_tx); - self.txs.insert(hash, tx); Ok(()) } @@ -58,82 +83,109 @@ impl Mempool { self.txs.contains_key(hash) } - pub fn remove_tx(&mut self, hash: &Hash) -> Result, BlockchainError> { - let tx = self.txs.remove(hash).ok_or_else(|| BlockchainError::TxNotFound(hash.clone()))?; - let index = self.txs_sorted.iter().position(|tx| tx.hash == *hash).ok_or_else(|| BlockchainError::TxNotFoundInSortedList(hash.clone()))?; // TODO Optimized - self.txs_sorted.remove(index); - + pub fn get_sorted_tx(&self, hash: &Hash) -> 
Result<&SortedTx, BlockchainError> { + let tx = self.txs.get(hash).ok_or_else(|| BlockchainError::TxNotFound(hash.clone()))?; Ok(tx) } pub fn get_tx(&self, hash: &Hash) -> Result, BlockchainError> { - let tx = self.txs.get(hash).ok_or_else(|| BlockchainError::TxNotFound(hash.clone()))?; - Ok(Arc::clone(tx)) + let tx = self.get_sorted_tx(hash)?; + Ok(Arc::clone(tx.get_tx())) } pub fn view_tx<'a>(&'a self, hash: &Hash) -> Result<&'a Arc, BlockchainError> { - if let Some(tx) = self.txs.get(hash) { - return Ok(tx) + if let Some(sorted_tx) = self.txs.get(hash) { + return Ok(sorted_tx.get_tx()) } Err(BlockchainError::TxNotFound(hash.clone())) } - pub fn get_sorted_txs(&self) -> &Vec { - &self.txs_sorted + pub fn get_txs(&self) -> &HashMap, SortedTx> { + &self.txs } - pub fn get_txs(&self) -> &HashMap> { - &self.txs + pub fn get_sorted_txs(&self) -> &BTreeMap, HashSet>> { + &self.sorted_txs + } + + pub fn get_cached_nonce(&self, key: &PublicKey) -> Option<&NonceCache> { + self.nonces.get(key) } pub fn size(&self) -> usize { - self.txs_sorted.len() + self.txs.len() } pub fn clear(&mut self) { self.txs.clear(); - self.txs_sorted.clear(); + self.sorted_txs.clear(); + self.nonces.clear(); } // delete all old txs not compatible anymore with current state of account - pub async fn clean_up(&mut self, storage: &S, nonces: HashMap) { - let txs_sorted = std::mem::replace(&mut self.txs_sorted, vec!()); - for sorted in txs_sorted { - let tx_nonce; - let account_nonce; - - if let Some(tx) = self.txs.get(&sorted.hash) { - tx_nonce = tx.get_nonce(); - account_nonce = if let Some(nonce) = nonces.get(tx.get_owner()) { - *nonce - } else { - match storage.get_nonce(tx.get_owner()).await { - Ok(nonce) => nonce, - Err(e) => { - warn!("Error while cleaning up tx {}: {}", sorted.hash, e); - // should not be possible, but in case - self.txs.remove(&sorted.hash); - continue; + pub async fn clean_up(&mut self, nonces: HashMap) { + for (key, nonce) in nonces { + let mut delete_cache = false; + // 
check if we have a TX in cache for this owner + if let Some(cache) = self.nonces.get_mut(&key) { + // check if the minimum nonce used is lower than new nonce + if cache.get_min() < nonce { + // txs hashes to delete + let mut hashes: Vec> = Vec::with_capacity(cache.txs.len()); + + // filter all txs hashes which are not found + // or where its nonce is smaller than the new nonce + // TODO when drain_filter is stable, use it (allow to get all hashes deleted) + cache.txs.retain(|hash| { + let delete = { + if let Some(sorted_tx) = self.txs.get(hash) { + if sorted_tx.get_tx().get_nonce() < nonce { + hashes.push(hash.clone()); + true + } else { + false + } + } else { + true + } + }; + !delete + }); + + if cache.txs.is_empty() { + delete_cache = true; + } + + // now delete all necessary txs + for hash in hashes { + if let Some(sorted_tx) = self.txs.remove(&hash) { + let fee_reverse = Reverse(sorted_tx.get_fee()); + let mut is_empty = false; + if let Some(hashes) = self.sorted_txs.get_mut(&fee_reverse) { + hashes.remove(&hash); + is_empty = hashes.is_empty(); + } + + // don't keep empty data + if is_empty { + self.sorted_txs.remove(&fee_reverse); + } } } - }; - } else { - continue; + } } - if tx_nonce >= account_nonce { - self.txs_sorted.push(sorted); - } else { - self.txs.remove(&sorted.hash); + if delete_cache { + self.nonces.remove(&key); } } } } impl SortedTx { - pub fn get_hash(&self) -> &Hash { - &self.hash + pub fn get_tx(&self) -> &Arc { + &self.tx } pub fn get_fee(&self) -> u64 { @@ -143,4 +195,43 @@ impl SortedTx { pub fn get_size(&self) -> usize { self.size } + + pub fn consume(self) -> Arc { + self.tx + } +} + +impl NonceCache { + pub fn get_min(&self) -> u64 { + self.min + } + + pub fn get_max(&self) -> u64 { + self.max + } + + pub fn get_txs(&self) -> &HashSet> { + &self.txs + } + + fn update(&mut self, hash: Arc, nonce: u64) { + self.add_tx(hash); + self.set_nonce(nonce); + } + + fn add_tx(&mut self, hash: Arc) { + self.txs.insert(hash); + } + + fn 
set_nonce(&mut self, nonce: u64) { + debug_assert!(self.min <= self.max); + + if nonce < self.min { + self.min = nonce; + } + + if nonce > self.max { + self.max = nonce; + } + } } \ No newline at end of file diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 96b092b7..f70eb8cf 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -341,9 +341,8 @@ async fn get_mempool(blockchain: Arc>, body: Value) -> let mempool = blockchain.get_mempool().read().await; let storage = blockchain.get_storage().read().await; let mut transactions: Vec = Vec::new(); - for tx in mempool.get_sorted_txs() { - let transaction = mempool.view_tx(tx.get_hash()).context("Error while retrieving TX from mempool")?; - transactions.push(get_transaction_response(&*storage, transaction, tx.get_hash()).await?); + for (hash, sorted_tx) in mempool.get_txs() { + transactions.push(get_transaction_response(&*storage, sorted_tx.get_tx(), hash).await?); } Ok(json!(transactions)) From 1509b03019c8323e44cdc96a63bc75ec97c6689d Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 25 Apr 2023 23:18:38 +0200 Subject: [PATCH 33/74] daemon: improve tx selection algorithm --- xelis_daemon/src/core/blockchain.rs | 60 ++++++++++++++++++----------- xelis_daemon/src/core/mempool.rs | 5 +++ xelis_daemon/src/p2p/mod.rs | 2 +- xelis_daemon/src/rpc/rpc.rs | 2 +- 4 files changed, 44 insertions(+), 25 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 97ff6d46..0cc7fbe9 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -12,7 +12,7 @@ use xelis_common::{ serializer::Serializer, account::VersionedBalance, api::{daemon::{NotifyEvent, BlockOrderedEvent, TransactionExecutedEvent, BlockType}, DataHash}, network::Network }; use crate::{p2p::P2pServer, rpc::{rpc::{get_block_response_for_hash, get_block_type_for_block}, DaemonRpcServer, SharedDaemonRpcServer}}; -use super::storage::{Storage, 
DifficultyProvider}; +use super::{storage::{Storage, DifficultyProvider}, mempool::SortedTx}; use std::{sync::atomic::{Ordering, AtomicU64}, collections::hash_map::Entry, time::{Duration, Instant}, borrow::Cow}; use std::collections::{HashMap, HashSet}; use async_recursion::async_recursion; @@ -834,31 +834,45 @@ impl Blockchain { let mut nonces: HashMap<&PublicKey, u64> = HashMap::new(); // txs are sorted in descending order thanks to Reverse - 'main: for (fee, hashes) in txs { - for hash in hashes { - let sorted_tx = mempool.get_sorted_tx(hash)?; - if block.size() + total_txs_size + sorted_tx.get_size() > MAX_BLOCK_SIZE { - break 'main; + { + 'main: for (fee, hashes) in txs { + let mut transactions: Vec<(&Arc, &SortedTx)> = Vec::with_capacity(hashes.len()); + // prepare TXs by sorting them by nonce + // only txs from same owner who have same fees or decreasing fees with increasing nonce will have + // all its txs in the same block + // maybe we can improve this to support all levels of fees + for hash in hashes { + let tx = mempool.get_sorted_tx(hash)?; + transactions.push((hash, tx)); } + transactions.sort_by(|(_, a), (_, b)| a.get_tx().get_nonce().cmp(&b.get_tx().get_nonce())); - let transaction = sorted_tx.get_tx(); - let account_nonce = if let Some(nonce) = nonces.get(transaction.get_owner()) { - *nonce - } else { - let nonce = storage.get_nonce(transaction.get_owner()).await?; - nonces.insert(transaction.get_owner(), nonce); - nonce - }; + for (hash, sorted_tx) in transactions { + if block.size() + total_txs_size + sorted_tx.get_size() > MAX_BLOCK_SIZE { + break 'main; + } - if account_nonce < transaction.get_nonce() { - debug!("Skipping {} with {} fees because another TX should be selected first due to nonce", hash, format_coin(fee.0)); - } else { - debug!("Selected {} for mining", hash); - // TODO no clone - block.txs_hashes.push(hash.as_ref().clone()); - total_txs_size += sorted_tx.get_size(); - // we use unwrap because above we insert it - 
*nonces.get_mut(transaction.get_owner()).unwrap() += 1; + let transaction = sorted_tx.get_tx(); + let account_nonce = if let Some(nonce) = nonces.get(transaction.get_owner()) { + *nonce + } else { + let nonce = storage.get_nonce(transaction.get_owner()).await?; + nonces.insert(transaction.get_owner(), nonce); + nonce + }; + + if account_nonce < transaction.get_nonce() { + debug!("Skipping {} with {} fees because another TX should be selected first due to nonce", hash, format_coin(fee.0)); + } else if account_nonce == transaction.get_nonce() { + debug!("Selected {} (nonce: {}, account nonce: {}, fees: {}) for mining", hash, transaction.get_nonce(), account_nonce, format_coin(fee.0)); + // TODO no clone + block.txs_hashes.push(hash.as_ref().clone()); + total_txs_size += sorted_tx.get_size(); + // we use unwrap because above we insert it + *nonces.get_mut(transaction.get_owner()).unwrap() += 1; + } else { + warn!("This TX in mempool {} is in advance (nonce: {}, account nonce: {}, fees: {}), it should be removed from mempool", hash, transaction.get_nonce(), account_nonce, format_coin(fee.0)); + } } } } diff --git a/xelis_daemon/src/core/mempool.rs b/xelis_daemon/src/core/mempool.rs index ad59f33c..e9c3d308 100644 --- a/xelis_daemon/src/core/mempool.rs +++ b/xelis_daemon/src/core/mempool.rs @@ -2,6 +2,7 @@ use super::error::BlockchainError; use std::cmp::Reverse; use std::collections::{HashMap, BTreeMap, HashSet}; use std::sync::Arc; +use log::{trace, debug}; use xelis_common::{ crypto::{ hash::Hash, @@ -125,6 +126,7 @@ impl Mempool { // delete all old txs not compatible anymore with current state of account pub async fn clean_up(&mut self, nonces: HashMap) { + debug!("Cleaning up mempool ({} accounts)...", nonces.len()); for (key, nonce) in nonces { let mut delete_cache = false; // check if we have a TX in cache for this owner @@ -163,12 +165,14 @@ impl Mempool { let fee_reverse = Reverse(sorted_tx.get_fee()); let mut is_empty = false; if let Some(hashes) = 
self.sorted_txs.get_mut(&fee_reverse) { + trace!("Removing tx hash {} for fee entry {}", hash, fee_reverse.0); hashes.remove(&hash); is_empty = hashes.is_empty(); } // don't keep empty data if is_empty { + trace!("Removing empty fee ({}) entry", fee_reverse.0); self.sorted_txs.remove(&fee_reverse); } } @@ -177,6 +181,7 @@ impl Mempool { } if delete_cache { + trace!("Removing empty nonce cache for owner {}", key); self.nonces.remove(&key); } } diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index d8fe9bba..850a4553 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -930,7 +930,7 @@ impl P2pServer { peer.send_packet(Packet::ObjectResponse(ObjectResponse::Transaction(Cow::Borrowed(tx)))).await?; }, Err(e) => { - debug!("{} asked tx '{}' but got on error while retrieving it: {}", peer, hash, e); + debug!("{} asked tx '{}' but got an error while retrieving it: {}", peer, hash, e); peer.send_packet(Packet::ObjectResponse(ObjectResponse::NotFound(request))).await?; } } diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index f70eb8cf..7a8b29bd 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -295,7 +295,7 @@ async fn count_transactions(blockchain: Arc>, body: Va async fn submit_transaction(blockchain: Arc>, body: Value) -> Result { let params: SubmitTransactionParams = parse_params(body)?; let transaction = Transaction::from_hex(params.data)?; - blockchain.add_tx_to_mempool(transaction, true).await.context("Error while adding tx to mempool")?; + blockchain.add_tx_to_mempool(transaction, true).await.map_err(|e| InternalRpcError::AnyError(e.into()))?; Ok(json!(true)) } From a41a7bc1cd48ca8959f378982f1604a28c29223f Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 25 Apr 2023 23:30:32 +0200 Subject: [PATCH 34/74] daemon: add add_tx command --- xelis_daemon/src/main.rs | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git 
a/xelis_daemon/src/main.rs b/xelis_daemon/src/main.rs index 87d28105..9e67a422 100644 --- a/xelis_daemon/src/main.rs +++ b/xelis_daemon/src/main.rs @@ -8,7 +8,7 @@ use p2p::P2pServer; use rpc::{getwork_server::SharedGetWorkServer, rpc::get_block_response_for_hash}; use xelis_common::{ prompt::{Prompt, command::{CommandManager, CommandError, Command, CommandHandler}, PromptError, argument::{ArgumentManager, Arg, ArgType}, LogLevel}, - config::{VERSION, BLOCK_TIME}, globals::{format_hashrate, set_network_to}, async_handler, crypto::address::Address, network::Network + config::{VERSION, BLOCK_TIME}, globals::{format_hashrate, set_network_to}, async_handler, crypto::{address::Address, hash::Hashable}, network::Network, transaction::Transaction, serializer::Serializer }; use crate::core::{ blockchain::{Config, Blockchain}, @@ -86,6 +86,7 @@ async fn run_prompt(prompt: &Arc, blockchain: Arc>> = match blockchain.get_p2p().lock().await.as_ref() { Some(p2p) => Some(p2p.clone()), @@ -254,7 +255,6 @@ async fn top_block(manager: &CommandManager>>, _: Ok(()) } - async fn pop_blocks(manager: &CommandManager>>, mut arguments: ArgumentManager) -> Result<(), CommandError> { let amount = arguments.get_value("amount")?.to_number()?; let blockchain = manager.get_data()?; @@ -276,5 +276,24 @@ async fn clear_mempool(manager: &CommandManager>>, mempool.clear(); info!("Mempool cleared"); + Ok(()) +} + +// add manually a TX in mempool +async fn add_tx(manager: &CommandManager>>, mut arguments: ArgumentManager) -> Result<(), CommandError> { + let hex = arguments.get_value("hex")?.to_string_value()?; + let broadcast = if arguments.has_argument("broadcast") { + arguments.get_value("broadcast")?.to_bool()? 
+ } else { + true + }; + + let tx = Transaction::from_hex(hex).context("Error while decoding tx in hexadecimal format")?; + let hash = tx.hash(); + manager.message(format!("Adding TX {} to mempool...", hash)); + + let blockchain = manager.get_data()?; + blockchain.add_tx_with_hash_to_mempool(tx, hash, broadcast).await.context("Error while adding TX to mempool")?; + manager.message("TX has been added to mempool"); Ok(()) } \ No newline at end of file From 817d4b5dbaf5cb0b799bf1321371a15c3af178b5 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 25 Apr 2023 23:56:11 +0200 Subject: [PATCH 35/74] daemon: fix deadlock, fix block size limit --- xelis_daemon/src/core/blockchain.rs | 29 +++++++++++++++-------------- xelis_daemon/src/core/error.rs | 2 +- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 0cc7fbe9..5ca2e182 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -742,8 +742,8 @@ impl Blockchain { } pub async fn add_tx_with_hash_to_mempool(&self, tx: Transaction, hash: Hash, broadcast: bool) -> Result<(), BlockchainError> { - let mut mempool = self.mempool.write().await; let storage = self.storage.read().await; + let mut mempool = self.mempool.write().await; self.add_tx_for_mempool(&storage, &mut mempool, tx, hash, broadcast).await } @@ -784,7 +784,11 @@ impl Blockchain { if broadcast { if let Some(p2p) = self.p2p.lock().await.as_ref() { - p2p.broadcast_tx_hash(&hash).await; + let p2p = Arc::clone(p2p); + let hash = hash.clone(); + tokio::spawn(async move { + p2p.broadcast_tx_hash(&hash).await; + }); } } @@ -848,7 +852,7 @@ impl Blockchain { transactions.sort_by(|(_, a), (_, b)| a.get_tx().get_nonce().cmp(&b.get_tx().get_nonce())); for (hash, sorted_tx) in transactions { - if block.size() + total_txs_size + sorted_tx.get_size() > MAX_BLOCK_SIZE { + if block.size() + total_txs_size + sorted_tx.get_size() >= MAX_BLOCK_SIZE { break 
'main; } @@ -862,9 +866,9 @@ impl Blockchain { }; if account_nonce < transaction.get_nonce() { - debug!("Skipping {} with {} fees because another TX should be selected first due to nonce", hash, format_coin(fee.0)); + trace!("Skipping {} with {} fees because another TX should be selected first due to nonce", hash, format_coin(fee.0)); } else if account_nonce == transaction.get_nonce() { - debug!("Selected {} (nonce: {}, account nonce: {}, fees: {}) for mining", hash, transaction.get_nonce(), account_nonce, format_coin(fee.0)); + trace!("Selected {} (nonce: {}, account nonce: {}, fees: {}) for mining", hash, transaction.get_nonce(), account_nonce, format_coin(fee.0)); // TODO no clone block.txs_hashes.push(hash.as_ref().clone()); total_txs_size += sorted_tx.get_size(); @@ -922,6 +926,12 @@ impl Blockchain { return Err(BlockchainError::ExpectedTips) } + // block contains header and full TXs + if block.size() > MAX_BLOCK_SIZE { + error!("Block size ({} bytes) is greater than the limit ({} bytes)", block.size(), MAX_BLOCK_SIZE); + return Err(BlockchainError::InvalidBlockSize(MAX_BLOCK_SIZE, block.size())); + } + { let mut cache = HashSet::with_capacity(tips_count); for tip in block.get_tips() { @@ -993,7 +1003,6 @@ impl Blockchain { let difficulty = self.verify_proof_of_work(storage, &pow_hash, block.get_tips()).await?; debug!("PoW is valid for difficulty {}", difficulty); - let mut total_tx_size: usize = 0; { // Transaction verification let hashes_len = block.get_txs_hashes().len(); let txs_len = block.get_transactions().len(); @@ -1052,7 +1061,6 @@ impl Blockchain { // DAG will choose which branch will execute the TX info!("TX {} was executed in another branch, skipping verification", tx_hash); // increase the total size - total_tx_size += tx.size(); // add tx hash in cache cache_tx.insert(tx_hash, true); // because TX was already validated & executed and is not in block tips @@ -1069,17 +1077,10 @@ impl Blockchain { self.verify_transaction_with_hash(storage, tx, 
&tx_hash, &mut balances, Some(&mut cache_account), false).await?; - // increase the total size - total_tx_size += tx.size(); // add tx hash in cache cache_tx.insert(tx_hash, true); } - if block.size() + total_tx_size > MAX_BLOCK_SIZE { - error!("Block size ({} bytes) is greater than the limit ({} bytes)", block.size() + total_tx_size, MAX_BLOCK_SIZE); - return Err(BlockchainError::InvalidBlockSize(MAX_BLOCK_SIZE, block.size() + total_tx_size)); - } - if cache_tx.len() != txs_len || cache_tx.len() != hashes_len { error!("Invalid count in TXs, received only {} unique txs", cache_tx.len()); return Err(BlockchainError::InvalidBlockTxs(block.get_txs_hashes().len(), cache_tx.len())) diff --git a/xelis_daemon/src/core/error.rs b/xelis_daemon/src/core/error.rs index 38dc8c5c..11ece298 100644 --- a/xelis_daemon/src/core/error.rs +++ b/xelis_daemon/src/core/error.rs @@ -35,7 +35,7 @@ pub enum BlockchainError { InvalidHash(Hash, Hash), #[error("Invalid previous block hash, expected {}, got {}", _0, _1)] InvalidPreviousBlockHash(Hash, Hash), - #[error("Block size is more than limit: {}, got {}", _0, _1)] + #[error("Block size is more than limit, expected maximum: {}, got {}", _0, _1)] InvalidBlockSize(usize, usize), #[error("Block contains invalid txs count: expected {}, got {} txs.", _0, _1)] InvalidBlockTxs(usize, usize), From 09d8a7f8306ca9f1d20ca35839bf96151c57b144 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 26 Apr 2023 00:17:42 +0200 Subject: [PATCH 36/74] daemon: p2p object request retrieve tx from disk when not found in mempool --- xelis_daemon/src/p2p/mod.rs | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 850a4553..3cd26ff4 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -924,13 +924,25 @@ impl P2pServer { } }, ObjectRequest::Transaction(hash) => { - let mempool = self.blockchain.get_mempool().read().await; - match 
mempool.view_tx(hash) { - Ok(tx) => { + let on_disk = { + let mempool = self.blockchain.get_mempool().read().await; + if let Ok(tx) = mempool.view_tx(hash) { peer.send_packet(Packet::ObjectResponse(ObjectResponse::Transaction(Cow::Borrowed(tx)))).await?; - }, - Err(e) => { - debug!("{} asked tx '{}' but got an error while retrieving it: {}", peer, hash, e); + false + } else { + debug!("{} asked transaction '{}' but not present in our mempool", peer, hash); + true + } + }; + + if on_disk { + debug!("Looking on disk for transaction {}", hash); + let storage = self.blockchain.get_storage().read().await; + if storage.has_transaction(hash).await? { + let tx = storage.get_transaction(hash).await?; + peer.send_packet(Packet::ObjectResponse(ObjectResponse::Transaction(Cow::Borrowed(&tx)))).await?; + } else { + debug!("{} asked transaction '{}' but not present in our chain", peer, hash); peer.send_packet(Packet::ObjectResponse(ObjectResponse::NotFound(request))).await?; } } From a5ea1434a3466fdd30a8851c73f77639fd8f0026 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 26 Apr 2023 10:41:33 +0200 Subject: [PATCH 37/74] up to 255 outputs in transfers --- README.md | 2 +- xelis_daemon/src/core/blockchain.rs | 5 +++++ xelis_daemon/src/core/error.rs | 2 ++ xelis_wallet/src/transaction_builder.rs | 4 ++++ xelis_wallet/src/wallet.rs | 2 ++ 5 files changed, 14 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 108140c8..1c35ce6a 100644 --- a/README.md +++ b/README.md @@ -101,7 +101,7 @@ This feature allows to accept others branch tips even if transactions are the sa ## Transaction Transaction types supported: -- Transfer: possibility to send many assets to many addresses in the same TX +- Transfer: possibility to send many assets to many addresses in the same TX (up to 255 outputs inside) - Burn: publicly burn amount of a specific asset and use this TX as proof of burn (coins are completely deleted from circulation) - Call Contract: call a Smart Contract with 
specific parameters and list of assets to deposit (WIP) (NOTE: Multi Call Contract in the same TX ?) - Deploy Contract: deploy a new (valid) Smart Contract on chain (WIP) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 5ca2e182..90a96306 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1612,6 +1612,11 @@ impl Blockchain { return Err(BlockchainError::TxEmpty(hash.clone())) } + // invalid serde tx + if txs.len() > u8::MAX as usize { + return Err(BlockchainError::TooManyOutputInTx(hash.clone())) + } + let mut extra_data_size = 0; for output in txs { if output.to == *tx.get_owner() { // we can't transfer coins to ourself, why would you do that ? diff --git a/xelis_daemon/src/core/error.rs b/xelis_daemon/src/core/error.rs index 11ece298..f870a37b 100644 --- a/xelis_daemon/src/core/error.rs +++ b/xelis_daemon/src/core/error.rs @@ -49,6 +49,8 @@ pub enum BlockchainError { TxAlreadyInMempool(Hash), #[error("Normal Tx {} is empty", _0)] TxEmpty(Hash), + #[error("Tx {} has too many output", _0)] + TooManyOutputInTx(Hash), #[error("Tx {} is already in block", _0)] TxAlreadyInBlock(Hash), #[error("Duplicate registration tx for address '{}' found in same block", _0)] diff --git a/xelis_wallet/src/transaction_builder.rs b/xelis_wallet/src/transaction_builder.rs index 54a67b93..5b7d4566 100644 --- a/xelis_wallet/src/transaction_builder.rs +++ b/xelis_wallet/src/transaction_builder.rs @@ -94,6 +94,10 @@ impl TransactionBuilder { return Err(WalletError::ExpectedOneTx) } + if txs.len() > u8::MAX as usize { + return Err(WalletError::TooManyTx) + } + for tx in txs { if tx.to == self.owner { return Err(WalletError::TxOwnerIsReceiver) diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index 66e4b149..0f7d534c 100644 --- a/xelis_wallet/src/wallet.rs +++ b/xelis_wallet/src/wallet.rs @@ -32,6 +32,8 @@ pub enum WalletError { InvalidKeyPair, #[error("Expected a TX")] ExpectedOneTx, + 
#[error("Too many txs included max is {}", u8::MAX)] + TooManyTx, #[error("Transaction owner is the receiver")] TxOwnerIsReceiver, #[error("Error from crypto: {}", _0)] From 8fbfe408bd313ae085ef2ab6555ece37f1da3caf Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 27 Apr 2023 10:41:31 +0200 Subject: [PATCH 38/74] daemon: BlockResponse `total_fees` can be null when txs are not included --- xelis_common/src/api/daemon.rs | 2 +- xelis_daemon/src/rpc/rpc.rs | 26 +++++++++++++++----------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index f5f9bc8b..33427e0b 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -22,7 +22,7 @@ pub struct BlockResponse<'a, T: Clone> { pub supply: Option, pub reward: Option, pub cumulative_difficulty: Difficulty, - pub total_fees: u64, + pub total_fees: Option, pub total_size_in_bytes: usize, #[serde(flatten)] pub data: DataHash<'a, T> diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 7a8b29bd..84becfc1 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -44,7 +44,7 @@ pub async fn get_block_response_for_hash(blockchain: &Blockchain, let (topoheight, supply, reward) = if storage.is_block_topological_ordered(&hash).await { ( Some(storage.get_topo_height_for_hash(&hash).await.context("Error while retrieving topo height")?), - Some( storage.get_supply_for_block_hash(&hash).context("Error while retrieving supply")?), + Some(storage.get_supply_for_block_hash(&hash).context("Error while retrieving supply")?), Some(storage.get_block_reward(&hash).context("Error while retrieving block reward")?), ) } else { @@ -58,19 +58,23 @@ pub async fn get_block_response_for_hash(blockchain: &Blockchain, let block_type = get_block_type_for_block(&blockchain, &storage, &hash).await?; let cumulative_difficulty = storage.get_cumulative_difficulty_for_block_hash(&hash).await.context("Error while 
retrieving cumulative difficulty")?; let difficulty = storage.get_difficulty_for_block_hash(&hash).await.context("Error while retrieving difficulty")?; - let block = storage.get_block(&hash).await.context("Error while retrieving block")?; - let total_size_in_bytes = block.size(); - let mut total_fees = 0; - for tx in block.get_transactions() { - total_fees += tx.get_fee(); - } - let value: Value = if include_txs { + let block = storage.get_block(&hash).await.context("Error while retrieving full block")?; + + let total_size_in_bytes = block.size(); + let mut total_fees = 0; + for tx in block.get_transactions() { + total_fees += tx.get_fee(); + } + let data: DataHash<'_, Block> = DataHash { hash: Cow::Borrowed(&hash), data: Cow::Owned(block) }; - json!(BlockResponse { topoheight, block_type, cumulative_difficulty, difficulty, supply, reward, total_fees, total_size_in_bytes, data }) + json!(BlockResponse { topoheight, block_type, cumulative_difficulty, difficulty, supply, reward, total_fees: Some(total_fees), total_size_in_bytes, data }) } else { - let data: DataHash<'_, Arc> = DataHash { hash: Cow::Borrowed(&hash), data: Cow::Owned(block.to_header()) }; - json!(BlockResponse { topoheight, block_type, cumulative_difficulty, difficulty, supply, reward, total_fees, total_size_in_bytes, data }) + let block = storage.get_block_header_by_hash(&hash).await.context("Error while retrieving full block")?; + + let total_size_in_bytes = block.size(); + let data: DataHash<'_, Arc> = DataHash { hash: Cow::Borrowed(&hash), data: Cow::Borrowed(&block) }; + json!(BlockResponse { topoheight, block_type, cumulative_difficulty, difficulty, supply, reward, total_fees: None, total_size_in_bytes, data }) }; Ok(value) From 385ef28328c0b06d7529174fa7f4f44890354f98 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 27 Apr 2023 13:35:44 +0200 Subject: [PATCH 39/74] daemon: update API.md, check that the TX is executed in block for fees --- API.md | 23 ++++++++++++++++------- 
xelis_daemon/src/core/blockchain.rs | 6 +++--- xelis_daemon/src/core/storage/mod.rs | 5 +++-- xelis_daemon/src/core/storage/sled.rs | 15 ++++++++++++--- xelis_daemon/src/rpc/rpc.rs | 10 +++++++--- 5 files changed, 41 insertions(+), 18 deletions(-) diff --git a/API.md b/API.md index 177f014d..eb291b40 100644 --- a/API.md +++ b/API.md @@ -223,7 +223,7 @@ Retrieve a block at a specific topo height "000001aa69c15167a192de809eeed112f50ec91e513cfbf7b1674523583acbf9" ], "topoheight": 23, - "total_fees": 0, + "total_fees": null, "total_size_in_bytes": 131, "txs_hashes": [], "version": 0 @@ -231,6 +231,8 @@ Retrieve a block at a specific topo height } ``` +NOTE: `total_fees` field is not `null` when TXs are fetched (`include_txs` is at `true`). + #### Get Blocks At Height Retrieve all blocks at a specific height @@ -276,13 +278,16 @@ Retrieve all blocks at a specific height "000001aa69c15167a192de809eeed112f50ec91e513cfbf7b1674523583acbf9" ], "topoheight": 23, - "total_fees": 0, + "total_fees": null, "total_size_in_bytes": 131, "txs_hashes": [], "version": 0 } ] } + +NOTE: `total_fees` field is not `null` when TXs are fetched (`include_txs` is at `true`). + ``` #### Get Block By Hash @@ -329,7 +334,7 @@ Retrieve a block by its hash "000001aa69c15167a192de809eeed112f50ec91e513cfbf7b1674523583acbf9" ], "topoheight": 23, - "total_fees": 0, + "total_fees": null, "total_size_in_bytes": 131, "txs_hashes": [], "version": 0 @@ -337,6 +342,8 @@ Retrieve a block by its hash } ``` +NOTE: `total_fees` field is not `null` when TXs are fetched (`include_txs` is at `true`). 
+ #### Get Top Block Retrieve the highest block based on the topological height @@ -378,7 +385,7 @@ Retrieve the highest block based on the topological height "000001aa69c15167a192de809eeed112f50ec91e513cfbf7b1674523583acbf9" ], "topoheight": 23, - "total_fees": 0, + "total_fees": null, "total_size_in_bytes": 131, "txs_hashes": [], "version": 0 @@ -386,6 +393,8 @@ Retrieve the highest block based on the topological height } ``` +NOTE: `total_fees` field is not `null` when TXs are fetched (`include_txs` is at `true`). + #### Get Nonce Retrieve the nonce for address in request params. @@ -916,7 +925,7 @@ Retrieve a specific range of blocks (up to 20 maximum) based on topoheight "timestamp": 1678215431432, "tips": [], "topoheight": 0, - "total_fees": 0, + "total_fees": null, "total_size_in_bytes": 99, "txs_hashes": [], "version": 0 @@ -937,7 +946,7 @@ Retrieve a specific range of blocks (up to 20 maximum) based on topoheight "55a162b8e0e137bb6a8de9f4c4b214fb60bcd2df15ec32fdd8f06759b863f06e" ], "topoheight": 1, - "total_fees": 0, + "total_fees": null, "total_size_in_bytes": 131, "txs_hashes": [], "version": 0 @@ -958,7 +967,7 @@ Retrieve a specific range of blocks (up to 20 maximum) based on topoheight "000011152d66bfe7a2b1d2e18a09a94c1d1593ae8ddeafcfc8f1b8c2b03b7995" ], "topoheight": 2, - "total_fees": 0, + "total_fees": null, "total_size_in_bytes": 131, "txs_hashes": [], "version": 0 diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 90a96306..be6d856a 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1032,8 +1032,8 @@ impl Blockchain { } // check that the TX included is not executed in stable height or in block TIPS - if storage.has_tx_executed_in_block(hash)? { - let block_executed = storage.get_tx_executed_in_block(hash)?; + if storage.is_tx_executed_in_a_block(hash)? 
{ + let block_executed = storage.get_block_executer_for_tx(hash)?; debug!("Tx {} was executed in {}", hash, block); let block_height = storage.get_height_for_block_hash(&block_executed).await?; // if the tx was executed below stable height, reject whole block! @@ -1222,7 +1222,7 @@ impl Blockchain { } // check that the tx was not yet executed in another tip branch - if storage.has_tx_executed_in_block(tx_hash)? { + if storage.is_tx_executed_in_a_block(tx_hash)? { trace!("Tx {} was already executed in a previous block, skipping...", tx_hash); } else { // tx was not executed, but lets check that it is not a potential double spending diff --git a/xelis_daemon/src/core/storage/mod.rs b/xelis_daemon/src/core/storage/mod.rs index f0f4b7d4..307278a3 100644 --- a/xelis_daemon/src/core/storage/mod.rs +++ b/xelis_daemon/src/core/storage/mod.rs @@ -26,10 +26,11 @@ pub trait DifficultyProvider { #[async_trait] pub trait Storage: DifficultyProvider + Sync + Send + 'static { // TODO delete these traits - fn get_tx_executed_in_block(&self, tx: &Hash) -> Result; + fn get_block_executer_for_tx(&self, tx: &Hash) -> Result; fn set_tx_executed_in_block(&mut self, tx: &Hash, block: &Hash) -> Result<(), BlockchainError>; fn remove_tx_executed(&mut self, tx: &Hash) -> Result<(), BlockchainError>; - fn has_tx_executed_in_block(&self, tx: &Hash) -> Result; + fn is_tx_executed_in_a_block(&self, tx: &Hash) -> Result; + fn is_tx_executed_in_block(&self, tx: &Hash, block: &Hash) -> Result; fn set_blocks_for_tx(&mut self, tx: &Hash, blocks: &HashSet) -> Result<(), BlockchainError>; fn get_network(&self) -> Result; diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index 2d87aef8..c96a7f1a 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -298,7 +298,7 @@ impl DifficultyProvider for SledStorage { #[async_trait] impl Storage for SledStorage { - fn get_tx_executed_in_block(&self, tx: &Hash) -> Result { + fn 
get_block_executer_for_tx(&self, tx: &Hash) -> Result { self.load_from_disk(&self.txs_executed, tx.as_bytes()) } @@ -312,10 +312,19 @@ impl Storage for SledStorage { Ok(()) } - fn has_tx_executed_in_block(&self, tx: &Hash) -> Result { + fn is_tx_executed_in_a_block(&self, tx: &Hash) -> Result { Ok(self.txs_executed.contains_key(tx.as_bytes())?) } + fn is_tx_executed_in_block(&self, tx: &Hash, block: &Hash) -> Result { + if let Ok(hash) = self.get_block_executer_for_tx(tx) { + if hash == *block { + return Ok(true) + } + } + Ok(false) + } + fn get_network(&self) -> Result { trace!("get network"); self.load_from_disk(&self.extra, NETWORK) @@ -717,7 +726,7 @@ impl Storage for SledStorage { trace!("Tx was included in {}, blocks left: {}", blocks_len, blocks.into_iter().map(|b| b.to_string()).collect::>().join(", ")); } - if self.has_tx_executed_in_block(tx_hash)? { + if self.is_tx_executed_in_a_block(tx_hash)? { trace!("Tx {} was executed, deleting", tx_hash); self.remove_tx_executed(&tx_hash)?; } diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 84becfc1..cf3462dc 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -63,8 +63,12 @@ pub async fn get_block_response_for_hash(blockchain: &Blockchain, let total_size_in_bytes = block.size(); let mut total_fees = 0; - for tx in block.get_transactions() { - total_fees += tx.get_fee(); + for (tx, tx_hash) in block.get_transactions().iter().zip(block.get_txs_hashes()) { + // check that the TX was correctly executed in this block + // retrieve all fees for valid txs + if storage.is_tx_executed_in_block(tx_hash, &hash).context("Error while checking if tx was executed")? 
{ + total_fees += tx.get_fee(); + } } let data: DataHash<'_, Block> = DataHash { hash: Cow::Borrowed(&hash), data: Cow::Owned(block) }; @@ -88,7 +92,7 @@ pub async fn get_transaction_response(storage: &S, tx: &Arc> = DataHash { hash: Cow::Borrowed(&hash), data: Cow::Borrowed(tx) }; - let executed_in_block = storage.get_tx_executed_in_block(hash).ok(); + let executed_in_block = storage.get_block_executer_for_tx(hash).ok(); Ok(json!(TransactionResponse { blocks, executed_in_block, data })) } From b807cce3e9619fd1c8dce21324cc0f9208d89707 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 28 Apr 2023 01:01:48 +0200 Subject: [PATCH 40/74] daemon: working on p2p inventory --- xelis_daemon/src/core/blockchain.rs | 4 ++ xelis_daemon/src/core/error.rs | 4 +- xelis_daemon/src/p2p/mod.rs | 63 +++++++++++++++++++++++- xelis_daemon/src/p2p/packet/inventory.rs | 43 ++++++++++++++++ xelis_daemon/src/p2p/packet/mod.rs | 9 +++- xelis_daemon/src/p2p/peer.rs | 24 ++++++++- 6 files changed, 141 insertions(+), 6 deletions(-) create mode 100644 xelis_daemon/src/p2p/packet/inventory.rs diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index be6d856a..8179d974 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -752,6 +752,10 @@ impl Blockchain { return Err(BlockchainError::TxAlreadyInMempool(hash)) } + if storage.has_transaction(&hash).await? 
{ + return Err(BlockchainError::TxAlreadyInBlockchain(hash)) + } + { // get the highest nonce for this owner let owner = tx.get_owner(); diff --git a/xelis_daemon/src/core/error.rs b/xelis_daemon/src/core/error.rs index f870a37b..6cf60e77 100644 --- a/xelis_daemon/src/core/error.rs +++ b/xelis_daemon/src/core/error.rs @@ -154,7 +154,9 @@ pub enum BlockchainError { #[error("Error, block include a dead tx {}", _0)] DeadTx(Hash), #[error("A non-zero value is required for burn")] - NoValueForBurn + NoValueForBurn, + #[error("TX {} is already in blockchain", _0)] + TxAlreadyInBlockchain(Hash) } impl From> for BlockchainError { diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 3cd26ff4..62cd2d94 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -959,7 +959,68 @@ impl P2pServer { if sender.send(response.to_owned()?).is_err() { error!("Error while sending object response to sender!"); } - } + }, + Packet::NotifyInventory(packet_wrapper) => { + trace!("Received a notify inventory from {}", peer); + let (inventory, ping) = packet_wrapper.consume(); + ping.into_owned().update_peer(peer).await; + + if !peer.has_requested_inventory() { + debug!("Received a notify inventory from {} but we didn't request it", peer); + return Err(P2pError::InvalidPacket) + } + + // we received the inventory + peer.set_requested_inventory(false); + peer.set_last_inventory(get_current_time()); + + // check and add if we are missing a TX in our mempool or storage + let mut missing_txs: Vec<&Cow> = Vec::new(); + { + let mempool = self.blockchain.get_mempool().read().await; + let storage = self.blockchain.get_storage().read().await; + for hash in inventory.get_txs().iter() { + if !mempool.contains_tx(hash) && !storage.has_transaction(&hash).await? 
{ + missing_txs.push(hash); + } + } + } + + // retrieve all txs we don't have concurrently + for hash in missing_txs { + let peer = Arc::clone(&peer); + let blockchain = Arc::clone(&self.blockchain); + let hash = hash.as_ref().clone(); + tokio::spawn(async move { + let response = match peer.request_blocking_object(ObjectRequest::Transaction(hash)).await { + Ok(response) => response, + Err(e) => { + error!("Error while retrieving tx from {} inventory: {}", peer, e); + peer.increment_fail_count(); + return; + } + }; + + if let OwnedObjectResponse::Transaction(tx, hash) = response { + if let Err(e) = blockchain.add_tx_with_hash_to_mempool(tx, hash, false).await { + match e { + BlockchainError::TxAlreadyInMempool(hash) | BlockchainError::TxAlreadyInBlockchain(hash) => { + // ignore because maybe another peer send us this same tx + trace!("Received a tx we already have in mempool: {}", hash); + }, + _ => { + error!("Error while adding tx to mempool from {} inventory: {}", peer, e); + peer.increment_fail_count(); + } + } + } + } else { + error!("Error while retrieving tx from {} inventory, got an invalid type, we should ban this peer", peer); + peer.increment_fail_count(); + } + }); + } + }, }; Ok(()) } diff --git a/xelis_daemon/src/p2p/packet/inventory.rs b/xelis_daemon/src/p2p/packet/inventory.rs new file mode 100644 index 00000000..6230b29e --- /dev/null +++ b/xelis_daemon/src/p2p/packet/inventory.rs @@ -0,0 +1,43 @@ +use std::{borrow::Cow, collections::HashSet}; + +use xelis_common::{crypto::hash::Hash, serializer::{Serializer, ReaderError, Reader, Writer}}; + +pub const NOTIFY_MAX_LEN: usize = 512; // 512 * 32 bytes = 16KB + +#[derive(Debug, Clone)] +pub struct NotifyInventory<'a> { + txs: Cow<'a, HashSet>>, +} + +impl<'a> NotifyInventory<'a> { + pub fn new(txs: Cow<'a, HashSet>>) -> Self { + Self { txs } + } + + pub fn get_txs(&self) -> &Cow<'a, HashSet>> { + &self.txs + } +} + +impl<'a> Serializer for NotifyInventory<'a> { + fn read(reader: &mut Reader) -> Result 
{ + let count = reader.read_u16()?; + if count > NOTIFY_MAX_LEN as u16 { + return Err(ReaderError::InvalidSize); + } + + let mut txs = HashSet::with_capacity(count as usize); + for _ in 0..count { + txs.insert(Cow::Owned(reader.read_hash()?)); + } + + Ok(Self::new(Cow::Owned(txs))) + } + + fn write(&self, writer: &mut Writer) { + writer.write_u16(self.txs.len() as u16); + for tx in self.txs.iter() { + writer.write_hash(tx); + } + } +} \ No newline at end of file diff --git a/xelis_daemon/src/p2p/packet/mod.rs b/xelis_daemon/src/p2p/packet/mod.rs index 1cc52af6..da64eb6a 100644 --- a/xelis_daemon/src/p2p/packet/mod.rs +++ b/xelis_daemon/src/p2p/packet/mod.rs @@ -2,7 +2,9 @@ pub mod handshake; pub mod chain; pub mod ping; pub mod object; +pub mod inventory; +use self::inventory::NotifyInventory; use self::object::{ObjectRequest, ObjectResponse}; use self::chain::{ChainRequest, ChainResponse}; use self::handshake::Handshake; @@ -24,6 +26,7 @@ const CHAIN_RESPONSE_ID: u8 = 4; const PING_ID: u8 = 5; const OBJECT_REQUEST_ID: u8 = 6; const OBJECT_RESPONSE_ID: u8 = 7; +const NOTIFY_INV_ID: u8 = 8; // PacketWrapper allows us to link any Packet to a Ping #[derive(Debug)] @@ -73,7 +76,8 @@ pub enum Packet<'a> { ChainResponse(ChainResponse), Ping(Cow<'a, Ping<'a>>), ObjectRequest(Cow<'a, ObjectRequest>), - ObjectResponse(ObjectResponse<'a>) + ObjectResponse(ObjectResponse<'a>), + NotifyInventory(PacketWrapper<'a, NotifyInventory<'a>>) } impl<'a> Serializer for Packet<'a> { @@ -105,7 +109,8 @@ impl<'a> Serializer for Packet<'a> { Packet::ChainResponse(response) => (CHAIN_RESPONSE_ID, response), Packet::Ping(ping) => (PING_ID, ping.as_ref()), Packet::ObjectRequest(request) => (OBJECT_REQUEST_ID, request.as_ref()), - Packet::ObjectResponse(response) => (OBJECT_RESPONSE_ID, response) + Packet::ObjectResponse(response) => (OBJECT_RESPONSE_ID, response), + Packet::NotifyInventory(inventory) => (NOTIFY_INV_ID, inventory) }; let packet = serializer.to_bytes(); diff --git 
a/xelis_daemon/src/p2p/peer.rs b/xelis_daemon/src/p2p/peer.rs index f49587ab..44a2adf1 100644 --- a/xelis_daemon/src/p2p/peer.rs +++ b/xelis_daemon/src/p2p/peer.rs @@ -47,7 +47,9 @@ pub struct Peer { last_ping: AtomicU64, // last time we got a ping packet from this peer cumulative_difficulty: AtomicU64, // cumulative difficulty of peer chain txs_cache: Mutex>, // All transactions propagated to/from this peer - blocks_propagation: Mutex> // last blocks propagated to/from this peer + blocks_propagation: Mutex>, // last blocks propagated to/from this peer + last_inventory: AtomicU64, // last time we got an inventory packet from this peer + requested_inventory: AtomicBool // if we requested this peer to send us an inventory notification } impl Peer { @@ -74,7 +76,9 @@ impl Peer { last_ping: AtomicU64::new(0), cumulative_difficulty: AtomicU64::new(cumulative_difficulty), txs_cache: Mutex::new(LruCache::new(128)), - blocks_propagation: Mutex::new(LruCache::new(STABLE_LIMIT as usize * TIPS_LIMIT)) + blocks_propagation: Mutex::new(LruCache::new(STABLE_LIMIT as usize * TIPS_LIMIT)), + last_inventory: AtomicU64::new(0), + requested_inventory: AtomicBool::new(false) } } @@ -257,6 +261,22 @@ impl Peer { self.last_ping.store(value, Ordering::Release) } + pub fn get_last_inventory(&self) -> u64 { + self.last_inventory.load(Ordering::Acquire) + } + + pub fn set_last_inventory(&self, value: u64) { + self.last_inventory.store(value, Ordering::Release) + } + + pub fn has_requested_inventory(&self) -> bool { + self.requested_inventory.load(Ordering::Acquire) + } + + pub fn set_requested_inventory(&self, value: bool) { + self.requested_inventory.store(value, Ordering::Release) + } + pub async fn close(&self) -> Result<(), P2pError> { trace!("Closing connection with {}", self); let mut peer_list = self.peer_list.write().await; From 5e9c1ba06fb2dec4e907956c562862d0cf243ea8 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 28 Apr 2023 01:23:16 +0200 Subject: [PATCH 41/74] daemon: no clone 
--- xelis_daemon/src/p2p/mod.rs | 13 +++++++------ xelis_daemon/src/p2p/packet/inventory.rs | 4 ++-- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 62cd2d94..e6cd547e 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -963,6 +963,7 @@ impl P2pServer { Packet::NotifyInventory(packet_wrapper) => { trace!("Received a notify inventory from {}", peer); let (inventory, ping) = packet_wrapper.consume(); + let inventory = inventory.into_owned(); ping.into_owned().update_peer(peer).await; if !peer.has_requested_inventory() { @@ -975,22 +976,22 @@ impl P2pServer { peer.set_last_inventory(get_current_time()); // check and add if we are missing a TX in our mempool or storage - let mut missing_txs: Vec<&Cow> = Vec::new(); + let mut missing_txs: Vec = Vec::new(); { let mempool = self.blockchain.get_mempool().read().await; let storage = self.blockchain.get_storage().read().await; - for hash in inventory.get_txs().iter() { - if !mempool.contains_tx(hash) && !storage.has_transaction(&hash).await? { - missing_txs.push(hash); + for hash in inventory.get_txs().into_owned() { + if !mempool.contains_tx(&hash) && !storage.has_transaction(&hash).await? 
{ + missing_txs.push(hash.into_owned()); } } } - // retrieve all txs we don't have concurrently + // second part is to retrieve all txs we don't have concurrently + // we don't want to block the peer and others locks for too long so we do it in a separate task for hash in missing_txs { let peer = Arc::clone(&peer); let blockchain = Arc::clone(&self.blockchain); - let hash = hash.as_ref().clone(); tokio::spawn(async move { let response = match peer.request_blocking_object(ObjectRequest::Transaction(hash)).await { Ok(response) => response, diff --git a/xelis_daemon/src/p2p/packet/inventory.rs b/xelis_daemon/src/p2p/packet/inventory.rs index 6230b29e..a95f7d50 100644 --- a/xelis_daemon/src/p2p/packet/inventory.rs +++ b/xelis_daemon/src/p2p/packet/inventory.rs @@ -14,8 +14,8 @@ impl<'a> NotifyInventory<'a> { Self { txs } } - pub fn get_txs(&self) -> &Cow<'a, HashSet>> { - &self.txs + pub fn get_txs(self) -> Cow<'a, HashSet>> { + self.txs } } From 5418e07d863776b840d0e1dc7e13de8851afac43 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 29 Apr 2023 12:05:15 +0200 Subject: [PATCH 42/74] workspace: enable `lto` for better performances --- .gitignore | 7 ++++++- Cargo.toml | 14 +++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index f4816289..a1833608 100644 --- a/.gitignore +++ b/.gitignore @@ -6,4 +6,9 @@ dev/ .vscode/ wallets/ peerlist-*.json -build/ \ No newline at end of file +build/ + +# ignore all profiling files +flamegraph.svg +perf.data +perf.data.old \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index 2e13c7c1..dbe18c92 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,4 +5,16 @@ members = [ "xelis_wallet", "xelis_miner", "xelis_daemon" -] \ No newline at end of file +] + +# cargo run --release +[profile.release] +opt-level = 3 +debug-assertions = false +overflow-checks = false +lto = true + +# cargo run --profile release-no-lto +[profile.release-no-lto] +inherits = "release" +lto = false \ No 
newline at end of file From 5cd755b14e7b7065ffbb8febb29d9a7efd27ca31 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 29 Apr 2023 12:15:22 +0200 Subject: [PATCH 43/74] miner: delete unused code --- xelis_miner/src/main.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/xelis_miner/src/main.rs b/xelis_miner/src/main.rs index 807f43f4..74d9bd70 100644 --- a/xelis_miner/src/main.rs +++ b/xelis_miner/src/main.rs @@ -324,10 +324,6 @@ fn start_thread(id: u8, mut job_receiver: broadcast::Receiver Date: Sun, 30 Apr 2023 11:44:03 +0200 Subject: [PATCH 44/74] daemon: pre check in mempool --- xelis_daemon/src/core/mempool.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/xelis_daemon/src/core/mempool.rs b/xelis_daemon/src/core/mempool.rs index e9c3d308..c71a77ed 100644 --- a/xelis_daemon/src/core/mempool.rs +++ b/xelis_daemon/src/core/mempool.rs @@ -126,6 +126,11 @@ impl Mempool { // delete all old txs not compatible anymore with current state of account pub async fn clean_up(&mut self, nonces: HashMap) { + if self.nonces.is_empty() || nonces.is_empty() { + debug!("No mempool cleanup needed"); + return; + } + debug!("Cleaning up mempool ({} accounts)...", nonces.len()); for (key, nonce) in nonces { let mut delete_cache = false; @@ -155,9 +160,8 @@ impl Mempool { !delete }); - if cache.txs.is_empty() { - delete_cache = true; - } + // delete the nonce cache if no txs are left + delete_cache = cache.txs.is_empty(); // now delete all necessary txs for hash in hashes { From 29075d294a7c811da609d6ff2ba993044de9ea2f Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 30 Apr 2023 12:06:30 +0200 Subject: [PATCH 45/74] daemon: fix rewind chain method --- xelis_daemon/src/core/blockchain.rs | 20 +++++++++----------- xelis_daemon/src/core/error.rs | 14 ++++++++------ xelis_daemon/src/core/storage/sled.rs | 10 +++++----- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs 
index 8179d974..3d45727b 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -761,10 +761,10 @@ impl Blockchain { let owner = tx.get_owner(); // get the highest nonce available // if presents, it means we have at least one tx from this owner in mempool - if let Some(nonce) = mempool.get_cached_nonce(owner) { + if let Some(nonce) = mempool.get_cached_nonce(owner) { // check that the nonce is in the range if !(tx.get_nonce() <= nonce.get_max() + 1 && tx.get_nonce() >= nonce.get_min()) { - return Err(BlockchainError::InvalidTxNonce) + return Err(BlockchainError::InvalidTxNonceMempoolCache) } // compute balances of previous pending TXs @@ -1506,11 +1506,13 @@ impl Blockchain { let mut keys = HashSet::new(); // merge miners keys for key in &miners { + debug!("Adding miner key {}", key); keys.insert(key); } // Add dev address in rewinding in case we receive dev fees if DEV_FEE_PERCENT != 0 { + debug!("Adding dev key {}", *DEV_PUBLIC_KEY); keys.insert(&DEV_PUBLIC_KEY); } @@ -1694,14 +1696,14 @@ impl Blockchain { if *nonce != tx.get_nonce() { debug!("Tx {} has nonce {} but expected {}", hash, tx.get_nonce(), nonce); - return Err(BlockchainError::InvalidTxNonce) + return Err(BlockchainError::InvalidTxNonce(tx.get_nonce(), *nonce, tx.get_owner().clone())) } // we increment it in case any new tx for same owner is following *nonce += 1; } else { let nonce = storage.get_nonce(tx.get_owner()).await?; if nonce != tx.get_nonce() { - return Err(BlockchainError::InvalidTxNonce) + return Err(BlockchainError::InvalidTxNonce(tx.get_nonce(), nonce, tx.get_owner().clone())) } } } @@ -1805,15 +1807,11 @@ impl Blockchain { TransactionType::Transfer(txs) => { for output in txs { keys.insert(&output.to); - if !assets.contains(&output.asset) { - assets.insert(&output.asset); - } + assets.insert(&output.asset); } }, TransactionType::Burn { asset, amount: _ } => { - if !assets.contains(asset) { - assets.insert(asset); - } + assets.insert(asset); }, _ => 
{ return Err(BlockchainError::SmartContractTodo) @@ -1822,7 +1820,7 @@ impl Blockchain { // keep the lowest nonce available let nonce = nonces.entry(transaction.get_owner()).or_insert(transaction.get_nonce()); - if *nonce < transaction.get_nonce() { + if *nonce > transaction.get_nonce() { *nonce = transaction.get_nonce(); } Ok(()) diff --git a/xelis_daemon/src/core/error.rs b/xelis_daemon/src/core/error.rs index 6cf60e77..326d8779 100644 --- a/xelis_daemon/src/core/error.rs +++ b/xelis_daemon/src/core/error.rs @@ -139,16 +139,18 @@ pub enum BlockchainError { BlockDeviation, #[error("Invalid genesis block hash")] InvalidGenesisHash, - #[error("Invalid tx nonce for account")] - InvalidTxNonce, + #[error("Invalid tx nonce (got {} expected {}) for {}", _0, _1, _2)] + InvalidTxNonce(u64, u64, PublicKey), + #[error("Invalid tx nonce for mempool cache")] + InvalidTxNonceMempoolCache, #[error("Invalid asset ID: {}", _0)] AssetNotFound(Hash), #[error(transparent)] DifficultyError(#[from] DifficultyError), - #[error("No balance found on disk")] - NoBalance, - #[error("No balance changes for specific topoheight and asset")] - NoBalanceChanges, + #[error("No balance found on disk for {}", _0)] + NoBalance(PublicKey), + #[error("No balance changes for {} at specific topoheight and asset", _0)] + NoBalanceChanges(PublicKey), #[error("Overflow detected")] Overflow, #[error("Error, block include a dead tx {}", _0)] diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index c96a7f1a..9bf4dca5 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -443,7 +443,7 @@ impl Storage for SledStorage { trace!("has balance {} for {} at exact topoheight {}", asset, key, topoheight); // check first that this address has balance, if no returns if !self.has_balance_for(key, asset).await? 
{ - return Err(BlockchainError::NoBalanceChanges) + return Err(BlockchainError::NoBalanceChanges(key.clone())) } let tree = self.get_versioned_balance_tree(asset, topoheight).await?; @@ -456,11 +456,11 @@ impl Storage for SledStorage { trace!("get balance {} for {} at exact topoheight {}", asset, key, topoheight); // check first that this address has balance, if no returns if !self.has_balance_for(key, asset).await? { - return Err(BlockchainError::NoBalanceChanges) + return Err(BlockchainError::NoBalanceChanges(key.clone())) } let tree = self.get_versioned_balance_tree(asset, topoheight).await?; - self.get_data(&tree, &None, key).await.map_err(|_| BlockchainError::NoBalanceChanges) + self.get_data(&tree, &None, key).await.map_err(|_| BlockchainError::NoBalanceChanges(key.clone())) } // delete the last topoheight registered for this key @@ -516,7 +516,7 @@ impl Storage for SledStorage { async fn delete_balance_at_topoheight(&mut self, key: &PublicKey, asset: &Hash, topoheight: u64) -> Result { trace!("delete balance {} for {} at topoheight {}", asset, key, topoheight); let tree = self.get_versioned_balance_tree(asset, topoheight).await?; - self.delete_data_no_arc(&tree, &None, key).await.map_err(|_| BlockchainError::NoBalanceChanges) + self.delete_data_no_arc(&tree, &None, key).await.map_err(|_| BlockchainError::NoBalanceChanges(key.clone())) } // returns a new versioned balance with already-set previous topoheight @@ -550,7 +550,7 @@ impl Storage for SledStorage { async fn get_last_balance(&self, key: &PublicKey, asset: &Hash) -> Result<(u64, VersionedBalance), BlockchainError> { trace!("get last balance {} for {}", asset, key); if !self.has_balance_for(key, asset).await? 
{ - return Err(BlockchainError::NoBalance) + return Err(BlockchainError::NoBalance(key.clone())) } let tree = self.db.open_tree(asset.as_bytes())?; From 141ac0351a677622fef313a00979ad2ec15a0785 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 30 Apr 2023 12:39:01 +0200 Subject: [PATCH 46/74] remove fixed TODO --- xelis_common/src/api/daemon.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index 33427e0b..25d45211 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -154,7 +154,6 @@ pub struct TransactionResponse<'a, T: Clone> { pub blocks: Option>, // in which blocks it was executed pub executed_in_block: Option, - // TODO executed_block which give the hash of the block in which this tx got executed #[serde(flatten)] pub data: DataHash<'a, T> } From 3f15add5df9729f0acbbfc8b091e75d6b98e9603 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 30 Apr 2023 22:48:40 +0200 Subject: [PATCH 47/74] daemon: `get_blocks_range_by_height` rpc method --- API.md | 102 ++++++++++++++++++++++++++++++++- xelis_common/src/api/daemon.rs | 8 ++- xelis_daemon/src/rpc/rpc.rs | 62 ++++++++++++++------ 3 files changed, 151 insertions(+), 21 deletions(-) diff --git a/API.md b/API.md index eb291b40..c9aad98b 100644 --- a/API.md +++ b/API.md @@ -881,10 +881,10 @@ Submit a block to the daemon } ``` -#### Get Blocks +#### Get Blocks Range By TopoHeight Retrieve a specific range of blocks (up to 20 maximum) based on topoheight -##### Method `get_blocks` +##### Method `get_blocks_range_by_topoheight` ##### Parameters | Name | Type | Required | Note | @@ -897,7 +897,7 @@ Retrieve a specific range of blocks (up to 20 maximum) based on topoheight { "jsonrpc": "2.0", "id": 1, - "method": "get_blocks", + "method": "get_blocks_range_by_topoheight", "params": { "start_topoheight": 0, "end_topoheight": 2 @@ -905,6 +905,102 @@ Retrieve a specific range of blocks (up to 20 maximum) based on topoheight } ``` +##### 
Response +```json +{ + "id": 1, + "jsonrpc": "2.0", + "result": [ + { + "block_type": "Sync", + "cumulative_difficulty": 1, + "difficulty": 1, + "extra_nonce": "0000000000000000000000000000000000000000000000000000000000000000", + "hash": "55a162b8e0e137bb6a8de9f4c4b214fb60bcd2df15ec32fdd8f06759b863f06e", + "height": 0, + "miner": "xel1qqqxcfxdc8ywarcz3wx2leahnfn2pyp0ymvfm42waluq408j2x5680gtl9ky3", + "nonce": 0, + "reward": 877380, + "supply": 877380, + "timestamp": 1678215431432, + "tips": [], + "topoheight": 0, + "total_fees": null, + "total_size_in_bytes": 99, + "txs_hashes": [], + "version": 0 + }, + { + "block_type": "Sync", + "cumulative_difficulty": 150001, + "difficulty": 150000, + "extra_nonce": "e9a96f6130943e4ce3cbd6d4999efa1ca28020be6119f3da77dbcc837731600e", + "hash": "000011152d66bfe7a2b1d2e18a09a94c1d1593ae8ddeafcfc8f1b8c2b03b7995", + "height": 1, + "miner": "xel1qqqd2jtz9f2u3z6uznpx8mqdkh6llt3yn3eg3a5tpsfn8jcsthufg5q08670u", + "nonce": 3837, + "reward": 877379, + "supply": 1754759, + "timestamp": 1678215668838, + "tips": [ + "55a162b8e0e137bb6a8de9f4c4b214fb60bcd2df15ec32fdd8f06759b863f06e" + ], + "topoheight": 1, + "total_fees": null, + "total_size_in_bytes": 131, + "txs_hashes": [], + "version": 0 + }, + { + "block_type": "Sync", + "cumulative_difficulty": 300001, + "difficulty": 150000, + "extra_nonce": "f7c22d4f517c384493fa271304b885d1f092ab969a87e901fe9245ad0ca4490d", + "hash": "0000631d920e582069e47149adc53dfe8bb009163c94715d33e81e71b7a8dca3", + "height": 2, + "miner": "xel1qqqd2jtz9f2u3z6uznpx8mqdkh6llt3yn3eg3a5tpsfn8jcsthufg5q08670u", + "nonce": 1113, + "reward": 877379, + "supply": 2632138, + "timestamp": 1678215668843, + "tips": [ + "000011152d66bfe7a2b1d2e18a09a94c1d1593ae8ddeafcfc8f1b8c2b03b7995" + ], + "topoheight": 2, + "total_fees": null, + "total_size_in_bytes": 131, + "txs_hashes": [], + "version": 0 + } + ] +} +``` + + +#### Get Blocks Range By Height +Retrieve a specific range of blocks (up to 20 maximum) based on height + +##### 
Method `get_blocks_range_by_height` + +##### Parameters +| Name | Type | Required | Note | +|:------------:|:-------:|:--------:|:----------------------------------------:| +| start_height | Integer | Optional | If not set, will retrieve last 20 blocks | +| end_height | Integer | Optional | Must be under current height | + +##### Request +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "get_blocks_range_by_height", + "params": { + "start_height": 0, + "end_height": 2 + } +} +``` + ##### Response ```json { diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index 25d45211..1264a8a2 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -138,11 +138,17 @@ pub struct P2pStatusResult<'a> { } #[derive(Serialize, Deserialize)] -pub struct GetRangeParams { +pub struct GetTopoHeightRangeParams { pub start_topoheight: Option, pub end_topoheight: Option } +#[derive(Serialize, Deserialize)] +pub struct GetHeightRangeParams { + pub start_height: Option, + pub end_height: Option +} + #[derive(Serialize, Deserialize)] pub struct GetTransactionsParams { pub tx_hashes: Vec diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index cf3462dc..1025df37 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -17,7 +17,7 @@ use xelis_common::{ GetTransactionParams, P2pStatusResult, GetBlocksAtHeightParams, - GetRangeParams, GetBalanceAtTopoHeightParams, GetLastBalanceResult, GetInfoResult, GetTopBlockParams, GetTransactionsParams, TransactionResponse + GetTopoHeightRangeParams, GetBalanceAtTopoHeightParams, GetLastBalanceResult, GetInfoResult, GetTopBlockParams, GetTransactionsParams, TransactionResponse, GetHeightRangeParams }, DataHash}, async_handler, serializer::Serializer, @@ -125,9 +125,9 @@ pub fn register_methods(handler: &mut RPCHandler>> handler.register_method("get_mempool", async_handler!(get_mempool)); handler.register_method("get_tips", async_handler!(get_tips)); 
handler.register_method("get_dag_order", async_handler!(get_dag_order)); - handler.register_method("get_blocks", async_handler!(get_blocks)); + handler.register_method("get_blocks_range_by_topoheight", async_handler!(get_blocks_range_by_topoheight)); + handler.register_method("get_blocks_range_by_height", async_handler!(get_blocks_range_by_height)); handler.register_method("get_transactions", async_handler!(get_transactions)); - } async fn version(_: Arc>, body: Value) -> Result { @@ -380,7 +380,7 @@ const MAX_DAG_ORDER: u64 = 64; // get dag order based on params // if no params found, get order of last 64 blocks async fn get_dag_order(blockchain: Arc>, body: Value) -> Result { - let params: GetRangeParams = parse_params(body)?; + let params: GetTopoHeightRangeParams = parse_params(body)?; let current_topoheight = blockchain.get_topo_height(); let start_topoheight = params.start_topoheight.unwrap_or_else(|| { @@ -414,34 +414,41 @@ async fn get_dag_order(blockchain: Arc>, body: Value) } const MAX_BLOCKS: u64 = 20; -// get blocks between range of topoheight -// if no params found, get last 20 blocks header -async fn get_blocks(blockchain: Arc>, body: Value) -> Result { - let params: GetRangeParams = parse_params(body)?; - let current_topoheight = blockchain.get_topo_height(); - let start_topoheight = params.start_topoheight.unwrap_or_else(|| { - if params.end_topoheight.is_none() && current_topoheight > MAX_BLOCKS { - current_topoheight - MAX_BLOCKS +fn get_range(start: Option, end: Option, current: u64) -> Result<(u64, u64), InternalRpcError> { + let range_start = start.unwrap_or_else(|| { + if end.is_none() && current > MAX_BLOCKS { + current - MAX_BLOCKS } else { 0 } }); - let end_topoheight = params.end_topoheight.unwrap_or(current_topoheight); - if end_topoheight < start_topoheight || end_topoheight > current_topoheight { - debug!("get blocks range: start = {}, end = {}, max = {}", start_topoheight, end_topoheight, current_topoheight); + let range_end = 
end.unwrap_or(current); + if range_end < range_start || range_end > current { + debug!("get blocks range by topo height: start = {}, end = {}, max = {}", range_start, range_end, current); return Err(InternalRpcError::InvalidRequest) } - let count = end_topoheight - start_topoheight; + let count = range_end - range_start; if count > MAX_BLOCKS { // only retrieve max 20 blocks hash per request debug!("get blocks requested count: {}", count); return Err(InternalRpcError::InvalidRequest) } + Ok((range_start, range_end)) +} + +// get blocks between range of topoheight +// if no params found, get last 20 blocks header +async fn get_blocks_range_by_topoheight(blockchain: Arc>, body: Value) -> Result { + let params: GetTopoHeightRangeParams = parse_params(body)?; + + let current_topoheight = blockchain.get_topo_height(); + let (start_topoheight, end_topoheight) = get_range(params.start_topoheight, params.end_topoheight, current_topoheight)?; + let storage = blockchain.get_storage().read().await; - let mut blocks = Vec::with_capacity(count as usize); + let mut blocks = Vec::with_capacity((end_topoheight - start_topoheight) as usize); for i in start_topoheight..=end_topoheight { let hash = storage.get_hash_at_topo_height(i).await.context("Error while retrieving hash at topo height")?; let response = get_block_response_for_hash(&blockchain, &storage, hash, false).await?; @@ -451,6 +458,27 @@ async fn get_blocks(blockchain: Arc>, body: Value) -> Ok(json!(blocks)) } +// get blocks between range of height +// if no params found, get last 20 blocks header +// you can only request +async fn get_blocks_range_by_height(blockchain: Arc>, body: Value) -> Result { + let params: GetHeightRangeParams = parse_params(body)?; + let current_height = blockchain.get_height(); + let (start_height, end_height) = get_range(params.start_height, params.end_height, current_height)?; + + let storage = blockchain.get_storage().read().await; + let mut blocks = Vec::with_capacity((end_height - 
start_height) as usize); + for i in start_height..=end_height { + let blocks_at_height = storage.get_blocks_at_height(i).await.context("Error while retrieving blocks at height")?; + for hash in blocks_at_height { + let response = get_block_response_for_hash(&blockchain, &storage, hash, false).await?; + blocks.push(response); + } + } + + Ok(json!(blocks)) +} + const MAX_TXS: usize = 20; // get up to 20 transactions at once // if a tx hash is not present, we keep the order and put json "null" value From 40a4baca718a2f021e56b59b2ab48f1166e60e08 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 10 May 2023 00:25:32 +0200 Subject: [PATCH 48/74] common: fix unsubscribe --- xelis_common/src/rpc_server/websocket/handler.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xelis_common/src/rpc_server/websocket/handler.rs b/xelis_common/src/rpc_server/websocket/handler.rs index c11354f6..7a8cc94b 100644 --- a/xelis_common/src/rpc_server/websocket/handler.rs +++ b/xelis_common/src/rpc_server/websocket/handler.rs @@ -55,7 +55,7 @@ where async fn unsubscribe_session_from_event(&self, session: &WebSocketSessionShared, event: E, id: Option) -> Result<(), RpcResponseError> { let mut sessions = self.sessions.lock().await; let events = sessions.entry(session.clone()).or_insert_with(HashMap::new); - if events.contains_key(&event) { + if !events.contains_key(&event) { return Err(RpcResponseError::new(id, InternalRpcError::EventNotSubscribed)); } From f9a58afd7901b47bdb9cc59995f6c9776036e72c Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 10 May 2023 00:52:53 +0200 Subject: [PATCH 49/74] common: add temporary debug messages --- xelis_common/src/rpc_server/websocket/mod.rs | 22 +++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/xelis_common/src/rpc_server/websocket/mod.rs b/xelis_common/src/rpc_server/websocket/mod.rs index 35e1614c..662a21a3 100644 --- a/xelis_common/src/rpc_server/websocket/mod.rs +++ 
b/xelis_common/src/rpc_server/websocket/mod.rs @@ -116,16 +116,21 @@ impl WebSocketServer where H: WebSocketHandler + 'static { } pub async fn handle_connection(self: &Arc, request: &HttpRequest, body: Payload) -> Result { + debug!("Handling new WebSocket connection"); let (response, session, stream) = actix_ws::handle(request, body)?; + let id = self.next_id(); + debug!("Created new WebSocketSession with id {}", id); let session = Arc::new(WebSocketSession { - id: self.next_id(), + id, server: Arc::clone(&self), inner: Mutex::new(Some(session)), }); { + debug!("Inserting session #{} into sessions", id); let mut sessions = self.sessions.lock().await; - sessions.insert(Arc::clone(&session)); + let res = sessions.insert(Arc::clone(&session)); + debug!("Session #{} has been inserted into sessions: {}", id, res); } actix_rt::spawn(Arc::clone(self).handle_ws_internal(session.clone(), stream)); @@ -175,24 +180,31 @@ impl WebSocketServer where H: WebSocketHandler + 'static { break Some(CloseReason::from(CloseCode::Error)); } }, - None => break None, + None => { + debug!("Stream closed for session #{}", session.id); + break None + }, }; // handle message match msg { Message::Text(text) => { + debug!("Received text message for session #{}: {}", session.id, text); if let Err(e) = self.handler.on_message(&session, text.as_bytes()).await { debug!("Error while calling on_message: {}", e); break Some(CloseReason::from(CloseCode::Error)); } }, - Message::Close(reason) => break reason, + Message::Close(reason) => { + debug!("Received close message for session #{}: {:?}", session.id, reason); + break reason; + }, msg => { debug!("Received websocket message not supported: {:?}", msg); } } }; - + // attempt to close connection gracefully self.delete_session(&session, reason).await; } From 572aefa7600ffe0557c80a1e6c94a033a9f56c40 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 10 May 2023 17:17:32 +0200 Subject: [PATCH 50/74] common: add heartbeat system to websocket sessions --- 
xelis_common/src/rpc_server/websocket/mod.rs | 113 +++++++++++++------ 1 file changed, 81 insertions(+), 32 deletions(-) diff --git a/xelis_common/src/rpc_server/websocket/mod.rs b/xelis_common/src/rpc_server/websocket/mod.rs index 662a21a3..8cbfe4a3 100644 --- a/xelis_common/src/rpc_server/websocket/mod.rs +++ b/xelis_common/src/rpc_server/websocket/mod.rs @@ -1,18 +1,21 @@ mod handler; -use std::{sync::{Arc, atomic::{AtomicU64, Ordering}}, collections::HashSet, hash::{Hash, Hasher}}; +use std::{sync::{Arc, atomic::{AtomicU64, Ordering}}, collections::HashSet, hash::{Hash, Hasher}, time::{Duration, Instant}}; use actix_web::{HttpRequest, web::Payload, HttpResponse}; use actix_ws::{Session, MessageStream, Message, CloseReason, CloseCode}; use async_trait::async_trait; use futures_util::StreamExt; -use log::debug; -use tokio::sync::Mutex; +use log::{debug, trace}; +use tokio::{sync::Mutex, select}; pub use self::handler::EventWebSocketHandler; pub type WebSocketServerShared = Arc>; pub type WebSocketSessionShared = Arc>; +const KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(5); +const KEEP_ALIVE_TIME_OUT: Duration = Duration::from_secs(30); + #[derive(Debug, thiserror::Error)] pub enum WebSocketError { #[error(transparent)] @@ -39,6 +42,20 @@ where res } + pub async fn ping(&self) -> Result<(), WebSocketError> { + let mut inner = self.inner.lock().await; + let session = inner.as_mut().ok_or(WebSocketError::SessionAlreadyClosed)?; + session.ping(b"").await?; + Ok(()) + } + + pub async fn pong(&self) -> Result<(), WebSocketError> { + let mut inner = self.inner.lock().await; + let session = inner.as_mut().ok_or(WebSocketError::SessionAlreadyClosed)?; + session.pong(b"").await?; + Ok(()) + } + async fn send_text_internal>(&self, value: S) -> Result<(), WebSocketError> { let mut inner = self.inner.lock().await; inner.as_mut().ok_or(WebSocketError::SessionAlreadyClosed)?.text(value.into()).await?; @@ -169,40 +186,72 @@ impl WebSocketServer where H: WebSocketHandler + 
'static { return; } - // TODO implement heartbeat + let mut interval = actix_rt::time::interval(KEEP_ALIVE_INTERVAL); + let mut last_pong_received = Instant::now(); let reason = loop { - // wait for next message - let msg = match stream.next().await { - Some(msg) => match msg { - Ok(msg) => msg, - Err(e) => { - debug!("Error while receiving message: {}", e); - break Some(CloseReason::from(CloseCode::Error)); + select! { + // heartbeat + _ = interval.tick() => { + trace!("Sending ping to session #{}", session.id); + if let Err(e) = session.ping().await { + debug!("Error while sending ping to session #{}: {}", session.id, e); + break None; } - }, - None => { - debug!("Stream closed for session #{}", session.id); - break None - }, - }; - - // handle message - match msg { - Message::Text(text) => { - debug!("Received text message for session #{}: {}", session.id, text); - if let Err(e) = self.handler.on_message(&session, text.as_bytes()).await { - debug!("Error while calling on_message: {}", e); - break Some(CloseReason::from(CloseCode::Error)); + + if last_pong_received.elapsed() > KEEP_ALIVE_TIME_OUT { + debug!("session #{} didn't respond in time from our ping", session.id); + break None; } }, - Message::Close(reason) => { - debug!("Received close message for session #{}: {:?}", session.id, reason); - break reason; - }, - msg => { - debug!("Received websocket message not supported: {:?}", msg); + // wait for next message + res = stream.next() => { + let msg = match res { + Some(msg) => match msg { + Ok(msg) => msg, + Err(e) => { + debug!("Error while receiving message: {}", e); + break Some(CloseReason::from(CloseCode::Error)); + } + }, + None => { + debug!("Stream closed for session #{}", session.id); + break None + }, + }; + + // handle message + match msg { + Message::Text(text) => { + debug!("Received text message for session #{}: {}", session.id, text); + if let Err(e) = self.handler.on_message(&session, text.as_bytes()).await { + debug!("Error while calling 
on_message: {}", e); + break Some(CloseReason::from(CloseCode::Error)); + } + }, + Message::Close(reason) => { + debug!("Received close message for session #{}: {:?}", session.id, reason); + break reason; + }, + Message::Ping(data) => { + debug!("Received ping message with size {} bytes from session #{}", data.len(), session.id); + if let Err(e) = session.pong().await { + debug!("Error received while sending pong response to session #{}: {}", session.id, e); + break None; + } + }, + Message::Pong(data) => { + if !data.is_empty() { + debug!("Data in pong message is not empty for session #{}", session.id); + break None; + } + last_pong_received = Instant::now(); + }, + msg => { + debug!("Received websocket message not supported: {:?}", msg); + } + } } - } + }; }; // attempt to close connection gracefully From 64396a2c39703fd887c89d1f6385d30c237ddcf7 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 12 May 2023 00:45:00 +0200 Subject: [PATCH 51/74] daemon: add `pruned_height` logic --- API.md | 1 + xelis_common/src/api/daemon.rs | 1 + xelis_common/src/serializer/mod.rs | 30 ++++++++++++++++- xelis_daemon/src/core/blockchain.rs | 2 +- xelis_daemon/src/core/storage/mod.rs | 5 ++- xelis_daemon/src/core/storage/sled.rs | 23 +++++++++++-- xelis_daemon/src/p2p/mod.rs | 40 ++++++++++++++-------- xelis_daemon/src/p2p/packet/handshake.rs | 14 +++++--- xelis_daemon/src/p2p/packet/ping.rs | 42 ++++++++++++++++++------ xelis_daemon/src/p2p/peer.rs | 40 +++++++++++++++++++--- xelis_daemon/src/rpc/rpc.rs | 6 ++-- 11 files changed, 164 insertions(+), 40 deletions(-) diff --git a/API.md b/API.md index c9aad98b..7e37be81 100644 --- a/API.md +++ b/API.md @@ -56,6 +56,7 @@ No parameters "block_time_target": 15000, "difficulty": 310532, "height": 9, + "pruned_height": null, "mempool_size": 0, "native_supply": 8773780, "stableheight": 1, diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index 1264a8a2..35a2dd50 100644 --- a/xelis_common/src/api/daemon.rs +++ 
b/xelis_common/src/api/daemon.rs @@ -107,6 +107,7 @@ pub struct GetInfoResult { pub height: u64, pub topoheight: u64, pub stableheight: u64, + pub pruned_height: Option, pub top_hash: Hash, pub native_supply: u64, pub difficulty: Difficulty, diff --git a/xelis_common/src/serializer/mod.rs b/xelis_common/src/serializer/mod.rs index dedcedb1..245d4825 100644 --- a/xelis_common/src/serializer/mod.rs +++ b/xelis_common/src/serializer/mod.rs @@ -71,6 +71,17 @@ impl Writer { }; } + pub fn write_optional_u64(&mut self, opt: &Option) { + match opt { + Some(v) => { + self.write_u64(v); + }, + None => { + self.bytes.push(0); + } + }; + } + pub fn total_write(&self) -> usize { self.bytes.len() } @@ -104,7 +115,12 @@ impl<'a> Reader<'a> { } pub fn read_bool(&mut self) -> Result { - Ok(self.read_u8()? == 1) + let byte = self.read_u8()?; + match byte { + 0 => Ok(false), + 1 => Ok(true), + _ => Err(ReaderError::InvalidValue) + } } pub fn read_bytes(&mut self, n: usize) -> Result @@ -191,6 +207,18 @@ impl<'a> Reader<'a> { } } + pub fn read_optional_u64(&mut self) -> Result, ReaderError> { + let byte = self.read_u8()?; + if byte == 0 { + return Ok(None) + } + + let mut array = [0; 8]; + array[0] = byte; + array[1..].copy_from_slice(&self.read_bytes::<[u8; 7]>(7)?); + Ok(Some(u64::from_be_bytes(array))) + } + pub fn read_big_uint(&mut self) -> Result { let size = self.read_u8()?; let bytes = self.read_bytes_ref(size as usize)?; diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 3d45727b..782ea12f 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1358,7 +1358,7 @@ impl Blockchain { if broadcast { if let Some(p2p) = self.p2p.lock().await.as_ref() { debug!("broadcast block to peers"); - p2p.broadcast_block(&block, cumulative_difficulty, current_topoheight, current_height, &block_hash).await; + p2p.broadcast_block(&block, cumulative_difficulty, current_topoheight, current_height, 
storage.get_pruned_height()?, &block_hash).await; } } diff --git a/xelis_daemon/src/core/storage/mod.rs b/xelis_daemon/src/core/storage/mod.rs index 307278a3..c0c9967a 100644 --- a/xelis_daemon/src/core/storage/mod.rs +++ b/xelis_daemon/src/core/storage/mod.rs @@ -25,7 +25,10 @@ pub trait DifficultyProvider { } #[async_trait] -pub trait Storage: DifficultyProvider + Sync + Send + 'static { // TODO delete these traits +pub trait Storage: DifficultyProvider + Sync + Send + 'static { + fn get_pruned_height(&self) -> Result, BlockchainError>; + fn set_pruned_height(&mut self, pruned_height: u64) -> Result<(), BlockchainError>; + fn get_block_executer_for_tx(&self, tx: &Hash) -> Result; fn set_tx_executed_in_block(&mut self, tx: &Hash, block: &Hash) -> Result<(), BlockchainError>; fn remove_tx_executed(&mut self, tx: &Hash) -> Result<(), BlockchainError>; diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index 9bf4dca5..e2f48bbf 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -23,7 +23,7 @@ const TIPS: &[u8; 4] = b"TIPS"; const TOP_TOPO_HEIGHT: &[u8; 4] = b"TOPO"; const TOP_HEIGHT: &[u8; 4] = b"TOPH"; const NETWORK: &[u8] = b"NET"; - +const PRUNED_HEIGHT: &[u8; 4] = b"PRUN"; pub struct SledStorage { transactions: Tree, // all txs stored on disk @@ -51,7 +51,8 @@ pub struct SledStorage { assets_cache: Option>>, nonces_cache: Option>>, balances_trees_cache: Option>>, // versioned balances tree keep in cache to prevent hash recompute - tips_cache: Tips + tips_cache: Tips, + pruned_height: Option } macro_rules! init_cache { @@ -92,7 +93,8 @@ impl SledStorage { assets_cache: init_cache!(cache_size), nonces_cache: init_cache!(cache_size), balances_trees_cache: init_cache!(cache_size), - tips_cache: HashSet::new() + tips_cache: HashSet::new(), + pruned_height: None }; if storage.has_network()? 
{ @@ -109,6 +111,11 @@ impl SledStorage { storage.tips_cache = tips; } + if let Ok(pruned_height) = storage.load_from_disk::(&storage.extra, PRUNED_HEIGHT) { + debug!("Found pruned height: {}", pruned_height); + storage.pruned_height = Some(pruned_height); + } + Ok(storage) } @@ -298,6 +305,16 @@ impl DifficultyProvider for SledStorage { #[async_trait] impl Storage for SledStorage { + fn get_pruned_height(&self) -> Result, BlockchainError> { + Ok(self.pruned_height) + } + + fn set_pruned_height(&mut self, pruned_height: u64) -> Result<(), BlockchainError> { + self.pruned_height = Some(pruned_height); + self.extra.insert(PRUNED_HEIGHT, &pruned_height.to_be_bytes())?; + Ok(()) + } + fn get_block_executer_for_tx(&self, tx: &Hash) -> Result { self.load_from_disk(&self.txs_executed, tx.as_bytes()) } diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index e6cd547e..cc730e93 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -342,8 +342,9 @@ impl P2pServer { let storage = self.blockchain.get_storage().read().await; let (block, top_hash) = storage.get_top_block_header().await?; let topoheight = self.blockchain.get_topo_height(); + let pruned_height = storage.get_pruned_height()?; let cumulative_difficulty = storage.get_cumulative_difficulty_for_block_hash(&top_hash).await.unwrap_or(0); - Ok(Handshake::new(VERSION.to_owned(), *self.blockchain.get_network(), self.get_tag().clone(), NETWORK_ID, self.get_peer_id(), self.bind_address.port(), get_current_time(), topoheight, block.get_height(), top_hash, GENESIS_BLOCK_HASH.clone(), cumulative_difficulty, peers)) + Ok(Handshake::new(VERSION.to_owned(), *self.blockchain.get_network(), self.get_tag().clone(), NETWORK_ID, self.get_peer_id(), self.bind_address.port(), get_current_time(), topoheight, block.get_height(), pruned_height, top_hash, GENESIS_BLOCK_HASH.clone(), cumulative_difficulty, peers)) } // this function handle all new connections @@ -427,20 +428,28 @@ impl P2pServer { // 
build a ping packet with the current state of the blockchain // if a peer is given, we will check and update the peers list async fn build_generic_ping_packet(&self) -> Ping<'_> { - let (cumulative_difficulty, block_top_hash) = { + let (cumulative_difficulty, block_top_hash, pruned_height) = { let storage = self.blockchain.get_storage().read().await; + let pruned_height = match storage.get_pruned_height() { + Ok(pruned_height) => pruned_height, + Err(e) => { + error!("Couldn't get the pruned height from storage for generic ping packet: {}", e); + None + } + }; + match storage.get_top_block_hash().await { Err(e) => { error!("Couldn't get the top block hash from storage for generic ping packet: {}", e); - (0, Hash::zero()) + (0, Hash::zero(), pruned_height) }, - Ok(hash) => (storage.get_cumulative_difficulty_for_block_hash(&hash).await.unwrap_or(0), hash) + Ok(hash) => (storage.get_cumulative_difficulty_for_block_hash(&hash).await.unwrap_or(0), hash, pruned_height) } }; let highest_topo_height = self.blockchain.get_topo_height(); let highest_height = self.blockchain.get_height(); let new_peers = Vec::new(); - Ping::new(Cow::Owned(block_top_hash), highest_topo_height, highest_height, cumulative_difficulty, new_peers) + Ping::new(Cow::Owned(block_top_hash), highest_topo_height, highest_height, pruned_height, cumulative_difficulty, new_peers) } // select a random peer which is greater than us to sync chain @@ -450,7 +459,12 @@ impl P2pServer { let peer_list = self.peer_list.read().await; let our_height = self.blockchain.get_height(); let our_topoheight = self.blockchain.get_topo_height(); - let peers: Vec<&Arc> = peer_list.get_peers().values().filter(|p| p.get_height() > our_height || p.get_topoheight() > our_topoheight).collect(); + // search for peers which are greater than us + // and that are pruned but before our height so we can sync correctly + let peers: Vec<&Arc> = peer_list.get_peers().values().filter(|p| + p.get_pruned_height().unwrap_or(0) < our_height + && 
(p.get_height() > our_height || p.get_topoheight() > our_topoheight) + ).collect(); let count = peers.len(); trace!("peers available for random selection: {}", count); if count == 0 { @@ -665,7 +679,7 @@ impl P2pServer { } txs_cache.put(hash.clone(), ()); - ping.into_owned().update_peer(peer).await; + ping.into_owned().update_peer(peer).await?; let mempool = self.blockchain.get_mempool().read().await; if !mempool.contains_tx(&hash) { let zelf = Arc::clone(self); @@ -700,7 +714,7 @@ impl P2pServer { Packet::BlockPropagation(packet_wrapper) => { trace!("Received a block propagation packet from {}", peer); let (header, ping) = packet_wrapper.consume(); - ping.into_owned().update_peer(peer).await; + ping.into_owned().update_peer(peer).await?; let block_height = header.get_height(); // check that the block height is valid @@ -788,7 +802,7 @@ impl P2pServer { Packet::ChainRequest(packet_wrapper) => { trace!("Received a chain request from {}", peer); let (request, ping) = packet_wrapper.consume(); - ping.into_owned().update_peer(peer).await; + ping.into_owned().update_peer(peer).await?; let request = request.into_owned(); let last_request = peer.get_last_chain_sync(); let time = get_current_time(); @@ -897,7 +911,7 @@ impl P2pServer { self.try_to_connect_to_peer(peer, false); } } - ping.into_owned().update_peer(peer).await; + ping.into_owned().update_peer(peer).await?; }, Packet::ObjectRequest(request) => { trace!("Received a object request from {}", peer); @@ -964,7 +978,7 @@ impl P2pServer { trace!("Received a notify inventory from {}", peer); let (inventory, ping) = packet_wrapper.consume(); let inventory = inventory.into_owned(); - ping.into_owned().update_peer(peer).await; + ping.into_owned().update_peer(peer).await?; if !peer.has_requested_inventory() { debug!("Received a notify inventory from {} but we didn't request it", peer); @@ -1306,11 +1320,11 @@ impl P2pServer { } // broadcast block to all peers that can accept directly this new block - pub async fn 
broadcast_block(&self, block: &BlockHeader, cumulative_difficulty: u64, highest_topoheight: u64, highest_height: u64, hash: &Hash) { + pub async fn broadcast_block(&self, block: &BlockHeader, cumulative_difficulty: u64, highest_topoheight: u64, highest_height: u64, pruned_height: Option, hash: &Hash) { trace!("Broadcast block: {}", hash); // we build the ping packet ourself this time (we have enough data for it) // because this function can be call from Blockchain, which would lead to a deadlock - let ping = Ping::new(Cow::Borrowed(hash), highest_topoheight, highest_height, cumulative_difficulty, Vec::new()); + let ping = Ping::new(Cow::Borrowed(hash), highest_topoheight, highest_height, pruned_height, cumulative_difficulty, Vec::new()); let block_packet = Packet::BlockPropagation(PacketWrapper::new(Cow::Borrowed(block), Cow::Borrowed(&ping))); let bytes = Bytes::from(block_packet.to_bytes()); diff --git a/xelis_daemon/src/p2p/packet/handshake.rs b/xelis_daemon/src/p2p/packet/handshake.rs index 4d498c9e..26178465 100644 --- a/xelis_daemon/src/p2p/packet/handshake.rs +++ b/xelis_daemon/src/p2p/packet/handshake.rs @@ -1,7 +1,7 @@ use xelis_common::{ serializer::{Serializer, Writer, ReaderError, Reader}, globals::{ip_from_bytes, ip_to_bytes}, - crypto::hash::Hash, network::Network + crypto::hash::Hash, network::Network, block::Difficulty }; use crate::p2p::peer_list::SharedPeerList; @@ -26,16 +26,17 @@ pub struct Handshake { utc_time: u64, // current time in seconds topoheight: u64, // current topo height height: u64, // current block height + pruned_height: Option, // until when the node is pruned (if it is) top_hash: Hash, // current block top hash genesis_hash: Hash, // genesis hash - cumulative_difficulty: u64, + cumulative_difficulty: Difficulty, peers: Vec // all peers that we are already connected to } // Server reply with his own list of peers, but we remove all already known by requester for the response. 
impl Handshake { pub const MAX_LEN: usize = 16; - pub fn new(version: String, network: Network, node_tag: Option, network_id: [u8; 16], peer_id: u64, local_port: u16, utc_time: u64, topoheight: u64, height: u64, top_hash: Hash, genesis_hash: Hash, cumulative_difficulty: u64, peers: Vec) -> Self { + pub fn new(version: String, network: Network, node_tag: Option, network_id: [u8; 16], peer_id: u64, local_port: u16, utc_time: u64, topoheight: u64, height: u64, pruned_height: Option, top_hash: Hash, genesis_hash: Hash, cumulative_difficulty: u64, peers: Vec) -> Self { debug_assert!(version.len() > 0 && version.len() <= Handshake::MAX_LEN); // version cannot be greater than 16 chars if let Some(node_tag) = &node_tag { debug_assert!(node_tag.len() > 0 && node_tag.len() <= Handshake::MAX_LEN); // node tag cannot be greater than 16 chars @@ -53,6 +54,7 @@ impl Handshake { utc_time, topoheight, height, + pruned_height, top_hash, genesis_hash, cumulative_difficulty, @@ -65,7 +67,7 @@ impl Handshake { for peer in &self.peers { peers.insert(peer.clone()); } - (Peer::new(connection, self.get_peer_id(), self.node_tag, self.local_port, self.version, self.top_hash, self.topoheight, self.height, out, priority, self.cumulative_difficulty, peer_list, peers), self.peers) + (Peer::new(connection, self.get_peer_id(), self.node_tag, self.local_port, self.version, self.top_hash, self.topoheight, self.height, self.pruned_height, out, priority, self.cumulative_difficulty, peer_list, peers), self.peers) } pub fn get_version(&self) -> &String { @@ -127,6 +129,7 @@ impl Serializer for Handshake { writer.write_u64(&self.utc_time); // UTC Time writer.write_u64(&self.topoheight); // Topo height writer.write_u64(&self.height); // Block Height + writer.write_optional_u64(&self.pruned_height); // Pruned Height writer.write_hash(&self.top_hash); // Block Top Hash (32 bytes) writer.write_hash(&self.genesis_hash); // Genesis Hash writer.write_u64(&self.cumulative_difficulty); @@ -165,6 +168,7 @@ impl 
Serializer for Handshake { let utc_time = reader.read_u64()?; let topoheight = reader.read_u64()?; let height = reader.read_u64()?; + let pruned_height = reader.read_optional_u64()?; let top_hash = reader.read_hash()?; let genesis_hash = reader.read_hash()?; let cumulative_difficulty = reader.read_u64()?; @@ -178,7 +182,7 @@ impl Serializer for Handshake { let peer = ip_from_bytes(reader)?; peers.push(peer); } - Ok(Handshake::new(version, network, node_tag, network_id, peer_id, local_port, utc_time, topoheight, height, top_hash, genesis_hash, cumulative_difficulty, peers)) + Ok(Handshake::new(version, network, node_tag, network_id, peer_id, local_port, utc_time, topoheight, height, pruned_height, top_hash, genesis_hash, cumulative_difficulty, peers)) } } diff --git a/xelis_daemon/src/p2p/packet/ping.rs b/xelis_daemon/src/p2p/packet/ping.rs index 49a2a2d9..8e1d3695 100644 --- a/xelis_daemon/src/p2p/packet/ping.rs +++ b/xelis_daemon/src/p2p/packet/ping.rs @@ -10,50 +10,70 @@ use xelis_common::{ globals::{ ip_to_bytes, ip_from_bytes - } + }, block::Difficulty }; -use crate::p2p::peer::Peer; +use crate::p2p::{peer::Peer, error::P2pError}; use std::{ fmt::Display, borrow::Cow, net::SocketAddr, sync::Arc }; -use log::trace; +use log::{error, trace}; + #[derive(Clone, Debug)] pub struct Ping<'a> { top_hash: Cow<'a, Hash>, topoheight: u64, height: u64, - cumulative_difficulty: u64, + pruned_height: Option, + cumulative_difficulty: Difficulty, peer_list: Vec } impl<'a> Ping<'a> { - pub fn new(top_hash: Cow<'a, Hash>, topoheight: u64, height: u64, cumulative_difficulty: u64, peer_list: Vec) -> Self { + pub fn new(top_hash: Cow<'a, Hash>, topoheight: u64, height: u64, pruned_height: Option, cumulative_difficulty: u64, peer_list: Vec) -> Self { Self { top_hash, topoheight, height, + pruned_height, cumulative_difficulty, peer_list } } - pub async fn update_peer(self, peer: &Arc) { + pub async fn update_peer(self, peer: &Arc) -> Result<(), P2pError> { trace!("Updating {} with 
{}", peer, self); peer.set_block_top_hash(self.top_hash.into_owned()).await; peer.set_topoheight(self.topoheight); peer.set_height(self.height); + + if peer.is_pruned() != self.pruned_height.is_some() { + error!("Invalid protocol rules: impossible to change the pruned state, from {} in ping packet", peer); + return Err(P2pError::InvalidProtocolRules) + } + + if let Some(pruned_height) = self.pruned_height { + if pruned_height > self.height { + error!("Invalid protocol rules: pruned height {} is greater than height {} in ping packet", pruned_height, self.height); + return Err(P2pError::InvalidProtocolRules) + } + } + + peer.set_pruned_height(self.pruned_height); peer.set_cumulative_difficulty(self.cumulative_difficulty); let mut peers = peer.get_peers().lock().await; - for peer in self.peer_list { - if !peers.contains(&peer) { - peers.insert(peer); + for addr in self.peer_list { + if peers.contains(&addr) { + error!("Invalid protocol rules: received duplicated peer {} from {} in ping packet", peer, addr); + return Err(P2pError::InvalidProtocolRules) } + peers.insert(addr); } + Ok(()) } pub fn get_height(&self) -> u64 { @@ -74,6 +94,7 @@ impl Serializer for Ping<'_> { writer.write_hash(&self.top_hash); writer.write_u64(&self.topoheight); writer.write_u64(&self.height); + writer.write_optional_u64(&self.pruned_height); writer.write_u64(&self.cumulative_difficulty); writer.write_u8(self.peer_list.len() as u8); for peer in &self.peer_list { @@ -85,6 +106,7 @@ impl Serializer for Ping<'_> { let top_hash = Cow::Owned(reader.read_hash()?); let topoheight = reader.read_u64()?; let height = reader.read_u64()?; + let pruned_height = reader.read_optional_u64()?; let cumulative_difficulty = reader.read_u64()?; let peers_len = reader.read_u8()? 
as usize; if peers_len > P2P_PING_PEER_LIST_LIMIT { @@ -97,7 +119,7 @@ impl Serializer for Ping<'_> { peer_list.push(peer); } - Ok(Self { top_hash, topoheight, height, cumulative_difficulty, peer_list }) + Ok(Self { top_hash, topoheight, height, pruned_height, cumulative_difficulty, peer_list }) } } diff --git a/xelis_daemon/src/p2p/peer.rs b/xelis_daemon/src/p2p/peer.rs index 44a2adf1..6b9ead38 100644 --- a/xelis_daemon/src/p2p/peer.rs +++ b/xelis_daemon/src/p2p/peer.rs @@ -49,11 +49,13 @@ pub struct Peer { txs_cache: Mutex>, // All transactions propagated to/from this peer blocks_propagation: Mutex>, // last blocks propagated to/from this peer last_inventory: AtomicU64, // last time we got an inventory packet from this peer - requested_inventory: AtomicBool // if we requested this peer to send us an inventory notification + requested_inventory: AtomicBool, // if we requested this peer to send us an inventory notification + pruned_height: AtomicU64, + is_pruned: AtomicBool } impl Peer { - pub fn new(connection: Connection, id: u64, node_tag: Option, local_port: u16, version: String, top_hash: Hash, topoheight: u64, height: u64, out: bool, priority: bool, cumulative_difficulty: u64, peer_list: SharedPeerList, peers: HashSet) -> Self { + pub fn new(connection: Connection, id: u64, node_tag: Option, local_port: u16, version: String, top_hash: Hash, topoheight: u64, height: u64, pruned_height: Option, out: bool, priority: bool, cumulative_difficulty: u64, peer_list: SharedPeerList, peers: HashSet) -> Self { Self { connection, id, @@ -78,7 +80,9 @@ impl Peer { txs_cache: Mutex::new(LruCache::new(128)), blocks_propagation: Mutex::new(LruCache::new(STABLE_LIMIT as usize * TIPS_LIMIT)), last_inventory: AtomicU64::new(0), - requested_inventory: AtomicBool::new(false) + requested_inventory: AtomicBool::new(false), + pruned_height: AtomicU64::new(pruned_height.unwrap_or(0)), + is_pruned: AtomicBool::new(pruned_height.is_some()) } } @@ -126,6 +130,27 @@ impl Peer { 
self.height.store(height, Ordering::Release); } + pub fn is_pruned(&self) -> bool { + self.is_pruned.load(Ordering::Acquire) + } + + pub fn get_pruned_height(&self) -> Option { + if self.is_pruned() { + Some(self.pruned_height.load(Ordering::Acquire)) + } else { + None + } + } + + pub fn set_pruned_height(&self, pruned_height: Option) { + if let Some(pruned_height) = pruned_height { + self.is_pruned.store(true, Ordering::Release); + self.height.store(pruned_height, Ordering::Release); + } else { + self.is_pruned.store(false, Ordering::Release); + } + } + pub async fn set_block_top_hash(&self, hash: Hash) { *self.top_hash.lock().await = hash } @@ -313,12 +338,19 @@ impl Display for Peer { "Couldn't retrieve data".to_string() }; - write!(f, "Peer[connection: {}, id: {}, topoheight: {}, top hash: {}, height: {}, priority: {}, tag: {}, version: {}, fail count: {}, out: {}, peers: {}]", + let pruned_state = if let Some(value) = self.get_pruned_height() { + format!("Yes ({})", value) + } else { + "No".to_string() + }; + + write!(f, "Peer[connection: {}, id: {}, topoheight: {}, top hash: {}, height: {}, pruned: {:?}, priority: {}, tag: {}, version: {}, fail count: {}, out: {}, peers: {}]", self.get_connection(), self.get_id(), self.get_topoheight(), top_hash, self.get_height(), + pruned_state, self.is_priority(), self.get_node_tag().as_ref().unwrap_or(&"None".to_owned()), self.get_version(), diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 1025df37..5f32211c 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -227,11 +227,12 @@ async fn get_info(blockchain: Arc>, body: Value) -> Re let height = blockchain.get_height(); let topoheight = blockchain.get_topo_height(); let stableheight = blockchain.get_stable_height(); - let (top_hash, native_supply) = { + let (top_hash, native_supply, pruned_height) = { let storage = blockchain.get_storage().read().await; let top_hash = 
storage.get_hash_at_topo_height(topoheight).await.context("Error while retrieving hash at topo height")?; let supply = storage.get_supply_for_block_hash(&top_hash).context("Error while supply for hash")?; - (top_hash, supply) + let pruned_height = storage.get_pruned_height().context("Error while retrieving pruned height")?; + (top_hash, supply, pruned_height) }; let difficulty = blockchain.get_difficulty(); let block_time_target = BLOCK_TIME_MILLIS; @@ -243,6 +244,7 @@ async fn get_info(blockchain: Arc>, body: Value) -> Re height, topoheight, stableheight, + pruned_height, top_hash, native_supply, difficulty, From 33f76e236af1054a3a88531693fbf571f4e0c577 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 12 May 2023 00:54:24 +0200 Subject: [PATCH 52/74] daemon: pruned display fix --- xelis_daemon/src/p2p/peer.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xelis_daemon/src/p2p/peer.rs b/xelis_daemon/src/p2p/peer.rs index 6b9ead38..f8e9b294 100644 --- a/xelis_daemon/src/p2p/peer.rs +++ b/xelis_daemon/src/p2p/peer.rs @@ -344,7 +344,7 @@ impl Display for Peer { "No".to_string() }; - write!(f, "Peer[connection: {}, id: {}, topoheight: {}, top hash: {}, height: {}, pruned: {:?}, priority: {}, tag: {}, version: {}, fail count: {}, out: {}, peers: {}]", + write!(f, "Peer[connection: {}, id: {}, topoheight: {}, top hash: {}, height: {}, pruned: {}, priority: {}, tag: {}, version: {}, fail count: {}, out: {}, peers: {}]", self.get_connection(), self.get_id(), self.get_topoheight(), From 775f3af690f9138fda2b9fb184a4c3bf619f5aa6 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 12 May 2023 15:30:55 +0200 Subject: [PATCH 53/74] daemon: pruned topoheight --- API.md | 2 +- xelis_common/src/api/daemon.rs | 2 +- xelis_daemon/src/core/blockchain.rs | 59 +++++++++++++++++++- xelis_daemon/src/core/error.rs | 4 +- xelis_daemon/src/core/storage/mod.rs | 8 ++- xelis_daemon/src/core/storage/sled.rs | 70 ++++++++++++++++++++---- xelis_daemon/src/p2p/mod.rs | 27 ++++----- 
xelis_daemon/src/p2p/packet/handshake.rs | 14 ++--- xelis_daemon/src/p2p/packet/ping.rs | 22 ++++---- xelis_daemon/src/p2p/peer.rs | 20 +++---- xelis_daemon/src/rpc/rpc.rs | 8 +-- 11 files changed, 173 insertions(+), 63 deletions(-) diff --git a/API.md b/API.md index 7e37be81..8b066639 100644 --- a/API.md +++ b/API.md @@ -56,7 +56,7 @@ No parameters "block_time_target": 15000, "difficulty": 310532, "height": 9, - "pruned_height": null, + "pruned_topoheight": null, "mempool_size": 0, "native_supply": 8773780, "stableheight": 1, diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index 35a2dd50..4a7130fd 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -107,7 +107,7 @@ pub struct GetInfoResult { pub height: u64, pub topoheight: u64, pub stableheight: u64, - pub pruned_height: Option, + pub pruned_topoheight: Option, pub top_hash: Hash, pub native_supply: u64, pub difficulty: Difficulty, diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 782ea12f..0231a6f0 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -113,7 +113,7 @@ impl Blockchain { network, tip_base_cache: Mutex::new(LruCache::new(1024)), tip_work_score_cache: Mutex::new(LruCache::new(1024)), - full_order_cache: Mutex::new(LruCache::new(1024)) + full_order_cache: Mutex::new(LruCache::new(1024)), }; // include genesis block @@ -272,6 +272,61 @@ impl Blockchain { Ok(()) } + // delete all blocks / versioned balances / txs until height in param + // for this, we have to locate the nearest Sync block for DAG under the limit height + // and then delete all blocks before it + pub async fn prune_until_height(&self, height: u64) -> Result<(), BlockchainError> { + let current_height = self.get_height(); + if height >= current_height || current_height - height < STABLE_LIMIT * 2 { + return Err(BlockchainError::PruneHeightTooHigh) + } + + let mut storage = 
self.storage.write().await; + let last_pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); + // find both stable point through sync block + let located_sync_topoheight = self.locate_nearest_sync_block_for_height(&storage, height, current_height).await?; + // delete all blocks until the new topoheight + let assets = storage.get_assets().await?; + for topoheight in last_pruned_topoheight..located_sync_topoheight { + // delete block + let block_header = storage.delete_block_at_topoheight(topoheight).await?; + + // delete balances for all assets + for asset in &assets { + storage.delete_versioned_balances_for_asset_at_topoheight(asset, topoheight).await?; + } + + // delete transactions for this block + for tx_hash in block_header.get_txs_hashes() { + if storage.has_transaction(tx_hash).await? { + storage.delete_tx(tx_hash).await?; + } + } + } + + storage.set_pruned_topoheight(located_sync_topoheight)?; + + Ok(()) + } + + // determine the topoheight of the nearest sync block until block height + pub async fn locate_nearest_sync_block_for_height(&self, storage: &S, mut height: u64, current_height: u64) -> Result { + while height > 0 { + let blocks = storage.get_blocks_at_height(height).await?; + for hash in blocks { + if self.is_sync_block_at_height(storage, &hash, current_height).await? 
{ + let topoheight = storage.get_topo_height_for_hash(&hash).await?; + return Ok(topoheight) + } + } + + height -= 1; + } + + // genesis block is always a sync block + Ok(0) + } + // returns the highest (unstable) height on the chain pub fn get_height(&self) -> u64 { self.height.load(Ordering::Acquire) @@ -1358,7 +1413,7 @@ impl Blockchain { if broadcast { if let Some(p2p) = self.p2p.lock().await.as_ref() { debug!("broadcast block to peers"); - p2p.broadcast_block(&block, cumulative_difficulty, current_topoheight, current_height, storage.get_pruned_height()?, &block_hash).await; + p2p.broadcast_block(&block, cumulative_difficulty, current_topoheight, current_height, storage.get_pruned_topoheight()?, &block_hash).await; } } diff --git a/xelis_daemon/src/core/error.rs b/xelis_daemon/src/core/error.rs index 326d8779..ed886ed6 100644 --- a/xelis_daemon/src/core/error.rs +++ b/xelis_daemon/src/core/error.rs @@ -158,7 +158,9 @@ pub enum BlockchainError { #[error("A non-zero value is required for burn")] NoValueForBurn, #[error("TX {} is already in blockchain", _0)] - TxAlreadyInBlockchain(Hash) + TxAlreadyInBlockchain(Hash), + #[error("Cannot prune, not enough blocks")] + PruneHeightTooHigh } impl From> for BlockchainError { diff --git a/xelis_daemon/src/core/storage/mod.rs b/xelis_daemon/src/core/storage/mod.rs index c0c9967a..1cd7e10f 100644 --- a/xelis_daemon/src/core/storage/mod.rs +++ b/xelis_daemon/src/core/storage/mod.rs @@ -26,8 +26,12 @@ pub trait DifficultyProvider { #[async_trait] pub trait Storage: DifficultyProvider + Sync + Send + 'static { - fn get_pruned_height(&self) -> Result, BlockchainError>; - fn set_pruned_height(&mut self, pruned_height: u64) -> Result<(), BlockchainError>; + fn get_pruned_topoheight(&self) -> Result, BlockchainError>; + fn set_pruned_topoheight(&mut self, pruned_topoheight: u64) -> Result<(), BlockchainError>; + + async fn delete_block_at_topoheight(&mut self, topoheight: u64) -> Result, BlockchainError>; + async fn delete_tx(&mut 
self, hash: &Hash) -> Result, BlockchainError>; + async fn delete_versioned_balances_for_asset_at_topoheight(&mut self, asset: &Hash, topoheight: u64) -> Result<(), BlockchainError>; fn get_block_executer_for_tx(&self, tx: &Hash) -> Result; fn set_tx_executed_in_block(&mut self, tx: &Hash, block: &Hash) -> Result<(), BlockchainError>; diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index e2f48bbf..42eb99d8 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -23,7 +23,7 @@ const TIPS: &[u8; 4] = b"TIPS"; const TOP_TOPO_HEIGHT: &[u8; 4] = b"TOPO"; const TOP_HEIGHT: &[u8; 4] = b"TOPH"; const NETWORK: &[u8] = b"NET"; -const PRUNED_HEIGHT: &[u8; 4] = b"PRUN"; +const PRUNED_TOPOHEIGHT: &[u8; 4] = b"PRUN"; pub struct SledStorage { transactions: Tree, // all txs stored on disk @@ -52,7 +52,7 @@ pub struct SledStorage { nonces_cache: Option>>, balances_trees_cache: Option>>, // versioned balances tree keep in cache to prevent hash recompute tips_cache: Tips, - pruned_height: Option + pruned_topoheight: Option } macro_rules! init_cache { @@ -94,7 +94,7 @@ impl SledStorage { nonces_cache: init_cache!(cache_size), balances_trees_cache: init_cache!(cache_size), tips_cache: HashSet::new(), - pruned_height: None + pruned_topoheight: None }; if storage.has_network()? 
{ @@ -111,9 +111,9 @@ impl SledStorage { storage.tips_cache = tips; } - if let Ok(pruned_height) = storage.load_from_disk::(&storage.extra, PRUNED_HEIGHT) { - debug!("Found pruned height: {}", pruned_height); - storage.pruned_height = Some(pruned_height); + if let Ok(pruned_topoheight) = storage.load_from_disk::(&storage.extra, PRUNED_TOPOHEIGHT) { + debug!("Found pruned topoheight: {}", pruned_topoheight); + storage.pruned_topoheight = Some(pruned_topoheight); } Ok(storage) } @@ -305,13 +305,59 @@ impl DifficultyProvider for SledStorage { #[async_trait] impl Storage for SledStorage { - fn get_pruned_height(&self) -> Result, BlockchainError> { - Ok(self.pruned_height) + fn get_pruned_topoheight(&self) -> Result, BlockchainError> { + Ok(self.pruned_topoheight) } - fn set_pruned_height(&mut self, pruned_height: u64) -> Result<(), BlockchainError> { - self.pruned_height = Some(pruned_height); - self.extra.insert(PRUNED_HEIGHT, &pruned_height.to_be_bytes())?; + fn set_pruned_topoheight(&mut self, pruned_topoheight: u64) -> Result<(), BlockchainError> { + self.pruned_topoheight = Some(pruned_topoheight); + self.extra.insert(PRUNED_TOPOHEIGHT, &pruned_topoheight.to_be_bytes())?; + Ok(()) + } + + async fn delete_block_at_topoheight(&mut self, topoheight: u64) -> Result, BlockchainError> { + // delete topoheight<->hash pointers + let hash = self.delete_data_no_arc(&self.hash_at_topo, &self.hash_at_topo_cache, &topoheight).await?; + self.delete_data_no_arc::(&self.topo_by_hash, &self.topo_by_hash_cache, &hash).await?; + + let topoheight_bytes = topoheight.to_be_bytes(); + // delete block reward + self.rewards.remove(topoheight_bytes)?; + // delete supply + self.supply.remove(topoheight_bytes)?; + // delete difficulty + self.difficulty.remove(topoheight_bytes)?; + // delete cumulative difficulty + self.cumulative_difficulty.remove(topoheight_bytes)?; + + // delete block header + let block_header = self.delete_data(&self.blocks, &self.blocks_cache, &hash).await?; + // remove
the block hash from the set, and delete the set if empty + let mut blocks = self.get_blocks_at_height(block_header.get_height()).await?; + blocks.remove(&hash); + let height_bytes = block_header.get_height().to_be_bytes(); + if blocks.is_empty() { + self.blocks_at_height.remove(height_bytes)?; + } else { + self.blocks_at_height.insert(height_bytes, blocks.to_bytes())?; + } + + if let Some(cache) = &self.past_blocks_cache { + let mut cache = cache.lock().await; + cache.pop(&hash); + } + + Ok(block_header) + } + + async fn delete_tx(&mut self, hash: &Hash) -> Result, BlockchainError> { + self.delete_data_no_arc::>(&self.tx_blocks, &None, hash).await?; + self.delete_data(&self.transactions, &self.transactions_cache, hash).await + } + + async fn delete_versioned_balances_for_asset_at_topoheight(&mut self, asset: &Hash, topoheight: u64) -> Result<(), BlockchainError> { + let tree = self.get_versioned_balance_tree(asset, topoheight).await?; + self.db.drop_tree(tree.name())?; Ok(()) } @@ -326,6 +372,8 @@ impl Storage for SledStorage { fn remove_tx_executed(&mut self, tx: &Hash) -> Result<(), BlockchainError> { self.txs_executed.remove(tx.as_bytes())?; + self.remove_tx_executed(tx)?; + Ok(()) } diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index cc730e93..cd998f19 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -342,9 +342,9 @@ impl P2pServer { let storage = self.blockchain.get_storage().read().await; let (block, top_hash) = storage.get_top_block_header().await?; let topoheight = self.blockchain.get_topo_height(); - let pruned_height = storage.get_pruned_height()?; + let pruned_topoheight = storage.get_pruned_topoheight()?; let cumulative_difficulty = storage.get_cumulative_difficulty_for_block_hash(&top_hash).await.unwrap_or(0); - Ok(Handshake::new(VERSION.to_owned(), *self.blockchain.get_network(), self.get_tag().clone(), NETWORK_ID, self.get_peer_id(), self.bind_address.port(), get_current_time(), topoheight, 
block.get_height(), pruned_height, top_hash, GENESIS_BLOCK_HASH.clone(), cumulative_difficulty, peers)) + Ok(Handshake::new(VERSION.to_owned(), *self.blockchain.get_network(), self.get_tag().clone(), NETWORK_ID, self.get_peer_id(), self.bind_address.port(), get_current_time(), topoheight, block.get_height(), pruned_topoheight, top_hash, GENESIS_BLOCK_HASH.clone(), cumulative_difficulty, peers)) } // this function handle all new connections @@ -428,12 +428,12 @@ impl P2pServer { // build a ping packet with the current state of the blockchain // if a peer is given, we will check and update the peers list async fn build_generic_ping_packet(&self) -> Ping<'_> { - let (cumulative_difficulty, block_top_hash, pruned_height) = { + let (cumulative_difficulty, block_top_hash, pruned_topoheight) = { let storage = self.blockchain.get_storage().read().await; - let pruned_height = match storage.get_pruned_height() { - Ok(pruned_height) => pruned_height, + let pruned_topoheight = match storage.get_pruned_topoheight() { + Ok(pruned_topoheight) => pruned_topoheight, Err(e) => { - error!("Couldn't get the pruned height from storage for generic ping packet: {}", e); + error!("Couldn't get the pruned topoheight from storage for generic ping packet: {}", e); None } }; @@ -441,15 +441,15 @@ impl P2pServer { match storage.get_top_block_hash().await { Err(e) => { error!("Couldn't get the top block hash from storage for generic ping packet: {}", e); - (0, Hash::zero(), pruned_height) + (0, Hash::zero(), pruned_topoheight) }, - Ok(hash) => (storage.get_cumulative_difficulty_for_block_hash(&hash).await.unwrap_or(0), hash, pruned_height) + Ok(hash) => (storage.get_cumulative_difficulty_for_block_hash(&hash).await.unwrap_or(0), hash, pruned_topoheight) } }; let highest_topo_height = self.blockchain.get_topo_height(); let highest_height = self.blockchain.get_height(); let new_peers = Vec::new(); - Ping::new(Cow::Owned(block_top_hash), highest_topo_height, highest_height, pruned_height, 
cumulative_difficulty, new_peers) + Ping::new(Cow::Owned(block_top_hash), highest_topo_height, highest_height, pruned_topoheight, cumulative_difficulty, new_peers) } // select a random peer which is greater than us to sync chain @@ -462,7 +462,7 @@ impl P2pServer { // search for peers which are greater than us // and that are pruned but before our height so we can sync correctly let peers: Vec<&Arc> = peer_list.get_peers().values().filter(|p| - p.get_pruned_height().unwrap_or(0) < our_height + p.get_pruned_topoheight().unwrap_or(0) < our_height && (p.get_height() > our_height || p.get_topoheight() > our_topoheight) ).collect(); let count = peers.len(); @@ -1320,11 +1320,11 @@ impl P2pServer { } // broadcast block to all peers that can accept directly this new block - pub async fn broadcast_block(&self, block: &BlockHeader, cumulative_difficulty: u64, highest_topoheight: u64, highest_height: u64, pruned_height: Option, hash: &Hash) { + pub async fn broadcast_block(&self, block: &BlockHeader, cumulative_difficulty: u64, highest_topoheight: u64, highest_height: u64, pruned_topoheight: Option, hash: &Hash) { trace!("Broadcast block: {}", hash); // we build the ping packet ourself this time (we have enough data for it) // because this function can be call from Blockchain, which would lead to a deadlock - let ping = Ping::new(Cow::Borrowed(hash), highest_topoheight, highest_height, pruned_height, cumulative_difficulty, Vec::new()); + let ping = Ping::new(Cow::Borrowed(hash), highest_topoheight, highest_height, pruned_topoheight, cumulative_difficulty, Vec::new()); let block_packet = Packet::BlockPropagation(PacketWrapper::new(Cow::Borrowed(block), Cow::Borrowed(&ping))); let bytes = Bytes::from(block_packet.to_bytes()); @@ -1369,10 +1369,11 @@ impl P2pServer { { let storage = self.blockchain.get_storage().read().await; let topoheight = self.blockchain.get_topo_height(); + let pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); let mut i = 0; // we add 1 
for the genesis block added below - while i < topoheight && request.size() + 1 < CHAIN_SYNC_REQUEST_MAX_BLOCKS { + while i < topoheight && topoheight - i >= pruned_topoheight && request.size() + 1 < CHAIN_SYNC_REQUEST_MAX_BLOCKS { trace!("Requesting hash at topo {}", topoheight - i); let hash = storage.get_hash_at_topo_height(topoheight - i).await?; request.add_block_id(hash, topoheight - i); diff --git a/xelis_daemon/src/p2p/packet/handshake.rs b/xelis_daemon/src/p2p/packet/handshake.rs index 26178465..ac06f98a 100644 --- a/xelis_daemon/src/p2p/packet/handshake.rs +++ b/xelis_daemon/src/p2p/packet/handshake.rs @@ -26,7 +26,7 @@ pub struct Handshake { utc_time: u64, // current time in seconds topoheight: u64, // current topo height height: u64, // current block height - pruned_height: Option, // until when the node is pruned (if it is) + pruned_topoheight: Option, // until when the node is pruned (if it is) top_hash: Hash, // current block top hash genesis_hash: Hash, // genesis hash cumulative_difficulty: Difficulty, @@ -36,7 +36,7 @@ pub struct Handshake { impl Handshake { pub const MAX_LEN: usize = 16; - pub fn new(version: String, network: Network, node_tag: Option, network_id: [u8; 16], peer_id: u64, local_port: u16, utc_time: u64, topoheight: u64, height: u64, pruned_height: Option, top_hash: Hash, genesis_hash: Hash, cumulative_difficulty: u64, peers: Vec) -> Self { + pub fn new(version: String, network: Network, node_tag: Option, network_id: [u8; 16], peer_id: u64, local_port: u16, utc_time: u64, topoheight: u64, height: u64, pruned_topoheight: Option, top_hash: Hash, genesis_hash: Hash, cumulative_difficulty: u64, peers: Vec) -> Self { debug_assert!(version.len() > 0 && version.len() <= Handshake::MAX_LEN); // version cannot be greater than 16 chars if let Some(node_tag) = &node_tag { debug_assert!(node_tag.len() > 0 && node_tag.len() <= Handshake::MAX_LEN); // node tag cannot be greater than 16 chars @@ -54,7 +54,7 @@ impl Handshake { utc_time, 
topoheight, height, - pruned_height, + pruned_topoheight, top_hash, genesis_hash, cumulative_difficulty, @@ -67,7 +67,7 @@ impl Handshake { for peer in &self.peers { peers.insert(peer.clone()); } - (Peer::new(connection, self.get_peer_id(), self.node_tag, self.local_port, self.version, self.top_hash, self.topoheight, self.height, self.pruned_height, out, priority, self.cumulative_difficulty, peer_list, peers), self.peers) + (Peer::new(connection, self.get_peer_id(), self.node_tag, self.local_port, self.version, self.top_hash, self.topoheight, self.height, self.pruned_topoheight, out, priority, self.cumulative_difficulty, peer_list, peers), self.peers) } pub fn get_version(&self) -> &String { @@ -129,7 +129,7 @@ impl Serializer for Handshake { writer.write_u64(&self.utc_time); // UTC Time writer.write_u64(&self.topoheight); // Topo height writer.write_u64(&self.height); // Block Height - writer.write_optional_u64(&self.pruned_height); // Pruned Height + writer.write_optional_u64(&self.pruned_topoheight); // Pruned Topo Height writer.write_hash(&self.top_hash); // Block Top Hash (32 bytes) writer.write_hash(&self.genesis_hash); // Genesis Hash writer.write_u64(&self.cumulative_difficulty); @@ -168,7 +168,7 @@ impl Serializer for Handshake { let utc_time = reader.read_u64()?; let topoheight = reader.read_u64()?; let height = reader.read_u64()?; - let pruned_height = reader.read_optional_u64()?; + let pruned_topoheight = reader.read_optional_u64()?; let top_hash = reader.read_hash()?; let genesis_hash = reader.read_hash()?; let cumulative_difficulty = reader.read_u64()?; @@ -182,7 +182,7 @@ impl Serializer for Handshake { let peer = ip_from_bytes(reader)?; peers.push(peer); } - Ok(Handshake::new(version, network, node_tag, network_id, peer_id, local_port, utc_time, topoheight, height, pruned_height, top_hash, genesis_hash, cumulative_difficulty, peers)) + Ok(Handshake::new(version, network, node_tag, network_id, peer_id, local_port, utc_time, topoheight, height, 
pruned_topoheight, top_hash, genesis_hash, cumulative_difficulty, peers)) } } diff --git a/xelis_daemon/src/p2p/packet/ping.rs b/xelis_daemon/src/p2p/packet/ping.rs index 8e1d3695..7f818157 100644 --- a/xelis_daemon/src/p2p/packet/ping.rs +++ b/xelis_daemon/src/p2p/packet/ping.rs @@ -27,18 +27,18 @@ pub struct Ping<'a> { top_hash: Cow<'a, Hash>, topoheight: u64, height: u64, - pruned_height: Option, + pruned_topoheight: Option, cumulative_difficulty: Difficulty, peer_list: Vec } impl<'a> Ping<'a> { - pub fn new(top_hash: Cow<'a, Hash>, topoheight: u64, height: u64, pruned_height: Option, cumulative_difficulty: u64, peer_list: Vec) -> Self { + pub fn new(top_hash: Cow<'a, Hash>, topoheight: u64, height: u64, pruned_topoheight: Option, cumulative_difficulty: u64, peer_list: Vec) -> Self { Self { top_hash, topoheight, height, - pruned_height, + pruned_topoheight, cumulative_difficulty, peer_list } @@ -50,19 +50,19 @@ impl<'a> Ping<'a> { peer.set_topoheight(self.topoheight); peer.set_height(self.height); - if peer.is_pruned() != self.pruned_height.is_some() { + if peer.is_pruned() != self.pruned_topoheight.is_some() { error!("Invalid protocol rules: impossible to change the pruned state, from {} in ping packet", peer); return Err(P2pError::InvalidProtocolRules) } - if let Some(pruned_height) = self.pruned_height { - if pruned_height > self.height { - error!("Invalid protocol rules: pruned height {} is greater than height {} in ping packet", pruned_height, self.height); + if let Some(pruned_topoheight) = self.pruned_topoheight { + if pruned_topoheight > self.height { + error!("Invalid protocol rules: pruned topoheight {} is greater than height {} in ping packet", pruned_topoheight, self.height); return Err(P2pError::InvalidProtocolRules) } } - peer.set_pruned_height(self.pruned_height); + peer.set_pruned_topoheight(self.pruned_topoheight); peer.set_cumulative_difficulty(self.cumulative_difficulty); let mut peers = peer.get_peers().lock().await; @@ -94,7 +94,7 @@ impl 
Serializer for Ping<'_> { writer.write_hash(&self.top_hash); writer.write_u64(&self.topoheight); writer.write_u64(&self.height); - writer.write_optional_u64(&self.pruned_height); + writer.write_optional_u64(&self.pruned_topoheight); writer.write_u64(&self.cumulative_difficulty); writer.write_u8(self.peer_list.len() as u8); for peer in &self.peer_list { @@ -106,7 +106,7 @@ impl Serializer for Ping<'_> { let top_hash = Cow::Owned(reader.read_hash()?); let topoheight = reader.read_u64()?; let height = reader.read_u64()?; - let pruned_height = reader.read_optional_u64()?; + let pruned_topoheight = reader.read_optional_u64()?; let cumulative_difficulty = reader.read_u64()?; let peers_len = reader.read_u8()? as usize; if peers_len > P2P_PING_PEER_LIST_LIMIT { @@ -119,7 +119,7 @@ impl Serializer for Ping<'_> { peer_list.push(peer); } - Ok(Self { top_hash, topoheight, height, pruned_height, cumulative_difficulty, peer_list }) + Ok(Self { top_hash, topoheight, height, pruned_topoheight, cumulative_difficulty, peer_list }) } } diff --git a/xelis_daemon/src/p2p/peer.rs b/xelis_daemon/src/p2p/peer.rs index f8e9b294..0893d831 100644 --- a/xelis_daemon/src/p2p/peer.rs +++ b/xelis_daemon/src/p2p/peer.rs @@ -50,12 +50,12 @@ pub struct Peer { blocks_propagation: Mutex>, // last blocks propagated to/from this peer last_inventory: AtomicU64, // last time we got an inventory packet from this peer requested_inventory: AtomicBool, // if we requested this peer to send us an inventory notification - pruned_height: AtomicU64, + pruned_topoheight: AtomicU64, is_pruned: AtomicBool } impl Peer { - pub fn new(connection: Connection, id: u64, node_tag: Option, local_port: u16, version: String, top_hash: Hash, topoheight: u64, height: u64, pruned_height: Option, out: bool, priority: bool, cumulative_difficulty: u64, peer_list: SharedPeerList, peers: HashSet) -> Self { + pub fn new(connection: Connection, id: u64, node_tag: Option, local_port: u16, version: String, top_hash: Hash, topoheight: 
u64, height: u64, pruned_topoheight: Option, out: bool, priority: bool, cumulative_difficulty: u64, peer_list: SharedPeerList, peers: HashSet) -> Self { Self { connection, id, @@ -81,8 +81,8 @@ impl Peer { blocks_propagation: Mutex::new(LruCache::new(STABLE_LIMIT as usize * TIPS_LIMIT)), last_inventory: AtomicU64::new(0), requested_inventory: AtomicBool::new(false), - pruned_height: AtomicU64::new(pruned_height.unwrap_or(0)), - is_pruned: AtomicBool::new(pruned_height.is_some()) + pruned_topoheight: AtomicU64::new(pruned_topoheight.unwrap_or(0)), + is_pruned: AtomicBool::new(pruned_topoheight.is_some()) } } @@ -134,18 +134,18 @@ impl Peer { self.is_pruned.load(Ordering::Acquire) } - pub fn get_pruned_height(&self) -> Option { + pub fn get_pruned_topoheight(&self) -> Option { if self.is_pruned() { - Some(self.pruned_height.load(Ordering::Acquire)) + Some(self.pruned_topoheight.load(Ordering::Acquire)) } else { None } } - pub fn set_pruned_height(&self, pruned_height: Option) { - if let Some(pruned_height) = pruned_height { + pub fn set_pruned_topoheight(&self, pruned_topoheight: Option) { + if let Some(pruned_topoheight) = pruned_topoheight { self.is_pruned.store(true, Ordering::Release); - self.height.store(pruned_height, Ordering::Release); + self.height.store(pruned_topoheight, Ordering::Release); } else { self.is_pruned.store(false, Ordering::Release); } @@ -338,7 +338,7 @@ impl Display for Peer { "Couldn't retrieve data".to_string() }; - let pruned_state = if let Some(value) = self.get_pruned_height() { + let pruned_state = if let Some(value) = self.get_pruned_topoheight() { format!("Yes ({})", value) } else { "No".to_string() diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 5f32211c..062fe059 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -227,12 +227,12 @@ async fn get_info(blockchain: Arc>, body: Value) -> Re let height = blockchain.get_height(); let topoheight = blockchain.get_topo_height(); let 
stableheight = blockchain.get_stable_height(); - let (top_hash, native_supply, pruned_height) = { + let (top_hash, native_supply, pruned_topoheight) = { let storage = blockchain.get_storage().read().await; let top_hash = storage.get_hash_at_topo_height(topoheight).await.context("Error while retrieving hash at topo height")?; let supply = storage.get_supply_for_block_hash(&top_hash).context("Error while supply for hash")?; - let pruned_height = storage.get_pruned_height().context("Error while retrieving pruned height")?; - (top_hash, supply, pruned_height) + let pruned_topoheight = storage.get_pruned_topoheight().context("Error while retrieving pruned topoheight")?; + (top_hash, supply, pruned_topoheight) }; let difficulty = blockchain.get_difficulty(); let block_time_target = BLOCK_TIME_MILLIS; @@ -244,7 +244,7 @@ async fn get_info(blockchain: Arc>, body: Value) -> Re height, topoheight, stableheight, - pruned_height, + pruned_topoheight, top_hash, native_supply, difficulty, From 25af78cec981f53981e92938a36f302ecf55406d Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 12 May 2023 15:41:46 +0200 Subject: [PATCH 54/74] daemon: don't share peers on handshake --- xelis_daemon/src/p2p/mod.rs | 40 +++--------------------- xelis_daemon/src/p2p/packet/handshake.rs | 37 +++++----------------- 2 files changed, 12 insertions(+), 65 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index cd998f19..0442ba3a 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -292,7 +292,7 @@ impl P2pServer { // Verify handshake send by a new connection // based on data size, network ID, peers address validity // block height and block top hash of this peer (to know if we are on the same chain) - async fn verify_handshake(&self, mut connection: Connection, handshake: Handshake, out: bool, priority: bool) -> Result<(Peer, Vec), P2pError> { + async fn verify_handshake(&self, mut connection: Connection, handshake: Handshake, out: bool, 
priority: bool) -> Result { if handshake.get_network() != self.blockchain.get_network() { trace!("{} has an invalid network: {}", connection, handshake.get_network()); return Err(P2pError::InvalidNetwork) @@ -316,35 +316,17 @@ impl P2pServer { } connection.set_state(State::Success); - let (peer, peers) = handshake.create_peer(connection, out, priority, Arc::clone(&self.peer_list)); - Ok((peer, peers)) + let peer = handshake.create_peer(connection, out, priority, Arc::clone(&self.peer_list)); + Ok(peer) } async fn build_handshake(&self) -> Result { - let mut peers: Vec = Vec::new(); - { - let peer_list = self.peer_list.read().await; - let mut iter = peer_list.get_peers().iter(); - while peers.len() < Handshake::MAX_LEN { - match iter.next() { - Some((_, v)) => { - let mut addr: SocketAddr = v.get_connection().get_address().clone(); - if !v.is_out() { - addr.set_port(v.get_local_port()); - } - peers.push(addr); - }, - None => break - }; - } - } - let storage = self.blockchain.get_storage().read().await; let (block, top_hash) = storage.get_top_block_header().await?; let topoheight = self.blockchain.get_topo_height(); let pruned_topoheight = storage.get_pruned_topoheight()?; let cumulative_difficulty = storage.get_cumulative_difficulty_for_block_hash(&top_hash).await.unwrap_or(0); - Ok(Handshake::new(VERSION.to_owned(), *self.blockchain.get_network(), self.get_tag().clone(), NETWORK_ID, self.get_peer_id(), self.bind_address.port(), get_current_time(), topoheight, block.get_height(), pruned_topoheight, top_hash, GENESIS_BLOCK_HASH.clone(), cumulative_difficulty, peers)) + Ok(Handshake::new(VERSION.to_owned(), *self.blockchain.get_network(), self.get_tag().clone(), NETWORK_ID, self.get_peer_id(), self.bind_address.port(), get_current_time(), topoheight, block.get_height(), pruned_topoheight, top_hash, GENESIS_BLOCK_HASH.clone(), cumulative_difficulty)) } // this function handle all new connections @@ -358,7 +340,7 @@ impl P2pServer { }; trace!("received handshake 
packet!"); connection.set_state(State::Handshake); - let (peer, peers) = self.verify_handshake(connection, handshake, out, priority).await?; + let peer = self.verify_handshake(connection, handshake, out, priority).await?; trace!("Handshake has been verified"); // if it's a outgoing connection, don't send the handshake back // because we have already sent it @@ -386,18 +368,6 @@ impl P2pServer { peer_list.add_peer(peer_id, peer) }; - // try to extend our peer list - for peer_addr in peers { // should we limit to X peers only ? - if !self.accept_new_connections().await { - break - } - - if !self.is_connected_to_addr(&peer_addr).await? { - debug!("Trying to extend peer list with {} from {}", peer_addr, peer); - self.try_to_connect_to_peer(peer_addr, false); - } - } - self.handle_connection(peer.clone()).await } diff --git a/xelis_daemon/src/p2p/packet/handshake.rs b/xelis_daemon/src/p2p/packet/handshake.rs index ac06f98a..c8d556ce 100644 --- a/xelis_daemon/src/p2p/packet/handshake.rs +++ b/xelis_daemon/src/p2p/packet/handshake.rs @@ -1,6 +1,5 @@ use xelis_common::{ serializer::{Serializer, Writer, ReaderError, Reader}, - globals::{ip_from_bytes, ip_to_bytes}, crypto::hash::Hash, network::Network, block::Difficulty }; @@ -9,7 +8,6 @@ use crate::p2p::connection::Connection; use crate::p2p::peer::Peer; use std::collections::HashSet; use std::fmt::{Display, Error, Formatter}; -use std::net::SocketAddr; // this Handshake is the first data sent when connecting to the server // If handshake is valid, server reply with his own handshake @@ -30,20 +28,17 @@ pub struct Handshake { top_hash: Hash, // current block top hash genesis_hash: Hash, // genesis hash cumulative_difficulty: Difficulty, - peers: Vec // all peers that we are already connected to } // Server reply with his own list of peers, but we remove all already known by requester for the response. 
impl Handshake { pub const MAX_LEN: usize = 16; - pub fn new(version: String, network: Network, node_tag: Option, network_id: [u8; 16], peer_id: u64, local_port: u16, utc_time: u64, topoheight: u64, height: u64, pruned_topoheight: Option, top_hash: Hash, genesis_hash: Hash, cumulative_difficulty: u64, peers: Vec) -> Self { + pub fn new(version: String, network: Network, node_tag: Option, network_id: [u8; 16], peer_id: u64, local_port: u16, utc_time: u64, topoheight: u64, height: u64, pruned_topoheight: Option, top_hash: Hash, genesis_hash: Hash, cumulative_difficulty: u64) -> Self { debug_assert!(version.len() > 0 && version.len() <= Handshake::MAX_LEN); // version cannot be greater than 16 chars if let Some(node_tag) = &node_tag { debug_assert!(node_tag.len() > 0 && node_tag.len() <= Handshake::MAX_LEN); // node tag cannot be greater than 16 chars } - debug_assert!(peers.len() <= Handshake::MAX_LEN); // maximum 16 peers allowed - Self { version, network, @@ -57,17 +52,13 @@ impl Handshake { pruned_topoheight, top_hash, genesis_hash, - cumulative_difficulty, - peers + cumulative_difficulty } } - pub fn create_peer(self, connection: Connection, out: bool, priority: bool, peer_list: SharedPeerList) -> (Peer, Vec) { - let mut peers = HashSet::new(); - for peer in &self.peers { - peers.insert(peer.clone()); - } - (Peer::new(connection, self.get_peer_id(), self.node_tag, self.local_port, self.version, self.top_hash, self.topoheight, self.height, self.pruned_topoheight, out, priority, self.cumulative_difficulty, peer_list, peers), self.peers) + pub fn create_peer(self, connection: Connection, out: bool, priority: bool, peer_list: SharedPeerList) -> Peer { + let peers = HashSet::new(); + Peer::new(connection, self.get_peer_id(), self.node_tag, self.local_port, self.version, self.top_hash, self.topoheight, self.height, self.pruned_topoheight, out, priority, self.cumulative_difficulty, peer_list, peers) } pub fn get_version(&self) -> &String { @@ -105,10 +96,6 @@ impl 
Handshake { pub fn get_block_genesis_hash(&self) -> &Hash { &self.genesis_hash } - - pub fn get_peers(&self) -> &Vec { - &self.peers - } } impl Serializer for Handshake { @@ -133,11 +120,6 @@ impl Serializer for Handshake { writer.write_hash(&self.top_hash); // Block Top Hash (32 bytes) writer.write_hash(&self.genesis_hash); // Genesis Hash writer.write_u64(&self.cumulative_difficulty); - - writer.write_u8(self.peers.len() as u8); - for peer in &self.peers { - writer.write_bytes(&ip_to_bytes(peer)); - } } fn read(reader: &mut Reader) -> Result { @@ -177,12 +159,7 @@ impl Serializer for Handshake { return Err(ReaderError::InvalidSize) } - let mut peers = Vec::with_capacity(peers_len); - for _ in 0..peers_len { - let peer = ip_from_bytes(reader)?; - peers.push(peer); - } - Ok(Handshake::new(version, network, node_tag, network_id, peer_id, local_port, utc_time, topoheight, height, pruned_topoheight, top_hash, genesis_hash, cumulative_difficulty, peers)) + Ok(Handshake::new(version, network, node_tag, network_id, peer_id, local_port, utc_time, topoheight, height, pruned_topoheight, top_hash, genesis_hash, cumulative_difficulty)) } } @@ -195,6 +172,6 @@ impl Display for Handshake { } else { &NO_NODE_TAG }; - write!(f, "Handshake[version: {}, node tag: {}, network_id: {}, peer_id: {}, utc_time: {}, block_height: {}, block_top_hash: {}, peers: ({})]", self.get_version(), node_tag, hex::encode(self.get_network_id()), self.get_peer_id(), self.get_utc_time(), self.get_block_height(), self.get_block_top_hash(), self.get_peers().len()) + write!(f, "Handshake[version: {}, node tag: {}, network_id: {}, peer_id: {}, utc_time: {}, block_height: {}, block_top_hash: {}]", self.get_version(), node_tag, hex::encode(self.get_network_id()), self.get_peer_id(), self.get_utc_time(), self.get_block_height(), self.get_block_top_hash()) } } \ No newline at end of file From 629b3780f07796dd38a6134a86bff5bbf37f8369 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 12 May 2023 15:52:44 +0200 
Subject: [PATCH 55/74] daemon: add `prune_chain` command --- xelis_daemon/src/core/blockchain.rs | 8 +++++--- xelis_daemon/src/main.rs | 10 ++++++++++ 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 0231a6f0..a685b6f6 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -275,7 +275,7 @@ impl Blockchain { // delete all blocks / versioned balances / txs until height in param // for this, we have to locate the nearest Sync block for DAG under the limit height // and then delete all blocks before it - pub async fn prune_until_height(&self, height: u64) -> Result<(), BlockchainError> { + pub async fn prune_until_height(&self, height: u64) -> Result { let current_height = self.get_height(); if height >= current_height || current_height - height < STABLE_LIMIT * 2 { return Err(BlockchainError::PruneHeightTooHigh) @@ -285,9 +285,12 @@ impl Blockchain { let last_pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); // find both stable point through sync block let located_sync_topoheight = self.locate_nearest_sync_block_for_height(&storage, height, current_height).await?; + debug!("Located sync topoheight found: {}", located_sync_topoheight); + // delete all blocks until the new topoheight let assets = storage.get_assets().await?; for topoheight in last_pruned_topoheight..located_sync_topoheight { + trace!("Pruning block at topoheight {}", topoheight); // delete block let block_header = storage.delete_block_at_topoheight(topoheight).await?; @@ -305,8 +308,7 @@ impl Blockchain { } storage.set_pruned_topoheight(located_sync_topoheight)?; - - Ok(()) + Ok(located_sync_topoheight) } // determine the topoheight of the nearest sync block until block height diff --git a/xelis_daemon/src/main.rs b/xelis_daemon/src/main.rs index 9e67a422..6412e8b1 100644 --- a/xelis_daemon/src/main.rs +++ b/xelis_daemon/src/main.rs @@ -87,6 +87,7 @@ async 
fn run_prompt(prompt: &Arc, blockchain: Arc>> = match blockchain.get_p2p().lock().await.as_ref() { Some(p2p) => Some(p2p.clone()), @@ -296,4 +297,13 @@ async fn add_tx(manager: &CommandManager>>, mut ar blockchain.add_tx_with_hash_to_mempool(tx, hash, broadcast).await.context("Error while adding TX to mempool")?; manager.message("TX has been added to mempool"); Ok(()) +} + +async fn prune_chain(manager: &CommandManager>>, mut arguments: ArgumentManager) -> Result<(), CommandError> { + let height = arguments.get_value("height")?.to_number()?; + let blockchain = manager.get_data()?; + manager.message(format!("Pruning chain until maximum height {}", height)); + let pruned_topoheight = blockchain.prune_until_height(height).await.context("Error while pruning chain")?; + manager.message(format!("Chain has been pruned until topoheight {}", pruned_topoheight)); + Ok(()) } \ No newline at end of file From 9e1789cf95578bdc16b2b1b0ac8b621b5847b0b3 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 12 May 2023 22:20:20 +0200 Subject: [PATCH 56/74] daemon: remove unused code --- xelis_daemon/src/p2p/packet/handshake.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/xelis_daemon/src/p2p/packet/handshake.rs b/xelis_daemon/src/p2p/packet/handshake.rs index c8d556ce..bdac5922 100644 --- a/xelis_daemon/src/p2p/packet/handshake.rs +++ b/xelis_daemon/src/p2p/packet/handshake.rs @@ -154,10 +154,6 @@ impl Serializer for Handshake { let top_hash = reader.read_hash()?; let genesis_hash = reader.read_hash()?; let cumulative_difficulty = reader.read_u64()?; - let peers_len = reader.read_u8()? 
as usize; - if peers_len > Handshake::MAX_LEN { - return Err(ReaderError::InvalidSize) - } Ok(Handshake::new(version, network, node_tag, network_id, peer_id, local_port, utc_time, topoheight, height, pruned_topoheight, top_hash, genesis_hash, cumulative_difficulty)) } From 4f9690545727718d15380999d11c3a3a0ec7c276 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 13 May 2023 14:19:15 +0200 Subject: [PATCH 57/74] update README, add status command, pruning based on topoheight --- README.md | 10 +++++++ xelis_common/src/config.rs | 3 +- xelis_common/src/prompt/command.rs | 12 ++++++-- xelis_daemon/src/core/blockchain.rs | 33 +++++++++++---------- xelis_daemon/src/core/storage/sled.rs | 6 ++++ xelis_daemon/src/main.rs | 41 ++++++++++++++++++++++++--- 6 files changed, 80 insertions(+), 25 deletions(-) diff --git a/README.md b/README.md index 1c35ce6a..e0c888a3 100644 --- a/README.md +++ b/README.md @@ -82,6 +82,16 @@ Miners software are recommended to update themselves the block timestamp (or at Actually, the POW Hashing algorithm is `Keccak256` which is until we develop (or choose) our own algorithm. +## Pruning Mode + +This allows anyone who wants to run a light node to reduce the blockchain size by deleting blocks, transactions and versioned balances. +The pruned topoheight can only be at a `Sync Block` and at least `PRUNE_SAFETY_LIMIT` blocks behind the top topoheight. + +For wallets connected to a pruned node, you can't retrieve transaction history and miner rewards which happened before the pruned topoheight. +But your balances are still up-to-date with the chain and if your wallets already synced them, they stay in your wallet database. + +The security of the chain is not reduced as all your blocks were already verified by your own node locally. + ## Client Protocol XELIS integrate along with BlockDAG a way to accept multiple times the same TX and only execute it one time. 
diff --git a/xelis_common/src/config.rs b/xelis_common/src/config.rs index 3a012f22..83a9ad13 100644 --- a/xelis_common/src/config.rs +++ b/xelis_common/src/config.rs @@ -19,7 +19,8 @@ pub const MAX_BLOCK_SIZE: usize = (1024 * 1024) + (256 * 1024); // 1.25 MB pub const FEE_PER_KB: u64 = 1000; // 0.01000 XLS per KB pub const DEV_FEE_PERCENT: u64 = 5; // 5% per block going to dev address pub const TIPS_LIMIT: usize = 3; // maximum 3 previous blocks -pub const STABLE_LIMIT: u64 = 8; +pub const STABLE_LIMIT: u64 = 8; // number of blocks in height after which a block is considered stable +pub const PRUNE_SAFETY_LIMIT: u64 = STABLE_LIMIT * 10; // keep at least the last N blocks before the top topoheight pub const TIMESTAMP_IN_FUTURE_LIMIT: u128 = 2 * 1000; // 2 seconds maximum in future pub const PREFIX_ADDRESS: &str = "xel"; // mainnet prefix address diff --git a/xelis_common/src/prompt/command.rs b/xelis_common/src/prompt/command.rs index 007d22a5..24f186eb 100644 --- a/xelis_common/src/prompt/command.rs +++ b/xelis_common/src/prompt/command.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, pin::Pin, future::Future, fmt::Display}; +use std::{collections::HashMap, pin::Pin, future::Future, fmt::Display, time::Instant}; use crate::config::VERSION; @@ -105,14 +105,16 @@ impl Command { pub struct CommandManager { commands: Vec>, - data: Option + data: Option, + running_since: Instant } impl CommandManager { pub fn new(data: Option) -> Self { Self { commands: Vec::new(), - data + data, + running_since: Instant::now() } } @@ -180,6 +182,10 @@ impl CommandManager { pub fn error(&self, message: D) { error!("{}", message); } + + pub fn running_since(&self) -> Instant { + self.running_since + } } fn help(manager: &CommandManager, mut args: ArgumentManager) -> Result<(), CommandError> { diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 0231a6f0..a685b6f6 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -2,7 +2,7 @@ use 
anyhow::Error; use lru::LruCache; use serde_json::{Value, json}; use xelis_common::{ - config::{DEFAULT_P2P_BIND_ADDRESS, P2P_DEFAULT_MAX_PEERS, DEFAULT_RPC_BIND_ADDRESS, DEFAULT_CACHE_SIZE, MAX_BLOCK_SIZE, EMISSION_SPEED_FACTOR, MAX_SUPPLY, DEV_FEE_PERCENT, GENESIS_BLOCK, TIPS_LIMIT, TIMESTAMP_IN_FUTURE_LIMIT, STABLE_LIMIT, GENESIS_BLOCK_HASH, MINIMUM_DIFFICULTY, GENESIS_BLOCK_DIFFICULTY, XELIS_ASSET, SIDE_BLOCK_REWARD_PERCENT, DEV_PUBLIC_KEY, BLOCK_TIME}, + config::{DEFAULT_P2P_BIND_ADDRESS, P2P_DEFAULT_MAX_PEERS, DEFAULT_RPC_BIND_ADDRESS, DEFAULT_CACHE_SIZE, MAX_BLOCK_SIZE, EMISSION_SPEED_FACTOR, MAX_SUPPLY, DEV_FEE_PERCENT, GENESIS_BLOCK, TIPS_LIMIT, TIMESTAMP_IN_FUTURE_LIMIT, STABLE_LIMIT, GENESIS_BLOCK_HASH, MINIMUM_DIFFICULTY, GENESIS_BLOCK_DIFFICULTY, XELIS_ASSET, SIDE_BLOCK_REWARD_PERCENT, DEV_PUBLIC_KEY, BLOCK_TIME, PRUNE_SAFETY_LIMIT}, crypto::{key::PublicKey, hash::{Hashable, Hash}}, difficulty::{check_difficulty, calculate_difficulty}, transaction::{Transaction, TransactionType, EXTRA_DATA_LIMIT_SIZE}, @@ -272,19 +272,20 @@ impl Blockchain { Ok(()) } - // delete all blocks / versioned balances / txs until height in param - // for this, we have to locate the nearest Sync block for DAG under the limit height + // delete all blocks / versioned balances / txs until topoheight in param + // for this, we have to locate the nearest Sync block for DAG under the limit topoheight // and then delete all blocks before it - pub async fn prune_until_height(&self, height: u64) -> Result { - let current_height = self.get_height(); - if height >= current_height || current_height - height < STABLE_LIMIT * 2 { + // keep a marge of STABLE_LIMIT * 10 + pub async fn prune_until_topoheight(&self, topoheight: u64) -> Result { + let current_topoheight = self.get_topo_height(); + if topoheight >= current_topoheight || current_topoheight - topoheight < PRUNE_SAFETY_LIMIT { return Err(BlockchainError::PruneHeightTooHigh) } let mut storage = self.storage.write().await; let 
last_pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); // find both stable point through sync block - let located_sync_topoheight = self.locate_nearest_sync_block_for_height(&storage, height, current_height).await?; + let located_sync_topoheight = self.locate_nearest_sync_block_for_topoheight(&storage, topoheight, self.get_height()).await?; debug!("Located sync topoheight found: {}", located_sync_topoheight); // delete all blocks until the new topoheight @@ -311,18 +312,16 @@ impl Blockchain { Ok(located_sync_topoheight) } - // determine the topoheight of the nearest sync block until block height - pub async fn locate_nearest_sync_block_for_height(&self, storage: &S, mut height: u64, current_height: u64) -> Result { - while height > 0 { - let blocks = storage.get_blocks_at_height(height).await?; - for hash in blocks { - if self.is_sync_block_at_height(storage, &hash, current_height).await? { - let topoheight = storage.get_topo_height_for_hash(&hash).await?; - return Ok(topoheight) - } + // determine the topoheight of the nearest sync block until limit topoheight + pub async fn locate_nearest_sync_block_for_topoheight(&self, storage: &S, mut topoheight: u64, current_height: u64) -> Result { + while topoheight > 0 { + let block_hash = storage.get_hash_at_topo_height(topoheight).await?; + if self.is_sync_block_at_height(storage, &block_hash, current_height).await? 
{ + let topoheight = storage.get_topo_height_for_hash(&block_hash).await?; + return Ok(topoheight) } - height -= 1; + topoheight -= 1; } // genesis block is always a sync block diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index 42eb99d8..75c02ef4 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -735,6 +735,12 @@ impl Storage for SledStorage { } trace!("Lowest topoheight for rewind: {}", lowest_topo); + let pruned_topoheight = self.get_pruned_topoheight()?.unwrap_or(0); + if lowest_topo <= pruned_topoheight { + warn!("Pruned topoheight is {}, lowest topoheight is {}, rewind only until {}", pruned_topoheight, lowest_topo, pruned_topoheight + 1); + lowest_topo = pruned_topoheight + 1; + } + // new TIPS for chain let mut tips = self.get_tips().await?; // all txs to be rewinded diff --git a/xelis_daemon/src/main.rs b/xelis_daemon/src/main.rs index 6412e8b1..a7799d3d 100644 --- a/xelis_daemon/src/main.rs +++ b/xelis_daemon/src/main.rs @@ -87,7 +87,8 @@ async fn run_prompt(prompt: &Arc, blockchain: Arc>> = match blockchain.get_p2p().lock().await.as_ref() { Some(p2p) => Some(p2p.clone()), @@ -300,10 +301,42 @@ async fn add_tx(manager: &CommandManager>>, mut ar } async fn prune_chain(manager: &CommandManager>>, mut arguments: ArgumentManager) -> Result<(), CommandError> { - let height = arguments.get_value("height")?.to_number()?; + let topoheight = arguments.get_value("topoheight")?.to_number()?; let blockchain = manager.get_data()?; - manager.message(format!("Pruning chain until maximum height {}", height)); - let pruned_topoheight = blockchain.prune_until_height(height).await.context("Error while pruning chain")?; + manager.message(format!("Pruning chain until maximum topoheight {}", topoheight)); + let pruned_topoheight = blockchain.prune_until_topoheight(topoheight).await.context("Error while pruning chain")?; manager.message(format!("Chain has been pruned until topoheight 
{}", pruned_topoheight)); Ok(()) +} + +async fn status(manager: &CommandManager>>, _: ArgumentManager) -> Result<(), CommandError> { + let blockchain = manager.get_data()?; + let storage = blockchain.get_storage().read().await; + + let height = blockchain.get_height(); + let topoheight = blockchain.get_topo_height(); + let stableheight = blockchain.get_stable_height(); + let difficulty = blockchain.get_difficulty(); + let tips = storage.get_tips().await.context("Error while retrieving tips")?; + let top_block_hash = blockchain.get_top_block_hash().await.context("Error while retrieving top block hash")?; + + manager.message(format!("Height: {}", height)); + manager.message(format!("Stable Height: {}", stableheight)); + manager.message(format!("Topo Height: {}", topoheight)); + manager.message(format!("Difficulty: {}", difficulty)); + manager.message(format!("Top block hash: {}", top_block_hash)); + + manager.message(format!("Tips ({}):", tips.len())); + for hash in tips { + manager.message(format!("- {}", hash)); + } + + if let Some(pruned_topoheight) = storage.get_pruned_topoheight().context("Error while retrieving pruned topoheight")? 
{ + manager.message(format!("Chain is pruned until topoheight {}", pruned_topoheight)); + } else { + manager.message("Chain is in full mode"); + } + + //manager.message(format!("Running since: {}", manager.running_since().elapsed().format("%Y-%m-%d %H:%M:%S"))); + Ok(()) } \ No newline at end of file From 57310cde170acd5d8aa677c80ef4bfecec2fa6c1 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 13 May 2023 17:12:16 +0200 Subject: [PATCH 58/74] daemon: use Little Endian for pruned topoheight --- xelis_common/src/serializer/mod.rs | 27 ++++++++++++++++-------- xelis_daemon/src/p2p/packet/handshake.rs | 4 ++-- xelis_daemon/src/p2p/packet/ping.rs | 4 ++-- 3 files changed, 22 insertions(+), 13 deletions(-) diff --git a/xelis_common/src/serializer/mod.rs b/xelis_common/src/serializer/mod.rs index 245d4825..8b03b6d6 100644 --- a/xelis_common/src/serializer/mod.rs +++ b/xelis_common/src/serializer/mod.rs @@ -71,10 +71,14 @@ impl Writer { }; } - pub fn write_optional_u64(&mut self, opt: &Option) { + fn write_u64_le(&mut self, value: &u64) { + self.bytes.extend(value.to_le_bytes()); + } + + pub fn write_optional_non_zero_u64(&mut self, opt: &Option) { match opt { Some(v) => { - self.write_u64(v); + self.write_u64_le(v); }, None => { self.bytes.push(0); @@ -114,6 +118,10 @@ impl<'a> Reader<'a> { } } + fn view_byte(&self) -> Result { + self.bytes.get(self.total).ok_or(ReaderError::InvalidSize).map(|v| *v) + } + pub fn read_bool(&mut self) -> Result { let byte = self.read_u8()?; match byte { @@ -207,16 +215,17 @@ impl<'a> Reader<'a> { } } - pub fn read_optional_u64(&mut self) -> Result, ReaderError> { - let byte = self.read_u8()?; - if byte == 0 { + fn read_u64_le(&mut self) -> Result { + Ok(u64::from_le_bytes(self.read_bytes(8)?)) + } + + pub fn read_optional_non_zero_u64(&mut self) -> Result, ReaderError> { + if self.view_byte()? 
== 0 { + self.read_u8()?; // consume the byte return Ok(None) } - let mut array = [0; 8]; - array[0] = byte; - array[1..].copy_from_slice(&self.read_bytes::<[u8; 7]>(7)?); - Ok(Some(u64::from_be_bytes(array))) + Ok(Some(self.read_u64_le()?)) } pub fn read_big_uint(&mut self) -> Result { diff --git a/xelis_daemon/src/p2p/packet/handshake.rs b/xelis_daemon/src/p2p/packet/handshake.rs index bdac5922..79a1b105 100644 --- a/xelis_daemon/src/p2p/packet/handshake.rs +++ b/xelis_daemon/src/p2p/packet/handshake.rs @@ -116,7 +116,7 @@ impl Serializer for Handshake { writer.write_u64(&self.utc_time); // UTC Time writer.write_u64(&self.topoheight); // Topo height writer.write_u64(&self.height); // Block Height - writer.write_optional_u64(&self.pruned_topoheight); // Pruned Topo Height + writer.write_optional_non_zero_u64(&self.pruned_topoheight); // Pruned Topo Height writer.write_hash(&self.top_hash); // Block Top Hash (32 bytes) writer.write_hash(&self.genesis_hash); // Genesis Hash writer.write_u64(&self.cumulative_difficulty); @@ -150,7 +150,7 @@ impl Serializer for Handshake { let utc_time = reader.read_u64()?; let topoheight = reader.read_u64()?; let height = reader.read_u64()?; - let pruned_topoheight = reader.read_optional_u64()?; + let pruned_topoheight = reader.read_optional_non_zero_u64()?; let top_hash = reader.read_hash()?; let genesis_hash = reader.read_hash()?; let cumulative_difficulty = reader.read_u64()?; diff --git a/xelis_daemon/src/p2p/packet/ping.rs b/xelis_daemon/src/p2p/packet/ping.rs index 7f818157..d77ae907 100644 --- a/xelis_daemon/src/p2p/packet/ping.rs +++ b/xelis_daemon/src/p2p/packet/ping.rs @@ -94,7 +94,7 @@ impl Serializer for Ping<'_> { writer.write_hash(&self.top_hash); writer.write_u64(&self.topoheight); writer.write_u64(&self.height); - writer.write_optional_u64(&self.pruned_topoheight); + writer.write_optional_non_zero_u64(&self.pruned_topoheight); writer.write_u64(&self.cumulative_difficulty); writer.write_u8(self.peer_list.len() as u8); 
for peer in &self.peer_list { @@ -106,7 +106,7 @@ impl Serializer for Ping<'_> { let top_hash = Cow::Owned(reader.read_hash()?); let topoheight = reader.read_u64()?; let height = reader.read_u64()?; - let pruned_topoheight = reader.read_optional_u64()?; + let pruned_topoheight = reader.read_optional_non_zero_u64()?; let cumulative_difficulty = reader.read_u64()?; let peers_len = reader.read_u8()? as usize; if peers_len > P2P_PING_PEER_LIST_LIMIT { From 7c04af061c182cf05b418102564bea406bf61530 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 13 May 2023 17:39:33 +0200 Subject: [PATCH 59/74] daemon: verify pruned topoheight --- xelis_daemon/src/p2p/mod.rs | 8 ++++++++ xelis_daemon/src/p2p/packet/handshake.rs | 8 ++++++++ xelis_daemon/src/p2p/packet/ping.rs | 4 ++-- 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 0442ba3a..c03bbffe 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -315,6 +315,14 @@ impl P2pServer { return Err(P2pError::InvalidHandshake) } + if let Some(pruned_topoheight) = handshake.get_pruned_topoheight() { + let topoheight = handshake.get_topoheight(); + if *pruned_topoheight > topoheight { + debug!("Peer {} has a pruned topoheight {} higher than its topoheight {}", connection, pruned_topoheight, topoheight); + return Err(P2pError::InvalidHandshake) + } + } + connection.set_state(State::Success); let peer = handshake.create_peer(connection, out, priority, Arc::clone(&self.peer_list)); Ok(peer) diff --git a/xelis_daemon/src/p2p/packet/handshake.rs b/xelis_daemon/src/p2p/packet/handshake.rs index 79a1b105..75f964f3 100644 --- a/xelis_daemon/src/p2p/packet/handshake.rs +++ b/xelis_daemon/src/p2p/packet/handshake.rs @@ -89,6 +89,10 @@ impl Handshake { self.height } + pub fn get_topoheight(&self) -> u64 { + self.topoheight + } + pub fn get_block_top_hash(&self) -> &Hash { &self.top_hash } @@ -96,6 +100,10 @@ impl Handshake { pub fn 
get_block_genesis_hash(&self) -> &Hash { &self.genesis_hash } + + pub fn get_pruned_topoheight(&self) -> &Option { + &self.pruned_topoheight + } } impl Serializer for Handshake { diff --git a/xelis_daemon/src/p2p/packet/ping.rs b/xelis_daemon/src/p2p/packet/ping.rs index d77ae907..5ff8db81 100644 --- a/xelis_daemon/src/p2p/packet/ping.rs +++ b/xelis_daemon/src/p2p/packet/ping.rs @@ -49,14 +49,14 @@ impl<'a> Ping<'a> { peer.set_block_top_hash(self.top_hash.into_owned()).await; peer.set_topoheight(self.topoheight); peer.set_height(self.height); - + if peer.is_pruned() != self.pruned_topoheight.is_some() { error!("Invalid protocol rules: impossible to change the pruned state, from {} in ping packet", peer); return Err(P2pError::InvalidProtocolRules) } if let Some(pruned_topoheight) = self.pruned_topoheight { - if pruned_topoheight > self.height { + if pruned_topoheight > self.topoheight { error!("Invalid protocol rules: pruned topoheight {} is greater than height {} in ping packet", pruned_topoheight, self.height); return Err(P2pError::InvalidProtocolRules) } From d9c9aa640e7a5d730dd599afbd657d52d3ca190f Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 13 May 2023 18:12:58 +0200 Subject: [PATCH 60/74] daemon: add uptime in status command --- Cargo.lock | 7 +++++++ xelis_common/src/prompt/command.rs | 6 +++--- xelis_daemon/Cargo.toml | 1 + xelis_daemon/src/main.rs | 5 ++++- xelis_daemon/src/p2p/packet/ping.rs | 2 +- 5 files changed, 16 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2d483e47..677824e3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1066,6 +1066,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + [[package]] name = "hyper" 
version = "0.14.26" @@ -2584,6 +2590,7 @@ dependencies = [ "ed25519-dalek", "fern", "hex", + "humantime", "log", "lru", "rand 0.8.5", diff --git a/xelis_common/src/prompt/command.rs b/xelis_common/src/prompt/command.rs index 24f186eb..5dda23a1 100644 --- a/xelis_common/src/prompt/command.rs +++ b/xelis_common/src/prompt/command.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, pin::Pin, future::Future, fmt::Display, time::Instant}; +use std::{collections::HashMap, pin::Pin, future::Future, fmt::Display, time::{Instant, Duration}}; use crate::config::VERSION; @@ -183,8 +183,8 @@ impl CommandManager { error!("{}", message); } - pub fn running_since(&self) -> Instant { - self.running_since + pub fn running_since(&self) -> Duration { + self.running_since.elapsed() } } diff --git a/xelis_daemon/Cargo.toml b/xelis_daemon/Cargo.toml index 01df96b4..f528d74c 100644 --- a/xelis_daemon/Cargo.toml +++ b/xelis_daemon/Cargo.toml @@ -17,6 +17,7 @@ sled = "0.34.7" lru = "0.7.8" async-recursion = "1" async-trait = "0.1.64" +humantime = "2.1.0" # Common dependencies tokio = { version = "1", features = ["rt-multi-thread", "io-util", "io-std", "time", "macros", "sync", "net"] } diff --git a/xelis_daemon/src/main.rs b/xelis_daemon/src/main.rs index a7799d3d..2e66f969 100644 --- a/xelis_daemon/src/main.rs +++ b/xelis_daemon/src/main.rs @@ -3,6 +3,7 @@ pub mod p2p; pub mod core; use fern::colors::Color; +use humantime::format_duration; use log::{info, error, warn}; use p2p::P2pServer; use rpc::{getwork_server::SharedGetWorkServer, rpc::get_block_response_for_hash}; @@ -337,6 +338,8 @@ async fn status(manager: &CommandManager>>, _: Arg manager.message("Chain is in full mode"); } - //manager.message(format!("Running since: {}", manager.running_since().elapsed().format("%Y-%m-%d %H:%M:%S"))); + let elapsed_seconds = manager.running_since().as_secs(); + let elapsed = format_duration(Duration::from_secs(elapsed_seconds)).to_string(); + manager.message(format!("Uptime: {}", elapsed)); 
Ok(()) } \ No newline at end of file diff --git a/xelis_daemon/src/p2p/packet/ping.rs b/xelis_daemon/src/p2p/packet/ping.rs index 5ff8db81..5852f74c 100644 --- a/xelis_daemon/src/p2p/packet/ping.rs +++ b/xelis_daemon/src/p2p/packet/ping.rs @@ -59,7 +59,7 @@ impl<'a> Ping<'a> { if pruned_topoheight > self.topoheight { error!("Invalid protocol rules: pruned topoheight {} is greater than height {} in ping packet", pruned_topoheight, self.height); return Err(P2pError::InvalidProtocolRules) - } + } } peer.set_pruned_topoheight(self.pruned_topoheight); From 06031e29f5804cb069fc4401b460baa4845420d7 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 13 May 2023 18:20:00 +0200 Subject: [PATCH 61/74] daemon: sanity check for pruning function --- xelis_daemon/src/core/blockchain.rs | 12 ++++++++++-- xelis_daemon/src/core/error.rs | 6 +++++- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 05bcc3b5..b7931b4e 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -275,8 +275,12 @@ impl Blockchain { // delete all blocks / versioned balances / txs until topoheight in param // for this, we have to locate the nearest Sync block for DAG under the limit topoheight // and then delete all blocks before it - // keep a marge of STABLE_LIMIT * 10 + // keep a margin of PRUNE_SAFETY_LIMIT pub async fn prune_until_topoheight(&self, topoheight: u64) -> Result { + if topoheight == 0 { + return Err(BlockchainError::PruneZero) + } + let current_topoheight = self.get_topo_height(); if topoheight >= current_topoheight || current_topoheight - topoheight < PRUNE_SAFETY_LIMIT { return Err(BlockchainError::PruneHeightTooHigh) @@ -284,7 +288,11 @@ impl Blockchain { let mut storage = self.storage.write().await; let last_pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); - // find both stable point through sync block + if last_pruned_topoheight <= topoheight { + 
return Err(BlockchainError::PruneLowerThanLastPruned) + } + + // find new stable point based on a sync block under the limit topoheight let located_sync_topoheight = self.locate_nearest_sync_block_for_topoheight(&storage, topoheight, self.get_height()).await?; debug!("Located sync topoheight found: {}", located_sync_topoheight); diff --git a/xelis_daemon/src/core/error.rs b/xelis_daemon/src/core/error.rs index ed886ed6..97fa0518 100644 --- a/xelis_daemon/src/core/error.rs +++ b/xelis_daemon/src/core/error.rs @@ -160,7 +160,11 @@ pub enum BlockchainError { #[error("TX {} is already in blockchain", _0)] TxAlreadyInBlockchain(Hash), #[error("Cannot prune, not enough blocks")] - PruneHeightTooHigh + PruneHeightTooHigh, + #[error("Cannot prune until topoheight 0, provide a positive number")] + PruneZero, + #[error("Prune topoheight is lower or equal than previous pruned topoheight")] + PruneLowerThanLastPruned } impl From> for BlockchainError { From c1786fa862aab15d1aab1d7836bdf6ceb126962e Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 13 May 2023 18:23:49 +0200 Subject: [PATCH 62/74] daemon: display prune error in case of failure --- xelis_daemon/src/core/blockchain.rs | 2 +- xelis_daemon/src/main.rs | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index b7931b4e..5d51384f 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -288,7 +288,7 @@ impl Blockchain { let mut storage = self.storage.write().await; let last_pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); - if last_pruned_topoheight <= topoheight { + if topoheight <= last_pruned_topoheight { return Err(BlockchainError::PruneLowerThanLastPruned) } diff --git a/xelis_daemon/src/main.rs b/xelis_daemon/src/main.rs index 2e66f969..3191cbda 100644 --- a/xelis_daemon/src/main.rs +++ b/xelis_daemon/src/main.rs @@ -305,7 +305,13 @@ async fn prune_chain(manager: 
&CommandManager>>, m let topoheight = arguments.get_value("topoheight")?.to_number()?; let blockchain = manager.get_data()?; manager.message(format!("Pruning chain until maximum topoheight {}", topoheight)); - let pruned_topoheight = blockchain.prune_until_topoheight(topoheight).await.context("Error while pruning chain")?; + let pruned_topoheight = match blockchain.prune_until_topoheight(topoheight).await { + Ok(topoheight) => topoheight, + Err(e) => { + manager.error(format!("Error while pruning chain: {}", e)); + return Ok(()); + } + }; manager.message(format!("Chain has been pruned until topoheight {}", pruned_topoheight)); Ok(()) } From c611ee2b32e4790997d11568cadbd7d109827203 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 13 May 2023 19:25:12 +0200 Subject: [PATCH 63/74] wallet: verify provided fees --- xelis_wallet/src/transaction_builder.rs | 25 +++++++++++++++++-------- xelis_wallet/src/wallet.rs | 9 ++++++--- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/xelis_wallet/src/transaction_builder.rs b/xelis_wallet/src/transaction_builder.rs index 5b7d4566..175a0873 100644 --- a/xelis_wallet/src/transaction_builder.rs +++ b/xelis_wallet/src/transaction_builder.rs @@ -33,15 +33,17 @@ impl TransactionBuilder { writer } - fn estimate_fees_internal(&self, writer: &Writer) -> u64 { - match &self.fee_builder { - FeeBuilder::Multiplier(multiplier) => { - // 8 represent the field 'fee' in bytes size - let total_bytes = SIGNATURE_LENGTH + 8 + writer.total_write(); - (calculate_tx_fee(total_bytes) as f64 * multiplier) as u64 - }, + fn verify_fees_internal(&self, calculated_fees: u64) -> Result { + let provided_fees = match &self.fee_builder { + FeeBuilder::Multiplier(multiplier) => (calculated_fees as f64 * multiplier) as u64, FeeBuilder::Value(value) => *value + }; + + if provided_fees < calculated_fees { + return Err(WalletError::InvalidFeeProvided(calculated_fees, provided_fees)) } + + Ok(provided_fees) } pub fn total_spent(&self) -> HashMap<&Hash, 
u64> { @@ -79,6 +81,13 @@ impl TransactionBuilder { total_size } + fn estimate_fees_internal(&self, writer: &Writer) -> u64 { + // 8 represent the field 'fee' in bytes size + let total_bytes = SIGNATURE_LENGTH + 8 + writer.total_write(); + let calculated_fees = calculate_tx_fee(total_bytes); + calculated_fees + } + pub fn estimate_fees(&self) -> u64 { let writer = self.serialize(); self.estimate_fees_internal(&writer) @@ -111,7 +120,7 @@ impl TransactionBuilder { } let mut writer = self.serialize(); - let fee = self.estimate_fees_internal(&writer); + let fee = self.verify_fees_internal(self.estimate_fees_internal(&writer))?; writer.write_u64(&fee); let signature = keypair.sign(&writer.bytes()); diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index 0f7d534c..929782a1 100644 --- a/xelis_wallet/src/wallet.rs +++ b/xelis_wallet/src/wallet.rs @@ -9,6 +9,7 @@ use xelis_common::config::XELIS_ASSET; use xelis_common::crypto::address::Address; use xelis_common::crypto::hash::Hash; use xelis_common::crypto::key::{KeyPair, PublicKey}; +use xelis_common::globals::format_coin; use xelis_common::network::Network; use xelis_common::serializer::{Serializer, Writer}; use xelis_common::transaction::{TransactionType, Transfer, Transaction, EXTRA_DATA_LIMIT_SIZE}; @@ -52,9 +53,9 @@ pub enum WalletError { InvalidSaltSize, #[error("Error while fetching password salt from DB")] NoSaltFound, - #[error("Your wallet contains only {} instead of {} for asset {}", _0, _1, _2)] + #[error("Your wallet contains only {} instead of {} for asset {}", format_coin(*_0), format_coin(*_1), _2)] NotEnoughFunds(u64, u64, Hash), - #[error("Your wallet don't have enough funds to pay fees: expected {} but have only {}", _0, _1)] + #[error("Your wallet don't have enough funds to pay fees: expected {} but have only {}", format_coin(*_0), format_coin(*_1))] NotEnoughFundsForFee(u64, u64), #[error("Invalid address params")] InvalidAddressParams, @@ -73,7 +74,9 @@ pub enum WalletError { 
#[error("RPC Server is not running")] RPCServerNotRunning, #[error("RPC Server is already running")] - RPCServerAlreadyRunning + RPCServerAlreadyRunning, + #[error("Invalid fees provided, minimum fees calculated: {}, provided: {}", format_coin(*_0), format_coin(*_1))] + InvalidFeeProvided(u64, u64) } pub struct Wallet { From 1299ef17776bf137f796321e8d2859ee3e028ac9 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 14 May 2023 14:50:27 +0200 Subject: [PATCH 64/74] daemon: sanity check pruned topoheight --- xelis_daemon/src/core/blockchain.rs | 36 ++++++++++++++++------------- xelis_daemon/src/p2p/mod.rs | 11 ++++++--- xelis_daemon/src/p2p/packet/ping.rs | 7 ++++++ 3 files changed, 35 insertions(+), 19 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 5d51384f..808bd0d8 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -295,25 +295,29 @@ impl Blockchain { // find new stable point based on a sync block under the limit topoheight let located_sync_topoheight = self.locate_nearest_sync_block_for_topoheight(&storage, topoheight, self.get_height()).await?; debug!("Located sync topoheight found: {}", located_sync_topoheight); + + if located_sync_topoheight > last_pruned_topoheight { + // delete all blocks until the new topoheight + let assets = storage.get_assets().await?; + for topoheight in last_pruned_topoheight..located_sync_topoheight { + trace!("Pruning block at topoheight {}", topoheight); + // delete block + let block_header = storage.delete_block_at_topoheight(topoheight).await?; + + // delete balances for all assets + for asset in &assets { + storage.delete_versioned_balances_for_asset_at_topoheight(asset, topoheight).await?; + } - // delete all blocks until the new topoheight - let assets = storage.get_assets().await?; - for topoheight in last_pruned_topoheight..located_sync_topoheight { - trace!("Pruning block at topoheight {}", topoheight); - // delete block - let 
block_header = storage.delete_block_at_topoheight(topoheight).await?; - - // delete balances for all assets - for asset in &assets { - storage.delete_versioned_balances_for_asset_at_topoheight(asset, topoheight).await?; - } - - // delete transactions for this block - for tx_hash in block_header.get_txs_hashes() { - if storage.has_transaction(tx_hash).await? { - storage.delete_tx(tx_hash).await?; + // delete transactions for this block + for tx_hash in block_header.get_txs_hashes() { + if storage.has_transaction(tx_hash).await? { + storage.delete_tx(tx_hash).await?; + } } } + } else { + debug!("located_sync_topoheight <= topoheight, no pruning needed"); } storage.set_pruned_topoheight(located_sync_topoheight)?; diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index c03bbffe..b3046712 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -439,9 +439,14 @@ impl P2pServer { let our_topoheight = self.blockchain.get_topo_height(); // search for peers which are greater than us // and that are pruned but before our height so we can sync correctly - let peers: Vec<&Arc> = peer_list.get_peers().values().filter(|p| - p.get_pruned_topoheight().unwrap_or(0) < our_height - && (p.get_height() > our_height || p.get_topoheight() > our_topoheight) + let peers: Vec<&Arc> = peer_list.get_peers().values().filter(|p| { + if let Some(pruned_topoheight) = p.get_pruned_topoheight() { + if pruned_topoheight > our_topoheight { + return false + } + } + p.get_height() > our_height || p.get_topoheight() > our_topoheight + } ).collect(); let count = peers.len(); trace!("peers available for random selection: {}", count); diff --git a/xelis_daemon/src/p2p/packet/ping.rs b/xelis_daemon/src/p2p/packet/ping.rs index 5852f74c..01b976b7 100644 --- a/xelis_daemon/src/p2p/packet/ping.rs +++ b/xelis_daemon/src/p2p/packet/ping.rs @@ -60,6 +60,13 @@ impl<'a> Ping<'a> { error!("Invalid protocol rules: pruned topoheight {} is greater than height {} in ping 
packet", pruned_topoheight, self.height); return Err(P2pError::InvalidProtocolRules) } + + if let Some(old_pruned_topoheight) = peer.get_pruned_topoheight() { + if pruned_topoheight < old_pruned_topoheight { + error!("Invalid protocol rules: pruned topoheight {} is less than old pruned topoheight {} in ping packet", pruned_topoheight, old_pruned_topoheight); + return Err(P2pError::InvalidProtocolRules) + } + } } peer.set_pruned_topoheight(self.pruned_topoheight); From 9d99f574f323c7153a2504d2705d30912eebbac0 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 15 May 2023 11:34:29 +0200 Subject: [PATCH 65/74] daemon: fix pruned chain function, create snapshot balances --- xelis_common/src/serializer/mod.rs | 8 ++++- xelis_daemon/src/core/blockchain.rs | 5 ++- xelis_daemon/src/core/storage/mod.rs | 1 + xelis_daemon/src/core/storage/sled.rs | 45 +++++++++++++++++++++++++++ 4 files changed, 57 insertions(+), 2 deletions(-) diff --git a/xelis_common/src/serializer/mod.rs b/xelis_common/src/serializer/mod.rs index 8b03b6d6..404356ed 100644 --- a/xelis_common/src/serializer/mod.rs +++ b/xelis_common/src/serializer/mod.rs @@ -287,9 +287,15 @@ pub trait Serializer { match hex::decode(&hex) { Ok(bytes) => { let mut reader = Reader::new(&bytes); - Serializer::read(&mut reader) + Self::read(&mut reader) }, Err(_) => Err(ReaderError::InvalidHex) } } + + fn from_bytes(bytes: &[u8]) -> Result + where Self: Sized { + let mut reader = Reader::new(bytes); + Self::read(&mut reader) + } } \ No newline at end of file diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 808bd0d8..3339935a 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -297,8 +297,11 @@ impl Blockchain { debug!("Located sync topoheight found: {}", located_sync_topoheight); if located_sync_topoheight > last_pruned_topoheight { - // delete all blocks until the new topoheight let assets = storage.get_assets().await?; + // create snapshots of 
balances to located_sync_topoheight + storage.create_snapshot_balances_at_topoheight(&assets, located_sync_topoheight).await?; + + // delete all blocks until the new topoheight for topoheight in last_pruned_topoheight..located_sync_topoheight { trace!("Pruning block at topoheight {}", topoheight); // delete block diff --git a/xelis_daemon/src/core/storage/mod.rs b/xelis_daemon/src/core/storage/mod.rs index 1cd7e10f..3a1e587a 100644 --- a/xelis_daemon/src/core/storage/mod.rs +++ b/xelis_daemon/src/core/storage/mod.rs @@ -32,6 +32,7 @@ pub trait Storage: DifficultyProvider + Sync + Send + 'static { async fn delete_block_at_topoheight(&mut self, topoheight: u64) -> Result, BlockchainError>; async fn delete_tx(&mut self, hash: &Hash) -> Result, BlockchainError>; async fn delete_versioned_balances_for_asset_at_topoheight(&mut self, asset: &Hash, topoheight: u64) -> Result<(), BlockchainError>; + async fn create_snapshot_balances_at_topoheight(&mut self, assets: &Vec, topoheight: u64) -> Result<(), BlockchainError>; fn get_block_executer_for_tx(&self, tx: &Hash) -> Result; fn set_tx_executed_in_block(&mut self, tx: &Hash, block: &Hash) -> Result<(), BlockchainError>; diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index 75c02ef4..bce9282c 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -361,6 +361,51 @@ impl Storage for SledStorage { Ok(()) } + async fn create_snapshot_balances_at_topoheight(&mut self, assets: &Vec, topoheight: u64) -> Result<(), BlockchainError> { + for asset in assets { + // tree where VersionedBalance are stored + let versioned_tree = self.get_versioned_balance_tree(asset, topoheight).await?; + // asset tree where PublicKey are stored with the highest balance topoheight in it + let tree = self.db.open_tree(asset.as_bytes())?; + for el in tree.iter() { + let (key_bytes, value) = el?; + let key = PublicKey::from_bytes(&key_bytes)?; + let 
highest_balance_topoheight = u64::from_bytes(&value)?; + + // retrieve the highest versioned balance + let mut versioned_balance = self.get_balance_at_exact_topoheight(&key, asset, highest_balance_topoheight).await?; + + // if the highest topoheight for this account is less than the snapshot topoheight + // update it to the topoheight + // otherwise, delete the previous topoheight in VersionedBalance which is under topoheight + if highest_balance_topoheight < topoheight { + // save the new highest topoheight + tree.insert(&key_bytes, &topoheight.to_be_bytes())?; + // remove the previous topoheight + versioned_balance.set_previous_topoheight(None); + + // save it + versioned_tree.insert(key_bytes, versioned_balance.to_bytes())?; + } else { + // find the first VersionedBalance which is under topoheight + while let Some(previous_topoheight) = versioned_balance.get_previous_topoheight() { + if previous_topoheight < topoheight { + versioned_balance.set_previous_topoheight(None); + // save it + versioned_tree.insert(key_bytes, versioned_balance.to_bytes())?; + break; + } + + // keep searching + versioned_balance = self.get_balance_at_exact_topoheight(&key, asset, previous_topoheight).await?; + } + } + } + } + + Ok(()) + } + fn get_block_executer_for_tx(&self, tx: &Hash) -> Result { self.load_from_disk(&self.txs_executed, tx.as_bytes()) } From c571108a6a5f1ee5651d99c271c079accba7ece1 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 16 May 2023 10:33:22 +0200 Subject: [PATCH 66/74] daemon: auto prune mode --- xelis_daemon/src/core/blockchain.rs | 49 ++++++++++++++++++++++++----- xelis_daemon/src/core/error.rs | 4 ++- 2 files changed, 44 insertions(+), 9 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 3339935a..895e4f36 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -57,7 +57,12 @@ pub struct Config { pub simulator: bool, /// Disable the p2p connections #[clap(long)] - 
pub disable_p2p_server: bool + pub disable_p2p_server: bool, + /// Enable the auto prune mode and prune the chain + /// at each new block by keeping at least N blocks + /// before the top. + #[clap(long)] + pub auto_prune_keep_n_blocks: Option } pub struct Blockchain { @@ -81,6 +86,7 @@ pub struct Blockchain { // tip work score is used to determine the best tip based on a block, tip base ands a base height tip_work_score_cache: Mutex, Difficulty)>>, full_order_cache: Mutex>>, + auto_prune_keep_n_blocks: Option } impl Blockchain { @@ -90,6 +96,13 @@ impl Blockchain { return Err(BlockchainError::InvalidNetwork.into()) } + if let Some(keep_only) = config.auto_prune_keep_n_blocks { + if keep_only < PRUNE_SAFETY_LIMIT { + error!("Auto prune mode should keep at least 80 blocks"); + return Err(BlockchainError::AutoPruneMode.into()) + } + } + let on_disk = storage.has_blocks(); let (height, topoheight) = if on_disk { info!("Reading last metadata available..."); @@ -114,6 +127,7 @@ impl Blockchain { tip_base_cache: Mutex::new(LruCache::new(1024)), tip_work_score_cache: Mutex::new(LruCache::new(1024)), full_order_cache: Mutex::new(LruCache::new(1024)), + auto_prune_keep_n_blocks: config.auto_prune_keep_n_blocks }; // include genesis block @@ -272,11 +286,16 @@ impl Blockchain { Ok(()) } + pub async fn prune_until_topoheight(&self, topoheight: u64) -> Result { + let mut storage = self.storage.write().await; + self.prune_until_topoheight_for_storage(topoheight, &mut storage).await + } + // delete all blocks / versioned balances / txs until topoheight in param // for this, we have to locate the nearest Sync block for DAG under the limit topoheight // and then delete all blocks before it // keep a marge of PRUNE_SAFETY_LIMIT - pub async fn prune_until_topoheight(&self, topoheight: u64) -> Result { + pub async fn prune_until_topoheight_for_storage(&self, topoheight: u64, storage: &mut S) -> Result { if topoheight == 0 { return Err(BlockchainError::PruneZero) } @@ -286,7 +305,6 @@ 
impl Blockchain { return Err(BlockchainError::PruneHeightTooHigh) } - let mut storage = self.storage.write().await; let last_pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); if topoheight <= last_pruned_topoheight { return Err(BlockchainError::PruneLowerThanLastPruned) @@ -319,12 +337,12 @@ impl Blockchain { } } } + storage.set_pruned_topoheight(located_sync_topoheight)?; + Ok(located_sync_topoheight) } else { debug!("located_sync_topoheight <= topoheight, no pruning needed"); + Ok(last_pruned_topoheight) } - - storage.set_pruned_topoheight(located_sync_topoheight)?; - Ok(located_sync_topoheight) } // determine the topoheight of the nearest sync block until limit topoheight @@ -1388,12 +1406,28 @@ impl Blockchain { // save highest topo height debug!("Highest topo height found: {}", highest_topo); - if current_height == 0 || highest_topo > current_topoheight { + let extended = highest_topo > current_topoheight; + if current_height == 0 || extended { debug!("Blockchain height extended, current topoheight is now {} (previous was {})", highest_topo, current_topoheight); storage.set_top_topoheight(highest_topo)?; self.topoheight.store(highest_topo, Ordering::Release); current_topoheight = highest_topo; } + + // auto prune mode + if extended { + if let Some(keep_only) = self.auto_prune_keep_n_blocks { + // check that the topoheight is greater than the safety limit + // and that we can prune the chain using the config while respecting the safety limit + if current_topoheight % keep_only == 0 { + info!("Auto pruning chain until topoheight {} (keep only {} blocks)", current_topoheight - keep_only, keep_only); + if let Err(e) = self.prune_until_topoheight_for_storage(current_topoheight - keep_only, storage).await { + warn!("Error while trying to auto prune chain: {}", e); + } + } + } + } + storage.store_tips(&tips)?; let mut current_height = current_height; @@ -1403,7 +1437,6 @@ impl Blockchain { self.height.store(block.get_height(), Ordering::Release); 
current_height = block.get_height(); } - if storage.is_block_topological_ordered(&block_hash).await { let topoheight = storage.get_topo_height_for_hash(&block_hash).await?; debug!("Adding new '{}' {} at topoheight {}", block_hash, block, topoheight); diff --git a/xelis_daemon/src/core/error.rs b/xelis_daemon/src/core/error.rs index 97fa0518..3847c93e 100644 --- a/xelis_daemon/src/core/error.rs +++ b/xelis_daemon/src/core/error.rs @@ -164,7 +164,9 @@ pub enum BlockchainError { #[error("Cannot prune until topoheight 0, provide a positive number")] PruneZero, #[error("Prune topoheight is lower or equal than previous pruned topoheight")] - PruneLowerThanLastPruned + PruneLowerThanLastPruned, + #[error("Auto prune mode is misconfigured")] + AutoPruneMode } impl From> for BlockchainError { From 1c8ef6c781ef218008ea5a1b81d224be82f173f1 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 18 May 2023 13:59:09 +0200 Subject: [PATCH 67/74] fast sync wip --- xelis_common/src/account/mod.rs | 4 + xelis_common/src/config.rs | 1 + xelis_common/src/crypto/key.rs | 13 + xelis_common/src/serializer/defaults.rs | 92 +++++- xelis_common/src/serializer/mod.rs | 20 ++ xelis_daemon/src/core/blockchain.rs | 28 +- xelis_daemon/src/core/storage/mod.rs | 10 +- xelis_daemon/src/core/storage/sled.rs | 37 ++- xelis_daemon/src/p2p/error.rs | 5 +- xelis_daemon/src/p2p/mod.rs | 307 ++++++++++++++++- .../src/p2p/packet/bootstrap_chain.rs | 308 ++++++++++++++++++ xelis_daemon/src/p2p/packet/inventory.rs | 53 ++- xelis_daemon/src/p2p/packet/mod.rs | 23 +- xelis_daemon/src/p2p/packet/ping.rs | 4 + xelis_daemon/src/p2p/peer.rs | 47 ++- 15 files changed, 911 insertions(+), 41 deletions(-) create mode 100644 xelis_daemon/src/p2p/packet/bootstrap_chain.rs diff --git a/xelis_common/src/account/mod.rs b/xelis_common/src/account/mod.rs index 9053c2fe..6b6adec0 100644 --- a/xelis_common/src/account/mod.rs +++ b/xelis_common/src/account/mod.rs @@ -20,6 +20,10 @@ impl VersionedBalance { self.balance } + pub fn 
set_balance(&mut self, value: u64) { + self.balance = value; + } + pub fn add_balance(&mut self, amount: u64) { self.balance += amount; } diff --git a/xelis_common/src/config.rs b/xelis_common/src/config.rs index 83a9ad13..07b02fdc 100644 --- a/xelis_common/src/config.rs +++ b/xelis_common/src/config.rs @@ -44,6 +44,7 @@ pub const P2P_DEFAULT_MAX_PEERS: usize = 32; // default number of maximum peers pub const PEER_FAIL_TIME_RESET: u64 = 60 * 5; // number of seconds to reset the counter pub const PEER_FAIL_LIMIT: u8 = 20; // number of fail to disconnect the peer pub const PEER_TIMEOUT_REQUEST_OBJECT: u64 = 3000; // millis until we timeout +pub const PEER_TIMEOUT_BOOTSTRAP_STEP: u64 = 10000; // millis until we timeout // Wallet config pub const DEFAULT_DAEMON_ADDRESS: &str = "http://127.0.0.1:8080"; diff --git a/xelis_common/src/crypto/key.rs b/xelis_common/src/crypto/key.rs index 1717c64c..4f9e1fe3 100644 --- a/xelis_common/src/crypto/key.rs +++ b/xelis_common/src/crypto/key.rs @@ -4,6 +4,7 @@ use crate::serializer::{Reader, ReaderError, Serializer, Writer}; use super::address::{Address, AddressType}; use super::hash::Hash; use std::borrow::Cow; +use std::cmp::Ordering; use std::fmt::{Display, Error, Formatter}; use rand::{rngs::OsRng, RngCore}; use std::hash::Hasher; @@ -67,6 +68,18 @@ impl PartialEq for PublicKey { } } +impl PartialOrd for PublicKey { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.as_bytes().cmp(other.as_bytes())) + } +} + +impl Ord for PublicKey { + fn cmp(&self, other: &Self) -> Ordering { + self.as_bytes().cmp(other.as_bytes()) + } +} + impl std::hash::Hash for PublicKey { fn hash(&self, state: &mut H) { self.as_bytes().hash(state); diff --git a/xelis_common/src/serializer/defaults.rs b/xelis_common/src/serializer/defaults.rs index 4a2127fd..d3079798 100644 --- a/xelis_common/src/serializer/defaults.rs +++ b/xelis_common/src/serializer/defaults.rs @@ -1,8 +1,9 @@ use crate::crypto::hash::Hash; use super::{Serializer, Writer, 
Reader, ReaderError}; -use std::collections::HashSet; -use log::error; +use std::{collections::{HashSet, BTreeSet}, borrow::Cow}; +use log::{error, warn}; +// Used for Tips storage impl Serializer for HashSet { fn write(&self, writer: &mut Writer) { for hash in self { @@ -23,6 +24,12 @@ impl Serializer for HashSet { let hash = reader.read_hash()?; tips.insert(hash); } + + if tips.len() != count { + error!("Invalid size: received {} elements while sending {}", tips.len(), count); + return Err(ReaderError::InvalidSize) + } + Ok(tips) } } @@ -36,4 +43,85 @@ impl Serializer for u64 { fn read(reader: &mut Reader) -> Result { Ok(reader.read_u64()?) } +} + + +const MAX_ITEMS: usize = 1024; + +impl Serializer for BTreeSet { + fn read(reader: &mut Reader) -> Result { + let count = reader.read_u16()?; + if count > MAX_ITEMS as u16 { + warn!("Received {} while maximum is set to {}", count, MAX_ITEMS); + return Err(ReaderError::InvalidSize) + } + + let mut set = BTreeSet::new(); + for _ in 0..count { + let value = T::read(reader)?; + if !set.insert(value) { + error!("Value is duplicated in BTreeSet"); + return Err(ReaderError::InvalidSize) + } + } + Ok(set) + } + + fn write(&self, writer: &mut Writer) { + writer.write_u16(self.len() as u16); + for el in self { + el.write(writer); + } + } +} + +impl Serializer for Cow<'_, T> { + fn read(reader: &mut Reader) -> Result { + Ok(Cow::Owned(T::read(reader)?)) + } + + fn write(&self, writer: &mut Writer) { + self.as_ref().write(writer); + } +} + +impl Serializer for Option { + fn read(reader: &mut Reader) -> Result { + if reader.read_bool()? 
{ + Ok(Some(T::read(reader)?)) + } else { + Ok(None) + } + } + + fn write(&self, writer: &mut Writer) { + writer.write_bool(&self.is_some()); + if let Some(value) = self { + value.write(writer); + } + } +} + +impl Serializer for Vec { + fn read(reader: &mut Reader) -> Result { + let count = reader.read_u16()?; + if count > MAX_ITEMS as u16 { + warn!("Received {} while maximum is set to {}", count, MAX_ITEMS); + return Err(ReaderError::InvalidSize) + } + + let mut values = Vec::with_capacity(count as usize); + for _ in 0..count { + values.push(T::read(reader)?); + } + + Ok(values) + } + + fn write(&self, writer: &mut Writer) { + writer.write_u16(self.len() as u16); + for el in self { + el.write(writer); + } + } } \ No newline at end of file diff --git a/xelis_common/src/serializer/mod.rs b/xelis_common/src/serializer/mod.rs index 404356ed..88a68715 100644 --- a/xelis_common/src/serializer/mod.rs +++ b/xelis_common/src/serializer/mod.rs @@ -86,6 +86,17 @@ impl Writer { }; } + pub fn write_optional_non_zero_u8(&mut self, opt: Option) { + match opt { + Some(v) => { + self.write_u8(v); + }, + None => { + self.bytes.push(0); + } + }; + } + pub fn total_write(&self) -> usize { self.bytes.len() } @@ -228,6 +239,15 @@ impl<'a> Reader<'a> { Ok(Some(self.read_u64_le()?)) } + pub fn read_optional_non_zero_u8(&mut self) -> Result, ReaderError> { + let byte = self.read_u8()?; + if byte == 0 { + return Ok(None) + } + + Ok(Some(byte)) + } + pub fn read_big_uint(&mut self) -> Result { let size = self.read_u8()?; let bytes = self.read_bytes_ref(size as usize)?; diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 895e4f36..4c827d15 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -62,7 +62,12 @@ pub struct Config { /// at each new block by keeping at least N blocks /// before the top. 
#[clap(long)] - pub auto_prune_keep_n_blocks: Option + pub auto_prune_keep_n_blocks: Option, + /// Sync a bootstrapped chain if your local copy is outdated. + /// It will not store any blocks / TXs and will not verify the history locally. + /// Use it with extreme cautions and trusted nodes to have a valid bootstrapped chain + #[clap(long)] + pub allow_fast_sync: bool } pub struct Blockchain { @@ -86,7 +91,11 @@ pub struct Blockchain { // tip work score is used to determine the best tip based on a block, tip base ands a base height tip_work_score_cache: Mutex, Difficulty)>>, full_order_cache: Mutex>>, - auto_prune_keep_n_blocks: Option + // auto prune mode if enabled, will delete all blocks every N and keep only N top blocks (topoheight based) + auto_prune_keep_n_blocks: Option, + // allow fast syncing (only balances / assets / Smart Contracts changes) + // without syncing the history + allow_fast_sync_mode: bool } impl Blockchain { @@ -127,13 +136,15 @@ impl Blockchain { tip_base_cache: Mutex::new(LruCache::new(1024)), tip_work_score_cache: Mutex::new(LruCache::new(1024)), full_order_cache: Mutex::new(LruCache::new(1024)), - auto_prune_keep_n_blocks: config.auto_prune_keep_n_blocks + auto_prune_keep_n_blocks: config.auto_prune_keep_n_blocks, + allow_fast_sync_mode: config.allow_fast_sync }; // include genesis block if !on_disk { blockchain.create_genesis_block().await?; } else { + debug!("Retrieving tips for computing current difficulty"); let storage = blockchain.get_storage().read().await; let tips_set = storage.get_tips().await?; let mut tips = Vec::with_capacity(tips_set.len()); @@ -147,6 +158,7 @@ impl Blockchain { // now compute the stable height { + debug!("Retrieving tips for computing current stable height"); let storage = blockchain.get_storage().read().await; let tips = storage.get_tips().await?; let (_, stable_height) = blockchain.find_common_base(&storage, &tips).await?; @@ -286,6 +298,14 @@ impl Blockchain { Ok(()) } + // verify if we can do fast 
sync with this peer + // for this, we check that user allowed the fast sync mode + // we also check that the peer topoheight is greater than 2x times the prune safety limit + // and we should be sure to not perform fast sync on a already-synced chain. + pub fn allow_fast_sync(&self, peer_topoheight: u64) -> bool { + self.allow_fast_sync_mode && peer_topoheight > PRUNE_SAFETY_LIMIT * 2 && self.get_topo_height() == 0 + } + pub async fn prune_until_topoheight(&self, topoheight: u64) -> Result { let mut storage = self.storage.write().await; self.prune_until_topoheight_for_storage(topoheight, &mut storage).await @@ -1310,7 +1330,7 @@ impl Blockchain { for (tx, tx_hash) in block.get_transactions().iter().zip(block.get_txs_hashes()) { // execute all txs // TODO improve it (too much read/write that can be refactored) if !storage.has_block_linked_to_tx(&tx_hash, &hash)? { - storage.add_block_for_tx(&tx_hash, hash.clone())?; + storage.add_block_for_tx(&tx_hash, &hash)?; trace!("Block {} is now linked to tx {}", hash, tx_hash); } diff --git a/xelis_daemon/src/core/storage/mod.rs b/xelis_daemon/src/core/storage/mod.rs index 3a1e587a..370b0108 100644 --- a/xelis_daemon/src/core/storage/mod.rs +++ b/xelis_daemon/src/core/storage/mod.rs @@ -1,7 +1,7 @@ mod sled; pub use self::sled::SledStorage; -use std::{collections::HashSet, sync::Arc}; +use std::{collections::{HashSet, BTreeSet}, sync::Arc}; use async_trait::async_trait; use xelis_common::{ crypto::{key::PublicKey, hash::Hash}, @@ -34,6 +34,12 @@ pub trait Storage: DifficultyProvider + Sync + Send + 'static { async fn delete_versioned_balances_for_asset_at_topoheight(&mut self, asset: &Hash, topoheight: u64) -> Result<(), BlockchainError>; async fn create_snapshot_balances_at_topoheight(&mut self, assets: &Vec, topoheight: u64) -> Result<(), BlockchainError>; + fn get_partial_assets(&self, maximum: usize, skip: usize) -> Result, BlockchainError>; + fn get_partial_keys(&self, maximum: usize, skip: usize) -> Result, 
BlockchainError>; + + async fn get_balances + Send + Sync, I: Iterator + Send>(&self, asset: &Hash, keys: I) -> Result>, BlockchainError>; + + fn get_block_executer_for_tx(&self, tx: &Hash) -> Result; fn set_tx_executed_in_block(&mut self, tx: &Hash, block: &Hash) -> Result<(), BlockchainError>; fn remove_tx_executed(&mut self, tx: &Hash) -> Result<(), BlockchainError>; @@ -52,7 +58,7 @@ pub trait Storage: DifficultyProvider + Sync + Send + 'static { fn has_tx_blocks(&self, hash: &Hash) -> Result; fn has_block_linked_to_tx(&self, tx: &Hash, block: &Hash) -> Result; fn get_blocks_for_tx(&self, hash: &Hash) -> Result; - fn add_block_for_tx(&mut self, tx: &Hash, block: Hash) -> Result<(), BlockchainError>; + fn add_block_for_tx(&mut self, tx: &Hash, block: &Hash) -> Result<(), BlockchainError>; fn set_last_topoheight_for_balance(&mut self, key: &PublicKey, asset: &Hash, topoheight: u64) -> Result<(), BlockchainError>; fn delete_last_topoheight_for_balance(&mut self, key: &PublicKey, asset: &Hash) -> Result<(), BlockchainError>; diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index bce9282c..facdde3c 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -8,7 +8,7 @@ use xelis_common::{ block::{BlockHeader, Block, Difficulty}, account::VersionedBalance, network::Network, }; use std::{ - collections::HashSet, + collections::{HashSet, BTreeSet}, hash::Hash as StdHash, sync::Arc }; @@ -406,6 +406,37 @@ impl Storage for SledStorage { Ok(()) } + fn get_partial_assets(&self, maximum: usize, skip: usize) -> Result, BlockchainError> { + let mut assets: BTreeSet = BTreeSet::new(); + for el in self.assets.iter().keys().skip(skip).take(maximum) { + let key = el?; + assets.insert(Hash::from_bytes(&key)?); + } + Ok(assets) + } + + fn get_partial_keys(&self, maximum: usize, skip: usize) -> Result, BlockchainError> { + let mut assets: BTreeSet = BTreeSet::new(); + for el in 
self.nonces.iter().keys().skip(skip).take(maximum) { + let key = el?; + assets.insert(PublicKey::from_bytes(&key)?); + } + Ok(assets) + } + + async fn get_balances + Send + Sync, I: Iterator + Send>(&self, asset: &Hash, keys: I) -> Result>, BlockchainError> { + let mut balances = Vec::new(); + for key in keys { + if self.has_balance_for(key.as_ref(), asset).await? { + let (_, versioned_balance) = self.get_last_balance(key.as_ref(), asset).await?; + balances.push(Some(versioned_balance.get_balance())); + } else { + balances.push(None); + } + } + Ok(balances) + } + fn get_block_executer_for_tx(&self, tx: &Hash) -> Result { self.load_from_disk(&self.txs_executed, tx.as_bytes()) } @@ -496,7 +527,7 @@ impl Storage for SledStorage { self.load_from_disk(&self.tx_blocks, hash.as_bytes()) } - fn add_block_for_tx(&mut self, tx: &Hash, block: Hash) -> Result<(), BlockchainError> { + fn add_block_for_tx(&mut self, tx: &Hash, block: &Hash) -> Result<(), BlockchainError> { trace!("add block {} for tx {}", block, tx); let mut blocks = if self.has_tx_blocks(tx)? { self.get_blocks_for_tx(tx)? 
@@ -505,7 +536,7 @@ impl Storage for SledStorage { }; if !blocks.contains(&block) { - blocks.insert(block); + blocks.insert(block.clone()); self.set_blocks_for_tx(tx, &blocks)?; } diff --git a/xelis_daemon/src/p2p/error.rs b/xelis_daemon/src/p2p/error.rs index 42711b34..56e51c1c 100644 --- a/xelis_daemon/src/p2p/error.rs +++ b/xelis_daemon/src/p2p/error.rs @@ -11,6 +11,7 @@ use std::io::Error as IOError; use std::sync::PoisonError; use thiserror::Error; +use super::packet::bootstrap_chain::StepKind; use super::packet::object::ObjectRequest; #[derive(Error, Debug)] @@ -74,7 +75,9 @@ pub enum P2pError { #[error(transparent)] BlockchainError(#[from] Box), #[error("Invalid content in peerlist file")] - InvalidPeerlist + InvalidPeerlist, + #[error("Invalid bootstrap chain step, expected {:?}, got {:?}", _0, _1)] + InvalidBootstrapStep(StepKind, StepKind) } impl From for P2pError { diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index b3046712..2f0afd73 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -7,13 +7,13 @@ pub mod chain_validator; use serde_json::Value; use xelis_common::{ - config::{VERSION, NETWORK_ID, SEED_NODES, MAX_BLOCK_SIZE, CHAIN_SYNC_DELAY, P2P_PING_DELAY, CHAIN_SYNC_REQUEST_MAX_BLOCKS, P2P_PING_PEER_LIST_DELAY, P2P_PING_PEER_LIST_LIMIT, STABLE_LIMIT, PEER_FAIL_LIMIT, CHAIN_SYNC_RESPONSE_MAX_BLOCKS, CHAIN_SYNC_TOP_BLOCKS, GENESIS_BLOCK_HASH}, + config::{VERSION, NETWORK_ID, SEED_NODES, MAX_BLOCK_SIZE, CHAIN_SYNC_DELAY, P2P_PING_DELAY, CHAIN_SYNC_REQUEST_MAX_BLOCKS, P2P_PING_PEER_LIST_DELAY, P2P_PING_PEER_LIST_LIMIT, STABLE_LIMIT, PEER_FAIL_LIMIT, CHAIN_SYNC_RESPONSE_MAX_BLOCKS, CHAIN_SYNC_TOP_BLOCKS, GENESIS_BLOCK_HASH, PRUNE_SAFETY_LIMIT}, serializer::Serializer, crypto::hash::{Hashable, Hash}, block::{BlockHeader, Block}, globals::get_current_time, immutable::Immutable }; -use crate::{core::{blockchain::Blockchain, storage::Storage}, p2p::chain_validator::ChainValidator}; +use 
crate::{core::{blockchain::Blockchain, storage::Storage}, p2p::{chain_validator::ChainValidator, packet::{bootstrap_chain::{StepRequest, StepResponse, BootstrapChainResponse, MAX_ITEMS_PER_PAGE, BlockMetadata}, inventory::{NOTIFY_MAX_LEN, NotifyInventoryRequest, NotifyInventoryResponse}}}}; use crate::core::error::BlockchainError; use crate::p2p::connection::ConnectionMessage; use crate::p2p::packet::chain::CommonPoint; @@ -30,7 +30,7 @@ use tokio::{net::{TcpListener, TcpStream}, sync::mpsc::{self, UnboundedSender, U use log::{info, warn, error, debug, trace}; use tokio::io::AsyncWriteExt; use tokio::time::{interval, timeout, sleep}; -use std::{borrow::Cow, fs, path::Path, sync::atomic::{AtomicBool, Ordering}}; +use std::{borrow::Cow, fs, path::Path, sync::atomic::{AtomicBool, Ordering}, collections::HashSet}; use std::convert::TryInto; use std::net::SocketAddr; use std::time::Duration; @@ -470,8 +470,17 @@ impl P2pServer { } if let Some(peer) = self.select_random_best_peer().await { trace!("Selected for chain sync is {}", peer); - if let Err(e) = self.request_sync_chain_for(&peer).await { - debug!("Error occured on chain sync: {}", e); + // check if we can maybe fast sync first + // otherwise, fallback on the normal chain sync + let peer_topoheight = peer.get_topoheight(); + if self.blockchain.allow_fast_sync(peer_topoheight) { + if let Err(e) = self.bootstrap_chain(&peer).await { + warn!("Error occured while fast syncing with {}: {}", peer, e); + } + } else { + if let Err(e) = self.request_sync_chain_for(&peer).await { + debug!("Error occured on chain sync with {}: {}", peer, e); + } } } } @@ -957,12 +966,40 @@ impl P2pServer { error!("Error while sending object response to sender!"); } }, - Packet::NotifyInventory(packet_wrapper) => { - trace!("Received a notify inventory from {}", peer); - let (inventory, ping) = packet_wrapper.consume(); - let inventory = inventory.into_owned(); + Packet::NotifyInventoryRequest(packet_wrapper) => { + trace!("Received a 
inventory request from {}", peer); + let (request, ping) = packet_wrapper.consume(); ping.into_owned().update_peer(peer).await?; + let request = request.into_owned(); + let mut hashes = HashSet::new(); + + let mempool = self.blockchain.get_mempool().read().await; + let txs = mempool.get_txs(); + let next_page = { + let page_id = request.page().unwrap_or(0); + let skip = page_id as usize * 1024; + + if skip < txs.len() { + for tx_hash in txs.keys().skip(skip).take(NOTIFY_MAX_LEN) { + hashes.insert(Cow::Borrowed(tx_hash.as_ref())); + } + let left = txs.len() - (hashes.len() + skip); + if left > 0 { + Some(page_id + 1) + } else { + None + } + } else { + None + } + }; + + let packet = NotifyInventoryResponse::new(next_page, Cow::Owned(hashes)); + peer.send_packet(Packet::NotifyInventoryResponse(packet)).await? + }, + Packet::NotifyInventoryResponse(inventory) => { + trace!("Received a notify inventory from {}", peer); if !peer.has_requested_inventory() { debug!("Received a notify inventory from {} but we didn't request it", peer); return Err(P2pError::InvalidPacket) @@ -974,10 +1011,22 @@ impl P2pServer { // check and add if we are missing a TX in our mempool or storage let mut missing_txs: Vec = Vec::new(); + let next_page = inventory.next(); { + let txs = inventory.get_txs(); + let total_count = txs.len(); + + // check that the response was really full if he send us another "page" + if next_page.is_some() { + if total_count != NOTIFY_MAX_LEN { + error!("Received only {} while maximum is {} elements, and tell us that there is another page", total_count, NOTIFY_MAX_LEN); + return Err(P2pError::InvalidProtocolRules) + } + } + let mempool = self.blockchain.get_mempool().read().await; let storage = self.blockchain.get_storage().read().await; - for hash in inventory.get_txs().into_owned() { + for hash in txs.into_owned() { if !mempool.contains_tx(&hash) && !storage.has_transaction(&hash).await? 
{ missing_txs.push(hash.into_owned()); } @@ -1018,7 +1067,31 @@ impl P2pServer { } }); } + + + // request the next page + if next_page.is_some() { + let packet = Cow::Owned(NotifyInventoryRequest::new(next_page)); + let ping = Cow::Owned(self.build_generic_ping_packet().await); + peer.send_packet(Packet::NotifyInventoryRequest(PacketWrapper::new(packet, ping))).await?; + peer.set_requested_inventory(true); + } + }, + Packet::BootstrapChainRequest(request) => { + self.handle_bootstrap_chain_request(peer, request.step()).await?; + }, + Packet::BootstrapChainResponse(response) => { + if let Some(sender) = peer.get_bootstrap_chain_channel().lock().await.take() { + let response = response.response(); + if let Err(e) = sender.send(response) { + error!("Error while sending bootstrap response to channel: {:?}", e); + } + } else { + error!("{} send us a bootstrap chain response but we didn't asked it", peer); + return Err(P2pError::InvalidProtocolRules) + } + } }; Ok(()) } @@ -1111,6 +1184,7 @@ impl P2pServer { let (mut blocks, top_blocks) = response.consume(); debug!("handling chain response from {}, {} blocks, {} top blocks, pop count {}", peer, blocks.len(), top_blocks.len(), pop_count); + let our_previous_topoheight = self.blockchain.get_topo_height(); let top_len = top_blocks.len(); let blocks_len = blocks.len(); @@ -1190,6 +1264,19 @@ impl P2pServer { debug!("we've synced {} on {} blocks and {} top blocks from {}", total_requested, blocks_len, top_len, peer); } + let peer_topoheight = peer.get_topoheight(); + // ask inventory of this peer if we sync from too far + // if we are not further than one sync, request the inventory + if peer_topoheight > our_previous_topoheight && peer_topoheight - our_previous_topoheight < CHAIN_SYNC_RESPONSE_MAX_BLOCKS as u64 { + let our_topoheight = self.blockchain.get_topo_height(); + // verify that we synced it partially well + if peer_topoheight > our_topoheight && peer_topoheight - our_topoheight < PRUNE_SAFETY_LIMIT { + if let Err(e) 
= self.request_inventory_of(&peer).await { + error!("Error while asking inventory to {}: {}", peer, e); + }; + } + } + Ok(()) } @@ -1277,7 +1364,7 @@ impl P2pServer { pub async fn broadcast_tx_hash(&self, tx: &Hash) { let ping = self.build_generic_ping_packet().await; - let current_height = ping.get_height(); + let current_topoheight = ping.get_topoheight(); let packet = Packet::TransactionPropagation(PacketWrapper::new(Cow::Borrowed(tx), Cow::Owned(ping))); // transform packet to bytes (so we don't need to transform it for each peer) let bytes = Bytes::from(packet.to_bytes()); @@ -1285,7 +1372,7 @@ impl P2pServer { for peer in peer_list.get_peers().values() { // check that the peer is not too far from us // otherwise we may spam him for nothing - if peer.get_height() + CHAIN_SYNC_RESPONSE_MAX_BLOCKS as u64 > current_height { + if peer.get_topoheight() + CHAIN_SYNC_RESPONSE_MAX_BLOCKS as u64 > current_topoheight { trace!("Peer {} is not too far from us, checking cache for tx hash {}", peer, tx); let mut txs_cache = peer.get_txs_cache().lock().await; // check that we didn't already send this tx to this peer or that he don't already have it @@ -1342,6 +1429,202 @@ impl P2pServer { peer_list.broadcast(packet).await; } + async fn handle_bootstrap_chain_request(self: &Arc, peer: &Arc, request: StepRequest<'_>) -> Result<(), BlockchainError> { + debug!("Handle bootstrap chain request from {}", peer); + let storage = self.blockchain.get_storage().read().await; + match request { + StepRequest::Assets(page) => { + let page = page.unwrap_or(0); + let assets = storage.get_partial_assets(MAX_ITEMS_PER_PAGE, page as usize * MAX_ITEMS_PER_PAGE)?; + let page = if assets.len() == MAX_ITEMS_PER_PAGE { + Some(page + 1) + } else { + None + }; + + peer.send_packet(Packet::BootstrapChainResponse(BootstrapChainResponse::new(StepResponse::Assets(assets, page)))).await?; + }, + StepRequest::Balances(asset, keys) => { + let balances = storage.get_balances(&asset, keys.iter()).await?; + 
peer.send_packet(Packet::BootstrapChainResponse(BootstrapChainResponse::new(StepResponse::Balances(balances)))).await?; + }, + StepRequest::Nonces(keys) => { + let mut nonces = Vec::with_capacity(keys.len()); + for key in keys.iter() { + let nonce = storage.get_nonce(key).await?; + nonces.push(nonce); + } + peer.send_packet(Packet::BootstrapChainResponse(BootstrapChainResponse::new(StepResponse::Nonces(nonces)))).await?; + }, + StepRequest::Keys(page) => { + let page = page.unwrap_or(0); + let keys = storage.get_partial_keys(MAX_ITEMS_PER_PAGE, page as usize * MAX_ITEMS_PER_PAGE)?; + let page = if keys.len() == MAX_ITEMS_PER_PAGE { + Some(page + 1) + } else { + None + }; + + peer.send_packet(Packet::BootstrapChainResponse(BootstrapChainResponse::new(StepResponse::Keys(keys, page)))).await?; + }, + StepRequest::BlocksMetadata(begin_topoheight) => { + let our_topoheight = self.blockchain.get_topo_height(); + let pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); + if pruned_topoheight > begin_topoheight || our_topoheight < STABLE_LIMIT || begin_topoheight > our_topoheight - STABLE_LIMIT { + debug!("Invalid begin topoheight received from {}", peer); + return Err(P2pError::InvalidPacket.into()) + } + + let mut blocks = Vec::with_capacity(STABLE_LIMIT as usize); + for topoheight in begin_topoheight..begin_topoheight + STABLE_LIMIT { + let hash = storage.get_hash_at_topo_height(topoheight).await?; + let supply = storage.get_supply_for_block_hash(&hash)?; + let reward = storage.get_block_reward(&hash)?; + let difficulty = storage.get_difficulty_for_block_hash(&hash).await?; + let cumulative_difficulty = storage.get_cumulative_difficulty_for_block_hash(&hash).await?; + + blocks.push(BlockMetadata { hash, supply, reward, difficulty, cumulative_difficulty }); + } + + peer.send_packet(Packet::BootstrapChainResponse(BootstrapChainResponse::new(StepResponse::BlocksMetadata(blocks)))).await?; + }, + StepRequest::Tips => { + let tips = storage.get_tips().await?; + 
peer.send_packet(Packet::BootstrapChainResponse(BootstrapChainResponse::new(StepResponse::Tips(tips)))).await?; + } + } + Ok(()) + } + + // first, fetch all assets from peer + // then, fetch all keys with its nonces and its balances + // and for the last step, retrieve last TOP_TOPOHEIGHT - STABLE_LIMIT block headers + async fn bootstrap_chain(&self, peer: &Arc) -> Result<(), BlockchainError> { + debug!("Starting fast sync with {}", peer); + + let stable_topoheight = peer.get_topoheight() - STABLE_LIMIT; + + let mut storage = self.blockchain.get_storage().write().await; + let mut step: Option = Some(StepRequest::Assets(None)); + let mut top_height = 0; + loop { + let response = if let Some(step) = step.take() { + peer.request_boostrap_chain(step).await? + } else { + break; + }; + + step = match response { + // fetch all assets from peer + StepResponse::Assets(assets, next_page) => { + for asset in assets { + debug!("Saving asset {}", asset); + storage.add_asset(&asset).await?; + } + + if next_page.is_some() { + Some(StepRequest::Assets(next_page)) + } else { + // Go to next step + Some(StepRequest::Keys(None)) + } + }, + // fetch all accounts + StepResponse::Keys(keys, next_page) => { + let borrowed_keys = keys.iter().map(|v: &xelis_common::crypto::key::PublicKey| Cow::Borrowed(v)).collect(); + let StepResponse::Nonces(nonces) = peer.request_boostrap_chain(StepRequest::Nonces(Cow::Borrowed(&borrowed_keys))).await? else { + // shouldn't happen + error!("Received an invalid StepResponse (how ?) while fetching nonces"); + return Err(P2pError::InvalidPacket.into()) + }; + + // TODO don't retrieve ALL each time but one by one + // otherwise in really long time, it may consume lot of memory + for asset in storage.get_assets().await? { + debug!("Request balances for asset {}", asset); + let StepResponse::Balances(balances) = peer.request_boostrap_chain(StepRequest::Balances(Cow::Borrowed(&asset), Cow::Borrowed(&borrowed_keys))).await? 
else { + // shouldn't happen + error!("Received an invalid StepResponse (how ?) while fetching balances"); + return Err(P2pError::InvalidPacket.into()) + }; + + // save all balances for this asset + for (key, balance) in keys.iter().zip(balances) { + // check that the account have balance for this asset + if let Some(balance) = balance { + debug!("Saving balance {} for key {}", balance, key); + let mut versioned_balance = storage.get_new_versioned_balance(key, &asset, stable_topoheight).await?; + versioned_balance.set_balance(balance); + storage.set_balance_to(key, &asset, stable_topoheight, &versioned_balance).await?; + storage.set_last_topoheight_for_balance(key, &asset, stable_topoheight)?; + } + } + } + + // save all nonces + for (key, nonce) in keys.into_iter().zip(nonces) { + debug!("Saving nonce {} for {}", nonce, key); + storage.set_nonce(&key, nonce).await?; + } + + if next_page.is_some() { + Some(StepRequest::Keys(next_page)) + } else { + // Go to next step + Some(StepRequest::BlocksMetadata(stable_topoheight)) + } + }, + StepResponse::BlocksMetadata(blocks) => { + let mut topoheight = stable_topoheight; + for metadata in blocks { + debug!("Saving block metadata {}", metadata.hash); + let OwnedObjectResponse::BlockHeader(header, hash) = peer.request_blocking_object(ObjectRequest::BlockHeader(metadata.hash)).await? 
else { + error!("Received an invalid requested object while fetching blocks metadata"); + return Err(P2pError::InvalidPacket.into()) + }; + + for tx_hash in header.get_txs_hashes() { + storage.add_block_for_tx(tx_hash, &hash)?; + } + + storage.set_supply_for_block_hash(&hash, metadata.supply)?; + storage.set_cumulative_difficulty_for_block_hash(&hash, metadata.cumulative_difficulty).await?; + storage.set_block_reward(&hash, metadata.reward)?; + + storage.set_topo_height_for_block(&hash, topoheight).await?; + + top_height = header.get_height(); + storage.add_new_block(Arc::new(header), &Vec::new(), metadata.difficulty, hash).await?; + topoheight += 1; + } + + Some(StepRequest::Tips) + }, + StepResponse::Tips(tips) => { + storage.store_tips(&tips)?; + None + }, + response => { // shouldn't happens + error!("Received bootstrap chain response {:?} but didn't asked for it", response); + return Err(P2pError::InvalidPacket.into()); + } + }; + } + + storage.set_top_topoheight(stable_topoheight + STABLE_LIMIT)?; + storage.set_top_height(top_height)?; + + Ok(()) + } + + async fn request_inventory_of(&self, peer: &Arc) -> Result<(), BlockchainError> { + debug!("Requesting inventory of {}", peer); + let packet = Cow::Owned(NotifyInventoryRequest::new(None)); + let ping = Cow::Owned(self.build_generic_ping_packet().await); + peer.send_packet(Packet::NotifyInventoryRequest(PacketWrapper::new(packet, ping))).await?; + Ok(()) + } + // this function basically send all our blocks based on topological order (topoheight) // we send up to CHAIN_SYNC_REQUEST_MAX_BLOCKS blocks id (combinaison of block hash and topoheight) // we add at the end the genesis block to be sure to be on the same chain as others peers diff --git a/xelis_daemon/src/p2p/packet/bootstrap_chain.rs b/xelis_daemon/src/p2p/packet/bootstrap_chain.rs new file mode 100644 index 00000000..b2663558 --- /dev/null +++ b/xelis_daemon/src/p2p/packet/bootstrap_chain.rs @@ -0,0 +1,308 @@ +use std::{collections::{BTreeSet, 
HashSet}, borrow::Cow}; + +use log::debug; +use xelis_common::{crypto::{hash::Hash, key::PublicKey}, serializer::{Serializer, ReaderError, Reader, Writer}, block::Difficulty}; + +// this file implements the protocol for the fast sync (bootstrapped chain) +// You will have to request through StepRequest::FetchAssets all the registered assets +// based on the size of the chain, you can have pagination or not. +// With the set of assets, you can retrieve all registered keys for it and then its balances +// Nonces need to be retrieve only one time because its common for all assets. +// The protocol is based on +// how many items we can answer per request + +pub const MAX_ITEMS_PER_PAGE: usize = 1024; + +#[derive(Debug)] +pub struct BlockMetadata { + pub hash: Hash, + pub supply: u64, + pub reward: u64, + pub difficulty: Difficulty, + pub cumulative_difficulty: Difficulty +} + +impl Serializer for BlockMetadata { + fn read(reader: &mut Reader) -> Result { + let hash = reader.read_hash()?; + let supply = reader.read_u64()?; + let reward = reader.read_u64()?; + let difficulty = reader.read_u64()?; + let cumulative_difficulty = reader.read_u64()?; + + Ok(Self { + hash, + supply, + reward, + difficulty, + cumulative_difficulty + }) + } + + fn write(&self, writer: &mut Writer) { + writer.write_hash(&self.hash); + writer.write_u64(&self.supply); + writer.write_u64(&self.reward); + writer.write_u64(&self.difficulty); + writer.write_u64(&self.cumulative_difficulty); + } +} + +#[derive(Debug, PartialEq, Eq)] +pub enum StepKind { + Assets, + Keys, + Balances, + Nonces, + BlocksMetadata, + Tips +} + +#[derive(Debug)] +pub enum StepRequest<'a> { + // Pagination + Assets(Option), + // Asset, pagination + Keys(Option), + // Asset, Accounts + Balances(Cow<'a, Hash>, Cow<'a, BTreeSet>>), + // Accounts + Nonces(Cow<'a, BTreeSet>>), + // Request blocks metadata starting topoheight + BlocksMetadata(u64), + Tips +} + +impl<'a> StepRequest<'a> { + pub fn kind(&self) -> StepKind { + match 
self { + Self::Assets(_) => StepKind::Assets, + Self::Keys(_) => StepKind::Keys, + Self::Balances(_, _) => StepKind::Balances, + Self::Nonces(_) => StepKind::Nonces, + Self::BlocksMetadata(_) => StepKind::BlocksMetadata, + Self::Tips => StepKind::Tips + } + } +} + +impl Serializer for StepRequest<'_> { + fn read(reader: &mut Reader) -> Result { + Ok(match reader.read_u8()? { + 0 => { + let page = reader.read_optional_non_zero_u64()?; + Self::Assets(page) + }, + 1 => { + let page = reader.read_optional_non_zero_u64()?; + Self::Keys(page) + }, + 2 => { + let hash = Cow::<'_, Hash>::read(reader)?; + let keys = Cow::<'_, BTreeSet>>::read(reader)?; + Self::Balances(hash, keys) + }, + 3 => { + Self::Nonces(Cow::<'_, BTreeSet>>::read(reader)?) + }, + 4 => { + Self::BlocksMetadata(reader.read_u64()?) + }, + 5 => { + Self::Tips + }, + id => { + debug!("Received invalid value for StepResponse: {}", id); + return Err(ReaderError::InvalidValue) + } + }) + } + + fn write(&self, writer: &mut Writer) { + match self { + Self::Assets(page) => { + writer.write_u8(0); + writer.write_optional_non_zero_u64(page); + }, + Self::Keys(page) => { + writer.write_u8(1); + writer.write_optional_non_zero_u64(page); + }, + Self::Balances(asset, accounts) => { + writer.write_u8(2); + writer.write_hash(asset); + accounts.write(writer); + }, + Self::Nonces(nonces) => { + writer.write_u8(3); + nonces.write(writer); + }, + Self::BlocksMetadata(blocks) => { + writer.write_u8(4); + blocks.write(writer); + }, + Self::Tips => { + writer.write_u8(5); + } + }; + } +} + +#[derive(Debug)] +pub enum StepResponse { + Assets(BTreeSet, Option), // Set of assets, pagination + Keys(BTreeSet, Option), // Set of keys, pagination + Balances(Vec>), // Balances requested + Nonces(Vec), // Nonces for requested accounts + BlocksMetadata(Vec), // top blocks metadata + Tips(HashSet) // chain tips +} + +impl StepResponse { + pub fn kind(&self) -> StepKind { + match self { + Self::Assets(_, _) => StepKind::Assets, + 
Self::Keys(_, _) => StepKind::Keys, + Self::Balances(_) => StepKind::Balances, + Self::Nonces(_) => StepKind::Nonces, + Self::BlocksMetadata(_) => StepKind::BlocksMetadata, + Self::Tips(_) => StepKind::Tips + } + } +} + +impl Serializer for StepResponse { + fn read(reader: &mut Reader) -> Result { + Ok(match reader.read_u8()? { + 0 => { + let assets = BTreeSet::::read(reader)?; + let page = reader.read_optional_non_zero_u64()?; + Self::Assets(assets, page) + }, + 1 => { + let keys = BTreeSet::::read(reader)?; + let page = reader.read_optional_non_zero_u64()?; + Self::Keys(keys, page) + }, + 2 => { + Self::Balances(Vec::>::read(reader)?) + }, + 3 => { + Self::Nonces(Vec::::read(reader)?) + }, + 4 => { + Self::BlocksMetadata(Vec::::read(reader)?) + }, + 5 => { + let count = reader.read_u8()? as usize; + let mut set = HashSet::with_capacity(count); + for _ in 0..count { + let hash = reader.read_hash()?; + set.insert(hash); + } + Self::Tips(set) + } + id => { + debug!("Received invalid value for StepResponse: {}", id); + return Err(ReaderError::InvalidValue) + } + }) + } + + fn write(&self, writer: &mut Writer) { + match self { + Self::Assets(assets, page) => { + writer.write_u8(0); + assets.write(writer); + writer.write_optional_non_zero_u64(page); + }, + Self::Keys(keys, page) => { + writer.write_u8(1); + keys.write(writer); + writer.write_optional_non_zero_u64(page); + }, + Self::Balances(balances) => { + writer.write_u8(2); + balances.write(writer); + }, + Self::Nonces(nonces) => { + writer.write_u8(3); + nonces.write(writer); + }, + Self::BlocksMetadata(blocks) => { + writer.write_u8(4); + blocks.write(writer); + }, + Self::Tips(tips) => { + writer.write_u8(5); + + writer.write_u8(tips.len() as u8); + for hash in tips { + writer.write_hash(hash); + } + } + }; + } +} + +#[derive(Debug)] +pub struct BootstrapChainRequest<'a> { + step: StepRequest<'a> +} + +impl<'a> BootstrapChainRequest<'a> { + pub fn new(step: StepRequest<'a>) -> Self { + Self { + step + } + } + + 
pub fn kind(&self) -> StepKind { + self.step.kind() + } + + pub fn step(self) -> StepRequest<'a> { + self.step + } +} + +impl Serializer for BootstrapChainRequest<'_> { + fn read(reader: &mut Reader) -> Result { + Ok(Self::new(StepRequest::read(reader)?)) + } + + fn write(&self, writer: &mut Writer) { + self.step.write(writer); + } +} + +#[derive(Debug)] +pub struct BootstrapChainResponse { + response: StepResponse +} + +impl BootstrapChainResponse { + pub fn new(response: StepResponse) -> Self { + Self { + response + } + } + + pub fn kind(&self) -> StepKind { + self.response.kind() + } + + pub fn response(self) -> StepResponse { + self.response + } +} + +impl Serializer for BootstrapChainResponse { + fn read(reader: &mut Reader) -> Result { + Ok(Self::new(StepResponse::read(reader)?)) + } + + fn write(&self, writer: &mut Writer) { + self.response.write(writer); + } +} diff --git a/xelis_daemon/src/p2p/packet/inventory.rs b/xelis_daemon/src/p2p/packet/inventory.rs index a95f7d50..1a1ff527 100644 --- a/xelis_daemon/src/p2p/packet/inventory.rs +++ b/xelis_daemon/src/p2p/packet/inventory.rs @@ -2,16 +2,52 @@ use std::{borrow::Cow, collections::HashSet}; use xelis_common::{crypto::hash::Hash, serializer::{Serializer, ReaderError, Reader, Writer}}; -pub const NOTIFY_MAX_LEN: usize = 512; // 512 * 32 bytes = 16KB +pub const NOTIFY_MAX_LEN: usize = 1024; // 1024 * 32 bytes = 32KB #[derive(Debug, Clone)] -pub struct NotifyInventory<'a> { +pub struct NotifyInventoryRequest { + page: Option, +} + +impl NotifyInventoryRequest { + pub fn new(page: Option) -> Self { + Self { + page + } + } + + pub fn page(self) -> Option { + self.page + } +} + +impl Serializer for NotifyInventoryRequest { + fn read(reader: &mut Reader) -> Result { + let page = reader.read_optional_non_zero_u8()?; + Ok(Self::new(page)) + } + + fn write(&self, writer: &mut Writer) { + writer.write_optional_non_zero_u8(self.page); + } +} + +#[derive(Debug)] +pub struct NotifyInventoryResponse<'a> { + next: Option, 
txs: Cow<'a, HashSet>>, } -impl<'a> NotifyInventory<'a> { - pub fn new(txs: Cow<'a, HashSet>>) -> Self { - Self { txs } +impl<'a> NotifyInventoryResponse<'a> { + pub fn new(next: Option, txs: Cow<'a, HashSet>>) -> Self { + Self { + next, + txs + } + } + + pub fn next(&self) -> Option { + self.next } pub fn get_txs(self) -> Cow<'a, HashSet>> { @@ -19,8 +55,9 @@ impl<'a> NotifyInventory<'a> { } } -impl<'a> Serializer for NotifyInventory<'a> { +impl<'a> Serializer for NotifyInventoryResponse<'a> { fn read(reader: &mut Reader) -> Result { + let next = reader.read_optional_non_zero_u8()?; let count = reader.read_u16()?; if count > NOTIFY_MAX_LEN as u16 { return Err(ReaderError::InvalidSize); @@ -30,11 +67,11 @@ impl<'a> Serializer for NotifyInventory<'a> { for _ in 0..count { txs.insert(Cow::Owned(reader.read_hash()?)); } - - Ok(Self::new(Cow::Owned(txs))) + Ok(Self::new(next, Cow::Owned(txs))) } fn write(&self, writer: &mut Writer) { + writer.write_optional_non_zero_u8(self.next); writer.write_u16(self.txs.len() as u16); for tx in self.txs.iter() { writer.write_hash(tx); diff --git a/xelis_daemon/src/p2p/packet/mod.rs b/xelis_daemon/src/p2p/packet/mod.rs index da64eb6a..ae40261a 100644 --- a/xelis_daemon/src/p2p/packet/mod.rs +++ b/xelis_daemon/src/p2p/packet/mod.rs @@ -3,8 +3,10 @@ pub mod chain; pub mod ping; pub mod object; pub mod inventory; +pub mod bootstrap_chain; -use self::inventory::NotifyInventory; +use self::bootstrap_chain::{BootstrapChainRequest, BootstrapChainResponse}; +use self::inventory::{NotifyInventoryResponse, NotifyInventoryRequest}; use self::object::{ObjectRequest, ObjectResponse}; use self::chain::{ChainRequest, ChainResponse}; use self::handshake::Handshake; @@ -26,7 +28,10 @@ const CHAIN_RESPONSE_ID: u8 = 4; const PING_ID: u8 = 5; const OBJECT_REQUEST_ID: u8 = 6; const OBJECT_RESPONSE_ID: u8 = 7; -const NOTIFY_INV_ID: u8 = 8; +const NOTIFY_INV_REQUEST_ID: u8 = 8; +const NOTIFY_INV_RESPONSE_ID: u8 = 9; +const BOOTSTRAP_CHAIN_REQUEST_ID: u8 = 
10; +const BOOTSTRAP_CHAIN_RESPONSE_ID: u8 = 11; // PacketWrapper allows us to link any Packet to a Ping #[derive(Debug)] @@ -77,7 +82,10 @@ pub enum Packet<'a> { Ping(Cow<'a, Ping<'a>>), ObjectRequest(Cow<'a, ObjectRequest>), ObjectResponse(ObjectResponse<'a>), - NotifyInventory(PacketWrapper<'a, NotifyInventory<'a>>) + NotifyInventoryRequest(PacketWrapper<'a, NotifyInventoryRequest>), + NotifyInventoryResponse(NotifyInventoryResponse<'a>), + BootstrapChainRequest(BootstrapChainRequest<'a>), + BootstrapChainResponse(BootstrapChainResponse) } impl<'a> Serializer for Packet<'a> { @@ -93,6 +101,10 @@ impl<'a> Serializer for Packet<'a> { PING_ID => Packet::Ping(Cow::Owned(Ping::read(reader)?)), OBJECT_REQUEST_ID => Packet::ObjectRequest(Cow::Owned(ObjectRequest::read(reader)?)), OBJECT_RESPONSE_ID => Packet::ObjectResponse(ObjectResponse::read(reader)?), + NOTIFY_INV_REQUEST_ID => Packet::NotifyInventoryRequest(PacketWrapper::read(reader)?), + NOTIFY_INV_RESPONSE_ID => Packet::NotifyInventoryResponse(NotifyInventoryResponse::read(reader)?), + BOOTSTRAP_CHAIN_REQUEST_ID => Packet::BootstrapChainRequest(BootstrapChainRequest::read(reader)?), + BOOTSTRAP_CHAIN_RESPONSE_ID => Packet::BootstrapChainResponse(BootstrapChainResponse::read(reader)?), id => { error!("invalid packet id received: {}", id); return Err(ReaderError::InvalidValue) @@ -110,7 +122,10 @@ impl<'a> Serializer for Packet<'a> { Packet::Ping(ping) => (PING_ID, ping.as_ref()), Packet::ObjectRequest(request) => (OBJECT_REQUEST_ID, request.as_ref()), Packet::ObjectResponse(response) => (OBJECT_RESPONSE_ID, response), - Packet::NotifyInventory(inventory) => (NOTIFY_INV_ID, inventory) + Packet::NotifyInventoryRequest(request) => (NOTIFY_INV_REQUEST_ID, request), + Packet::NotifyInventoryResponse(inventory) => (NOTIFY_INV_RESPONSE_ID, inventory), + Packet::BootstrapChainRequest(request) => (BOOTSTRAP_CHAIN_REQUEST_ID, request), + Packet::BootstrapChainResponse(response) => (BOOTSTRAP_CHAIN_RESPONSE_ID, response) 
}; let packet = serializer.to_bytes(); diff --git a/xelis_daemon/src/p2p/packet/ping.rs b/xelis_daemon/src/p2p/packet/ping.rs index 01b976b7..cfdb60c0 100644 --- a/xelis_daemon/src/p2p/packet/ping.rs +++ b/xelis_daemon/src/p2p/packet/ping.rs @@ -87,6 +87,10 @@ impl<'a> Ping<'a> { self.height } + pub fn get_topoheight(&self) -> u64 { + self.topoheight + } + pub fn set_peers(&mut self, peers: Vec) { self.peer_list = peers; } diff --git a/xelis_daemon/src/p2p/peer.rs b/xelis_daemon/src/p2p/peer.rs index 0893d831..7db73133 100644 --- a/xelis_daemon/src/p2p/peer.rs +++ b/xelis_daemon/src/p2p/peer.rs @@ -1,11 +1,12 @@ use lru::LruCache; -use xelis_common::config::{PEER_FAIL_TIME_RESET, STABLE_LIMIT, TIPS_LIMIT}; +use xelis_common::config::{PEER_FAIL_TIME_RESET, STABLE_LIMIT, TIPS_LIMIT, PEER_TIMEOUT_BOOTSTRAP_STEP}; use xelis_common::globals::get_current_time; use xelis_common::{ crypto::hash::Hash, config::PEER_TIMEOUT_REQUEST_OBJECT, serializer::Serializer }; +use super::packet::bootstrap_chain::{StepRequest, BootstrapChainRequest, StepResponse}; use super::packet::object::{ObjectRequest, OwnedObjectResponse}; use super::peer_list::SharedPeerList; use super::connection::{Connection, ConnectionMessage}; @@ -21,7 +22,7 @@ use std::collections::{HashMap, HashSet}; use tokio::sync::Mutex; use std::borrow::Cow; use bytes::Bytes; -use log::{warn, trace}; +use log::{warn, trace, debug}; pub type RequestedObjects = HashMap>; @@ -50,8 +51,9 @@ pub struct Peer { blocks_propagation: Mutex>, // last blocks propagated to/from this peer last_inventory: AtomicU64, // last time we got an inventory packet from this peer requested_inventory: AtomicBool, // if we requested this peer to send us an inventory notification - pruned_topoheight: AtomicU64, - is_pruned: AtomicBool + pruned_topoheight: AtomicU64, // pruned topoheight if its a pruned node + is_pruned: AtomicBool, // cannot be set to false if its already to true (protocol rules) + bootstrap_chain: Mutex>> } impl Peer { @@ -82,7 
+84,8 @@ impl Peer { last_inventory: AtomicU64::new(0), requested_inventory: AtomicBool::new(false), pruned_topoheight: AtomicU64::new(pruned_topoheight.unwrap_or(0)), - is_pruned: AtomicBool::new(pruned_topoheight.is_some()) + is_pruned: AtomicBool::new(pruned_topoheight.is_some()), + bootstrap_chain: Mutex::new(None) } } @@ -266,6 +269,40 @@ impl Peer { Ok(object) } + pub async fn request_boostrap_chain(&self, step: StepRequest<'_>) -> Result { + debug!("Requesting bootstrap chain step: {:?}", step); + let step_kind = step.kind(); + let (sender, receiver) = tokio::sync::oneshot::channel(); + { + let mut sender_lock = self.bootstrap_chain.lock().await; + *sender_lock = Some(sender); + } + + // send the packet + self.send_packet(Packet::BootstrapChainRequest(BootstrapChainRequest::new(step))).await?; + + // wait on the response + let response: StepResponse = match timeout(Duration::from_millis(PEER_TIMEOUT_BOOTSTRAP_STEP), receiver).await { + Ok(res) => res?, + Err(e) => { + trace!("Requested bootstrap chain step {:?} has timed out", step_kind); + return Err(P2pError::AsyncTimeOut(e)); + } + }; + + // check that the response is what we asked for + let response_kind = response.kind(); + if response_kind != step_kind { + return Err(P2pError::InvalidBootstrapStep(step_kind, response_kind)) + } + + Ok(response) + } + + pub fn get_bootstrap_chain_channel(&self) -> &Mutex>> { + &self.bootstrap_chain + } + pub fn get_peers(&self) -> &Mutex> { &self.peers } From 49307526da21efc562ab021557e94d72b1f952c7 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 20 May 2023 01:31:27 +0200 Subject: [PATCH 68/74] daemon: fast sync --- README.md | 49 ++- xelis_common/src/account/mod.rs | 54 +++ xelis_common/src/api/daemon.rs | 13 +- xelis_daemon/src/core/blockchain.rs | 170 +++----- xelis_daemon/src/core/error.rs | 4 + xelis_daemon/src/core/storage/mod.rs | 33 +- xelis_daemon/src/core/storage/sled.rs | 381 +++++++++++++++--- xelis_daemon/src/p2p/mod.rs | 143 ++++--- 
.../src/p2p/packet/bootstrap_chain.rs | 161 ++++---- xelis_daemon/src/p2p/packet/ping.rs | 4 +- xelis_daemon/src/p2p/peer.rs | 1 + xelis_daemon/src/rpc/rpc.rs | 6 +- xelis_wallet/src/api.rs | 5 +- 13 files changed, 678 insertions(+), 346 deletions(-) diff --git a/README.md b/README.md index e0c888a3..75dd6b5a 100644 --- a/README.md +++ b/README.md @@ -92,6 +92,14 @@ But your balances are still up-to-date with the chain and if your wallets alread The security of the chain is not reduced as all your blocks were already verified by your own node locally. +## Fast Sync + +Fast sync mode allow you to sync really fast the necessary data only to run a correct and valid version of the chain. For this we request a peer +to send us its chain state at a stable point, which include all accounts nonces, assets, balances, top blocks. +So in future, when the chain will be really heavy, anyone can still join it by using fast sync system, which is compatible with the pruning mode. + +**WARNING**: You should use fast sync mode only with a trusted peer, because they can send you a potential fake chain. + ## Client Protocol XELIS integrate along with BlockDAG a way to accept multiple times the same TX and only execute it one time. @@ -187,23 +195,30 @@ Its also backed by a cache per peer to knows if the transaction was already rece All theses data are saved in plaintext. 
-| Tree | Key Type | Value Type | Comment | -|:---------------------:|:----------:|:-----------------:|:---------------------------------------------------------:| -| transactions | Hash | Transaction | Save the whole transaction based on its hash | -| blocks | Hash | Block Header | Save the block header only based on its hash | -| blocks_at_height | Integer | Array of Hash | Save all blocks hash at a specific height | -| extra | Bytes | No specific type | Actually used to save the highest topo height and TIPS | -| topo_by_hash | Hash | Integer | Save a block hash at a specific topo height | -| hash_by_topo | Integer | Hash | Save a topo height for a specific block hash | -| cumulative_difficulty | Hash | Integer | Save the cumulative difficulty for each block hash | -| assets | Hash | No Value | Used to verify if an assets is well registered and usable | -| nonces | Public Key | Integer | Nonce used to prevent replay attacks on TXs | -| rewards | Hash | Integer | Save the block reward | -| supply | Hash | Integer | Calculated supply (past + block reward) at each block | -| difficulty | Hash | Integer | Difficulty for each block | -| tx_blocks | Hash | Array of Hash | All blocks in which this TX hash is included | -| assets_hash | Public Key | Integer | Asset hash with last topoheight of versioned balance | -| assets_balances | Public Key | Versioned Balance | Tree name is hash of asset + topoheight | +| Tree | Key Type | Value Type | Comment | +|:---------------------:|:----------:|:-----------------:|:------------------------------------------------------:| +| transactions | Hash | Transaction | Save the whole transaction based on its hash | +| blocks | Hash | Block Header | Save the block header only based on its hash | +| blocks_at_height | Integer | Array of Hash | Save all blocks hash at a specific height | +| extra | Bytes | No specific type | Actually used to save the highest topo height and TIPS | +| topo_by_hash | Hash | Integer | Save a block hash at a 
specific topo height | +| hash_by_topo | Integer | Hash | Save a topo height for a specific block hash | +| cumulative_difficulty | Hash | Integer | Save the cumulative difficulty for each block hash | +| assets | Hash | Integer | Verify if an assets exist and its registration height | +| nonces | Public Key | Integer | Store the highest topoheight of versioned nonce | +| nonces_topoheight | Public Key | Versioned Nonce | Tree name is composed of prefix + topoheight | +| rewards | Hash | Integer | Save the block reward | +| supply | Hash | Integer | Calculated supply (past + block reward) at each block | +| difficulty | Hash | Integer | Difficulty for each block | +| tx_blocks | Hash | Array of Hash | All blocks in which this TX hash is included | +| assets_hash | Public Key | Integer | Asset hash with last topoheight of versioned balance | +| assets_balances | Public Key | Versioned Balance | Tree name is hash of asset + topoheight | + +**NOTE**: +- Balances and nonces are versioned, which means they are stored each time a change happened on disk. +- Assets registered have in value their topoheight at which it was registered. + +The database engine used is sled. It may changes in future. 
## Wallet diff --git a/xelis_common/src/account/mod.rs b/xelis_common/src/account/mod.rs index 6b6adec0..32c57a8c 100644 --- a/xelis_common/src/account/mod.rs +++ b/xelis_common/src/account/mod.rs @@ -62,4 +62,58 @@ impl Serializer for VersionedBalance { previous_topoheight }) } +} + +#[derive(Clone, Serialize, Deserialize)] +pub struct VersionedNonce { + nonce: u64, + previous_topoheight: Option, +} + +impl VersionedNonce { + pub fn new(nonce: u64, previous_topoheight: Option) -> Self { + Self { + nonce, + previous_topoheight + } + } + + pub fn get_nonce(&self) -> u64 { + self.nonce + } + + pub fn set_nonce(&mut self, value: u64) { + self.nonce = value; + } + + pub fn get_previous_topoheight(&self) -> Option { + self.previous_topoheight + } + + pub fn set_previous_topoheight(&mut self, previous_topoheight: Option) { + self.previous_topoheight = previous_topoheight; + } +} + +impl Serializer for VersionedNonce { + fn write(&self, writer: &mut Writer) { + writer.write_u64(&self.nonce); + if let Some(topo) = &self.previous_topoheight { + writer.write_u64(topo); + } + } + + fn read(reader: &mut Reader) -> Result { + let nonce = reader.read_u64()?; + let previous_topoheight = if reader.size() == 0 { + None + } else { + Some(reader.read_u64()?) 
+ }; + + Ok(Self { + nonce, + previous_topoheight + }) + } } \ No newline at end of file diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index 4a7130fd..447d7588 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -2,7 +2,7 @@ use std::{borrow::Cow, collections::HashSet}; use serde::{Deserialize, Serialize}; -use crate::{crypto::{hash::Hash, address::Address}, account::VersionedBalance, network::Network, block::Difficulty}; +use crate::{crypto::{hash::Hash, address::Address}, account::{VersionedBalance, VersionedNonce}, network::Network, block::Difficulty}; use super::DataHash; @@ -94,6 +94,15 @@ pub struct GetBalanceAtTopoHeightParams<'a> { #[derive(Serialize, Deserialize)] pub struct GetNonceParams<'a> { pub address: Cow<'a, Address<'a>>, + #[serde(default)] + pub topoheight: Option +} + +#[derive(Serialize, Deserialize)] +pub struct GetNonceResult { + pub topoheight: u64, + #[serde(flatten)] + pub version: VersionedNonce } #[derive(Serialize, Deserialize)] @@ -195,4 +204,4 @@ pub struct TransactionExecutedEvent<'a> { pub block_hash: Cow<'a, Hash>, pub tx_hash: Cow<'a, Hash>, pub topoheight: u64, -} \ No newline at end of file +} diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 4c827d15..9e14c811 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -229,13 +229,28 @@ impl Blockchain { info!("All modules are now stopped!"); } + pub async fn reload_from_disk(&self, storage: &S) -> Result<(), BlockchainError> { + let topoheight = storage.get_top_topoheight()?; + let height = storage.get_top_height()?; + self.topoheight.store(topoheight, Ordering::SeqCst); + self.height.store(height, Ordering::SeqCst); + + let tips = storage.get_tips().await?; + let (_, stable_height) = self.find_common_base(storage, &tips).await?; + self.stable_height.store(stable_height, Ordering::SeqCst); + + let difficulty = 
self.get_difficulty_at_tips(storage, &tips.into_iter().collect()).await?; + self.difficulty.store(difficulty, Ordering::SeqCst); + Ok(()) + } + // function to include the genesis block and register the public dev key. async fn create_genesis_block(&self) -> Result<(), BlockchainError> { let mut storage = self.storage.write().await; // register XELIS asset - debug!("Registering XELIS asset: {}", XELIS_ASSET); - storage.add_asset(&XELIS_ASSET).await?; + debug!("Registering XELIS asset: {} at topoheight 0", XELIS_ASSET); + storage.add_asset(&XELIS_ASSET, 0).await?; let genesis_block = if GENESIS_BLOCK.len() != 0 { info!("De-serializing genesis block..."); @@ -298,12 +313,9 @@ impl Blockchain { Ok(()) } - // verify if we can do fast sync with this peer - // for this, we check that user allowed the fast sync mode - // we also check that the peer topoheight is greater than 2x times the prune safety limit - // and we should be sure to not perform fast sync on a already-synced chain. - pub fn allow_fast_sync(&self, peer_topoheight: u64) -> bool { - self.allow_fast_sync_mode && peer_topoheight > PRUNE_SAFETY_LIMIT * 2 && self.get_topo_height() == 0 + // fast sync can only happens when we are at topoheight 0 (no blocks included) + pub fn is_fast_sync_mode_enabled(&self) -> bool { + self.allow_fast_sync_mode && self.get_topo_height() == 0 } pub async fn prune_until_topoheight(&self, topoheight: u64) -> Result { @@ -338,6 +350,7 @@ impl Blockchain { let assets = storage.get_assets().await?; // create snapshots of balances to located_sync_topoheight storage.create_snapshot_balances_at_topoheight(&assets, located_sync_topoheight).await?; + storage.create_snapshot_nonces_at_topoheight(located_sync_topoheight).await?; // delete all blocks until the new topoheight for topoheight in last_pruned_topoheight..located_sync_topoheight { @@ -350,6 +363,9 @@ impl Blockchain { storage.delete_versioned_balances_for_asset_at_topoheight(asset, topoheight).await?; } + // delete nonces versions 
+ storage.delete_versioned_nonces_at_topoheight(topoheight).await?; + // delete transactions for this block for tx_hash in block_header.get_txs_hashes() { if storage.has_transaction(tx_hash).await? { @@ -531,7 +547,7 @@ impl Blockchain { } // find the common base (block hash and block height) of all tips - async fn find_common_base<'a, I: IntoIterator + Copy>(&self, storage: &S, tips: I) -> Result<(Hash, u64), BlockchainError> { + pub async fn find_common_base<'a, I: IntoIterator + Copy>(&self, storage: &S, tips: I) -> Result<(Hash, u64), BlockchainError> { debug!("Searching for common base for tips {}", tips.into_iter().map(|h| h.to_string()).collect::>().join(", ")); let mut best_height = 0; // first, we check the best (highest) height of all tips @@ -791,6 +807,7 @@ impl Blockchain { Ok(best_difficulty * 91 / 100 < block_difficulty) } + // TODO generic tips type pub async fn get_difficulty_at_tips(&self, provider: &D, tips: &Vec) -> Result { if tips.len() == 0 { // Genesis difficulty return Ok(GENESIS_BLOCK_DIFFICULTY) @@ -973,9 +990,9 @@ impl Blockchain { let account_nonce = if let Some(nonce) = nonces.get(transaction.get_owner()) { *nonce } else { - let nonce = storage.get_nonce(transaction.get_owner()).await?; - nonces.insert(transaction.get_owner(), nonce); - nonce + let (_, version) = storage.get_last_nonce(transaction.get_owner()).await?; + nonces.insert(transaction.get_owner(), version.get_nonce()); + version.get_nonce() }; if account_nonce < transaction.get_nonce() { @@ -1327,6 +1344,7 @@ impl Blockchain { let mut total_fees = 0; // compute rewards & execute txs + let mut local_nonces = HashMap::new(); for (tx, tx_hash) in block.get_transactions().iter().zip(block.get_txs_hashes()) { // execute all txs // TODO improve it (too much read/write that can be refactored) if !storage.has_block_linked_to_tx(&tx_hash, &hash)? 
{ @@ -1352,7 +1370,7 @@ impl Blockchain { trace!("Executing tx {} in block {}", tx_hash, hash); storage.set_tx_executed_in_block(tx_hash, &hash)?; - self.execute_transaction(storage, &tx, &mut nonces, &mut balances, highest_topo).await?; + self.execute_transaction(storage, &tx, &mut local_nonces, &mut balances, highest_topo).await?; // if the rpc_server is enable, track events if rpc_server.is_some() { let value = json!(TransactionExecutedEvent { @@ -1369,10 +1387,13 @@ impl Blockchain { // reward the miner self.reward_miner(storage, &block, block_reward, total_fees, &mut balances, highest_topo).await?; - // save nonces for each pubkey - for (key, nonce) in &nonces { - trace!("Saving nonce {} for {}", nonce, key); - storage.set_nonce(key, *nonce).await?; + // save nonces for each pubkey for new topoheight + for (key, nonce) in local_nonces { + trace!("Saving nonce {} for {} at topoheight {}", nonce, key, highest_topo); + storage.set_nonce_at_topoheight(&key, nonce, highest_topo).await?; + + // insert in "global" nonces map for easier mempool cleaning + nonces.insert(key, nonce); } // save balances for each topoheight @@ -1623,86 +1644,22 @@ impl Blockchain { let current_height = self.get_height(); let current_topoheight = self.get_topo_height(); warn!("Rewind chain with count = {}, height = {}, topoheight = {}", count, current_height, current_topoheight); - let (height, topoheight, txs, miners) = storage.pop_blocks(current_height, current_topoheight, count as u64).await?; - debug!("New topoheight: {} (diff: {})", topoheight, current_topoheight - topoheight); - // rewind all txs - { - let mut keys = HashSet::new(); - // merge miners keys - for key in &miners { - debug!("Adding miner key {}", key); - keys.insert(key); - } - - // Add dev address in rewinding in case we receive dev fees - if DEV_FEE_PERCENT != 0 { - debug!("Adding dev key {}", *DEV_PUBLIC_KEY); - keys.insert(&DEV_PUBLIC_KEY); - } - - let mut nonces = HashMap::new(); - let mut assets: HashSet<&Hash> = 
HashSet::new(); - // add native asset (because its necessary for fees) - assets.insert(&XELIS_ASSET); - - for (hash, tx) in &txs { - debug!("Rewinding tx hash: {}", hash); - self.rewind_transaction(storage, tx, &mut keys, &mut nonces, &mut assets).await?; - } - - // lowest previous versioned balances topoheight for each key - let mut balances: HashMap<&PublicKey, HashMap<&Hash, Option>> = HashMap::new(); - // delete all versioned balances topoheight per topoheight - for i in (topoheight..=current_topoheight).rev() { - debug!("Clearing balances at topoheight {}", i); - // do it for every keys detected - for key in &keys { - for asset in &assets { - if storage.has_balance_at_exact_topoheight(key, asset, i).await? { - debug!("Deleting balance {} at topoheight {} for {}", asset, i, key); - let version = storage.delete_balance_at_topoheight(key, &asset, i).await?; - let previous = version.get_previous_topoheight(); - debug!("Previous balance is {:?}", previous); - let assets = balances.entry(key).or_insert_with(HashMap::new); - assets.insert(asset, previous); - } - } - } - } - - // apply all changes: update last topoheight balances changes of each key - for (key, previous) in balances { - for (asset, last) in previous { - match last { - Some(topo) => { - debug!("Set last topoheight balance for {} {} to {}", key, asset, topo); - storage.set_last_topoheight_for_balance(key, asset, topo)?; - }, - None => { - debug!("delete last topoheight balance for {} {}", key, asset); - storage.delete_last_topoheight_for_balance(key, asset)?; - } - }; - } - } - - // apply all changes to nonce - for (key, nonce) in nonces { - debug!("Set nonce for {} to {}", key, nonce); - storage.set_nonce(key, nonce).await?; - } + let (new_height, new_topoheight, txs) = storage.pop_blocks(current_height, current_topoheight, count as u64).await?; + debug!("New topoheight: {} (diff: {})", new_topoheight, current_topoheight - new_topoheight); + { debug!("Locking mempool"); let mut mempool = 
self.mempool.write().await; for (hash, tx) in txs { - debug!("Adding TX {} to mempool", hash); + debug!("Trying to add TX {} to mempool again", hash); if let Err(e) = self.add_tx_for_mempool(&storage, &mut mempool, tx.as_ref().clone(), hash, false).await { debug!("TX rewinded is not compatible anymore: {}", e); } } } - self.height.store(height, Ordering::Release); - self.topoheight.store(topoheight, Ordering::Release); + + self.height.store(new_height, Ordering::Release); + self.topoheight.store(new_topoheight, Ordering::Release); // update stable height { let tips = storage.get_tips().await?; @@ -1710,7 +1667,7 @@ impl Blockchain { self.stable_height.store(height, Ordering::Release); } - Ok(topoheight) + Ok(new_topoheight) } // verify the transaction and returns fees available @@ -1809,7 +1766,8 @@ impl Blockchain { let nonce = match nonces.entry(tx.get_owner()) { Entry::Vacant(entry) => { let nonce = if storage.has_nonce(tx.get_owner()).await? { - storage.get_nonce(tx.get_owner()).await? 
+ let (_, version) = storage.get_last_nonce(tx.get_owner()).await?; + version.get_nonce() } else { 0 }; @@ -1825,7 +1783,8 @@ impl Blockchain { // we increment it in case any new tx for same owner is following *nonce += 1; } else { - let nonce = storage.get_nonce(tx.get_owner()).await?; + let (_, version) = storage.get_last_nonce(tx.get_owner()).await?; + let nonce = version.get_nonce(); if nonce != tx.get_nonce() { return Err(BlockchainError::InvalidTxNonce(tx.get_nonce(), nonce, tx.get_owner().clone())) } @@ -1920,35 +1879,6 @@ impl Blockchain { Ok(()) } - - // rewind a transaction, save all keys used in a TX (sender / receiver) and update nonces with the lowest available - async fn rewind_transaction<'a>(&self, _: &mut S, transaction: &'a Transaction, keys: &mut HashSet<&'a PublicKey>, nonces: &mut HashMap<&'a PublicKey, u64>, assets: &mut HashSet<&'a Hash>) -> Result<(), BlockchainError> { - // add sender - keys.insert(transaction.get_owner()); - - // TODO for Smart Contracts we will have to rewind them too - match transaction.get_data() { - TransactionType::Transfer(txs) => { - for output in txs { - keys.insert(&output.to); - assets.insert(&output.asset); - } - }, - TransactionType::Burn { asset, amount: _ } => { - assets.insert(asset); - }, - _ => { - return Err(BlockchainError::SmartContractTodo) - } - } - - // keep the lowest nonce available - let nonce = nonces.entry(transaction.get_owner()).or_insert(transaction.get_nonce()); - if *nonce > transaction.get_nonce() { - *nonce = transaction.get_nonce(); - } - Ok(()) - } } pub fn get_block_reward(supply: u64) -> u64 { diff --git a/xelis_daemon/src/core/error.rs b/xelis_daemon/src/core/error.rs index 3847c93e..523a9d55 100644 --- a/xelis_daemon/src/core/error.rs +++ b/xelis_daemon/src/core/error.rs @@ -151,6 +151,10 @@ pub enum BlockchainError { NoBalance(PublicKey), #[error("No balance changes for {} at specific topoheight and asset", _0)] NoBalanceChanges(PublicKey), + #[error("No nonce found on disk for 
{}", _0)] + NoNonce(PublicKey), + #[error("No nonce changes for {} at specific topoheight", _0)] + NoNonceChanges(PublicKey), #[error("Overflow detected")] Overflow, #[error("Error, block include a dead tx {}", _0)] diff --git a/xelis_daemon/src/core/storage/mod.rs b/xelis_daemon/src/core/storage/mod.rs index 370b0108..99bfedb2 100644 --- a/xelis_daemon/src/core/storage/mod.rs +++ b/xelis_daemon/src/core/storage/mod.rs @@ -6,7 +6,7 @@ use async_trait::async_trait; use xelis_common::{ crypto::{key::PublicKey, hash::Hash}, transaction::Transaction, - block::{Block, BlockHeader, Difficulty}, account::VersionedBalance, immutable::Immutable, network::Network, + block::{Block, BlockHeader, Difficulty}, account::{VersionedBalance, VersionedNonce}, immutable::Immutable, network::Network, }; use crate::core::error::BlockchainError; @@ -29,16 +29,24 @@ pub trait Storage: DifficultyProvider + Sync + Send + 'static { fn get_pruned_topoheight(&self) -> Result, BlockchainError>; fn set_pruned_topoheight(&mut self, pruned_topoheight: u64) -> Result<(), BlockchainError>; + // delete block at topoheight, and all pointers (hash_at_topo, topo_by_hash, reward, supply, diff, cumulative diff...) 
async fn delete_block_at_topoheight(&mut self, topoheight: u64) -> Result, BlockchainError>; async fn delete_tx(&mut self, hash: &Hash) -> Result, BlockchainError>; + // delete versioned balances at a specific topoheight async fn delete_versioned_balances_for_asset_at_topoheight(&mut self, asset: &Hash, topoheight: u64) -> Result<(), BlockchainError>; + // delete versioned nonces at a specific topoheight + async fn delete_versioned_nonces_at_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError>; + // delete all versions of balances under the specified topoheight + // for those who don't have more recents, set it to the topoheight + // for those above it, cut the chain by deleting the previous topoheight when it's going under async fn create_snapshot_balances_at_topoheight(&mut self, assets: &Vec, topoheight: u64) -> Result<(), BlockchainError>; + // same as above but for nonces + async fn create_snapshot_nonces_at_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError>; - fn get_partial_assets(&self, maximum: usize, skip: usize) -> Result, BlockchainError>; - fn get_partial_keys(&self, maximum: usize, skip: usize) -> Result, BlockchainError>; - - async fn get_balances + Send + Sync, I: Iterator + Send>(&self, asset: &Hash, keys: I) -> Result>, BlockchainError>; + async fn get_partial_assets(&self, maximum: usize, skip: usize, maximum_topoheight: u64) -> Result, BlockchainError>; + async fn get_partial_keys(&self, maximum: usize, skip: usize, maximum_topoheight: u64) -> Result, BlockchainError>; + async fn get_balances<'a, I: Iterator + Send>(&self, asset: &Hash, keys: I, maximum_topoheight: u64) -> Result>, BlockchainError>; fn get_block_executer_for_tx(&self, tx: &Hash) -> Result; fn set_tx_executed_in_block(&mut self, tx: &Hash, block: &Hash) -> Result<(), BlockchainError>; @@ -52,8 +60,9 @@ pub trait Storage: DifficultyProvider + Sync + Send + 'static { fn has_network(&self) -> Result; async fn asset_exist(&self, asset: &Hash) -> 
Result; - async fn add_asset(&mut self, asset: &Hash) -> Result<(), BlockchainError>; + async fn add_asset(&mut self, asset: &Hash, topoheight: u64) -> Result<(), BlockchainError>; async fn get_assets(&self) -> Result, BlockchainError>; + fn get_asset_registration_topoheight(&self, asset: &Hash) -> Result; fn has_tx_blocks(&self, hash: &Hash) -> Result; fn has_block_linked_to_tx(&self, tx: &Hash, block: &Hash) -> Result; @@ -74,8 +83,14 @@ pub trait Storage: DifficultyProvider + Sync + Send + 'static { async fn set_balance_at_topoheight(&mut self, asset: &Hash, topoheight: u64, key: &PublicKey, balance: &VersionedBalance) -> Result<(), BlockchainError>; async fn has_nonce(&self, key: &PublicKey) -> Result; - async fn get_nonce(&self, key: &PublicKey) -> Result; - async fn set_nonce(&mut self, key: &PublicKey, nonce: u64) -> Result<(), BlockchainError>; + async fn has_nonce_at_exact_topoheight(&self, key: &PublicKey, topoheight: u64) -> Result; + // returns its topoheight and its VersionedNonce + async fn get_last_nonce(&self, key: &PublicKey) -> Result<(u64, VersionedNonce), BlockchainError>; + async fn get_nonce_at_exact_topoheight(&self, key: &PublicKey, topoheight: u64) -> Result; + async fn get_nonce_at_maximum_topoheight(&self, key: &PublicKey, topoheight: u64) -> Result, BlockchainError>; + // set the new highest topoheight for account + fn set_last_topoheight_for_nonce(&mut self, key: &PublicKey, topoheight: u64) -> Result<(), BlockchainError>; + async fn set_nonce_at_topoheight(&mut self, key: &PublicKey, nonce: u64, topoheight: u64) -> Result<(), BlockchainError>; fn get_block_reward(&self, hash: &Hash) -> Result; fn set_block_reward(&mut self, hash: &Hash, reward: u64) -> Result<(), BlockchainError>; @@ -85,7 +100,7 @@ pub trait Storage: DifficultyProvider + Sync + Send + 'static { async fn has_transaction(&self, hash: &Hash) -> Result; async fn add_new_block(&mut self, block: Arc, txs: &Vec>, difficulty: Difficulty, hash: Hash) -> Result<(), 
BlockchainError>; - async fn pop_blocks(&mut self, mut height: u64, mut topoheight: u64, count: u64) -> Result<(u64, u64, Vec<(Hash, Arc)>, HashSet), BlockchainError>; + async fn pop_blocks(&mut self, mut height: u64, mut topoheight: u64, count: u64) -> Result<(u64, u64, Vec<(Hash, Arc)>), BlockchainError>; fn has_blocks(&self) -> bool; fn count_blocks(&self) -> usize; async fn has_block(&self, hash: &Hash) -> Result; diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index facdde3c..96d9a7cc 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -5,7 +5,7 @@ use xelis_common::{ crypto::{key::PublicKey, hash::{Hash, hash}}, immutable::Immutable, transaction::Transaction, - block::{BlockHeader, Block, Difficulty}, account::VersionedBalance, network::Network, + block::{BlockHeader, Block, Difficulty}, account::{VersionedBalance, VersionedNonce}, network::Network, }; use std::{ collections::{HashSet, BTreeSet}, @@ -24,6 +24,7 @@ const TOP_TOPO_HEIGHT: &[u8; 4] = b"TOPO"; const TOP_HEIGHT: &[u8; 4] = b"TOPH"; const NETWORK: &[u8] = b"NET"; const PRUNED_TOPOHEIGHT: &[u8; 4] = b"PRUN"; +const NONCE_PREFIX: &[u8; 4] = b"NONC"; pub struct SledStorage { transactions: Tree, // all txs stored on disk @@ -49,8 +50,9 @@ pub struct SledStorage { hash_at_topo_cache: Option>>, cumulative_difficulty_cache: Option>>, assets_cache: Option>>, - nonces_cache: Option>>, + // FIXME !!!!! 
balances_trees_cache: Option>>, // versioned balances tree keep in cache to prevent hash recompute + nonces_trees_cache: Option>>, // versioned nonces tree keep in cache to prevent hash recompute tips_cache: Tips, pruned_topoheight: Option } @@ -91,8 +93,8 @@ impl SledStorage { hash_at_topo_cache: init_cache!(cache_size), cumulative_difficulty_cache: init_cache!(cache_size), assets_cache: init_cache!(cache_size), - nonces_cache: init_cache!(cache_size), balances_trees_cache: init_cache!(cache_size), + nonces_trees_cache: init_cache!(cache_size), tips_cache: HashSet::new(), pruned_topoheight: None }; @@ -131,7 +133,7 @@ impl SledStorage { } } - async fn get_arc_data(&self, tree: &Tree, cache: &Option>>>, key: &K) -> Result, BlockchainError> { + async fn get_cacheable_arc_data(&self, tree: &Tree, cache: &Option>>>, key: &K) -> Result, BlockchainError> { let value = if let Some(cache) = cache { let mut cache = cache.lock().await; if let Some(value) = cache.get(key) { @@ -148,7 +150,7 @@ impl SledStorage { Ok(value) } - async fn get_data(&self, tree: &Tree, cache: &Option>>, key: &K) -> Result { + async fn get_cacheable_data_copiable(&self, tree: &Tree, cache: &Option>>, key: &K) -> Result { let value = if let Some(cache) = cache { let mut cache = cache.lock().await; if let Some(value) = cache.get(key) { @@ -165,7 +167,7 @@ impl SledStorage { Ok(value) } - async fn delete_data(&self, tree: &Tree, cache: &Option>>>, key: &K) -> Result, BlockchainError> { + async fn get_cacheable_data(&self, tree: &Tree, cache: &Option>>, key: &K) -> Result { let bytes = match tree.remove(key.to_bytes())? 
{ Some(data) => data.to_vec(), None => return Err(BlockchainError::NotFoundOnDisk(DiskContext::DeleteData)) @@ -180,10 +182,10 @@ impl SledStorage { let mut reader = Reader::new(&bytes); let value = V::read(&mut reader)?; - Ok(Arc::new(value)) + Ok(value) } - async fn delete_data_no_arc(&self, tree: &Tree, cache: &Option>>, key: &K) -> Result { + async fn delete_data(&self, tree: &Tree, cache: &Option>>>, key: &K) -> Result, BlockchainError> { let bytes = match tree.remove(key.to_bytes())? { Some(data) => data.to_vec(), None => return Err(BlockchainError::NotFoundOnDisk(DiskContext::DeleteData)) @@ -198,7 +200,7 @@ impl SledStorage { let mut reader = Reader::new(&bytes); let value = V::read(&mut reader)?; - Ok(value) + Ok(Arc::new(value)) } async fn contains_data(&self, tree: &Tree, cache: &Option>>, key: &K) -> Result { @@ -240,6 +242,37 @@ impl SledStorage { Ok(tree) } + + // constant prefix + topoheight to create a unique key + fn generate_versioned_nonce_key(&self, topoheight: u64) -> Result<[u8; 12], BlockchainError> { + trace!("generate versioned nonce key at {}", topoheight); + let mut bytes = [0u8; 12]; // 4 bytes for prefix, 8 for u64 + bytes[0..4].copy_from_slice(NONCE_PREFIX); + bytes[4..12].copy_from_slice(&topoheight.to_be_bytes()); + Ok(bytes) + } + + // returns the Tree from cache or insert it and returns it + // if no cache, compute the key each time this function is called. 
+ async fn get_versioned_nonce_tree(&self, topoheight: u64) -> Result { + trace!("get versioned nonce tree at {}", topoheight); + let tree = if let Some(cache) = &self.nonces_trees_cache { + let mut nonces = cache.lock().await; + if let Some(tree) = nonces.get(&topoheight) { + tree.clone() + } else { // not found in cache, compute it and insert it + let key = self.generate_versioned_nonce_key(topoheight)?; + let tree = self.db.open_tree(key)?; + nonces.put(topoheight, tree.clone()); + tree + } + } else { // no cache found, we have to compute it ourself + let key = self.generate_versioned_nonce_key(topoheight)?; + self.db.open_tree(key)? + }; + + Ok(tree) + } } @@ -265,7 +298,7 @@ impl DifficultyProvider for SledStorage { async fn get_cumulative_difficulty_for_block_hash(&self, hash: &Hash) -> Result { trace!("get cumulative difficulty for hash {}", hash); - self.get_data(&self.cumulative_difficulty, &self.cumulative_difficulty_cache, hash).await + self.get_cacheable_data_copiable(&self.cumulative_difficulty, &self.cumulative_difficulty_cache, hash).await } async fn get_past_blocks_for_block_hash(&self, hash: &Hash) -> Result>, BlockchainError> { @@ -299,7 +332,7 @@ impl DifficultyProvider for SledStorage { async fn get_block_header_by_hash(&self, hash: &Hash) -> Result, BlockchainError> { trace!("get block by hash: {}", hash); - self.get_arc_data(&self.blocks, &self.blocks_cache, hash).await + self.get_cacheable_arc_data(&self.blocks, &self.blocks_cache, hash).await } } @@ -317,8 +350,8 @@ impl Storage for SledStorage { async fn delete_block_at_topoheight(&mut self, topoheight: u64) -> Result, BlockchainError> { // delete topoheight<->hash pointers - let hash = self.delete_data_no_arc(&self.hash_at_topo, &self.hash_at_topo_cache, &topoheight).await?; - self.delete_data_no_arc::(&self.topo_by_hash, &self.topo_by_hash_cache, &hash).await?; + let hash = self.get_cacheable_data(&self.hash_at_topo, &self.hash_at_topo_cache, &topoheight).await?; + 
self.get_cacheable_data::(&self.topo_by_hash, &self.topo_by_hash_cache, &hash).await?; let topoheight_bytes = topoheight.to_be_bytes(); // delete block reward @@ -351,7 +384,7 @@ impl Storage for SledStorage { } async fn delete_tx(&mut self, hash: &Hash) -> Result, BlockchainError> { - self.delete_data_no_arc::>(&self.tx_blocks, &None, hash).await?; + self.get_cacheable_data::>(&self.tx_blocks, &None, hash).await?; self.delete_data(&self.transactions, &self.transactions_cache, hash).await } @@ -361,6 +394,12 @@ impl Storage for SledStorage { Ok(()) } + async fn delete_versioned_nonces_at_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError> { + let tree = self.get_versioned_nonce_tree(topoheight).await?; + self.db.drop_tree(tree.name())?; + Ok(()) + } + async fn create_snapshot_balances_at_topoheight(&mut self, assets: &Vec, topoheight: u64) -> Result<(), BlockchainError> { for asset in assets { // tree where VersionedBalance are stored @@ -406,30 +445,103 @@ impl Storage for SledStorage { Ok(()) } - fn get_partial_assets(&self, maximum: usize, skip: usize) -> Result, BlockchainError> { + async fn create_snapshot_nonces_at_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError> { + // tree where VersionedNonce are stored + let versioned_tree = self.get_versioned_nonce_tree(topoheight).await?; + // tree where PublicKey are stored with the highest noce topoheight in it + for el in self.nonces.iter() { + let (key_bytes, value) = el?; + let key = PublicKey::from_bytes(&key_bytes)?; + let highest_topoheight = u64::from_bytes(&value)?; + + // retrieve the highest versioned nonce + let mut versioned_nonce = self.get_nonce_at_exact_topoheight(&key, highest_topoheight).await?; + + // if the highest topoheight for this account is less than the snapshot topoheight + // update it to the topoheight + // otherwise, delete the previous topoheight in VersionedNonce which is under topoheight + if highest_topoheight < topoheight { + // save the new 
highest topoheight + self.nonces.insert(&key_bytes, &topoheight.to_be_bytes())?; + // remove the previous topoheight + versioned_nonce.set_previous_topoheight(None); + + // save it + versioned_tree.insert(key_bytes, versioned_nonce.to_bytes())?; + } else { + // find the first VersionedBalance which is under topoheight + while let Some(previous_topoheight) = versioned_nonce.get_previous_topoheight() { + if previous_topoheight < topoheight { + versioned_nonce.set_previous_topoheight(None); + // save it + versioned_tree.insert(key_bytes, versioned_nonce.to_bytes())?; + break; + } + + // keep searching + versioned_nonce = self.get_nonce_at_exact_topoheight(&key, previous_topoheight).await?; + } + } + } + + Ok(()) + } + + async fn get_partial_assets(&self, maximum: usize, skip: usize, maximum_topoheight: u64) -> Result, BlockchainError> { let mut assets: BTreeSet = BTreeSet::new(); - for el in self.assets.iter().keys().skip(skip).take(maximum) { - let key = el?; - assets.insert(Hash::from_bytes(&key)?); + let mut skip_count = 0; + for el in self.assets.iter() { + let (key, value) = el?; + let registered_at_topo = u64::from_bytes(&value)?; + // check that we have a registered asset before the maximum topoheight + if registered_at_topo <= maximum_topoheight { + if skip_count < skip { + skip_count += 1; + } else { + assets.insert(Hash::from_bytes(&key)?); + + if assets.len() == maximum { + break; + } + } + } } Ok(assets) } - fn get_partial_keys(&self, maximum: usize, skip: usize) -> Result, BlockchainError> { - let mut assets: BTreeSet = BTreeSet::new(); - for el in self.nonces.iter().keys().skip(skip).take(maximum) { + async fn get_partial_keys(&self, maximum: usize, skip: usize, maximum_topoheight: u64) -> Result, BlockchainError> { + let mut keys: BTreeSet = BTreeSet::new(); + let mut skip_count = 0; + for el in self.nonces.iter().keys() { let key = el?; - assets.insert(PublicKey::from_bytes(&key)?); + let pkey = PublicKey::from_bytes(&key)?; + + // check that we have a 
nonce before the maximum topoheight + if self.get_nonce_at_maximum_topoheight(&pkey, maximum_topoheight).await?.is_some() { + if skip_count < skip { + skip_count += 1; + } else { + keys.insert(pkey); + + if keys.len() == maximum { + break; + } + } + } } - Ok(assets) + + Ok(keys) } - async fn get_balances + Send + Sync, I: Iterator + Send>(&self, asset: &Hash, keys: I) -> Result>, BlockchainError> { + async fn get_balances<'a, I: Iterator + Send>(&self, asset: &Hash, keys: I, maximum_topoheight: u64) -> Result>, BlockchainError> { let mut balances = Vec::new(); for key in keys { - if self.has_balance_for(key.as_ref(), asset).await? { - let (_, versioned_balance) = self.get_last_balance(key.as_ref(), asset).await?; - balances.push(Some(versioned_balance.get_balance())); + if self.has_balance_for(key, asset).await? { + let res = match self.get_balance_at_maximum_topoheight(key, asset, maximum_topoheight).await? { + Some((_, version)) => Some(version.get_balance()), + None => None + }; + balances.push(res); } else { balances.push(None); } @@ -487,9 +599,9 @@ impl Storage for SledStorage { self.contains_data(&self.assets, &self.assets_cache, asset).await } - async fn add_asset(&mut self, asset: &Hash) -> Result<(), BlockchainError> { - trace!("add asset {}", asset); - self.assets.insert(asset.as_bytes(), &[0u8; 0])?; + async fn add_asset(&mut self, asset: &Hash, topoheight: u64) -> Result<(), BlockchainError> { + trace!("add asset {} at topoheight {}", asset, topoheight); + self.assets.insert(asset.as_bytes(), &topoheight.to_be_bytes())?; if let Some(cache) = &self.assets_cache { let mut cache = cache.lock().await; cache.put(asset.clone(), ()); @@ -501,8 +613,8 @@ impl Storage for SledStorage { async fn get_assets(&self) -> Result, BlockchainError> { trace!("get assets"); let mut assets = Vec::new(); - for e in self.assets.iter() { - let (key, _) = e?; + for e in self.assets.iter().keys() { + let key = e?; let mut reader = Reader::new(&key); let hash = Hash::read(&mut 
reader)?; assets.push(hash); @@ -511,6 +623,11 @@ impl Storage for SledStorage { Ok(assets) } + fn get_asset_registration_topoheight(&self, asset: &Hash) -> Result { + trace!("get asset registration topoheight {}", asset); + self.load_from_disk(&self.assets, asset.as_bytes()) + } + fn has_tx_blocks(&self, hash: &Hash) -> Result { trace!("has tx blocks {}", hash); let contains = self.tx_blocks.contains_key(hash.as_bytes())?; @@ -567,7 +684,7 @@ impl Storage for SledStorage { } let tree = self.db.open_tree(asset.as_bytes())?; - self.get_data(&tree, &None, key).await + self.get_cacheable_data_copiable(&tree, &None, key).await } // set in storage the new top topoheight (the most up-to-date versioned balance) @@ -601,7 +718,7 @@ impl Storage for SledStorage { } let tree = self.get_versioned_balance_tree(asset, topoheight).await?; - self.get_data(&tree, &None, key).await.map_err(|_| BlockchainError::NoBalanceChanges(key.clone())) + self.get_cacheable_data_copiable(&tree, &None, key).await.map_err(|_| BlockchainError::NoBalanceChanges(key.clone())) } // delete the last topoheight registered for this key @@ -657,7 +774,7 @@ impl Storage for SledStorage { async fn delete_balance_at_topoheight(&mut self, key: &PublicKey, asset: &Hash, topoheight: u64) -> Result { trace!("delete balance {} for {} at topoheight {}", asset, key, topoheight); let tree = self.get_versioned_balance_tree(asset, topoheight).await?; - self.delete_data_no_arc(&tree, &None, key).await.map_err(|_| BlockchainError::NoBalanceChanges(key.clone())) + self.get_cacheable_data(&tree, &None, key).await.map_err(|_| BlockchainError::NoBalanceChanges(key.clone())) } // returns a new versioned balance with already-set previous topoheight @@ -695,7 +812,7 @@ impl Storage for SledStorage { } let tree = self.db.open_tree(asset.as_bytes())?; - let topoheight = self.get_data(&tree, &None, key).await?; + let topoheight = self.get_cacheable_data_copiable(&tree, &None, key).await?; let version = 
self.get_balance_at_exact_topoheight(key, asset, topoheight).await?; Ok((topoheight, version)) } @@ -710,26 +827,79 @@ impl Storage for SledStorage { async fn has_nonce(&self, key: &PublicKey) -> Result { trace!("has nonce {}", key); - self.contains_data(&self.nonces, &self.nonces_cache, key).await + let contains = self.nonces.contains_key(key.as_bytes())?; + Ok(contains) } - async fn get_nonce(&self, key: &PublicKey) -> Result { - trace!("get nonce {}", key); + async fn has_nonce_at_exact_topoheight(&self, key: &PublicKey, topoheight: u64) -> Result { + trace!("has nonce {} at topoheight {}", key, topoheight); + let contains = self.get_versioned_nonce_tree(topoheight).await?.contains_key(key.as_bytes())?; + Ok(contains) + } + + async fn get_last_nonce(&self, key: &PublicKey) -> Result<(u64, VersionedNonce), BlockchainError> { + trace!("get last nonce {}", key); if !self.has_nonce(key).await? { - return Ok(0) + return Err(BlockchainError::NoNonce(key.clone())) } - self.get_data(&self.nonces, &self.nonces_cache, key).await + let topoheight = self.load_from_disk(&self.nonces, key.as_bytes())?; + Ok((topoheight, self.get_nonce_at_exact_topoheight(key, topoheight).await?)) } - async fn set_nonce(&mut self, key: &PublicKey, nonce: u64) -> Result<(), BlockchainError> { - trace!("set nonce to {} for {}", nonce, key); - self.nonces.insert(&key.as_bytes(), &nonce.to_be_bytes())?; - if let Some(cache) = &self.nonces_cache { - let mut cache = cache.lock().await; - cache.put(key.clone(), nonce); + async fn get_nonce_at_exact_topoheight(&self, key: &PublicKey, topoheight: u64) -> Result { + trace!("get nonce at topoheight {} for {}", topoheight, key); + let tree = &self.get_versioned_nonce_tree(topoheight).await?; + self.load_from_disk(tree, &key.to_bytes()) + } + + async fn get_nonce_at_maximum_topoheight(&self, key: &PublicKey, topoheight: u64) -> Result, BlockchainError> { + trace!("get nonce at maximum topoheight {} for {}", topoheight, key); + // check first that this 
address has nonce, if no returns None + if !self.has_nonce(key).await? { + return Ok(None) + } + + let (topo, mut version) = self.get_last_nonce(key).await?; + trace!("Last version of nonce for {} is at topoheight {}", key, topo); + // if it's the latest and its under the maximum topoheight + if topo < topoheight { + trace!("Last version nonce (valid) found at {} (maximum topoheight = {})", topo, topoheight); + return Ok(Some((topo, version))) } + // otherwise, we have to go through the whole chain + while let Some(previous) = version.get_previous_topoheight() { + let previous_version = self.get_nonce_at_exact_topoheight(key, previous).await?; + trace!("previous nonce version is at {}", previous); + if previous < topoheight { + trace!("Highest version nonce found at {} (maximum topoheight = {})", topo, topoheight); + return Ok(Some((previous, previous_version))) + } + + if let Some(value) = previous_version.get_previous_topoheight() { + if value > previous { + error!("FATAL ERROR: Previous topoheight ({}) should not be higher than current version ({})!", value, previous); + return Err(BlockchainError::Unknown) + } + } + version = previous_version; + } + + Ok(None) + } + + async fn set_nonce_at_topoheight(&mut self, key: &PublicKey, nonce: u64, topoheight: u64) -> Result<(), BlockchainError> { + trace!("set nonce to {} for {} at topo {}", nonce, key, topoheight); + let tree = self.get_versioned_nonce_tree(topoheight).await?; + tree.insert(&key.as_bytes(), &nonce.to_be_bytes())?; + self.set_last_topoheight_for_nonce(key, topoheight)?; + Ok(()) + } + + fn set_last_topoheight_for_nonce(&mut self, key: &PublicKey, topoheight: u64) -> Result<(), BlockchainError> { + trace!("set last topoheight for nonce {} to {}", key, topoheight); + self.nonces.insert(&key.as_bytes(), &topoheight.to_be_bytes())?; Ok(()) } @@ -746,7 +916,7 @@ impl Storage for SledStorage { async fn get_transaction(&self, hash: &Hash) -> Result, BlockchainError> { trace!("get transaction for hash {}", 
hash); - self.get_arc_data(&self.transactions, &self.transactions_cache, hash).await + self.get_cacheable_arc_data(&self.transactions, &self.transactions_cache, hash).await } async fn has_transaction(&self, hash: &Hash) -> Result { @@ -784,7 +954,8 @@ impl Storage for SledStorage { Ok(()) } - async fn pop_blocks(&mut self, mut height: u64, mut topoheight: u64, count: u64) -> Result<(u64, u64, Vec<(Hash, Arc)>, HashSet), BlockchainError> { + async fn pop_blocks(&mut self, mut height: u64, mut topoheight: u64, count: u64) -> Result<(u64, u64, Vec<(Hash, Arc)>), BlockchainError> { + let current_topoheight = topoheight; trace!("pop blocks from height: {}, topoheight: {}, count: {}", height, topoheight, count); if height < count as u64 { // also prevent removing genesis block return Err(BlockchainError::NotEnoughBlocks); @@ -821,8 +992,6 @@ impl Storage for SledStorage { let mut tips = self.get_tips().await?; // all txs to be rewinded let mut txs = Vec::new(); - // all miners rewards to be rewinded - let mut miners: HashSet = HashSet::new(); let mut done = 0; 'main: loop { // check if the next block is alone at its height, if yes stop rewinding @@ -843,7 +1012,7 @@ impl Storage for SledStorage { // get all blocks at same height, and delete current block hash from the list trace!("Searching blocks at height {}", height); - let blocks_at_height: Tips = self.delete_data_no_arc(&self.blocks_at_height, &None, &height).await?; + let blocks_at_height: Tips = self.get_cacheable_data(&self.blocks_at_height, &None, &height).await?; trace!("Blocks at height {}: {}", height, blocks_at_height.len()); for hash in blocks_at_height { @@ -851,22 +1020,20 @@ impl Storage for SledStorage { let block = self.delete_data(&self.blocks, &self.blocks_cache, &hash).await?; trace!("block header deleted successfully"); - miners.insert(block.get_miner().clone()); - - let _: Difficulty = self.delete_data_no_arc(&self.supply, &None, &hash).await?; - let _: Difficulty = 
self.delete_data_no_arc(&self.difficulty, &None, &hash).await?; + let _: Difficulty = self.get_cacheable_data(&self.supply, &None, &hash).await?; + let _: Difficulty = self.get_cacheable_data(&self.difficulty, &None, &hash).await?; trace!("Deleting cumulative difficulty"); - let cumulative_difficulty: u64 = self.delete_data_no_arc(&self.cumulative_difficulty, &self.cumulative_difficulty_cache, &hash).await?; + let cumulative_difficulty: u64 = self.get_cacheable_data(&self.cumulative_difficulty, &self.cumulative_difficulty_cache, &hash).await?; trace!("Cumulative difficulty deleted: {}", cumulative_difficulty); - let reward: u64 = self.delete_data_no_arc(&self.rewards, &None, &hash).await?; + let reward: u64 = self.get_cacheable_data(&self.rewards, &None, &hash).await?; trace!("Reward for block {} was: {}", hash, reward); for tx_hash in block.get_transactions() { - let tx = self.delete_data(&self.transactions, &self.transactions_cache, tx_hash).await?; + let tx: Arc = self.delete_data(&self.transactions, &self.transactions_cache, tx_hash).await?; if self.has_tx_blocks(tx_hash)? 
{ - let mut blocks: Tips = self.delete_data_no_arc(&self.tx_blocks, &None, tx_hash).await?; + let mut blocks: Tips = self.get_cacheable_data(&self.tx_blocks, &None, tx_hash).await?; let blocks_len = blocks.len(); blocks.remove(&hash); self.set_blocks_for_tx(tx_hash, &blocks)?; @@ -888,12 +1055,12 @@ impl Storage for SledStorage { } trace!("Block was at topoheight {}", topo); - self.delete_data_no_arc(&self.topo_by_hash, &self.topo_by_hash_cache, &hash).await?; + self.get_cacheable_data(&self.topo_by_hash, &self.topo_by_hash_cache, &hash).await?; if let Ok(hash_at_topo) = self.get_hash_at_topo_height(topo).await { if hash_at_topo == hash { trace!("Deleting hash '{}' at topo height '{}'", hash_at_topo, topo); - self.delete_data_no_arc(&self.hash_at_topo, &self.hash_at_topo_cache, &topo).await?; + self.get_cacheable_data(&self.hash_at_topo, &self.hash_at_topo_cache, &topo).await?; } } } @@ -920,12 +1087,98 @@ impl Storage for SledStorage { for hash in &tips { trace!("hash {} at height {}", hash, self.get_height_for_block_hash(&hash).await?); } + + // clean all assets + let mut deleted_assets = HashSet::new(); + let mut assets = HashSet::new(); + for el in self.assets.iter() { + let (key, value) = el?; + let asset = Hash::from_bytes(&key)?; + let registration_topoheight = u64::from_bytes(&value)?; + if registration_topoheight > topoheight { + trace!("Asset {} was registered at topoheight {}, deleting", asset, registration_topoheight); + self.assets.remove(&key)?; + deleted_assets.insert(asset); + + // drop the tree for this asset + self.db.drop_tree(key)?; + } else { + assets.insert(asset); + } + } + + // now let's process nonces versions + // we set the new highest topoheight to the highest found under the new topoheight + for el in self.nonces.iter() { + let (key, value) = el?; + let highest_topoheight = u64::from_bytes(&value)?; + if highest_topoheight > topoheight { + self.nonces.remove(&key)?; + // find the first version which is under topoheight + let pkey = 
PublicKey::from_bytes(&key)?; + let mut version = self.get_nonce_at_exact_topoheight(&pkey, highest_topoheight).await?; + while let Some(previous_topoheight) = version.get_previous_topoheight() { + if previous_topoheight < topoheight { + // we find the new highest version which is under new topoheight + trace!("New highest version for {} is at topoheight {}", pkey, previous_topoheight); + self.nonces.insert(&key, &previous_topoheight.to_be_bytes())?; + break; + } + + // keep searching + version = self.get_nonce_at_exact_topoheight(&pkey, previous_topoheight).await?; + } + } else { + // nothing to do as its under the rewinded topoheight + } + } + + // do balances too + for asset in &assets { + let tree = self.db.open_tree(asset.as_bytes())?; + for el in tree.iter() { + let (key, value) = el?; + let highest_topoheight = u64::from_bytes(&value)?; + if highest_topoheight > topoheight { + self.nonces.remove(&key)?; + // find the first version which is under topoheight + let pkey = PublicKey::from_bytes(&key)?; + let mut version = self.get_balance_at_exact_topoheight(&pkey, asset, highest_topoheight).await?; + while let Some(previous_topoheight) = version.get_previous_topoheight() { + if previous_topoheight < topoheight { + // we find the new highest version which is under new topoheight + trace!("New highest version for {} is at topoheight {} with asset {}", pkey, previous_topoheight, asset); + tree.insert(&key, &previous_topoheight.to_be_bytes())?; + break; + } + + // keep searching + version = self.get_balance_at_exact_topoheight(&pkey, asset, previous_topoheight).await?; + } + } else { + // nothing to do as its under the rewinded topoheight + } + } + } + + // now delete all versioned trees for assets and nonces + for topoheight in topoheight..=current_topoheight { + // delete all versioned balances for assets (deleted assets and existing assets) + for asset in deleted_assets.iter().chain(&assets) { + self.delete_versioned_balances_for_asset_at_topoheight(asset, 
topoheight).await?; + } + + self.delete_versioned_nonces_at_topoheight(topoheight).await?; + } + // store the new tips and topo topoheight self.store_tips(&tips)?; self.set_top_topoheight(topoheight)?; self.set_top_height(height)?; - Ok((height, topoheight, txs, miners)) + // reverse order of txs so its ascending order + txs.reverse(); + Ok((height, topoheight, txs)) } fn has_blocks(&self) -> bool { @@ -1090,7 +1343,7 @@ impl Storage for SledStorage { async fn get_topo_height_for_hash(&self, hash: &Hash) -> Result { trace!("get topoheight for hash: {}", hash); - self.get_data(&self.topo_by_hash, &self.topo_by_hash_cache, &hash).await + self.get_cacheable_data_copiable(&self.topo_by_hash, &self.topo_by_hash_cache, &hash).await } async fn get_hash_at_topo_height(&self, topoheight: u64) -> Result { diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 2f0afd73..23b82dd3 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -11,7 +11,7 @@ use xelis_common::{ serializer::Serializer, crypto::hash::{Hashable, Hash}, block::{BlockHeader, Block}, - globals::get_current_time, immutable::Immutable + globals::get_current_time, immutable::Immutable, account::VersionedNonce }; use crate::{core::{blockchain::Blockchain, storage::Storage}, p2p::{chain_validator::ChainValidator, packet::{bootstrap_chain::{StepRequest, StepResponse, BootstrapChainResponse, MAX_ITEMS_PER_PAGE, BlockMetadata}, inventory::{NOTIFY_MAX_LEN, NotifyInventoryRequest, NotifyInventoryResponse}}}}; use crate::core::error::BlockchainError; @@ -432,7 +432,9 @@ impl P2pServer { // select a random peer which is greater than us to sync chain // candidate peer should have a greater topoheight or a higher block height than us - async fn select_random_best_peer(&self) -> Option> { + // if we are not in fast sync mode, we must verify its pruned topoheight to be sure + // he have the blocks we need + async fn select_random_best_peer(&self, fast_sync: bool) -> Option> { 
trace!("select random best peer"); let peer_list = self.peer_list.read().await; let our_height = self.blockchain.get_height(); @@ -440,12 +442,23 @@ impl P2pServer { // search for peers which are greater than us // and that are pruned but before our height so we can sync correctly let peers: Vec<&Arc> = peer_list.get_peers().values().filter(|p| { - if let Some(pruned_topoheight) = p.get_pruned_topoheight() { - if pruned_topoheight > our_topoheight { + let peer_topoheight = p.get_topoheight(); + // In fast sync mode, we don't check the pruned topoheight as we don't sync from genesis block + if !fast_sync { + if let Some(pruned_topoheight) = p.get_pruned_topoheight() { + if pruned_topoheight > our_topoheight { + return false + } + } + } else { + // if we want to fast sync, but this peer is not compatible, we skip it + // for this we check that the peer topoheight is greater or equal to the prune safety limit + if peer_topoheight < PRUNE_SAFETY_LIMIT { return false } } - p.get_height() > our_height || p.get_topoheight() > our_topoheight + + p.get_height() > our_height || peer_topoheight > our_topoheight } ).collect(); let count = peers.len(); @@ -455,7 +468,7 @@ impl P2pServer { } let selected = rand::thread_rng().gen_range(0..count); let peer = peers.get(selected)?; - trace!("selected peer: ({}) {}", selected, peer); + trace!("selected peer for sync chain: ({}) {}", selected, peer); // clone the Arc to prevent the lock until the end of the sync request Some(Arc::clone(peer)) } @@ -468,12 +481,12 @@ impl P2pServer { trace!("We are already syncing, skipping..."); continue; } - if let Some(peer) = self.select_random_best_peer().await { + let fast_sync = self.blockchain.is_fast_sync_mode_enabled(); + if let Some(peer) = self.select_random_best_peer(fast_sync).await { trace!("Selected for chain sync is {}", peer); // check if we can maybe fast sync first // otherwise, fallback on the normal chain sync - let peer_topoheight = peer.get_topoheight(); - if 
self.blockchain.allow_fast_sync(peer_topoheight) { + if fast_sync { if let Err(e) = self.bootstrap_chain(&peer).await { warn!("Error occured while fast syncing with {}: {}", peer, e); } @@ -482,6 +495,8 @@ impl P2pServer { debug!("Error occured on chain sync with {}: {}", peer, e); } } + } else { + trace!("No peer found for chain sync"); } } } @@ -1193,7 +1208,7 @@ impl P2pServer { // if node asks us to pop blocks, check that the difference with peer's height is above STABLE_LIMIT // then, verify if it's a priority node, otherwise, check if we are connected to a priority node so only him can rewind us - if pop_count > 0 && peer.get_height() - self.blockchain.get_height() > STABLE_LIMIT && (peer.is_priority() || !self.is_connected_to_a_synced_priority_node().await) { + if pop_count > 0 && peer.get_topoheight() > our_previous_topoheight && peer.get_height() - self.blockchain.get_height() > STABLE_LIMIT && (peer.is_priority() || !self.is_connected_to_a_synced_priority_node().await) { // check that if we can trust him if peer.is_priority() { warn!("Rewinding chain without checking because {} is a priority node (pop count: {})", peer, pop_count); @@ -1430,12 +1445,19 @@ impl P2pServer { } async fn handle_bootstrap_chain_request(self: &Arc, peer: &Arc, request: StepRequest<'_>) -> Result<(), BlockchainError> { - debug!("Handle bootstrap chain request from {}", peer); + let request_kind = request.kind(); + debug!("Handle bootstrap chain request {:?} from {}", request_kind, peer); let storage = self.blockchain.get_storage().read().await; match request { - StepRequest::Assets(page) => { + StepRequest::ChainInfo => { + let tips = storage.get_tips().await?; + let (hash, height) = self.blockchain.find_common_base(&storage, &tips).await?; + let stable_topo = storage.get_topo_height_for_hash(&hash).await?; + peer.send_packet(Packet::BootstrapChainResponse(BootstrapChainResponse::new(StepResponse::ChainInfo(stable_topo, height, hash)))).await?; + }, + 
StepRequest::Assets(topoheight, page) => { let page = page.unwrap_or(0); - let assets = storage.get_partial_assets(MAX_ITEMS_PER_PAGE, page as usize * MAX_ITEMS_PER_PAGE)?; + let assets = storage.get_partial_assets(MAX_ITEMS_PER_PAGE, page as usize * MAX_ITEMS_PER_PAGE, topoheight).await?; let page = if assets.len() == MAX_ITEMS_PER_PAGE { Some(page + 1) } else { @@ -1444,21 +1466,22 @@ impl P2pServer { peer.send_packet(Packet::BootstrapChainResponse(BootstrapChainResponse::new(StepResponse::Assets(assets, page)))).await?; }, - StepRequest::Balances(asset, keys) => { - let balances = storage.get_balances(&asset, keys.iter()).await?; + StepRequest::Balances(topoheight, asset, keys) => { + let balances = storage.get_balances(&asset, keys.iter(), topoheight).await?; peer.send_packet(Packet::BootstrapChainResponse(BootstrapChainResponse::new(StepResponse::Balances(balances)))).await?; }, - StepRequest::Nonces(keys) => { + StepRequest::Nonces(topoheight, keys) => { let mut nonces = Vec::with_capacity(keys.len()); for key in keys.iter() { - let nonce = storage.get_nonce(key).await?; - nonces.push(nonce); + let (_, version) = storage.get_nonce_at_maximum_topoheight(key, topoheight).await?.unwrap_or_else(|| (0, VersionedNonce::new(0, None))); + nonces.push(version.get_nonce()); } + peer.send_packet(Packet::BootstrapChainResponse(BootstrapChainResponse::new(StepResponse::Nonces(nonces)))).await?; }, - StepRequest::Keys(page) => { + StepRequest::Keys(topoheight, page) => { let page = page.unwrap_or(0); - let keys = storage.get_partial_keys(MAX_ITEMS_PER_PAGE, page as usize * MAX_ITEMS_PER_PAGE)?; + let keys = storage.get_partial_keys(MAX_ITEMS_PER_PAGE, page as usize * MAX_ITEMS_PER_PAGE, topoheight).await?; let page = if keys.len() == MAX_ITEMS_PER_PAGE { Some(page + 1) } else { @@ -1467,16 +1490,17 @@ impl P2pServer { peer.send_packet(Packet::BootstrapChainResponse(BootstrapChainResponse::new(StepResponse::Keys(keys, page)))).await?; }, - 
StepRequest::BlocksMetadata(begin_topoheight) => { + StepRequest::BlocksMetadata(topoheight) => { let our_topoheight = self.blockchain.get_topo_height(); let pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); - if pruned_topoheight > begin_topoheight || our_topoheight < STABLE_LIMIT || begin_topoheight > our_topoheight - STABLE_LIMIT { - debug!("Invalid begin topoheight received from {}", peer); + if topoheight < PRUNE_SAFETY_LIMIT || pruned_topoheight + PRUNE_SAFETY_LIMIT > topoheight || our_topoheight < PRUNE_SAFETY_LIMIT { + debug!("Invalid begin topoheight (received {}, our is {}) received from {}", topoheight, our_topoheight, peer); return Err(P2pError::InvalidPacket.into()) } - let mut blocks = Vec::with_capacity(STABLE_LIMIT as usize); - for topoheight in begin_topoheight..begin_topoheight + STABLE_LIMIT { + let mut blocks = Vec::with_capacity(PRUNE_SAFETY_LIMIT as usize); + // go until the requested stable topoheight + for topoheight in (topoheight-PRUNE_SAFETY_LIMIT..=topoheight).rev() { let hash = storage.get_hash_at_topo_height(topoheight).await?; let supply = storage.get_supply_for_block_hash(&hash)?; let reward = storage.get_block_reward(&hash)?; @@ -1488,10 +1512,6 @@ impl P2pServer { peer.send_packet(Packet::BootstrapChainResponse(BootstrapChainResponse::new(StepResponse::BlocksMetadata(blocks)))).await?; }, - StepRequest::Tips => { - let tips = storage.get_tips().await?; - peer.send_packet(Packet::BootstrapChainResponse(BootstrapChainResponse::new(StepResponse::Tips(tips)))).await?; - } } Ok(()) } @@ -1502,11 +1522,9 @@ impl P2pServer { async fn bootstrap_chain(&self, peer: &Arc) -> Result<(), BlockchainError> { debug!("Starting fast sync with {}", peer); - let stable_topoheight = peer.get_topoheight() - STABLE_LIMIT; - + let mut stable_topoheight = 0; let mut storage = self.blockchain.get_storage().write().await; - let mut step: Option = Some(StepRequest::Assets(None)); - let mut top_height = 0; + let mut step: Option = 
Some(StepRequest::ChainInfo); loop { let response = if let Some(step) = step.take() { peer.request_boostrap_chain(step).await? @@ -1515,24 +1533,31 @@ impl P2pServer { }; step = match response { + StepResponse::ChainInfo(topoheight, height, hash) => { + storage.set_top_topoheight(topoheight)?; + storage.set_top_height(height)?; + storage.store_tips(&HashSet::from([hash]))?; + + stable_topoheight = topoheight; + Some(StepRequest::Assets(topoheight, None)) + }, // fetch all assets from peer StepResponse::Assets(assets, next_page) => { for asset in assets { - debug!("Saving asset {}", asset); - storage.add_asset(&asset).await?; + debug!("Saving asset {} at topoheight {}", asset, stable_topoheight); + storage.add_asset(&asset, stable_topoheight).await?; } if next_page.is_some() { - Some(StepRequest::Assets(next_page)) + Some(StepRequest::Assets(stable_topoheight, next_page)) } else { // Go to next step - Some(StepRequest::Keys(None)) + Some(StepRequest::Keys(stable_topoheight, None)) } }, // fetch all accounts StepResponse::Keys(keys, next_page) => { - let borrowed_keys = keys.iter().map(|v: &xelis_common::crypto::key::PublicKey| Cow::Borrowed(v)).collect(); - let StepResponse::Nonces(nonces) = peer.request_boostrap_chain(StepRequest::Nonces(Cow::Borrowed(&borrowed_keys))).await? else { + let StepResponse::Nonces(nonces) = peer.request_boostrap_chain(StepRequest::Nonces(stable_topoheight, Cow::Borrowed(&keys))).await? else { // shouldn't happen error!("Received an invalid StepResponse (how ?) while fetching nonces"); return Err(P2pError::InvalidPacket.into()) @@ -1542,7 +1567,7 @@ impl P2pServer { // otherwise in really long time, it may consume lot of memory for asset in storage.get_assets().await? { debug!("Request balances for asset {}", asset); - let StepResponse::Balances(balances) = peer.request_boostrap_chain(StepRequest::Balances(Cow::Borrowed(&asset), Cow::Borrowed(&borrowed_keys))).await? 
else { + let StepResponse::Balances(balances) = peer.request_boostrap_chain(StepRequest::Balances(stable_topoheight, Cow::Borrowed(&asset), Cow::Borrowed(&keys))).await? else { // shouldn't happen error!("Received an invalid StepResponse (how ?) while fetching balances"); return Err(P2pError::InvalidPacket.into()) @@ -1552,7 +1577,7 @@ impl P2pServer { for (key, balance) in keys.iter().zip(balances) { // check that the account have balance for this asset if let Some(balance) = balance { - debug!("Saving balance {} for key {}", balance, key); + debug!("Saving balance {} for key {} at topoheight {}", balance, key, stable_topoheight); let mut versioned_balance = storage.get_new_versioned_balance(key, &asset, stable_topoheight).await?; versioned_balance.set_balance(balance); storage.set_balance_to(key, &asset, stable_topoheight, &versioned_balance).await?; @@ -1564,44 +1589,52 @@ impl P2pServer { // save all nonces for (key, nonce) in keys.into_iter().zip(nonces) { debug!("Saving nonce {} for {}", nonce, key); - storage.set_nonce(&key, nonce).await?; + storage.set_nonce_at_topoheight(&key, nonce, stable_topoheight).await?; } if next_page.is_some() { - Some(StepRequest::Keys(next_page)) + Some(StepRequest::Keys(stable_topoheight, next_page)) } else { // Go to next step Some(StepRequest::BlocksMetadata(stable_topoheight)) } }, StepResponse::BlocksMetadata(blocks) => { - let mut topoheight = stable_topoheight; - for metadata in blocks { + let mut lowest_topoheight = stable_topoheight; + for (i, metadata) in blocks.into_iter().enumerate() { + lowest_topoheight = stable_topoheight - i as u64; debug!("Saving block metadata {}", metadata.hash); let OwnedObjectResponse::BlockHeader(header, hash) = peer.request_blocking_object(ObjectRequest::BlockHeader(metadata.hash)).await? 
else { error!("Received an invalid requested object while fetching blocks metadata"); return Err(P2pError::InvalidPacket.into()) }; + let mut txs = Vec::with_capacity(header.get_txs_hashes().len()); + for tx_hash in header.get_txs_hashes() { + let OwnedObjectResponse::Transaction(tx, _) = peer.request_blocking_object(ObjectRequest::Transaction(tx_hash.clone())).await? else { + error!("Received an invalid requested object while fetching block transaction {}", tx_hash); + return Err(P2pError::InvalidPacket.into()) + }; + txs.push(Immutable::Owned(tx)); + } + + // link its TX to the block for tx_hash in header.get_txs_hashes() { storage.add_block_for_tx(tx_hash, &hash)?; } + // save metadata of this block storage.set_supply_for_block_hash(&hash, metadata.supply)?; storage.set_cumulative_difficulty_for_block_hash(&hash, metadata.cumulative_difficulty).await?; storage.set_block_reward(&hash, metadata.reward)?; - storage.set_topo_height_for_block(&hash, topoheight).await?; + storage.set_topo_height_for_block(&hash, lowest_topoheight).await?; - top_height = header.get_height(); - storage.add_new_block(Arc::new(header), &Vec::new(), metadata.difficulty, hash).await?; - topoheight += 1; + // save the block with its transactions, difficulty + storage.add_new_block(Arc::new(header), &txs, metadata.difficulty, hash).await?; } + storage.set_pruned_topoheight(lowest_topoheight)?; - Some(StepRequest::Tips) - }, - StepResponse::Tips(tips) => { - storage.store_tips(&tips)?; None }, response => { // shouldn't happens @@ -1610,9 +1643,7 @@ impl P2pServer { } }; } - - storage.set_top_topoheight(stable_topoheight + STABLE_LIMIT)?; - storage.set_top_height(top_height)?; + self.blockchain.reload_from_disk(&storage).await?; Ok(()) } diff --git a/xelis_daemon/src/p2p/packet/bootstrap_chain.rs b/xelis_daemon/src/p2p/packet/bootstrap_chain.rs index b2663558..221dabb0 100644 --- a/xelis_daemon/src/p2p/packet/bootstrap_chain.rs +++ b/xelis_daemon/src/p2p/packet/bootstrap_chain.rs @@ -1,4 +1,4 
@@ -use std::{collections::{BTreeSet, HashSet}, borrow::Cow}; +use std::{collections::BTreeSet, borrow::Cow}; use log::debug; use xelis_common::{crypto::{hash::Hash, key::PublicKey}, serializer::{Serializer, ReaderError, Reader, Writer}, block::Difficulty}; @@ -48,40 +48,54 @@ impl Serializer for BlockMetadata { } } -#[derive(Debug, PartialEq, Eq)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Ord, PartialOrd)] pub enum StepKind { + ChainInfo, Assets, Keys, Balances, Nonces, - BlocksMetadata, - Tips + BlocksMetadata +} + +impl StepKind { + pub fn next(&self) -> Option { + Some(match self { + Self::ChainInfo => Self::Assets, + Self::Assets => Self::Keys, + Self::Keys => Self::Balances, + Self::Balances => Self::Nonces, + Self::Nonces => Self::BlocksMetadata, + Self::BlocksMetadata => return None + }) + } } #[derive(Debug)] pub enum StepRequest<'a> { - // Pagination - Assets(Option), - // Asset, pagination - Keys(Option), - // Asset, Accounts - Balances(Cow<'a, Hash>, Cow<'a, BTreeSet>>), - // Accounts - Nonces(Cow<'a, BTreeSet>>), + // Request chain info (topoheight, stable height, stable hash) + ChainInfo, + // Max topoheight, Pagination + Assets(u64, Option), + // Max topoheight, Asset, pagination + Keys(u64, Option), + // Max topoheight, Asset, Accounts + Balances(u64, Cow<'a, Hash>, Cow<'a, BTreeSet>), + // Max topoheight, Accounts + Nonces(u64, Cow<'a, BTreeSet>), // Request blocks metadata starting topoheight - BlocksMetadata(u64), - Tips + BlocksMetadata(u64) } impl<'a> StepRequest<'a> { pub fn kind(&self) -> StepKind { match self { - Self::Assets(_) => StepKind::Assets, - Self::Keys(_) => StepKind::Keys, - Self::Balances(_, _) => StepKind::Balances, - Self::Nonces(_) => StepKind::Nonces, - Self::BlocksMetadata(_) => StepKind::BlocksMetadata, - Self::Tips => StepKind::Tips + Self::ChainInfo => StepKind::ChainInfo, + Self::Assets(_, _) => StepKind::Assets, + Self::Keys(_, _) => StepKind::Keys, + Self::Balances(_, _, _) => StepKind::Balances, + Self::Nonces(_, 
_) => StepKind::Nonces, + Self::BlocksMetadata(_) => StepKind::BlocksMetadata } } } @@ -90,26 +104,31 @@ impl Serializer for StepRequest<'_> { fn read(reader: &mut Reader) -> Result { Ok(match reader.read_u8()? { 0 => { - let page = reader.read_optional_non_zero_u64()?; - Self::Assets(page) - }, + Self::ChainInfo + } 1 => { + let topoheight = reader.read_u64()?; let page = reader.read_optional_non_zero_u64()?; - Self::Keys(page) + Self::Assets(topoheight, page) }, 2 => { - let hash = Cow::<'_, Hash>::read(reader)?; - let keys = Cow::<'_, BTreeSet>>::read(reader)?; - Self::Balances(hash, keys) + let topoheight = reader.read_u64()?; + let page = reader.read_optional_non_zero_u64()?; + Self::Keys(topoheight, page) }, 3 => { - Self::Nonces(Cow::<'_, BTreeSet>>::read(reader)?) + let topoheight = reader.read_u64()?; + let hash = Cow::<'_, Hash>::read(reader)?; + let keys = Cow::<'_, BTreeSet>::read(reader)?; + Self::Balances(topoheight, hash, keys) }, 4 => { - Self::BlocksMetadata(reader.read_u64()?) + let topoheight = reader.read_u64()?; + let keys = Cow::<'_, BTreeSet>::read(reader)?; + Self::Nonces(topoheight, keys) }, 5 => { - Self::Tips + Self::BlocksMetadata(reader.read_u64()?) 
}, id => { debug!("Received invalid value for StepResponse: {}", id); @@ -120,53 +139,57 @@ impl Serializer for StepRequest<'_> { fn write(&self, writer: &mut Writer) { match self { - Self::Assets(page) => { + Self::ChainInfo => { writer.write_u8(0); - writer.write_optional_non_zero_u64(page); }, - Self::Keys(page) => { + Self::Assets(topoheight, page) => { writer.write_u8(1); + writer.write_u64(topoheight); writer.write_optional_non_zero_u64(page); }, - Self::Balances(asset, accounts) => { + Self::Keys(topoheight, page) => { writer.write_u8(2); - writer.write_hash(asset); - accounts.write(writer); + writer.write_u64(topoheight); + writer.write_optional_non_zero_u64(page); }, - Self::Nonces(nonces) => { + Self::Balances(topoheight, asset, accounts) => { writer.write_u8(3); - nonces.write(writer); + writer.write_u64(topoheight); + writer.write_hash(asset); + accounts.write(writer); }, - Self::BlocksMetadata(blocks) => { + Self::Nonces(topoheight, nonces) => { writer.write_u8(4); - blocks.write(writer); + writer.write_u64(topoheight); + nonces.write(writer); }, - Self::Tips => { + Self::BlocksMetadata(topoheight) => { writer.write_u8(5); - } + writer.write_u64(topoheight); + }, }; } } #[derive(Debug)] pub enum StepResponse { + ChainInfo(u64, u64, Hash), // topoheight of stable hash, stable height, stable hash Assets(BTreeSet, Option), // Set of assets, pagination Keys(BTreeSet, Option), // Set of keys, pagination Balances(Vec>), // Balances requested Nonces(Vec), // Nonces for requested accounts BlocksMetadata(Vec), // top blocks metadata - Tips(HashSet) // chain tips } impl StepResponse { pub fn kind(&self) -> StepKind { match self { + Self::ChainInfo(_, _, _) => StepKind::ChainInfo, Self::Assets(_, _) => StepKind::Assets, Self::Keys(_, _) => StepKind::Keys, Self::Balances(_) => StepKind::Balances, Self::Nonces(_) => StepKind::Nonces, - Self::BlocksMetadata(_) => StepKind::BlocksMetadata, - Self::Tips(_) => StepKind::Tips + Self::BlocksMetadata(_) => 
StepKind::BlocksMetadata } } } @@ -175,33 +198,31 @@ impl Serializer for StepResponse { fn read(reader: &mut Reader) -> Result { Ok(match reader.read_u8()? { 0 => { + let topoheight = reader.read_u64()?; + let stable_height = reader.read_u64()?; + let hash = reader.read_hash()?; + + Self::ChainInfo(topoheight, stable_height, hash) + }, + 1 => { let assets = BTreeSet::::read(reader)?; let page = reader.read_optional_non_zero_u64()?; Self::Assets(assets, page) }, - 1 => { + 2 => { let keys = BTreeSet::::read(reader)?; let page = reader.read_optional_non_zero_u64()?; Self::Keys(keys, page) }, - 2 => { + 3 => { Self::Balances(Vec::>::read(reader)?) }, - 3 => { + 4 => { Self::Nonces(Vec::::read(reader)?) }, - 4 => { + 5 => { Self::BlocksMetadata(Vec::::read(reader)?) }, - 5 => { - let count = reader.read_u8()? as usize; - let mut set = HashSet::with_capacity(count); - for _ in 0..count { - let hash = reader.read_hash()?; - set.insert(hash); - } - Self::Tips(set) - } id => { debug!("Received invalid value for StepResponse: {}", id); return Err(ReaderError::InvalidValue) @@ -211,35 +232,33 @@ impl Serializer for StepResponse { fn write(&self, writer: &mut Writer) { match self { - Self::Assets(assets, page) => { + Self::ChainInfo(topoheight, stable_height, hash) => { writer.write_u8(0); + writer.write_u64(topoheight); + writer.write_u64(stable_height); + writer.write_hash(hash); + }, + Self::Assets(assets, page) => { + writer.write_u8(1); assets.write(writer); writer.write_optional_non_zero_u64(page); }, Self::Keys(keys, page) => { - writer.write_u8(1); + writer.write_u8(2); keys.write(writer); writer.write_optional_non_zero_u64(page); }, Self::Balances(balances) => { - writer.write_u8(2); + writer.write_u8(3); balances.write(writer); }, Self::Nonces(nonces) => { - writer.write_u8(3); + writer.write_u8(4); nonces.write(writer); }, Self::BlocksMetadata(blocks) => { - writer.write_u8(4); - blocks.write(writer); - }, - Self::Tips(tips) => { writer.write_u8(5); - - 
writer.write_u8(tips.len() as u8); - for hash in tips { - writer.write_hash(hash); - } + blocks.write(writer); } }; } diff --git a/xelis_daemon/src/p2p/packet/ping.rs b/xelis_daemon/src/p2p/packet/ping.rs index cfdb60c0..4ac13430 100644 --- a/xelis_daemon/src/p2p/packet/ping.rs +++ b/xelis_daemon/src/p2p/packet/ping.rs @@ -50,8 +50,8 @@ impl<'a> Ping<'a> { peer.set_topoheight(self.topoheight); peer.set_height(self.height); - if peer.is_pruned() != self.pruned_topoheight.is_some() { - error!("Invalid protocol rules: impossible to change the pruned state, from {} in ping packet", peer); + if peer.is_pruned() && self.pruned_topoheight.is_none() { + error!("Invalid protocol rules: impossible to change the pruned state (), from {} in ping packet", peer); return Err(P2pError::InvalidProtocolRules) } diff --git a/xelis_daemon/src/p2p/peer.rs b/xelis_daemon/src/p2p/peer.rs index 7db73133..9a378941 100644 --- a/xelis_daemon/src/p2p/peer.rs +++ b/xelis_daemon/src/p2p/peer.rs @@ -53,6 +53,7 @@ pub struct Peer { requested_inventory: AtomicBool, // if we requested this peer to send us an inventory notification pruned_topoheight: AtomicU64, // pruned topoheight if its a pruned node is_pruned: AtomicBool, // cannot be set to false if its already to true (protocol rules) + // used for await on bootstrap chain packets bootstrap_chain: Mutex>> } diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 062fe059..1bbcd2cc 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -17,7 +17,7 @@ use xelis_common::{ GetTransactionParams, P2pStatusResult, GetBlocksAtHeightParams, - GetTopoHeightRangeParams, GetBalanceAtTopoHeightParams, GetLastBalanceResult, GetInfoResult, GetTopBlockParams, GetTransactionsParams, TransactionResponse, GetHeightRangeParams + GetTopoHeightRangeParams, GetBalanceAtTopoHeightParams, GetLastBalanceResult, GetInfoResult, GetTopBlockParams, GetTransactionsParams, TransactionResponse, GetHeightRangeParams, GetNonceResult }, 
DataHash}, async_handler, serializer::Serializer, @@ -278,8 +278,8 @@ async fn get_nonce(blockchain: Arc>, body: Value) -> R } let storage = blockchain.get_storage().read().await; - let nonce = storage.get_nonce(params.address.get_public_key()).await.context("Error while retrieving nonce for account")?; - Ok(json!(nonce)) + let (topoheight, version) = storage.get_last_nonce(params.address.get_public_key()).await.context("Error while retrieving nonce for account")?; + Ok(json!(GetNonceResult { topoheight, version })) } // TODO Rate limiter diff --git a/xelis_wallet/src/api.rs b/xelis_wallet/src/api.rs index 94a6827e..06c69832 100644 --- a/xelis_wallet/src/api.rs +++ b/xelis_wallet/src/api.rs @@ -76,9 +76,10 @@ impl DaemonAPI { Ok(()) } - pub async fn get_nonce(&self, address: &Address<'_>) -> Result { + pub async fn get_last_nonce(&self, address: &Address<'_>) -> Result { let nonce = self.client.call_with("get_nonce", &GetNonceParams { - address: Cow::Borrowed(address) + address: Cow::Borrowed(address), + topoheight: None }).await.context(format!("Error while fetching nonce from address {}", address))?; Ok(nonce) } From 227c1bf77d2f3ce6570b941303c8b4397fe7e6dc Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 20 May 2023 01:43:15 +0200 Subject: [PATCH 69/74] daemon: update fast sync comment --- xelis_daemon/src/p2p/mod.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 23b82dd3..20671c4b 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1516,9 +1516,12 @@ impl P2pServer { Ok(()) } - // first, fetch all assets from peer - // then, fetch all keys with its nonces and its balances - // and for the last step, retrieve last TOP_TOPOHEIGHT - STABLE_LIMIT block headers + // first, retrieve chain info of selected peer + // We retrieve all assets through pagination, + // then we fetch all keys with its nonces and its balances (also through pagination) + // and 
for the last step, retrieve last STABLE TOPOHEIGHT - PRUNE_SAFETY_LIMIT blocks + // reload blockchain cache from disk, and we're ready to sync the rest of the chain + // NOTE: it could be even faster without retrieving each TXs, but we do it in case user don't enable pruning async fn bootstrap_chain(&self, peer: &Arc) -> Result<(), BlockchainError> { debug!("Starting fast sync with {}", peer); From 08370f8f9d4409128393cfe30633017dbb38b567 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 20 May 2023 13:28:07 +0200 Subject: [PATCH 70/74] daemon: check topoheight in bootstrap chain request --- xelis_daemon/src/p2p/mod.rs | 18 +++++++++++------- xelis_daemon/src/p2p/packet/bootstrap_chain.rs | 11 +++++++++++ 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 20671c4b..8693581a 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1447,7 +1447,18 @@ impl P2pServer { async fn handle_bootstrap_chain_request(self: &Arc, peer: &Arc, request: StepRequest<'_>) -> Result<(), BlockchainError> { let request_kind = request.kind(); debug!("Handle bootstrap chain request {:?} from {}", request_kind, peer); + let storage = self.blockchain.get_storage().read().await; + if let Some(topoheight) = request.get_requested_topoheight() { + let our_topoheight = self.blockchain.get_topo_height(); + let pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); + // verify that the topoheight asked is above the PRUNE_SAFETY_LIMIT + if topoheight < PRUNE_SAFETY_LIMIT || pruned_topoheight + PRUNE_SAFETY_LIMIT > topoheight || our_topoheight < PRUNE_SAFETY_LIMIT { + debug!("Invalid begin topoheight (received {}, our is {}) received from {}", topoheight, our_topoheight, peer); + return Err(P2pError::InvalidPacket.into()) + } + } + match request { StepRequest::ChainInfo => { let tips = storage.get_tips().await?; @@ -1491,13 +1502,6 @@ impl P2pServer { 
peer.send_packet(Packet::BootstrapChainResponse(BootstrapChainResponse::new(StepResponse::Keys(keys, page)))).await?; }, StepRequest::BlocksMetadata(topoheight) => { - let our_topoheight = self.blockchain.get_topo_height(); - let pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); - if topoheight < PRUNE_SAFETY_LIMIT || pruned_topoheight + PRUNE_SAFETY_LIMIT > topoheight || our_topoheight < PRUNE_SAFETY_LIMIT { - debug!("Invalid begin topoheight (received {}, our is {}) received from {}", topoheight, our_topoheight, peer); - return Err(P2pError::InvalidPacket.into()) - } - let mut blocks = Vec::with_capacity(PRUNE_SAFETY_LIMIT as usize); // go until the requested stable topoheight for topoheight in (topoheight-PRUNE_SAFETY_LIMIT..=topoheight).rev() { diff --git a/xelis_daemon/src/p2p/packet/bootstrap_chain.rs b/xelis_daemon/src/p2p/packet/bootstrap_chain.rs index 221dabb0..8b5954b7 100644 --- a/xelis_daemon/src/p2p/packet/bootstrap_chain.rs +++ b/xelis_daemon/src/p2p/packet/bootstrap_chain.rs @@ -98,6 +98,17 @@ impl<'a> StepRequest<'a> { Self::BlocksMetadata(_) => StepKind::BlocksMetadata } } + + pub fn get_requested_topoheight(&self) -> Option { + Some(*match self { + Self::ChainInfo => return None, + Self::Assets(topo, _) => topo, + Self::Keys(topo, _) => topo, + Self::Balances(topo, _, _) => topo, + Self::Nonces(topo, _) => topo, + Self::BlocksMetadata(topo) => topo + }) + } } impl Serializer for StepRequest<'_> { From ce890d8f2ba5019702016ba400dfe558f866bea0 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 20 May 2023 16:44:22 +0200 Subject: [PATCH 71/74] daemon: fix balances cache --- xelis_daemon/src/core/storage/sled.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index 96d9a7cc..87d3df68 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -50,8 +50,7 @@ pub struct SledStorage { 
hash_at_topo_cache: Option>>, cumulative_difficulty_cache: Option>>, assets_cache: Option>>, - // FIXME !!!!! - balances_trees_cache: Option>>, // versioned balances tree keep in cache to prevent hash recompute + balances_trees_cache: Option>>, // versioned balances tree keep in cache to prevent hash recompute nonces_trees_cache: Option>>, // versioned nonces tree keep in cache to prevent hash recompute tips_cache: Tips, pruned_topoheight: Option @@ -227,12 +226,13 @@ impl SledStorage { trace!("get versioned balance tree for {} at {}", asset, topoheight); let tree = if let Some(cache) = &self.balances_trees_cache { let mut balances = cache.lock().await; - if let Some(tree) = balances.get(&topoheight) { + let cache_key = (asset.clone(), topoheight); + if let Some(tree) = balances.get(&cache_key) { tree.clone() } else { // not found in cache, compute it and insert it let key = self.generate_versioned_balance_key(asset, topoheight)?; let tree = self.db.open_tree(key.as_bytes())?; - balances.put(topoheight, tree.clone()); + balances.put(cache_key, tree.clone()); tree } } else { // no cache found, we have to compute it ourself From 4e3e99da5c860ce693b9c4a364c0b4bc84ef9d95 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 20 May 2023 20:28:24 +0200 Subject: [PATCH 72/74] daemon: fix bug, try to respect chain sync countdown --- xelis_daemon/src/core/storage/sled.rs | 1 - xelis_daemon/src/p2p/mod.rs | 76 +++++++++++++++++---------- xelis_daemon/src/p2p/packet/ping.rs | 2 +- xelis_daemon/src/p2p/peer.rs | 2 +- 4 files changed, 50 insertions(+), 31 deletions(-) diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index 87d3df68..0c252192 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -946,7 +946,6 @@ impl Storage for SledStorage { self.add_block_hash_at_height(hash.clone(), block.get_height()).await?; - if let Some(cache) = &self.blocks_cache { cache.lock().await.put(hash, block); } diff 
--git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 8693581a..1e13fac9 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -7,7 +7,7 @@ pub mod chain_validator; use serde_json::Value; use xelis_common::{ - config::{VERSION, NETWORK_ID, SEED_NODES, MAX_BLOCK_SIZE, CHAIN_SYNC_DELAY, P2P_PING_DELAY, CHAIN_SYNC_REQUEST_MAX_BLOCKS, P2P_PING_PEER_LIST_DELAY, P2P_PING_PEER_LIST_LIMIT, STABLE_LIMIT, PEER_FAIL_LIMIT, CHAIN_SYNC_RESPONSE_MAX_BLOCKS, CHAIN_SYNC_TOP_BLOCKS, GENESIS_BLOCK_HASH, PRUNE_SAFETY_LIMIT}, + config::{VERSION, NETWORK_ID, SEED_NODES, MAX_BLOCK_SIZE, CHAIN_SYNC_DELAY, P2P_PING_DELAY, CHAIN_SYNC_REQUEST_MAX_BLOCKS, P2P_PING_PEER_LIST_DELAY, P2P_PING_PEER_LIST_LIMIT, STABLE_LIMIT, PEER_FAIL_LIMIT, CHAIN_SYNC_RESPONSE_MAX_BLOCKS, CHAIN_SYNC_TOP_BLOCKS, GENESIS_BLOCK_HASH, PRUNE_SAFETY_LIMIT, CHAIN_SYNC_TIMEOUT_SECS}, serializer::Serializer, crypto::hash::{Hashable, Hash}, block::{BlockHeader, Block}, @@ -30,7 +30,7 @@ use tokio::{net::{TcpListener, TcpStream}, sync::mpsc::{self, UnboundedSender, U use log::{info, warn, error, debug, trace}; use tokio::io::AsyncWriteExt; use tokio::time::{interval, timeout, sleep}; -use std::{borrow::Cow, fs, path::Path, sync::atomic::{AtomicBool, Ordering}, collections::HashSet}; +use std::{borrow::Cow, fs, path::Path, sync::atomic::{AtomicBool, Ordering, AtomicU64}, collections::HashSet}; use std::convert::TryInto; use std::net::SocketAddr; use std::time::Duration; @@ -54,7 +54,8 @@ pub struct P2pServer { peer_list: SharedPeerList, // all peers accepted blockchain: Arc>, // reference to the chain to add blocks/txs connections_sender: UnboundedSender, // this sender allows to create a queue system in one task only - syncing: AtomicBool // used to check if we are already syncing with one peer or not + syncing: AtomicBool, // used to check if we are already syncing with one peer or not + last_sync_update: AtomicU64 // used in case of timed out } impl P2pServer { @@ -77,7 +78,8 @@ 
impl P2pServer { peer_list: PeerList::new(max_peers), blockchain, connections_sender, - syncing: AtomicBool::new(false) + syncing: AtomicBool::new(false), + last_sync_update: AtomicU64::new(0), }; let arc = Arc::new(server); @@ -477,26 +479,34 @@ impl P2pServer { let duration = Duration::from_secs(CHAIN_SYNC_DELAY); loop { sleep(duration).await; - if self.is_syncing() { - trace!("We are already syncing, skipping..."); - continue; - } - let fast_sync = self.blockchain.is_fast_sync_mode_enabled(); - if let Some(peer) = self.select_random_best_peer(fast_sync).await { - trace!("Selected for chain sync is {}", peer); - // check if we can maybe fast sync first - // otherwise, fallback on the normal chain sync - if fast_sync { - if let Err(e) = self.bootstrap_chain(&peer).await { - warn!("Error occured while fast syncing with {}: {}", peer, e); + let time = get_current_time(); + if !self.is_syncing() { + self.last_sync_update.store(time, Ordering::SeqCst); + let fast_sync = self.blockchain.is_fast_sync_mode_enabled(); + if let Some(peer) = self.select_random_best_peer(fast_sync).await { + self.set_syncing(true); + trace!("Selected for chain sync is {}", peer); + // check if we can maybe fast sync first + // otherwise, fallback on the normal chain sync + if fast_sync { + if let Err(e) = self.bootstrap_chain(&peer).await { + warn!("Error occured while fast syncing with {}: {}", peer, e); + } + } else { + if let Err(e) = self.request_sync_chain_for(&peer).await { + debug!("Error occured on chain sync with {}: {}", peer, e); + } } + self.set_syncing(false); } else { - if let Err(e) = self.request_sync_chain_for(&peer).await { - debug!("Error occured on chain sync with {}: {}", peer, e); - } + trace!("No peer found for chain sync"); } } else { - trace!("No peer found for chain sync"); + // its still syncing, verify the timeout + if time - self.last_sync_update.load(Ordering::Acquire) >= CHAIN_SYNC_TIMEOUT_SECS * 5 { + debug!("Chain sync timeout, resetting"); + 
self.set_syncing(false); + } } } } @@ -813,12 +823,12 @@ impl P2pServer { let request = request.into_owned(); let last_request = peer.get_last_chain_sync(); let time = get_current_time(); - peer.set_last_chain_sync(time); // Node is trying to ask too fast our chain if last_request + CHAIN_SYNC_DELAY > time { debug!("Peer requested sync chain too fast!"); return Err(P2pError::RequestSyncChainTooFast) } + peer.set_last_chain_sync(time); // at least one block necessary (genesis block) if request.size() == 0 || request.size() > CHAIN_SYNC_REQUEST_MAX_BLOCKS { // allows maximum 64 blocks id (2560 bytes max) @@ -875,6 +885,8 @@ impl P2pServer { // start a new task to wait on all requested blocks tokio::spawn(async move { + let time = get_current_time(); + zelf.last_sync_update.store(time, Ordering::SeqCst); zelf.set_syncing(true); if let Err(e) = zelf.handle_chain_response(&peer, response, pop_count).await { error!("Error while handling chain response from {}: {}", peer, e); @@ -1218,6 +1230,10 @@ impl P2pServer { let mut chain_validator = ChainValidator::new(); for hash in blocks { trace!("Request block header for chain validator: {}", hash); + { + let time = get_current_time(); + self.last_sync_update.store(time, Ordering::SeqCst); + } let response = peer.request_blocking_object(ObjectRequest::BlockHeader(hash)).await?; if let OwnedObjectResponse::BlockHeader(header, hash) = response { trace!("Received {} with hash {}", header, hash); @@ -1238,6 +1254,10 @@ impl P2pServer { if !self.blockchain.has_block(&hash).await? 
{ let mut transactions = Vec::new(); // don't pre allocate for tx_hash in header.get_txs_hashes() { + { + let time = get_current_time(); + self.last_sync_update.store(time, Ordering::SeqCst); + } let response = peer.request_blocking_object(ObjectRequest::Transaction(Hash::max())).await?; if let OwnedObjectResponse::Transaction(tx, _) = response { trace!("Received transaction {} at block {} from {}", tx_hash, hash, peer); @@ -1253,8 +1273,6 @@ impl P2pServer { } } } - - } else { // no rewind are needed, process normally // it will first add blocks to sync, and then all alt-tips blocks if any (top blocks) @@ -1618,11 +1636,13 @@ impl P2pServer { let mut txs = Vec::with_capacity(header.get_txs_hashes().len()); for tx_hash in header.get_txs_hashes() { - let OwnedObjectResponse::Transaction(tx, _) = peer.request_blocking_object(ObjectRequest::Transaction(tx_hash.clone())).await? else { - error!("Received an invalid requested object while fetching block transaction {}", tx_hash); - return Err(P2pError::InvalidPacket.into()) - }; - txs.push(Immutable::Owned(tx)); + if !storage.has_transaction(tx_hash).await? { + let OwnedObjectResponse::Transaction(tx, _) = peer.request_blocking_object(ObjectRequest::Transaction(tx_hash.clone())).await? 
else { + error!("Received an invalid requested object while fetching block transaction {}", tx_hash); + return Err(P2pError::InvalidPacket.into()) + }; + txs.push(Immutable::Owned(tx)); + } } // link its TX to the block diff --git a/xelis_daemon/src/p2p/packet/ping.rs b/xelis_daemon/src/p2p/packet/ping.rs index 4ac13430..4c777d30 100644 --- a/xelis_daemon/src/p2p/packet/ping.rs +++ b/xelis_daemon/src/p2p/packet/ping.rs @@ -136,6 +136,6 @@ impl Serializer for Ping<'_> { impl Display for Ping<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Ping[top_hash: {}, topoheight: {}, height: {}, peers length: {}]", self.top_hash, self.topoheight, self.height, self.peer_list.len()) + write!(f, "Ping[top_hash: {}, topoheight: {}, height: {}, pruned topoheight: {:?}, peers length: {}]", self.top_hash, self.topoheight, self.height, self.pruned_topoheight, self.peer_list.len()) } } \ No newline at end of file diff --git a/xelis_daemon/src/p2p/peer.rs b/xelis_daemon/src/p2p/peer.rs index 9a378941..ab26ec15 100644 --- a/xelis_daemon/src/p2p/peer.rs +++ b/xelis_daemon/src/p2p/peer.rs @@ -149,7 +149,7 @@ impl Peer { pub fn set_pruned_topoheight(&self, pruned_topoheight: Option) { if let Some(pruned_topoheight) = pruned_topoheight { self.is_pruned.store(true, Ordering::Release); - self.height.store(pruned_topoheight, Ordering::Release); + self.pruned_topoheight.store(pruned_topoheight, Ordering::Release); } else { self.is_pruned.store(false, Ordering::Release); } From df3a1a3282079ba8a2733c10a41dec6c723500a9 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 20 May 2023 23:33:40 +0200 Subject: [PATCH 73/74] daemon: small fix --- xelis_daemon/src/p2p/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 1e13fac9..21cea73d 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -481,9 +481,9 @@ impl P2pServer { sleep(duration).await; let time = 
get_current_time(); if !self.is_syncing() { - self.last_sync_update.store(time, Ordering::SeqCst); let fast_sync = self.blockchain.is_fast_sync_mode_enabled(); if let Some(peer) = self.select_random_best_peer(fast_sync).await { + self.last_sync_update.store(time, Ordering::SeqCst); self.set_syncing(true); trace!("Selected for chain sync is {}", peer); // check if we can maybe fast sync first @@ -825,7 +825,7 @@ impl P2pServer { let time = get_current_time(); // Node is trying to ask too fast our chain if last_request + CHAIN_SYNC_DELAY > time { - debug!("Peer requested sync chain too fast!"); + debug!("{} requested sync chain too fast!", peer); return Err(P2pError::RequestSyncChainTooFast) } peer.set_last_chain_sync(time); From bdb2ccf2976a9724e502f0b0fd8ed692feb5bcd8 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 21 May 2023 00:03:39 +0200 Subject: [PATCH 74/74] update version to 1.4.0 --- Cargo.lock | 6 +++--- xelis_common/Cargo.toml | 2 +- xelis_daemon/Cargo.toml | 2 +- xelis_wallet/Cargo.toml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 677824e3..0864d785 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2549,7 +2549,7 @@ dependencies = [ [[package]] name = "xelis_common" -version = "1.3.0" +version = "1.4.0" dependencies = [ "actix-rt", "actix-web", @@ -2577,7 +2577,7 @@ dependencies = [ [[package]] name = "xelis_daemon" -version = "1.3.0" +version = "1.4.0" dependencies = [ "actix", "actix-web", @@ -2623,7 +2623,7 @@ dependencies = [ [[package]] name = "xelis_wallet" -version = "1.3.0" +version = "1.4.0" dependencies = [ "actix", "actix-web", diff --git a/xelis_common/Cargo.toml b/xelis_common/Cargo.toml index 96b99b63..9a74cbbf 100644 --- a/xelis_common/Cargo.toml +++ b/xelis_common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "xelis_common" -version = "1.3.0" +version = "1.4.0" edition = "2021" authors = ["Slixe "] diff --git a/xelis_daemon/Cargo.toml b/xelis_daemon/Cargo.toml index f528d74c..4e29bd94 100644 
--- a/xelis_daemon/Cargo.toml +++ b/xelis_daemon/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "xelis_daemon" -version = "1.3.0" +version = "1.4.0" edition = "2021" authors = ["Slixe "] diff --git a/xelis_wallet/Cargo.toml b/xelis_wallet/Cargo.toml index 621a981a..c5a8218e 100644 --- a/xelis_wallet/Cargo.toml +++ b/xelis_wallet/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "xelis_wallet" -version = "1.3.0" +version = "1.4.0" edition = "2021" authors = ["Slixe "]