From 782070114abf8c762ef56730c7d1568cc728c762 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 30 Aug 2023 23:30:59 +0200 Subject: [PATCH 001/160] common: fix prompt bug in Windows (crossterm-rs/crossterm#772) --- xelis_common/src/prompt/mod.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/xelis_common/src/prompt/mod.rs b/xelis_common/src/prompt/mod.rs index 5e773720..f72d6141 100644 --- a/xelis_common/src/prompt/mod.rs +++ b/xelis_common/src/prompt/mod.rs @@ -11,7 +11,7 @@ use std::io::{Write, stdout, Error as IOError}; use std::num::ParseFloatError; use std::str::FromStr; use std::sync::atomic::{AtomicBool, Ordering, AtomicUsize}; -use crossterm::event::{self, Event, KeyCode, KeyModifiers}; +use crossterm::event::{self, Event, KeyCode, KeyModifiers, KeyEventKind}; use crossterm::terminal; use fern::colors::{ColoredLevelConfig, Color}; use tokio::sync::mpsc::{self, UnboundedSender, UnboundedReceiver}; @@ -157,6 +157,11 @@ impl State { buffer.push_str(&s); } Event::Key(key) => { + // Windows bug - https://github.com/crossterm-rs/crossterm/issues/772 + if key.kind != KeyEventKind::Press { + continue; + } + match key.code { KeyCode::Up => { let mut buffer = self.user_input.lock()?; From d67451cb05b4369f5de9a66af9df653403c8c6a3 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 2 Sep 2023 22:41:25 +0200 Subject: [PATCH 002/160] daemon: verify signature --- xelis_daemon/src/core/blockchain.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index a634ce44..539edb8c 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1765,9 +1765,14 @@ impl Blockchain { // verify the transaction and returns fees available // nonces allow us to support multiples tx from same owner in the same block - // txs must be sorted in ascending order based on account nonce + // txs must be sorted in ascending order based on account nonce 
async fn verify_transaction_with_hash<'a>(&self, storage: &S, tx: &'a Transaction, hash: &Hash, balances: &mut HashMap<&'a PublicKey, HashMap<&'a Hash, u64>>, nonces: Option<&mut HashMap<&'a PublicKey, u64>>, skip_nonces: bool) -> Result<(), BlockchainError> { trace!("Verify transaction with hash {}", hash); + + if !tx.verify_signature() { + return Err(BlockchainError::InvalidTransactionSignature) + } + let owner_balances: &mut HashMap<&'a Hash, u64> = balances.entry(tx.get_owner()).or_insert_with(HashMap::new); { let balance = match owner_balances.entry(&XELIS_ASSET) { From 766aed1043b173314fb02e45c9af67736ee2bd67 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 3 Sep 2023 23:15:08 +0200 Subject: [PATCH 003/160] wallet: support events listeners in XSWD --- xelis_common/src/api/mod.rs | 2 +- xelis_common/src/api/wallet.rs | 13 +++ .../src/rpc_server/websocket/handler.rs | 2 +- xelis_daemon/src/core/blockchain.rs | 4 +- xelis_daemon/src/rpc/mod.rs | 4 +- xelis_wallet/src/api/xswd.rs | 80 ++++++++++++++++--- 6 files changed, 88 insertions(+), 17 deletions(-) diff --git a/xelis_common/src/api/mod.rs b/xelis_common/src/api/mod.rs index bf3a5130..a8520ae1 100644 --- a/xelis_common/src/api/mod.rs +++ b/xelis_common/src/api/mod.rs @@ -188,7 +188,7 @@ impl Serializer for DataValue { } } -#[derive(Deserialize)] +#[derive(Serialize, Deserialize)] pub struct SubscribeParams { pub notify: E } diff --git a/xelis_common/src/api/wallet.rs b/xelis_common/src/api/wallet.rs index 14542fa5..e2e9073e 100644 --- a/xelis_common/src/api/wallet.rs +++ b/xelis_common/src/api/wallet.rs @@ -58,4 +58,17 @@ pub struct GetBalanceParams { #[derive(Serialize, Deserialize)] pub struct GetTransactionParams { pub hash: Hash +} + +#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub enum NotifyEvent { + // When a new block is detected by wallet + // it contains Block struct as value + NewBlock, + // When a new asset is added to wallet + // Contains a Hash as value + NewAsset, + // When a 
new transaction is added to wallet + // Contains TransactionEntry struct as value + NewTransaction, } \ No newline at end of file diff --git a/xelis_common/src/rpc_server/websocket/handler.rs b/xelis_common/src/rpc_server/websocket/handler.rs index b5b084b6..1bb40358 100644 --- a/xelis_common/src/rpc_server/websocket/handler.rs +++ b/xelis_common/src/rpc_server/websocket/handler.rs @@ -30,7 +30,7 @@ where HashSet::from_iter(sessions.values().map(|e| e.keys().cloned()).flatten()) } - pub async fn is_tracking_event(&self, event: &E) -> bool { + pub async fn is_event_tracked(&self, event: &E) -> bool { let sessions = self.sessions.lock().await; sessions .values() diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 539edb8c..a4fada9f 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -961,7 +961,7 @@ impl Blockchain { // broadcast to websocket this tx if let Some(rpc) = self.rpc.lock().await.as_ref() { - if rpc.is_tracking_event(&NotifyEvent::TransactionAddedInMempool).await { + if rpc.is_event_tracked(&NotifyEvent::TransactionAddedInMempool).await { let rpc = rpc.clone(); tokio::spawn(async move { let data: DataHash<'_, Arc> = DataHash { hash: Cow::Owned(hash), data: Cow::Owned(tx) }; @@ -1742,7 +1742,7 @@ impl Blockchain { if let Some(rpc) = self.rpc.lock().await.as_ref() { let previous_stable_height = self.get_stable_height(); if height != previous_stable_height { - if rpc.is_tracking_event(&NotifyEvent::StableHeightChanged).await { + if rpc.is_event_tracked(&NotifyEvent::StableHeightChanged).await { let rpc = rpc.clone(); tokio::spawn(async move { let event = json!(StableHeightChangedEvent { diff --git a/xelis_daemon/src/rpc/mod.rs b/xelis_daemon/src/rpc/mod.rs index 21c3cb64..83922a60 100644 --- a/xelis_daemon/src/rpc/mod.rs +++ b/xelis_daemon/src/rpc/mod.rs @@ -99,8 +99,8 @@ impl DaemonRpcServer { self.get_websocket().get_handler().get_tracked_events().await } - pub async fn 
is_tracking_event(&self, event: &NotifyEvent) -> bool { - self.get_websocket().get_handler().is_tracking_event(event).await + pub async fn is_event_tracked(&self, event: &NotifyEvent) -> bool { + self.get_websocket().get_handler().is_event_tracked(event).await } pub async fn notify_clients(&self, event: &NotifyEvent, value: Value) -> Result<(), anyhow::Error> { diff --git a/xelis_wallet/src/api/xswd.rs b/xelis_wallet/src/api/xswd.rs index df31cf24..91020c95 100644 --- a/xelis_wallet/src/api/xswd.rs +++ b/xelis_wallet/src/api/xswd.rs @@ -4,7 +4,7 @@ use actix_web::{get, web::{Data, Payload}, HttpRequest, Responder, HttpServer, A use log::{info, error}; use serde_json::{Value, json}; use tokio::sync::Mutex; -use xelis_common::{rpc_server::{RPCHandler, websocket::{WebSocketHandler, WebSocketSessionShared, WebSocketServer}, RpcRequest, RpcResponseError, InternalRpcError}, crypto::{key::{Signature, SIGNATURE_LENGTH}, hash::hash}, serializer::{Serializer, ReaderError, Reader, Writer}}; +use xelis_common::{rpc_server::{RPCHandler, websocket::{WebSocketHandler, WebSocketSessionShared, WebSocketServer}, RpcRequest, RpcResponseError, InternalRpcError}, crypto::{key::{Signature, SIGNATURE_LENGTH}, hash::hash}, serializer::{Serializer, ReaderError, Reader, Writer}, api::wallet::NotifyEvent}; use serde::{Deserialize, Serialize}; use crate::{wallet::Wallet, config::XSWD_BIND_ADDRESS}; @@ -201,15 +201,20 @@ impl PermissionResult { } pub struct XSWDWebSocketHandler { + // RPC handler for methods handler: RPCHandler>, - applications: Mutex, ApplicationData>> + // All applications connected to the wallet + applications: Mutex, ApplicationData>>, + // Applications listening for events + listeners: Mutex, HashMap>>> } impl XSWDWebSocketHandler { pub fn new(handler: RPCHandler>) -> Self { Self { handler, - applications: Mutex::new(HashMap::new()) + applications: Mutex::new(HashMap::new()), + listeners: Mutex::new(HashMap::new()) } } @@ -221,7 +226,7 @@ impl XSWDWebSocketHandler { async 
fn verify_permission_for_request(&self, app: &mut ApplicationData, request: &RpcRequest) -> Result<(), RpcResponseError> { if !self.handler.has_method(&request.method) { - return Err(RpcResponseError::new(request.id.clone(), InternalRpcError::Custom(format!("Method {} was not found", request.method)))) + return Err(RpcResponseError::new(request.id, InternalRpcError::Custom(format!("Method {} was not found", request.method)))) } let permission = app.permissions.get(&request.method).map(|v| *v).unwrap_or(Permission::Ask); @@ -230,25 +235,25 @@ impl XSWDWebSocketHandler { Permission::Ask => { let result = self.handler.get_data() .request_permission(app, PermissionRequest::Request(request)).await - .map_err(|msg| RpcResponseError::new(request.id.clone(), InternalRpcError::Custom(msg.to_string())))?; + .map_err(|msg| RpcResponseError::new(request.id, InternalRpcError::Custom(msg.to_string())))?; match result { PermissionResult::Allow => Ok(()), - PermissionResult::Deny => Err(RpcResponseError::new(request.id.clone(), PERMISSION_DENIED_ERROR)), + PermissionResult::Deny => Err(RpcResponseError::new(request.id, PERMISSION_DENIED_ERROR)), PermissionResult::AlwaysAllow => { app.permissions.insert(request.method.clone(), Permission::AcceptAlways); Ok(()) }, PermissionResult::AlwaysDeny => { app.permissions.insert(request.method.clone(), Permission::AcceptAlways); - Err(RpcResponseError::new(request.id.clone(), PERMISSION_DENIED_ERROR)) + Err(RpcResponseError::new(request.id, PERMISSION_DENIED_ERROR)) } } } // User has already accepted this method Permission::AcceptAlways => Ok(()), // User has denied access to this method - Permission::DenyAlways => Err(RpcResponseError::new(request.id.clone(), PERMISSION_DENIED_ERROR)) + Permission::DenyAlways => Err(RpcResponseError::new(request.id, PERMISSION_DENIED_ERROR)) } } @@ -323,19 +328,69 @@ impl XSWDWebSocketHandler { })) } + // register a new event listener for the specified connection/application + async fn 
subscribe_session_to_event(&self, session: &WebSocketSessionShared, event: NotifyEvent, id: Option) -> Result<(), RpcResponseError> { + let mut listeners = self.listeners.lock().await; + let events = listeners.entry(session.clone()).or_insert_with(HashMap::new); + + if events.contains_key(&event) { + return Err(RpcResponseError::new(id, InternalRpcError::EventAlreadySubscribed)); + } + + events.insert(event, id); + + Ok(()) + } + + // unregister an event listener for the specified connection/application + async fn unsubscribe_session_from_event(&self, session: &WebSocketSessionShared, event: NotifyEvent, id: Option) -> Result<(), RpcResponseError> { + let mut listeners = self.listeners.lock().await; + let events = listeners.get_mut(session).ok_or_else(|| RpcResponseError::new(id, InternalRpcError::EventNotSubscribed))?; + + if events.remove(&event).is_none() { + return Err(RpcResponseError::new(id, InternalRpcError::EventNotSubscribed)); + } + + Ok(()) + } + async fn on_message_internal(&self, session: &WebSocketSessionShared, message: &[u8]) -> Result, RpcResponseError> { let mut applications = self.applications.lock().await; // Application is already registered, verify permission and call the method if let Some(app) = applications.get_mut(session) { let request: RpcRequest = self.handler.parse_request(message)?; + + // Verify first if the method exist (and that its not a built-in one) + let is_subscribe = request.method == "subscribe"; + let is_unsubscribe = request.method == "unsubscribe"; + + if !self.handler.has_method(&request.method) && !is_subscribe && !is_unsubscribe { + return Err(RpcResponseError::new(request.id, InternalRpcError::MethodNotFound(request.method))) + } + + // let's check the permission set by user for this method self.verify_permission_for_request(app, &request).await?; - self.handler.execute_method(request).await.map(|v| Some(v)) + if is_subscribe || is_unsubscribe { + // retrieve the event variant + let event = serde_json::from_value( + 
request.params.ok_or_else(|| RpcResponseError::new(request.id, InternalRpcError::ExpectedParams))?) + .map_err(|e| RpcResponseError::new(request.id, InternalRpcError::InvalidParams(e)) + )?; + if is_subscribe { + self.subscribe_session_to_event(session, event, request.id).await.map(|_| None) + } else { + self.unsubscribe_session_from_event(session, event, request.id).await.map(|_| None) + } + } else { + // Call the method + self.handler.execute_method(request).await.map(|v| Some(v)) + } } else { // Application is not registered, register it - match self.add_application(&mut applications, session, message).await.map(|v| Some(v)) { - Ok(v) => Ok(v), + match self.add_application(&mut applications, session, message).await { + Ok(v) => Ok(Some(v)), Err(e) => { // Send error message and then close the session if let Err(e) = session.send_text(&e.to_json().to_string()).await { @@ -358,6 +413,9 @@ impl WebSocketHandler for XSWDWebSocketHandler { info!("Application {} has disconnected", app.name); } + let mut listeners = self.listeners.lock().await; + listeners.remove(session); + Ok(()) } From 07d4a06fa80454568143e015cf5ce223382a9a86 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 4 Sep 2023 12:56:05 +0200 Subject: [PATCH 004/160] wallet: don't verify twice method in XSWD --- xelis_wallet/src/api/xswd.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/xelis_wallet/src/api/xswd.rs b/xelis_wallet/src/api/xswd.rs index 91020c95..6f1b6cb9 100644 --- a/xelis_wallet/src/api/xswd.rs +++ b/xelis_wallet/src/api/xswd.rs @@ -141,7 +141,7 @@ impl XSWD { }) } - pub fn get_applications(&self) -> &XSWDWebSocketHandler { + pub fn get_handler(&self) -> &XSWDWebSocketHandler { self.websocket.get_handler() } @@ -225,10 +225,6 @@ impl XSWDWebSocketHandler { } async fn verify_permission_for_request(&self, app: &mut ApplicationData, request: &RpcRequest) -> Result<(), RpcResponseError> { - if !self.handler.has_method(&request.method) { - return 
Err(RpcResponseError::new(request.id, InternalRpcError::Custom(format!("Method {} was not found", request.method)))) - } - let permission = app.permissions.get(&request.method).map(|v| *v).unwrap_or(Permission::Ask); match permission { // Request permission from user From 52f442a373ae60779cc618d4d3863d53425f8a07 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 4 Sep 2023 17:10:21 +0200 Subject: [PATCH 005/160] wallet: generic XSWD implementation --- xelis_wallet/src/api/mod.rs | 18 +++-- xelis_wallet/src/api/xswd.rs | 109 ++++++++++++++++++++++------- xelis_wallet/src/wallet.rs | 132 +++++++++++++++++++---------------- 3 files changed, 169 insertions(+), 90 deletions(-) diff --git a/xelis_wallet/src/api/mod.rs b/xelis_wallet/src/api/mod.rs index ceac1032..920c716d 100644 --- a/xelis_wallet/src/api/mod.rs +++ b/xelis_wallet/src/api/mod.rs @@ -8,16 +8,24 @@ pub use self::{ XSWD, ApplicationData, PermissionResult, - PermissionRequest - } + PermissionRequest, + XSWDPermissionHandler + }, + rpc::register_methods as register_rpc_methods }; -pub enum APIServer { +pub enum APIServer +where + W: Clone + Send + Sync + XSWDPermissionHandler + 'static +{ RPCServer(WalletRpcServerShared), - XSWD(XSWD) + XSWD(XSWD) } -impl APIServer { +impl APIServer +where + W: Clone + Send + Sync + XSWDPermissionHandler + 'static +{ pub async fn stop(self) { match self { APIServer::RPCServer(server) => { diff --git a/xelis_wallet/src/api/xswd.rs b/xelis_wallet/src/api/xswd.rs index 6f1b6cb9..b597b2ec 100644 --- a/xelis_wallet/src/api/xswd.rs +++ b/xelis_wallet/src/api/xswd.rs @@ -1,14 +1,13 @@ -use std::{sync::Arc, collections::HashMap}; +use std::{sync::Arc, collections::{HashMap, HashSet}, borrow::Cow}; +use anyhow::Error; use async_trait::async_trait; -use actix_web::{get, web::{Data, Payload}, HttpRequest, Responder, HttpServer, App, dev::ServerHandle, HttpResponse}; -use log::{info, error}; +use actix_web::{get, web::{Data, Payload, self}, HttpRequest, Responder, HttpServer, App, 
dev::ServerHandle, HttpResponse}; +use log::{info, error, debug}; use serde_json::{Value, json}; use tokio::sync::Mutex; -use xelis_common::{rpc_server::{RPCHandler, websocket::{WebSocketHandler, WebSocketSessionShared, WebSocketServer}, RpcRequest, RpcResponseError, InternalRpcError}, crypto::{key::{Signature, SIGNATURE_LENGTH}, hash::hash}, serializer::{Serializer, ReaderError, Reader, Writer}, api::wallet::NotifyEvent}; +use xelis_common::{rpc_server::{RPCHandler, websocket::{WebSocketHandler, WebSocketSessionShared, WebSocketServer}, RpcRequest, RpcResponseError, InternalRpcError, RpcResponse}, crypto::{key::{Signature, SIGNATURE_LENGTH, PublicKey}, hash::hash}, serializer::{Serializer, ReaderError, Reader, Writer}, api::{wallet::NotifyEvent, EventResult}}; use serde::{Deserialize, Serialize}; -use crate::{wallet::Wallet, config::XSWD_BIND_ADDRESS}; - -use super::rpc; +use crate::config::XSWD_BIND_ADDRESS; // XSWD Protocol (XELIS Secure WebSocket DApp) // is a way to communicate with the XELIS Wallet @@ -25,11 +24,22 @@ use super::rpc; // For security reasons, in case the signed token leaks, at each connection, // the wallet will request the authorization of the user // but will keep already-configured permissions. 
-pub struct XSWD { - websocket: Arc>, +pub struct XSWD +where + W: Clone + Send + Sync + XSWDPermissionHandler + 'static +{ + websocket: Arc>>, handle: ServerHandle } +#[async_trait] +pub trait XSWDPermissionHandler { + // Handler function to request permission to user + async fn request_permission(&self, app_data: &ApplicationData, request: PermissionRequest<'_>) -> Result; + // Public key to use to verify the signature + async fn get_public_key(&self) -> Result<&PublicKey, Error>; +} + #[derive(Serialize, Deserialize, Debug)] pub struct ApplicationData { // Application ID in hexadecimal format @@ -111,20 +121,20 @@ impl Serializer for ApplicationData { const PERMISSION_DENIED_ERROR: InternalRpcError = InternalRpcError::CustomStr("Permission denied"); -impl XSWD { - pub fn new(wallet: Arc) -> Result { +impl XSWD +where + W: Clone + Send + Sync + XSWDPermissionHandler + 'static +{ + pub fn new(rpc_handler: RPCHandler) -> Result { info!("Starting XSWD Server..."); - let mut rpc_handler = RPCHandler::new(wallet); - rpc::register_methods(&mut rpc_handler); - let websocket = WebSocketServer::new(XSWDWebSocketHandler::new(rpc_handler)); let cloned_websocket = websocket.clone(); let http_server = HttpServer::new(move || { let server = Arc::clone(&cloned_websocket); App::new() .app_data(Data::from(server)) - .service(endpoint) .service(index) + .route("/xswd", web::get().to(endpoint::)) }) .disable_signals() .bind(&XSWD_BIND_ADDRESS)? 
@@ -141,7 +151,7 @@ impl XSWD { }) } - pub fn get_handler(&self) -> &XSWDWebSocketHandler { + pub fn get_handler(&self) -> &XSWDWebSocketHandler { self.websocket.get_handler() } @@ -200,17 +210,23 @@ impl PermissionResult { } } -pub struct XSWDWebSocketHandler { +pub struct XSWDWebSocketHandler +where + W: Clone + Send + Sync + XSWDPermissionHandler + 'static +{ // RPC handler for methods - handler: RPCHandler>, + handler: RPCHandler, // All applications connected to the wallet applications: Mutex, ApplicationData>>, // Applications listening for events - listeners: Mutex, HashMap>>> + listeners: Mutex, HashMap>>>, } -impl XSWDWebSocketHandler { - pub fn new(handler: RPCHandler>) -> Self { +impl XSWDWebSocketHandler +where + W: Clone + Send + Sync + XSWDPermissionHandler + 'static +{ + pub fn new(handler: RPCHandler) -> Self { Self { handler, applications: Mutex::new(HashMap::new()), @@ -224,6 +240,38 @@ impl XSWDWebSocketHandler { &self.applications } + // get a HashSet of all events tracked + pub async fn get_tracked_events(&self) -> HashSet { + let sessions = self.listeners.lock().await; + HashSet::from_iter(sessions.values().map(|e| e.keys().cloned()).flatten()) + } + + // verify if a event is tracked by XSWD + pub async fn is_event_tracked(&self, event: &NotifyEvent) -> bool { + let sessions = self.listeners.lock().await; + sessions + .values() + .find(|e| e.keys().into_iter().find(|x| *x == event).is_some()) + .is_some() + } + + // notify a new event to all connected WebSocket + pub async fn notify_event(&self, event: &NotifyEvent, value: Value) { + let value = json!(EventResult { event: Cow::Borrowed(event), value }); + let sessions = self.listeners.lock().await; + for (session, subscriptions) in sessions.iter() { + if let Some(id) = subscriptions.get(event) { + let response = json!(RpcResponse::new(Cow::Borrowed(&id), Cow::Borrowed(&value))); + let session = session.clone(); + tokio::spawn(async move { + if let Err(e) = 
session.send_text(response.to_string()).await { + debug!("Error occured while notifying a new event: {}", e); + }; + }); + } + } + } + async fn verify_permission_for_request(&self, app: &mut ApplicationData, request: &RpcRequest) -> Result<(), RpcResponseError> { let permission = app.permissions.get(&request.method).map(|v| *v).unwrap_or(Permission::Ask); match permission { @@ -290,11 +338,15 @@ impl XSWDWebSocketHandler { } let wallet = self.handler.get_data(); - // verify signature of application data + // verify signature of application data TODO if let Some(signature) = &app_data.signature { let bytes = app_data.to_bytes(); let bytes = &bytes[0..bytes.len() - SIGNATURE_LENGTH]; // remove signature bytes for verification - let key = wallet.get_public_key(); + let key = wallet.get_public_key().await + .map_err(|e| { + error!("error while retrieving public key: {}", e); + RpcResponseError::new(None, InternalRpcError::CustomStr("Error while retrieving public key")) + })?; if !key.verify_signature(&hash(&bytes), signature) { return Err(RpcResponseError::new(None, InternalRpcError::CustomStr("Invalid signature for application data"))); @@ -402,7 +454,10 @@ impl XSWDWebSocketHandler { } #[async_trait] -impl WebSocketHandler for XSWDWebSocketHandler { +impl WebSocketHandler for XSWDWebSocketHandler +where + W: Clone + Send + Sync + XSWDPermissionHandler + 'static +{ async fn on_close(&self, session: &WebSocketSessionShared) -> Result<(), anyhow::Error> { let mut applications = self.applications.lock().await; if let Some(app) = applications.remove(session) { @@ -434,8 +489,10 @@ async fn index() -> Result { Ok(HttpResponse::Ok().body("XSWD is running !")) } -#[get("/xswd")] -async fn endpoint(server: Data>, request: HttpRequest, body: Payload) -> Result { +async fn endpoint(server: Data>>, request: HttpRequest, body: Payload) -> Result +where + W: Clone + Send + Sync + XSWDPermissionHandler + 'static +{ let response = server.handle_connection(request, body).await?; 
Ok(response) } \ No newline at end of file diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index c4410bde..8cbb2701 100644 --- a/xelis_wallet/src/wallet.rs +++ b/xelis_wallet/src/wallet.rs @@ -25,19 +25,22 @@ use thiserror::Error; use log::{error, debug}; #[cfg(feature = "api_server")] -use crate::api::{ - XSWD, - WalletRpcServer, - AuthConfig, - APIServer, - ApplicationData, - PermissionResult, - PermissionRequest +use { + async_trait::async_trait, + crate::api::{ + register_rpc_methods, + XSWD, + WalletRpcServer, + AuthConfig, + APIServer, + ApplicationData, + PermissionResult, + PermissionRequest, + XSWDPermissionHandler + }, + xelis_common::prompt::ShareablePrompt }; -#[cfg(feature = "api_server")] -use xelis_common::prompt::ShareablePrompt; - #[derive(Error, Debug)] pub enum WalletError { #[error("Invalid key pair")] @@ -106,7 +109,7 @@ pub struct Wallet { network: Network, // RPC Server #[cfg(feature = "api_server")] - api_server: Mutex>, + api_server: Mutex>>>, // Prompt for CLI // Only used for requesting permissions through it #[cfg(feature = "api_server")] @@ -251,58 +254,17 @@ impl Wallet { #[cfg(feature = "api_server")] pub async fn enable_xswd(self: &Arc) -> Result<(), Error> { + use xelis_common::rpc_server::RPCHandler; + let mut lock = self.api_server.lock().await; if lock.is_some() { return Err(WalletError::RPCServerAlreadyRunning.into()) } - *lock = Some(APIServer::XSWD(XSWD::new(self.clone())?)); - Ok(()) - } - - #[cfg(feature = "api_server")] - pub async fn request_permission(&self, app_data: &ApplicationData, request: PermissionRequest<'_>) -> Result { - if let Some(prompt) = self.prompt.lock().await.as_ref() { - match request { - PermissionRequest::Application(signed) => { - let mut message = format!("XSWD: Allow application {} ({}) to access your wallet\r\n(Y/N): ", app_data.get_name(), app_data.get_id()); - if signed { - message = "NOTE: Application authorizaion was already approved previously.\r\n".to_string() + 
&message; - } - let accepted = prompt.read_valid_str_value(message, vec!["y", "n"]).await? == "y"; - if accepted { - Ok(PermissionResult::Allow) - } else { - Ok(PermissionResult::Deny) - } - }, - PermissionRequest::Request(request) => { - let params = if let Some(params) = &request.params { - params.to_string() - } else { - "".to_string() - }; - - let message = format!( - "XSWD: Request from {}: {}\r\nParams: {}\r\nDo you want to allow this request ?\r\n([A]llow / [D]eny / [AA] Always Allow / [AD] Always Deny): ", - app_data.get_name(), - request.method, - params - ); - - let answer = prompt.read_valid_str_value(message, vec!["a", "d", "aa", "ad"]).await?; - Ok(match answer.as_str() { - "a" => PermissionResult::Allow, - "d" => PermissionResult::Deny, - "aa" => PermissionResult::AlwaysAllow, - "ad" => PermissionResult::AlwaysDeny, - _ => unreachable!() - }) - } - } - } else { - Err(WalletError::NoHandlerAvailable.into()) - } + let mut rpc_handler = RPCHandler::new(self.clone()); + register_rpc_methods(&mut rpc_handler); + *lock = Some(APIServer::XSWD(XSWD::new(rpc_handler)?)); + Ok(()) } #[cfg(feature = "api_server")] @@ -553,4 +515,56 @@ impl Wallet { pub fn get_network(&self) -> &Network { &self.network } +} + +#[cfg(feature = "api_server")] +#[async_trait] +impl XSWDPermissionHandler for Arc { + async fn request_permission(&self, app_data: &ApplicationData, request: PermissionRequest<'_>) -> Result { + if let Some(prompt) = self.prompt.lock().await.as_ref() { + match request { + PermissionRequest::Application(signed) => { + let mut message = format!("XSWD: Allow application {} ({}) to access your wallet\r\n(Y/N): ", app_data.get_name(), app_data.get_id()); + if signed { + message = "NOTE: Application authorizaion was already approved previously.\r\n".to_string() + &message; + } + let accepted = prompt.read_valid_str_value(message, vec!["y", "n"]).await? 
== "y"; + if accepted { + Ok(PermissionResult::Allow) + } else { + Ok(PermissionResult::Deny) + } + }, + PermissionRequest::Request(request) => { + let params = if let Some(params) = &request.params { + params.to_string() + } else { + "".to_string() + }; + + let message = format!( + "XSWD: Request from {}: {}\r\nParams: {}\r\nDo you want to allow this request ?\r\n([A]llow / [D]eny / [AA] Always Allow / [AD] Always Deny): ", + app_data.get_name(), + request.method, + params + ); + + let answer = prompt.read_valid_str_value(message, vec!["a", "d", "aa", "ad"]).await?; + Ok(match answer.as_str() { + "a" => PermissionResult::Allow, + "d" => PermissionResult::Deny, + "aa" => PermissionResult::AlwaysAllow, + "ad" => PermissionResult::AlwaysDeny, + _ => unreachable!() + }) + } + } + } else { + Err(WalletError::NoHandlerAvailable.into()) + } + } + + async fn get_public_key(&self) -> Result<&PublicKey, Error> { + Ok((self as &Wallet).get_public_key()) + } } \ No newline at end of file From 226218686dbfa4514d1a9ddcd3beaedef00b194f Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 4 Sep 2023 17:27:17 +0200 Subject: [PATCH 006/160] globals -> utils --- xelis_common/src/crypto/key.rs | 2 +- xelis_common/src/lib.rs | 2 +- xelis_common/src/{globals.rs => utils.rs} | 0 xelis_daemon/src/core/blockchain.rs | 2 +- xelis_daemon/src/main.rs | 2 +- xelis_daemon/src/p2p/connection.rs | 2 +- xelis_daemon/src/p2p/mod.rs | 2 +- xelis_daemon/src/p2p/packet/peer_disconnected.rs | 2 +- xelis_daemon/src/p2p/packet/ping.rs | 2 +- xelis_daemon/src/p2p/peer.rs | 2 +- xelis_daemon/src/p2p/peer_list.rs | 2 +- xelis_daemon/src/rpc/getwork_server.rs | 2 +- xelis_miner/src/main.rs | 2 +- xelis_wallet/src/entry.rs | 2 +- xelis_wallet/src/main.rs | 2 +- xelis_wallet/src/transaction_builder.rs | 2 +- xelis_wallet/src/wallet.rs | 2 +- 17 files changed, 16 insertions(+), 16 deletions(-) rename xelis_common/src/{globals.rs => utils.rs} (100%) diff --git a/xelis_common/src/crypto/key.rs 
b/xelis_common/src/crypto/key.rs index 2c92ba47..c97a36f5 100644 --- a/xelis_common/src/crypto/key.rs +++ b/xelis_common/src/crypto/key.rs @@ -1,5 +1,5 @@ use crate::api::DataElement; -use crate::globals::get_network; +use crate::utils::get_network; use crate::serializer::{Reader, ReaderError, Serializer, Writer}; use super::address::{Address, AddressType}; use super::hash::Hash; diff --git a/xelis_common/src/lib.rs b/xelis_common/src/lib.rs index 88e0a54d..79f3b0cd 100644 --- a/xelis_common/src/lib.rs +++ b/xelis_common/src/lib.rs @@ -5,7 +5,7 @@ pub mod block; pub mod account; pub mod api; -pub mod globals; +pub mod utils; pub mod config; pub mod immutable; pub mod difficulty; diff --git a/xelis_common/src/globals.rs b/xelis_common/src/utils.rs similarity index 100% rename from xelis_common/src/globals.rs rename to xelis_common/src/utils.rs diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index a4fada9f..cebd30de 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -6,7 +6,7 @@ use xelis_common::{ crypto::{key::PublicKey, hash::{Hashable, Hash}}, difficulty::{check_difficulty, calculate_difficulty}, transaction::{Transaction, TransactionType, EXTRA_DATA_LIMIT_SIZE}, - globals::{get_current_timestamp, format_coin}, + utils::{get_current_timestamp, format_coin}, block::{Block, BlockHeader, EXTRA_NONCE_SIZE, Difficulty}, immutable::Immutable, serializer::Serializer, account::VersionedBalance, api::{daemon::{NotifyEvent, BlockOrderedEvent, TransactionExecutedEvent, BlockType, StableHeightChangedEvent}, DataHash}, network::Network diff --git a/xelis_daemon/src/main.rs b/xelis_daemon/src/main.rs index 5be5f7fb..55e6bc23 100644 --- a/xelis_daemon/src/main.rs +++ b/xelis_daemon/src/main.rs @@ -9,7 +9,7 @@ use p2p::P2pServer; use rpc::{getwork_server::SharedGetWorkServer, rpc::get_block_response_for_hash}; use xelis_common::{ prompt::{Prompt, command::{CommandManager, CommandError, Command, 
CommandHandler}, PromptError, argument::{ArgumentManager, Arg, ArgType}, LogLevel, self, ShareablePrompt}, - config::{VERSION, BLOCK_TIME}, globals::{format_hashrate, set_network_to}, async_handler, crypto::{address::Address, hash::Hashable}, network::Network, transaction::Transaction, serializer::Serializer + config::{VERSION, BLOCK_TIME}, utils::{format_hashrate, set_network_to}, async_handler, crypto::{address::Address, hash::Hashable}, network::Network, transaction::Transaction, serializer::Serializer }; use crate::core::{ blockchain::{Config, Blockchain}, diff --git a/xelis_daemon/src/p2p/connection.rs b/xelis_daemon/src/p2p/connection.rs index d20f39f2..d6ab5fd2 100644 --- a/xelis_daemon/src/p2p/connection.rs +++ b/xelis_daemon/src/p2p/connection.rs @@ -8,7 +8,7 @@ use humantime::format_duration; use tokio::net::TcpStream; use tokio::net::tcp::{OwnedWriteHalf, OwnedReadHalf}; use xelis_common::{ - globals::get_current_time, + utils::get_current_time, serializer::{Reader, Serializer}, }; use std::fmt::{Display, Error, Formatter}; diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 659d9a39..e182f047 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -12,7 +12,7 @@ use xelis_common::{ serializer::Serializer, crypto::hash::{Hashable, Hash}, block::{BlockHeader, Block}, - globals::get_current_time, immutable::Immutable, account::VersionedNonce + utils::get_current_time, immutable::Immutable, account::VersionedNonce }; use crate::{core::{blockchain::Blockchain, storage::Storage}, p2p::{chain_validator::ChainValidator, packet::{bootstrap_chain::{StepRequest, StepResponse, BootstrapChainResponse, MAX_ITEMS_PER_PAGE, BlockMetadata}, inventory::{NOTIFY_MAX_LEN, NotifyInventoryRequest, NotifyInventoryResponse}}, tracker::WaiterResponse}}; use crate::core::error::BlockchainError; diff --git a/xelis_daemon/src/p2p/packet/peer_disconnected.rs b/xelis_daemon/src/p2p/packet/peer_disconnected.rs index 53d130b3..32737cbe 
100644 --- a/xelis_daemon/src/p2p/packet/peer_disconnected.rs +++ b/xelis_daemon/src/p2p/packet/peer_disconnected.rs @@ -1,6 +1,6 @@ use std::net::SocketAddr; -use xelis_common::{serializer::{Serializer, Reader, ReaderError, Writer}, globals::{ip_from_bytes, ip_to_bytes}}; +use xelis_common::{serializer::{Serializer, Reader, ReaderError, Writer}, utils::{ip_from_bytes, ip_to_bytes}}; // this packet is sent when a peer disconnects from one of our peer // it is used to continue to track common peers between us and our peers diff --git a/xelis_daemon/src/p2p/packet/ping.rs b/xelis_daemon/src/p2p/packet/ping.rs index 1f6fa9b7..2728f09a 100644 --- a/xelis_daemon/src/p2p/packet/ping.rs +++ b/xelis_daemon/src/p2p/packet/ping.rs @@ -7,7 +7,7 @@ use xelis_common::{ ReaderError, Reader }, - globals::{ + utils::{ ip_to_bytes, ip_from_bytes }, block::Difficulty diff --git a/xelis_daemon/src/p2p/peer.rs b/xelis_daemon/src/p2p/peer.rs index 8e89d376..23d77c1d 100644 --- a/xelis_daemon/src/p2p/peer.rs +++ b/xelis_daemon/src/p2p/peer.rs @@ -1,6 +1,6 @@ use lru::LruCache; use xelis_common::config::{PEER_FAIL_TIME_RESET, STABLE_LIMIT, TIPS_LIMIT, PEER_TIMEOUT_BOOTSTRAP_STEP}; -use xelis_common::globals::get_current_time; +use xelis_common::utils::get_current_time; use xelis_common::{ crypto::hash::Hash, config::PEER_TIMEOUT_REQUEST_OBJECT, diff --git a/xelis_daemon/src/p2p/peer_list.rs b/xelis_daemon/src/p2p/peer_list.rs index ee3182f5..4147c1aa 100644 --- a/xelis_daemon/src/p2p/peer_list.rs +++ b/xelis_daemon/src/p2p/peer_list.rs @@ -4,7 +4,7 @@ use super::{peer::Peer, packet::Packet, error::P2pError}; use std::{collections::HashMap, net::SocketAddr, fs}; use serde::{Serialize, Deserialize}; use tokio::sync::RwLock; -use xelis_common::{serializer::Serializer, globals::get_current_time, config::{P2P_EXTEND_PEERLIST_DELAY, PEER_FAIL_LIMIT}}; +use xelis_common::{serializer::Serializer, utils::get_current_time, config::{P2P_EXTEND_PEERLIST_DELAY, PEER_FAIL_LIMIT}}; use std::sync::Arc; 
use bytes::Bytes; use log::{info, debug, trace, error, warn}; diff --git a/xelis_daemon/src/rpc/getwork_server.rs b/xelis_daemon/src/rpc/getwork_server.rs index 3de438bf..d4ea7d9f 100644 --- a/xelis_daemon/src/rpc/getwork_server.rs +++ b/xelis_daemon/src/rpc/getwork_server.rs @@ -8,7 +8,7 @@ use rand::{rngs::OsRng, RngCore}; use serde::Serialize; use serde_json::json; use tokio::sync::Mutex; -use xelis_common::{crypto::{key::PublicKey, hash::Hash}, globals::get_current_timestamp, api::daemon::{GetBlockTemplateResult, SubmitBlockParams}, serializer::Serializer, block::{BlockHeader, BlockMiner, Difficulty}, config::{DEV_PUBLIC_KEY, STABLE_LIMIT}, immutable::Immutable, rpc_server::{RpcResponseError, InternalRpcError}}; +use xelis_common::{crypto::{key::PublicKey, hash::Hash}, utils::get_current_timestamp, api::daemon::{GetBlockTemplateResult, SubmitBlockParams}, serializer::Serializer, block::{BlockHeader, BlockMiner, Difficulty}, config::{DEV_PUBLIC_KEY, STABLE_LIMIT}, immutable::Immutable, rpc_server::{RpcResponseError, InternalRpcError}}; use crate::core::{blockchain::Blockchain, storage::Storage}; pub type SharedGetWorkServer = Arc>; diff --git a/xelis_miner/src/main.rs b/xelis_miner/src/main.rs index 2b8e725d..37fd7304 100644 --- a/xelis_miner/src/main.rs +++ b/xelis_miner/src/main.rs @@ -12,7 +12,7 @@ use xelis_common::{ serializer::Serializer, difficulty::check_difficulty, config::{VERSION, DEV_ADDRESS}, - globals::{get_current_timestamp, format_hashrate, format_difficulty}, + utils::{get_current_timestamp, format_hashrate, format_difficulty}, crypto::{hash::{Hashable, Hash, hash}, address::Address}, api::daemon::{GetBlockTemplateResult, SubmitBlockParams}, prompt::{Prompt, command::CommandManager, LogLevel, ShareablePrompt, self}, }; diff --git a/xelis_wallet/src/entry.rs b/xelis_wallet/src/entry.rs index 9dc9b698..a64b346c 100644 --- a/xelis_wallet/src/entry.rs +++ b/xelis_wallet/src/entry.rs @@ -1,7 +1,7 @@ use std::fmt::{self, Display, Formatter}; use 
serde::Serialize; -use xelis_common::{crypto::{hash::Hash, key::PublicKey}, serializer::{Serializer, ReaderError, Reader, Writer}, transaction::EXTRA_DATA_LIMIT_SIZE, globals::format_coin}; +use xelis_common::{crypto::{hash::Hash, key::PublicKey}, serializer::{Serializer, ReaderError, Reader, Writer}, transaction::EXTRA_DATA_LIMIT_SIZE, utils::format_coin}; #[derive(Serialize, Clone)] pub struct Transfer { diff --git a/xelis_wallet/src/main.rs b/xelis_wallet/src/main.rs index 783a1ec8..3184371d 100644 --- a/xelis_wallet/src/main.rs +++ b/xelis_wallet/src/main.rs @@ -8,7 +8,7 @@ use clap::Parser; use xelis_common::{config::{ DEFAULT_DAEMON_ADDRESS, VERSION, XELIS_ASSET, COIN_VALUE -}, prompt::{Prompt, command::{CommandManager, Command, CommandHandler, CommandError}, argument::{Arg, ArgType, ArgumentManager}, LogLevel, self, ShareablePrompt, PromptError}, async_handler, crypto::{address::{Address, AddressType}, hash::Hashable}, transaction::{TransactionType, Transaction}, globals::{format_coin, set_network_to, get_network}, serializer::Serializer, network::Network, api::wallet::FeeBuilder}; +}, prompt::{Prompt, command::{CommandManager, Command, CommandHandler, CommandError}, argument::{Arg, ArgType, ArgumentManager}, LogLevel, self, ShareablePrompt, PromptError}, async_handler, crypto::{address::{Address, AddressType}, hash::Hashable}, transaction::{TransactionType, Transaction}, utils::{format_coin, set_network_to, get_network}, serializer::Serializer, network::Network, api::wallet::FeeBuilder}; use xelis_wallet::wallet::Wallet; #[cfg(feature = "api_server")] diff --git a/xelis_wallet/src/transaction_builder.rs b/xelis_wallet/src/transaction_builder.rs index 175a0873..16801bd9 100644 --- a/xelis_wallet/src/transaction_builder.rs +++ b/xelis_wallet/src/transaction_builder.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use xelis_common::{ transaction::{Transaction, TransactionType, EXTRA_DATA_LIMIT_SIZE}, - globals::calculate_tx_fee, + utils::calculate_tx_fee, 
serializer::{Writer, Serializer}, crypto::{key::{SIGNATURE_LENGTH, PublicKey, KeyPair}, hash::Hash}, api::wallet::FeeBuilder }; diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index 8cbb2701..c62e9c7b 100644 --- a/xelis_wallet/src/wallet.rs +++ b/xelis_wallet/src/wallet.rs @@ -9,7 +9,7 @@ use xelis_common::config::XELIS_ASSET; use xelis_common::crypto::address::Address; use xelis_common::crypto::hash::Hash; use xelis_common::crypto::key::{KeyPair, PublicKey}; -use xelis_common::globals::format_coin; +use xelis_common::utils::format_coin; use xelis_common::network::Network; use xelis_common::serializer::{Serializer, Writer}; use xelis_common::transaction::{TransactionType, Transfer, Transaction, EXTRA_DATA_LIMIT_SIZE}; From 819d0e2718144149cadd464222bf1d47e2fee06f Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 4 Sep 2023 23:07:49 +0200 Subject: [PATCH 007/160] wallet: XSWD & RPC Server events listeners --- xelis_wallet/src/api/mod.rs | 17 ++++++++- xelis_wallet/src/api/rpc_server.rs | 54 +++++++++++++++++++---------- xelis_wallet/src/api/xswd.rs | 2 +- xelis_wallet/src/network_handler.rs | 25 +++++++++++-- xelis_wallet/src/storage.rs | 4 +++ xelis_wallet/src/wallet.rs | 17 ++++++--- 6 files changed, 93 insertions(+), 26 deletions(-) diff --git a/xelis_wallet/src/api/mod.rs b/xelis_wallet/src/api/mod.rs index 920c716d..c9707cee 100644 --- a/xelis_wallet/src/api/mod.rs +++ b/xelis_wallet/src/api/mod.rs @@ -2,6 +2,9 @@ mod rpc; mod rpc_server; mod xswd; +use serde::ser::Serialize; +use xelis_common::{api::wallet::NotifyEvent, rpc_server::WebSocketServerHandler}; + pub use self::{ rpc_server::{WalletRpcServer, WalletRpcServerShared, AuthConfig}, xswd::{ @@ -18,7 +21,7 @@ pub enum APIServer where W: Clone + Send + Sync + XSWDPermissionHandler + 'static { - RPCServer(WalletRpcServerShared), + RPCServer(WalletRpcServerShared), XSWD(XSWD) } @@ -26,6 +29,18 @@ impl APIServer where W: Clone + Send + Sync + XSWDPermissionHandler + 'static { + pub async fn 
notify_event(&self, event: &NotifyEvent, value: &V) { + let json = serde_json::to_value(value).unwrap(); + match self { + APIServer::RPCServer(server) => { + server.get_websocket().get_handler().notify(event, json).await; + }, + APIServer::XSWD(xswd) => { + xswd.get_handler().notify(event, json).await; + } + } + } + pub async fn stop(self) { match self { APIServer::RPCServer(server) => { diff --git a/xelis_wallet/src/api/rpc_server.rs b/xelis_wallet/src/api/rpc_server.rs index 02c9b06d..a1c84a14 100644 --- a/xelis_wallet/src/api/rpc_server.rs +++ b/xelis_wallet/src/api/rpc_server.rs @@ -4,33 +4,35 @@ use actix_web_httpauth::{middleware::HttpAuthentication, extractors::basic::Basi use anyhow::Result; use log::{info, warn}; use tokio::sync::Mutex; -use xelis_common::{config, rpc_server::{RPCHandler, RPCServerHandler, json_rpc}}; +use xelis_common::{config, rpc_server::{RPCHandler, RPCServerHandler, json_rpc, websocket, websocket::{EventWebSocketHandler, WebSocketServerShared, WebSocketServer}, WebSocketServerHandler}, api::wallet::NotifyEvent}; use actix_web::{get, HttpResponse, Responder, HttpServer, web::{Data, self}, App, dev::{ServerHandle, ServiceRequest}, Error, error::{ErrorUnauthorized, ErrorBadGateway, ErrorBadRequest}}; -use crate::wallet::Wallet; -use super::rpc; - -pub type WalletRpcServerShared = Arc; +pub type WalletRpcServerShared = Arc>; pub struct AuthConfig { pub username: String, pub password: String } -pub struct WalletRpcServer { +pub struct WalletRpcServer +where + W: Clone + Send + Sync + 'static +{ handle: Mutex>, - rpc_handler: Arc>>, + rpc_handler: Arc>, + websocket: WebSocketServerShared>, auth_config: Option } -impl WalletRpcServer { - pub async fn new(bind_address: String, wallet: Arc, auth_config: Option) -> Result { - let mut rpc_handler = RPCHandler::new(wallet); - rpc::register_methods(&mut rpc_handler); - +impl WalletRpcServer +where + W: Clone + Send + Sync + 'static +{ + pub async fn new(bind_address: String, rpc_handler: 
RPCHandler, auth_config: Option) -> Result> { let rpc_handler = Arc::new(rpc_handler); let server = Arc::new(Self { handle: Mutex::new(None), + websocket: WebSocketServer::new(EventWebSocketHandler::new(rpc_handler.clone())), rpc_handler, auth_config }); @@ -39,11 +41,12 @@ impl WalletRpcServer { let clone = Arc::clone(&server); let http_server = HttpServer::new(move || { let server = Arc::clone(&clone); - let auth = HttpAuthentication::basic(auth); + let auth = HttpAuthentication::basic(auth::); App::new() .app_data(Data::from(server)) .wrap(auth) - .route("/json_rpc", web::post().to(json_rpc::, WalletRpcServer>)) + .route("/ws", web::get().to(websocket::, Self>)) + .route("/json_rpc", web::post().to(json_rpc::>)) .service(index) }) .disable_signals() @@ -87,14 +90,29 @@ impl WalletRpcServer { } } -impl RPCServerHandler> for WalletRpcServer { - fn get_rpc_handler(&self) -> &RPCHandler> { +impl WebSocketServerHandler> for WalletRpcServer +where + W: Clone + Send + Sync + 'static +{ + fn get_websocket(&self) -> &WebSocketServerShared> { + &self.websocket + } +} + +impl RPCServerHandler for WalletRpcServer +where + W: Clone + Send + Sync + 'static +{ + fn get_rpc_handler(&self) -> &RPCHandler { &self.rpc_handler } } -async fn auth(request: ServiceRequest, credentials: BasicAuth) -> Result { - let data: Option<&Data> = request.app_data(); +async fn auth(request: ServiceRequest, credentials: BasicAuth) -> Result +where + W: Clone + Send + Sync + 'static +{ + let data: Option<&Data>> = request.app_data(); match data { Some(server) => match server.authenticate(credentials).await { Ok(_) => Ok(request), diff --git a/xelis_wallet/src/api/xswd.rs b/xelis_wallet/src/api/xswd.rs index b597b2ec..df8c8874 100644 --- a/xelis_wallet/src/api/xswd.rs +++ b/xelis_wallet/src/api/xswd.rs @@ -256,7 +256,7 @@ where } // notify a new event to all connected WebSocket - pub async fn notify_event(&self, event: &NotifyEvent, value: Value) { + pub async fn notify(&self, event: &NotifyEvent, 
value: Value) { let value = json!(EventResult { event: Cow::Borrowed(event), value }); let sessions = self.listeners.lock().await; for (session, subscriptions) in sessions.iter() { diff --git a/xelis_wallet/src/network_handler.rs b/xelis_wallet/src/network_handler.rs index 744afb3b..740f0595 100644 --- a/xelis_wallet/src/network_handler.rs +++ b/xelis_wallet/src/network_handler.rs @@ -3,7 +3,7 @@ use thiserror::Error; use anyhow::Error; use log::{debug, error, info, warn}; use tokio::{task::JoinHandle, sync::Mutex, time::interval}; -use xelis_common::{crypto::{hash::Hash, address::Address}, block::Block, transaction::TransactionType, account::VersionedBalance}; +use xelis_common::{crypto::{hash::Hash, address::Address}, block::Block, transaction::TransactionType, account::VersionedBalance, api::wallet::NotifyEvent}; use crate::{daemon_api::DaemonAPI, wallet::Wallet, entry::{EntryData, Transfer, TransactionEntry}}; @@ -126,6 +126,15 @@ impl NetworkHandler { if let Some(reward) = response.reward { let coinbase = EntryData::Coinbase(reward); let entry = TransactionEntry::new(response.data.hash.into_owned(), topoheight, None, None, coinbase); + + // New coinbase entry, inform listeners + #[cfg(feature = "api_server")] + { + if let Some(api_server) = self.wallet.get_api_server().lock().await.as_ref() { + api_server.notify_event(&NotifyEvent::NewTransaction, &entry).await; + } + } + let mut storage = self.wallet.get_storage().write().await; storage.save_transaction(entry.get_hash(), &entry)?; } else { @@ -135,6 +144,7 @@ impl NetworkHandler { let mut latest_nonce_sent = None; let (block, txs) = block.split(); + // TODO check only executed txs in this block for (tx_hash, tx) in block.get_txs_hashes().iter().zip(txs) { let tx = tx.into_owned(); let is_owner = *tx.get_owner() == *address.get_public_key(); @@ -175,7 +185,18 @@ impl NetworkHandler { if let Some(entry) = entry { let entry = TransactionEntry::new(tx_hash.clone(), topoheight, fee, nonce, entry); let mut storage 
= self.wallet.get_storage().write().await; - storage.save_transaction(entry.get_hash(), &entry)?; + + if !storage.has_transaction(entry.get_hash())? { + // notify listeners of new transaction + #[cfg(feature = "api_server")] + { + if let Some(api_server) = self.wallet.get_api_server().lock().await.as_ref() { + api_server.notify_event(&NotifyEvent::NewTransaction, &entry).await; + } + } + + storage.save_transaction(entry.get_hash(), &entry)?; + } } if is_owner { diff --git a/xelis_wallet/src/storage.rs b/xelis_wallet/src/storage.rs index ebdc85f5..0989717b 100644 --- a/xelis_wallet/src/storage.rs +++ b/xelis_wallet/src/storage.rs @@ -170,6 +170,10 @@ impl EncryptedStorage { self.save_to_disk(&self.transactions, hash.as_bytes(), &transaction.to_bytes()) } + pub fn has_transaction(&self, hash: &Hash) -> Result { + self.contains_data(&self.transactions, hash.as_bytes()) + } + pub fn get_nonce(&self) -> Result { self.load_from_disk(&self.extra, NONCE_KEY) } diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index c62e9c7b..b9912bfd 100644 --- a/xelis_wallet/src/wallet.rs +++ b/xelis_wallet/src/wallet.rs @@ -38,7 +38,8 @@ use { PermissionRequest, XSWDPermissionHandler }, - xelis_common::prompt::ShareablePrompt + xelis_common::prompt::ShareablePrompt, + xelis_common::rpc_server::RPCHandler }; #[derive(Error, Debug)] @@ -85,6 +86,8 @@ pub enum WalletError { RescanTopoheightTooHigh, #[error(transparent)] Any(#[from] Error), + #[error("No API Server is running")] + NoAPIServer, #[error("RPC Server is not running")] RPCServerNotRunning, #[error("RPC Server is already running")] @@ -247,15 +250,16 @@ impl Wallet { if lock.is_some() { return Err(WalletError::RPCServerAlreadyRunning.into()) } - let rpc_server = WalletRpcServer::new(bind_address, Arc::clone(self), config).await?; + let mut rpc_handler = RPCHandler::new(self.clone()); + register_rpc_methods(&mut rpc_handler); + + let rpc_server = WalletRpcServer::new(bind_address, rpc_handler, config).await?; 
*lock = Some(APIServer::RPCServer(rpc_server)); Ok(()) } #[cfg(feature = "api_server")] pub async fn enable_xswd(self: &Arc) -> Result<(), Error> { - use xelis_common::rpc_server::RPCHandler; - let mut lock = self.api_server.lock().await; if lock.is_some() { return Err(WalletError::RPCServerAlreadyRunning.into()) @@ -275,6 +279,11 @@ impl Wallet { Ok(()) } + #[cfg(feature = "api_server")] + pub fn get_api_server<'a>(&'a self) -> &Mutex>>> { + &self.api_server + } + // Verify if a password is valid or not pub async fn is_valid_password(&self, password: String) -> Result<(), Error> { let mut encrypted_storage = self.storage.write().await; From 6f5f54e48286e63c9b050509bd6aa9cb83d73024 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 5 Sep 2023 11:23:19 +0200 Subject: [PATCH 008/160] all: remove unnecessary Arc for RPCHandler --- xelis_common/src/rpc_server/websocket/handler.rs | 10 +++++++--- xelis_daemon/src/rpc/mod.rs | 8 ++------ xelis_wallet/src/api/rpc_server.rs | 7 ++----- 3 files changed, 11 insertions(+), 14 deletions(-) diff --git a/xelis_common/src/rpc_server/websocket/handler.rs b/xelis_common/src/rpc_server/websocket/handler.rs index 1bb40358..ea5a7649 100644 --- a/xelis_common/src/rpc_server/websocket/handler.rs +++ b/xelis_common/src/rpc_server/websocket/handler.rs @@ -1,4 +1,4 @@ -use std::{collections::{HashMap, HashSet}, hash::Hash, sync::Arc, borrow::Cow}; +use std::{collections::{HashMap, HashSet}, hash::Hash, borrow::Cow}; use async_trait::async_trait; use log::debug; use serde_json::{Value, json}; @@ -10,7 +10,7 @@ use super::{WebSocketSessionShared, WebSocketHandler}; // generic websocket handler supporting event subscriptions pub struct EventWebSocketHandler { sessions: Mutex, HashMap>>>, - handler: Arc> + handler: RPCHandler } impl EventWebSocketHandler @@ -18,7 +18,7 @@ where T: Sync + Send + Clone + 'static, E: Serialize + DeserializeOwned + Send + Eq + Hash + Clone + 'static { - pub fn new(handler: Arc>) -> Self { + pub fn new(handler: 
RPCHandler) -> Self { Self { sessions: Mutex::new(HashMap::new()), handler @@ -102,6 +102,10 @@ where }; Ok(response) } + + pub fn get_rpc_handler(&self) -> &RPCHandler { + &self.handler + } } #[async_trait] diff --git a/xelis_daemon/src/rpc/mod.rs b/xelis_daemon/src/rpc/mod.rs index 83922a60..774212e3 100644 --- a/xelis_daemon/src/rpc/mod.rs +++ b/xelis_daemon/src/rpc/mod.rs @@ -28,7 +28,6 @@ pub type SharedDaemonRpcServer = Arc>; pub struct DaemonRpcServer { handle: Mutex>, - rpc_handler: Arc>>>, websocket: WebSocketServerShared>, NotifyEvent>>, getwork: Option> } @@ -58,15 +57,12 @@ impl DaemonRpcServer { let mut rpc_handler = RPCHandler::new(blockchain); rpc::register_methods(&mut rpc_handler); - let rpc_handler = Arc::new(rpc_handler); - // create the default websocket server (support event & rpc methods) - let ws = WebSocketServer::new(EventWebSocketHandler::new(rpc_handler.clone())); + let ws = WebSocketServer::new(EventWebSocketHandler::new(rpc_handler)); let server = Arc::new(Self { handle: Mutex::new(None), websocket: ws, - rpc_handler, getwork, }); @@ -132,7 +128,7 @@ impl WebSocketServerHandler> impl RPCServerHandler>> for DaemonRpcServer { fn get_rpc_handler(&self) -> &RPCHandler>> { - &self.rpc_handler + self.get_websocket().get_handler().get_rpc_handler() } } diff --git a/xelis_wallet/src/api/rpc_server.rs b/xelis_wallet/src/api/rpc_server.rs index a1c84a14..f890e743 100644 --- a/xelis_wallet/src/api/rpc_server.rs +++ b/xelis_wallet/src/api/rpc_server.rs @@ -19,7 +19,6 @@ where W: Clone + Send + Sync + 'static { handle: Mutex>, - rpc_handler: Arc>, websocket: WebSocketServerShared>, auth_config: Option } @@ -29,11 +28,9 @@ where W: Clone + Send + Sync + 'static { pub async fn new(bind_address: String, rpc_handler: RPCHandler, auth_config: Option) -> Result> { - let rpc_handler = Arc::new(rpc_handler); let server = Arc::new(Self { handle: Mutex::new(None), - websocket: WebSocketServer::new(EventWebSocketHandler::new(rpc_handler.clone())), - 
rpc_handler, + websocket: WebSocketServer::new(EventWebSocketHandler::new(rpc_handler)), auth_config }); @@ -104,7 +101,7 @@ where W: Clone + Send + Sync + 'static { fn get_rpc_handler(&self) -> &RPCHandler { - &self.rpc_handler + &self.get_websocket().get_handler().get_rpc_handler() } } From 71e5fa8c24dd5a2d08202acef7870dcc53e96ca4 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 7 Sep 2023 22:00:38 +0200 Subject: [PATCH 009/160] wallet & common: inform WebSocket listeners of new get_info result --- xelis_common/src/api/wallet.rs | 5 ++++- xelis_common/src/block/mod.rs | 4 ++++ xelis_wallet/src/network_handler.rs | 20 ++++++++++++++++++-- 3 files changed, 26 insertions(+), 3 deletions(-) diff --git a/xelis_common/src/api/wallet.rs b/xelis_common/src/api/wallet.rs index e2e9073e..82bb305e 100644 --- a/xelis_common/src/api/wallet.rs +++ b/xelis_common/src/api/wallet.rs @@ -64,7 +64,10 @@ pub struct GetTransactionParams { pub enum NotifyEvent { // When a new block is detected by wallet // it contains Block struct as value - NewBlock, + // NewBlock, + // When a a get_info request is made + // and we receive a different topoheight than previous one + NewChainInfo, // When a new asset is added to wallet // Contains a Hash as value NewAsset, diff --git a/xelis_common/src/block/mod.rs b/xelis_common/src/block/mod.rs index 996b90e1..66824ece 100644 --- a/xelis_common/src/block/mod.rs +++ b/xelis_common/src/block/mod.rs @@ -124,6 +124,10 @@ impl BlockHeader { &self.txs_hashes } + pub fn take_txs_hashes(self) -> Vec { + self.txs_hashes + } + pub fn get_txs_hash(&self) -> Hash { let mut bytes = vec![]; diff --git a/xelis_wallet/src/network_handler.rs b/xelis_wallet/src/network_handler.rs index 740f0595..4d6bd4c1 100644 --- a/xelis_wallet/src/network_handler.rs +++ b/xelis_wallet/src/network_handler.rs @@ -145,7 +145,7 @@ impl NetworkHandler { let mut latest_nonce_sent = None; let (block, txs) = block.split(); // TODO check only executed txs in this block - for (tx_hash, tx) in 
block.get_txs_hashes().iter().zip(txs) { + for (tx_hash, tx) in block.into_owned().take_txs_hashes().into_iter().zip(txs) { let tx = tx.into_owned(); let is_owner = *tx.get_owner() == *address.get_public_key(); let fee = if is_owner { Some(tx.get_fee()) } else { None }; @@ -183,7 +183,7 @@ impl NetworkHandler { }; if let Some(entry) = entry { - let entry = TransactionEntry::new(tx_hash.clone(), topoheight, fee, nonce, entry); + let entry = TransactionEntry::new(tx_hash, topoheight, fee, nonce, entry); let mut storage = self.wallet.get_storage().write().await; if !storage.has_transaction(entry.get_hash())? { @@ -271,6 +271,14 @@ impl NetworkHandler { } debug!("New height detected for chain: {}", info.topoheight); + // New get_info with different topoheight, inform listeners + #[cfg(feature = "api_server")] + { + if let Some(api_server) = self.wallet.get_api_server().lock().await.as_ref() { + api_server.notify_event(&NotifyEvent::NewChainInfo, &info).await; + } + } + if let Err(e) = self.sync_new_blocks(&address, current_topoheight, info.topoheight).await { error!("Error while syncing new blocks: {}", e); } @@ -307,6 +315,14 @@ impl NetworkHandler { let mut storage = self.wallet.get_storage().write().await; for asset in &response { if !storage.contains_asset(asset)? 
{ + // New asset added to the wallet, inform listeners + #[cfg(feature = "api_server")] + { + if let Some(api_server) = self.wallet.get_api_server().lock().await.as_ref() { + api_server.notify_event(&NotifyEvent::NewAsset, &asset).await; + } + } + storage.add_asset(asset)?; } } From 9056870e17382a835a3457e1ca8c278b169e0488 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 7 Sep 2023 23:38:05 +0200 Subject: [PATCH 010/160] wallet: return Error from NetworkHandler on stop call --- xelis_wallet/src/network_handler.rs | 17 ++++++++++++----- xelis_wallet/src/wallet.rs | 14 ++++++++------ 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/xelis_wallet/src/network_handler.rs b/xelis_wallet/src/network_handler.rs index 4d6bd4c1..3da4f1cd 100644 --- a/xelis_wallet/src/network_handler.rs +++ b/xelis_wallet/src/network_handler.rs @@ -13,7 +13,13 @@ pub type SharedNetworkHandler = Arc; #[derive(Debug, Error)] pub enum NetworkError { #[error("network handler is already running")] - AlreadyRunning + AlreadyRunning, + #[error("network handler is not running")] + NotRunning, + #[error(transparent)] + TaskError(#[from] tokio::task::JoinError), + #[error(transparent)] + DaemonAPIError(#[from] Error) } pub struct NetworkHandler { @@ -56,15 +62,16 @@ impl NetworkHandler { Ok(()) } - pub async fn stop(&self) { + pub async fn stop(&self) -> Result<(), NetworkError> { if let Some(handle) = self.task.lock().await.take() { if handle.is_finished() { - if let Err(e) = handle.await { - debug!("Network handler was finished with error: {}", e); - } + handle.await??; } else { handle.abort(); } + Ok(()) + } else { + Err(NetworkError::NotRunning) } } diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index b9912bfd..e58ee168 100644 --- a/xelis_wallet/src/wallet.rs +++ b/xelis_wallet/src/wallet.rs @@ -16,7 +16,7 @@ use xelis_common::transaction::{TransactionType, Transfer, Transaction, EXTRA_DA use crate::cipher::Cipher; use crate::config::{PASSWORD_ALGORITHM, 
PASSWORD_HASH_SIZE, SALT_SIZE}; use crate::mnemonics; -use crate::network_handler::{NetworkHandler, SharedNetworkHandler}; +use crate::network_handler::{NetworkHandler, SharedNetworkHandler, NetworkError}; use crate::storage::{EncryptedStorage, Storage}; use crate::transaction_builder::TransactionBuilder; use chacha20poly1305::{aead::OsRng, Error as CryptoError}; @@ -98,7 +98,9 @@ pub enum WalletError { EmptyName, #[cfg(feature = "api_server")] #[error("No handler available for this request")] - NoHandlerAvailable + NoHandlerAvailable, + #[error(transparent)] + NetworkError(#[from] NetworkError) } pub struct Wallet { @@ -434,7 +436,7 @@ impl Wallet { pub async fn set_offline_mode(&self) -> Result<(), WalletError> { let mut handler = self.network_handler.lock().await; if let Some(network_handler) = handler.take() { - network_handler.stop().await; + network_handler.stop().await?; } else { return Err(WalletError::NotOnlineMode) } @@ -444,13 +446,13 @@ impl Wallet { pub async fn rescan(&self, topoheight: u64) -> Result<(), WalletError> { if !self.is_online().await { - // user have to set in offline mode himself first - return Err(WalletError::AlreadyOnlineMode.into()) + // user have to set it online + return Err(WalletError::NotOnlineMode) } let handler = self.network_handler.lock().await; if let Some(network_handler) = handler.as_ref() { - network_handler.stop().await; + network_handler.stop().await?; { let mut storage = self.get_storage().write().await; if topoheight >= storage.get_daemon_topoheight()? 
{ From cb7447e08e086901abacbc32fb84144018e875f3 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 12 Sep 2023 22:57:37 +0200 Subject: [PATCH 011/160] common: homomorphic encryption scheme --- Cargo.lock | 111 ++++++++-- xelis_common/Cargo.toml | 2 +- xelis_common/src/crypto/mod.rs | 3 +- xelis_common/src/crypto/paillier.rs | 310 ++++++++++++++++++++++++++++ 4 files changed, 411 insertions(+), 15 deletions(-) create mode 100644 xelis_common/src/crypto/paillier.rs diff --git a/Cargo.lock b/Cargo.lock index 82a3c38d..545b99c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -408,6 +408,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "az" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b7e4c2464d97fe331d41de9d5db0def0a96f4d823b8b32a2efd503578988973" + [[package]] name = "backtrace" version = "0.3.68" @@ -856,7 +862,7 @@ checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1014,6 +1020,16 @@ version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" +[[package]] +name = "gmp-mpfr-sys" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19c5c67d8c29fe87e3266e691dd60948e6e4df4496c53355ef3551142945721b" +dependencies = [ + "libc", + "windows-sys 0.42.0", +] + [[package]] name = "h2" version = "0.3.20" @@ -1231,7 +1247,7 @@ checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.2", "rustix", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1373,7 +1389,7 @@ dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys", + "windows-sys 
0.48.0", ] [[package]] @@ -1765,6 +1781,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "rug" +version = "1.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a118f8296241f6952511e8f9ab5b8a759c0d9ed902159b0aeed82d902e84ca6" +dependencies = [ + "az", + "gmp-mpfr-sys", + "libc", +] + [[package]] name = "rustc-demangle" version = "0.1.23" @@ -1790,7 +1817,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -1860,7 +1887,7 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2216,7 +2243,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -2544,6 +2571,21 @@ dependencies = [ "windows-targets", ] +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -2559,51 +2601,93 @@ version = "0.48.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + 
"windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + [[package]] name = "windows_aarch64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + [[package]] name = "windows_i686_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + [[package]] name = "windows_i686_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + [[package]] name = "windows_x86_64_gnu" 
version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + [[package]] name = "windows_x86_64_msvc" version = "0.48.0" @@ -2642,6 +2726,7 @@ dependencies = [ "num-traits", "rand 0.8.5", "reqwest", + "rug", "serde", "serde_json", "sha3", diff --git a/xelis_common/Cargo.toml b/xelis_common/Cargo.toml index 982c177c..cb5fcb96 100644 --- a/xelis_common/Cargo.toml +++ b/xelis_common/Cargo.toml @@ -26,7 +26,7 @@ lazy_static = "1.4.0" clap = { version = "3.1.18", features = ["derive"], optional = true } crossterm = "*" indexmap = { version = "2.0.0", features = ["serde"] } - +rug = "1.22" actix-rt = { version = "2.8.0", optional = true } actix-web = { version = "4", optional = true } actix-ws = { version = "0.2.5", optional = true } diff --git a/xelis_common/src/crypto/mod.rs b/xelis_common/src/crypto/mod.rs index 2db14d4a..7d5fa125 100644 --- a/xelis_common/src/crypto/mod.rs +++ b/xelis_common/src/crypto/mod.rs @@ -1,4 +1,5 @@ pub mod hash; pub mod key; pub mod bech32; -pub mod address; \ No newline at end of file +pub mod address; +pub mod paillier; \ No newline at end of file diff --git a/xelis_common/src/crypto/paillier.rs b/xelis_common/src/crypto/paillier.rs new file mode 100644 index 00000000..0a3304cd --- /dev/null +++ 
b/xelis_common/src/crypto/paillier.rs @@ -0,0 +1,310 @@ +use rug::Integer; +use thiserror::Error; + +// p & q are two large primes and must have 2048 bits +pub const PRIME_BITS_SIZE: usize = 2048; +// Size is 4 * 2048 / 8 = 1024 bytes +// 4 because p * q = 4096 bits (N) +// and the ciphertext is the size of N^2 +pub const SIZE: usize = PRIME_BITS_SIZE * 4 / 8; + +#[derive(Debug, Error)] +pub enum CryptoError { + #[error("Invalid ciphertext for this Public Key")] + InvalidCiphertext, + #[error("Invalid operation")] + InvalidOperation, + #[error("Invalid plaintext")] + InvalidPlaintext, + #[error("Invalid decrypted value")] + InvalidDecryptedValue +} + +pub struct PrivateKey { + p: Integer, + q: Integer +} + +impl PrivateKey { + pub fn new(p: Integer, q: Integer) -> PrivateKey { + if p.significant_bits() != PRIME_BITS_SIZE as u32 || q.significant_bits() != PRIME_BITS_SIZE as u32 { + panic!("Invalid prime size p: {}, q: {}", p.significant_bits(), q.significant_bits()) + } + + Self { + p, + q + } + } + + // n = p * q + pub fn get_public_key(&self) -> PublicKey { + PublicKey::new((&self.p * &self.q).into()) + } + + pub fn expand(self) -> ExpandedPrivateKey { + let n: Integer = (&self.p * &self.q).into(); + + // lambda = (p-1) * (q-1) + let p_minus: Integer = &self.p - Integer::from(1); + let q_minus: Integer = &self.q - Integer::from(1); + let lambda: Integer = p_minus * q_minus; + + // boost performance, use invert_ref instead of extended GCD + let mu: Integer = lambda.invert_ref(&n).unwrap().into(); + + ExpandedPrivateKey { + key: self.get_public_key(), + _inner: self, + lambda, + mu, + } + } +} + +// Extended private key +pub struct ExpandedPrivateKey { + _inner: PrivateKey, + key: PublicKey, + lambda: Integer, + mu: Integer, +} + +impl ExpandedPrivateKey { + pub fn decrypt(&self, ciphertext: Ciphertext) -> Result { + if !ciphertext.is_valid(&self.key) { + return Err(CryptoError::InvalidCiphertext) + } + + // c^lambda mod n^2 + let c_lambda: Integer = 
ciphertext.value.pow_mod_ref(&self.lambda, &self.key.nn) + .ok_or(CryptoError::InvalidOperation)? + .into(); + + // L(x) = (x - 1) / n + let plaintext = (&c_lambda - Integer::from(1)) / &self.key.n; + // m = L(c^lambda mod n^2) * mu mod n + let result = plaintext * &self.mu % &self.key.n; + Ok(result.to_u64().ok_or(CryptoError::InvalidDecryptedValue)?) + } + + pub fn get_public_key(&self) -> &PublicKey { + &self.key + } +} + +// Only necessary value is N, others are precomputed +pub struct PublicKey { + // the modulus (used for encryption) + n: Integer, + // n + 1 + g: Integer, + // n^2 + nn: Integer, +} + +impl PublicKey { + pub fn new(n: Integer) -> PublicKey { + Self { + g: &n + Integer::from(1), + nn: n.square_ref().into(), + n, + } + } + + pub fn encrypt(&self, value: u64) -> Result { + let plaintext = Integer::from(value); + if plaintext >= self.n { + return Err(CryptoError::InvalidPlaintext) + } + + let r = Integer::from(2); + // c = g^value * r^n (mod n^2) + let c1: Integer = self.g.pow_mod_ref(&plaintext, &self.nn) + .ok_or(CryptoError::InvalidOperation)? + .into(); + let c2: Integer = r.pow_mod_ref(&self.n, &self.nn) + .ok_or(CryptoError::InvalidOperation)? + .into(); + + let mul: Integer = c1 * c2; + Ok(Ciphertext::new(mul % &self.nn)) + } + + // C1 * C2 mod n^2 + pub fn add(&self, c1: &Ciphertext, c2: &Ciphertext) -> Result { + if (!c1.is_valid(self)) || (!c2.is_valid(self)) { + return Err(CryptoError::InvalidCiphertext) + } + + let mul: Integer = (&c1.value * &c2.value).into(); + Ok(Ciphertext::new(mul % &self.nn)) + } + + // C1 * g^value mod N^2 + pub fn add_plaintext(&self, c1: &Ciphertext, value: u64) -> Result { + if !c1.is_valid(self) { + return Err(CryptoError::InvalidCiphertext) + } + + let plaintext = Integer::from(value); + let c2: Integer = self.g.pow_mod_ref(&plaintext, &self.nn) + .ok_or(CryptoError::InvalidOperation)? 
+ .into(); + + let mul: Integer = (&c1.value * c2).into(); + Ok(Ciphertext::new(mul % &self.nn)) + } + + pub fn sub(&self, c1: &Ciphertext, c2: &Ciphertext) -> Result { + if !c1.is_valid(self) || !c2.is_valid(self) { + return Err(CryptoError::InvalidCiphertext) + } + + // Instead of searching bezout coefficients and GCD we can just invert the value + let negative_c2: Integer = c2.value.invert_ref(&self.nn) + .ok_or(CryptoError::InvalidOperation)? + .into(); + + Ok(Ciphertext::new(&c1.value * negative_c2 % &self.nn)) + } + + pub fn mul_plaintext(&self, c1: &Ciphertext, value: u64) -> Result { + if !c1.is_valid(self) { + return Err(CryptoError::InvalidCiphertext) + } + + let plaintext = Integer::from(value); + let mul = c1.value.pow_mod_ref(&plaintext, &self.nn) + .ok_or(CryptoError::InvalidOperation)? + .into(); + + Ok(Ciphertext::new(mul)) + } + + pub fn div_plaintext(&self, c1: &Ciphertext, value: u64) -> Result { + if !c1.is_valid(self) { + return Err(CryptoError::InvalidCiphertext) + } + + let plaintext = Integer::from(value); + let inverse = plaintext.invert(&self.nn).map_err(|_| CryptoError::InvalidOperation)?; + + let mul = c1.value.pow_mod_ref(&inverse, &self.nn) + .ok_or(CryptoError::InvalidOperation)? 
+ .into(); + + Ok(Ciphertext::new(mul)) + } +} + +// Represents an encrypted value +pub struct Ciphertext { + value: Integer, +} + +impl Ciphertext { + pub fn new(value: Integer) -> Ciphertext { + Self { + value, + } + } + + // 0 < C < n^2 + pub fn is_valid(&self, key: &PublicKey) -> bool { + key.nn > self.value && self.value > Integer::from(0) + } +} + +mod tests { + use std::str::FromStr; + + use super::*; + fn _generate_private_key() -> ExpandedPrivateKey { + let p = Integer::from_str("26946565058508556335703057678479193452304038415320320612739026385225298610008864186185248157667939692602914497266158802716790474833947772826137352516209983737629258254217925182069688200921824682629208537057830159202300700254744398401385317004557290421622059016544387100633064484394429299712612387988787656113893086893594807335060378763142902668584121938589954668585758578121584153647867617579207136469100271575899315110489594116527521092010000583127405316221856395802750870474485516597674185947739156275281462539159055254987599109169478119201211066791295912114221003467197211019730323321923834862781706821839382425319").unwrap(); + let q = Integer::from_str("30285103848165032371432135057580005479137385975250075866315362110663210942596615960809988401619020086330330323690859032150264976037456961162655919684888298622597867407709454379915077961482177205641007860316172930122789053649106796228331050588480104621044323245329249654789956970860084725229793041508008076837900555099704375472732833392770407190572998528495204954650991713220053319696501576522725356507569592271456467055934422479932228786490254699513808991388789871837682571567374631101622153747215563532592329904419750104317088696095242472742008866975771374389004813336895149595148338528131027712001071213942813066383").unwrap(); + + PrivateKey::new(p, q).expand() + } + + #[test] + fn test_generate_private_key() { + _generate_private_key(); + } + + #[test] + fn test_encrypt_decrypt() { + let key = _generate_private_key(); + let value = 10u64; 
+ let ciphertext = key.get_public_key().encrypt(value).unwrap(); + let decrypted = key.decrypt(ciphertext).unwrap(); + assert_eq!(value, decrypted); + } + + #[test] + fn test_homomorphic_add() { + let key = _generate_private_key(); + let left = 2500u64; + let right = 500u64; + + let c1 = key.get_public_key().encrypt(left).unwrap(); + let c2 = key.get_public_key().encrypt(right).unwrap(); + + let sum = key.get_public_key().add(&c1, &c2).unwrap(); + let decrypted = key.decrypt(sum).unwrap(); + assert_eq!(left + right, decrypted); + } + + #[test] + fn test_homomorphic_add_plaintext() { + let key = _generate_private_key(); + let left = 2500u64; + let right = 500u64; + + let c1 = key.get_public_key().encrypt(left).unwrap(); + + let sum = key.get_public_key().add_plaintext(&c1, right).unwrap(); + let decrypted = key.decrypt(sum).unwrap(); + assert_eq!(left + right, decrypted); + } + + #[test] + fn test_homomorphic_sub() { + let key = _generate_private_key(); + let left = 2500u64; + let right = 500u64; + + let c1 = key.get_public_key().encrypt(left).unwrap(); + let c2 = key.get_public_key().encrypt(right).unwrap(); + + let sum = key.get_public_key().sub(&c1, &c2).unwrap(); + let decrypted = key.decrypt(sum).unwrap(); + assert_eq!(left - right, decrypted); + } + + #[test] + fn test_homomorphic_mul() { + let key = _generate_private_key(); + let left = 2500u64; + let right = 17u64; + + let c1 = key.get_public_key().encrypt(left).unwrap(); + + let sum = key.get_public_key().mul_plaintext(&c1, right).unwrap(); + let decrypted = key.decrypt(sum).unwrap(); + assert_eq!(left * right, decrypted); + } + + #[test] + fn test_homomorphic_div() { + let key = _generate_private_key(); + let left = 2500u64; + let right = 10u64; + + let c1 = key.get_public_key().encrypt(left).unwrap(); + + let sum = key.get_public_key().div_plaintext(&c1, right).unwrap(); + let decrypted = key.decrypt(sum).unwrap(); + assert_eq!(left / right, decrypted); + } +} \ No newline at end of file From 
b6f82ef547a9472fdd575b24e8cf4680fe5d21ed Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 12 Sep 2023 23:12:04 +0200 Subject: [PATCH 012/160] common: use random R blind --- xelis_common/src/crypto/paillier.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/xelis_common/src/crypto/paillier.rs b/xelis_common/src/crypto/paillier.rs index 0a3304cd..6dbbb047 100644 --- a/xelis_common/src/crypto/paillier.rs +++ b/xelis_common/src/crypto/paillier.rs @@ -1,4 +1,4 @@ -use rug::Integer; +use rug::{Integer, rand::RandState}; use thiserror::Error; // p & q are two large primes and must have 2048 bits @@ -26,7 +26,7 @@ pub struct PrivateKey { } impl PrivateKey { - pub fn new(p: Integer, q: Integer) -> PrivateKey { + pub fn new(p: Integer, q: Integer) -> Self { if p.significant_bits() != PRIME_BITS_SIZE as u32 || q.significant_bits() != PRIME_BITS_SIZE as u32 { panic!("Invalid prime size p: {}, q: {}", p.significant_bits(), q.significant_bits()) } @@ -118,7 +118,15 @@ impl PublicKey { return Err(CryptoError::InvalidPlaintext) } - let r = Integer::from(2); + // Create a random number generator + let mut rng = RandState::new(); + // Generate a random number between 0 and n + let mut r: Integer = self.n.random_below_ref(&mut rng).into(); + // We want a positive value only + if r.is_zero() { + r += 1; + } + // c = g^value * r^n (mod n^2) let c1: Integer = self.g.pow_mod_ref(&plaintext, &self.nn) .ok_or(CryptoError::InvalidOperation)? 
@@ -218,8 +226,8 @@ impl Ciphertext { mod tests { use std::str::FromStr; - use super::*; + fn _generate_private_key() -> ExpandedPrivateKey { let p = Integer::from_str("26946565058508556335703057678479193452304038415320320612739026385225298610008864186185248157667939692602914497266158802716790474833947772826137352516209983737629258254217925182069688200921824682629208537057830159202300700254744398401385317004557290421622059016544387100633064484394429299712612387988787656113893086893594807335060378763142902668584121938589954668585758578121584153647867617579207136469100271575899315110489594116527521092010000583127405316221856395802750870474485516597674185947739156275281462539159055254987599109169478119201211066791295912114221003467197211019730323321923834862781706821839382425319").unwrap(); let q = Integer::from_str("30285103848165032371432135057580005479137385975250075866315362110663210942596615960809988401619020086330330323690859032150264976037456961162655919684888298622597867407709454379915077961482177205641007860316172930122789053649106796228331050588480104621044323245329249654789956970860084725229793041508008076837900555099704375472732833392770407190572998528495204954650991713220053319696501576522725356507569592271456467055934422479932228786490254699513808991388789871837682571567374631101622153747215563532592329904419750104317088696095242472742008866975771374389004813336895149595148338528131027712001071213942813066383").unwrap(); From a9454976e9d4f74feb308ca81718982057e49ed5 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 13 Sep 2023 13:37:38 +0200 Subject: [PATCH 013/160] common: typo --- xelis_common/src/crypto/paillier.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xelis_common/src/crypto/paillier.rs b/xelis_common/src/crypto/paillier.rs index 6dbbb047..fcbce3ce 100644 --- a/xelis_common/src/crypto/paillier.rs +++ b/xelis_common/src/crypto/paillier.rs @@ -62,7 +62,7 @@ impl PrivateKey { } } -// Extended private key +// Expanded private 
key pub struct ExpandedPrivateKey { _inner: PrivateKey, key: PublicKey, From eecd40978572347e9ebaf7bdb907d2fbd4b6ae92 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 13 Sep 2023 13:53:36 +0200 Subject: [PATCH 014/160] common: generate random r --- xelis_common/src/crypto/paillier.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/xelis_common/src/crypto/paillier.rs b/xelis_common/src/crypto/paillier.rs index fcbce3ce..ee6c5ce3 100644 --- a/xelis_common/src/crypto/paillier.rs +++ b/xelis_common/src/crypto/paillier.rs @@ -112,12 +112,8 @@ impl PublicKey { } } - pub fn encrypt(&self, value: u64) -> Result { - let plaintext = Integer::from(value); - if plaintext >= self.n { - return Err(CryptoError::InvalidPlaintext) - } - + // Generate r = 0 < r < n + pub fn generate_random_r(&self) -> Integer { // Create a random number generator let mut rng = RandState::new(); // Generate a random number between 0 and n @@ -127,6 +123,16 @@ impl PublicKey { r += 1; } + r + } + + pub fn encrypt(&self, value: u64) -> Result { + let plaintext = Integer::from(value); + if plaintext >= self.n { + return Err(CryptoError::InvalidPlaintext) + } + + let r = self.generate_random_r(); // c = g^value * r^n (mod n^2) let c1: Integer = self.g.pow_mod_ref(&plaintext, &self.nn) .ok_or(CryptoError::InvalidOperation)? 
From f25d47eab8bb94372a720092904b9327f74a8665 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 13 Sep 2023 14:04:20 +0200 Subject: [PATCH 015/160] common: don't use panic --- xelis_common/src/crypto/paillier.rs | 54 ++++++++++++++++++----------- 1 file changed, 33 insertions(+), 21 deletions(-) diff --git a/xelis_common/src/crypto/paillier.rs b/xelis_common/src/crypto/paillier.rs index ee6c5ce3..a9c45ecc 100644 --- a/xelis_common/src/crypto/paillier.rs +++ b/xelis_common/src/crypto/paillier.rs @@ -17,7 +17,9 @@ pub enum CryptoError { #[error("Invalid plaintext")] InvalidPlaintext, #[error("Invalid decrypted value")] - InvalidDecryptedValue + InvalidDecryptedValue, + #[error("Invalid bits size")] + InvalidBitsSize } pub struct PrivateKey { @@ -26,23 +28,25 @@ pub struct PrivateKey { } impl PrivateKey { - pub fn new(p: Integer, q: Integer) -> Self { + pub fn new(p: Integer, q: Integer) -> Result { if p.significant_bits() != PRIME_BITS_SIZE as u32 || q.significant_bits() != PRIME_BITS_SIZE as u32 { - panic!("Invalid prime size p: {}, q: {}", p.significant_bits(), q.significant_bits()) + return Err(CryptoError::InvalidBitsSize) } - Self { - p, - q - } + Ok( + Self { + p, + q + } + ) } // n = p * q - pub fn get_public_key(&self) -> PublicKey { + pub fn get_public_key(&self) -> Result { PublicKey::new((&self.p * &self.q).into()) } - pub fn expand(self) -> ExpandedPrivateKey { + pub fn expand(self) -> Result { let n: Integer = (&self.p * &self.q).into(); // lambda = (p-1) * (q-1) @@ -53,12 +57,14 @@ impl PrivateKey { // boost performance, use invert_ref instead of extended GCD let mu: Integer = lambda.invert_ref(&n).unwrap().into(); - ExpandedPrivateKey { - key: self.get_public_key(), - _inner: self, - lambda, - mu, - } + Ok( + ExpandedPrivateKey { + key: self.get_public_key()?, + _inner: self, + lambda, + mu, + } + ) } } @@ -104,12 +110,18 @@ pub struct PublicKey { } impl PublicKey { - pub fn new(n: Integer) -> PublicKey { - Self { - g: &n + Integer::from(1), - nn: 
n.square_ref().into(), - n, + pub fn new(n: Integer) -> Result { + if n.significant_bits() != PRIME_BITS_SIZE as u32 * 2 { + return Err(CryptoError::InvalidCiphertext) } + + Ok( + Self { + g: &n + Integer::from(1), + nn: n.square_ref().into(), + n, + } + ) } // Generate r = 0 < r < n @@ -238,7 +250,7 @@ mod tests { let p = Integer::from_str("26946565058508556335703057678479193452304038415320320612739026385225298610008864186185248157667939692602914497266158802716790474833947772826137352516209983737629258254217925182069688200921824682629208537057830159202300700254744398401385317004557290421622059016544387100633064484394429299712612387988787656113893086893594807335060378763142902668584121938589954668585758578121584153647867617579207136469100271575899315110489594116527521092010000583127405316221856395802750870474485516597674185947739156275281462539159055254987599109169478119201211066791295912114221003467197211019730323321923834862781706821839382425319").unwrap(); let q = Integer::from_str("30285103848165032371432135057580005479137385975250075866315362110663210942596615960809988401619020086330330323690859032150264976037456961162655919684888298622597867407709454379915077961482177205641007860316172930122789053649106796228331050588480104621044323245329249654789956970860084725229793041508008076837900555099704375472732833392770407190572998528495204954650991713220053319696501576522725356507569592271456467055934422479932228786490254699513808991388789871837682571567374631101622153747215563532592329904419750104317088696095242472742008866975771374389004813336895149595148338528131027712001071213942813066383").unwrap(); - PrivateKey::new(p, q).expand() + PrivateKey::new(p, q).unwrap().expand().unwrap() } #[test] From 9791a70072250e35b22588917f405b5a94fc49a2 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 14 Sep 2023 11:48:58 +0200 Subject: [PATCH 016/160] common: more tests --- xelis_common/src/crypto/paillier.rs | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 
deletions(-) diff --git a/xelis_common/src/crypto/paillier.rs b/xelis_common/src/crypto/paillier.rs index a9c45ecc..95e506bf 100644 --- a/xelis_common/src/crypto/paillier.rs +++ b/xelis_common/src/crypto/paillier.rs @@ -88,10 +88,10 @@ impl ExpandedPrivateKey { .into(); // L(x) = (x - 1) / n - let plaintext = (&c_lambda - Integer::from(1)) / &self.key.n; + let l_ = (c_lambda - Integer::from(1)) / &self.key.n; // m = L(c^lambda mod n^2) * mu mod n - let result = plaintext * &self.mu % &self.key.n; - Ok(result.to_u64().ok_or(CryptoError::InvalidDecryptedValue)?) + let plaintext = l_ * &self.mu % &self.key.n; + Ok(plaintext.to_u64().ok_or(CryptoError::InvalidDecryptedValue)?) } pub fn get_public_key(&self) -> &PublicKey { @@ -252,10 +252,21 @@ mod tests { PrivateKey::new(p, q).unwrap().expand().unwrap() } - + #[test] fn test_generate_private_key() { - _generate_private_key(); + let key = _generate_private_key(); + assert!(key._inner.p != key._inner.q); + assert!(key.key.n.significant_bits() == key._inner.p.significant_bits() + key._inner.q.significant_bits()); + assert!(key._inner.p * key._inner.q == key.key.n); + } + + #[test] + fn test_public_key_gcd_g_n_equal_one() { + let private_key = _generate_private_key(); + let public_key = private_key.get_public_key(); + let gcd: Integer = public_key.g.gcd_ref(&public_key.n).into(); + assert!(gcd == 1); } #[test] From f701d4bd69d9f5dafe716960c3232c0a4aac29fd Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 14 Sep 2023 22:54:28 +0200 Subject: [PATCH 017/160] common: add blind test --- xelis_common/src/crypto/paillier.rs | 37 +++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/xelis_common/src/crypto/paillier.rs b/xelis_common/src/crypto/paillier.rs index 95e506bf..126cc365 100644 --- a/xelis_common/src/crypto/paillier.rs +++ b/xelis_common/src/crypto/paillier.rs @@ -125,7 +125,7 @@ impl PublicKey { } // Generate r = 0 < r < n - pub fn generate_random_r(&self) -> Integer { + pub fn 
generate_random_r(&self) -> Integer { // Create a random number generator let mut rng = RandState::new(); // Generate a random number between 0 and n @@ -149,14 +149,31 @@ impl PublicKey { let c1: Integer = self.g.pow_mod_ref(&plaintext, &self.nn) .ok_or(CryptoError::InvalidOperation)? .into(); - let c2: Integer = r.pow_mod_ref(&self.n, &self.nn) - .ok_or(CryptoError::InvalidOperation)? - .into(); + let c2: Integer = r.pow_mod(&self.n, &self.nn) + .map_err(|_| CryptoError::InvalidOperation)?; let mul: Integer = c1 * c2; Ok(Ciphertext::new(mul % &self.nn)) } + pub fn blind(&self, c1: Ciphertext) -> Result { + let r = self.generate_random_r(); + self.blind_with(c1, r) + } + + pub fn blind_with(&self, c1: Ciphertext, r: Integer) -> Result { + if !c1.is_valid(self) { + return Err(CryptoError::InvalidCiphertext) + } + + // C = c * r^n mod n^2 + let c2: Integer = r.pow_mod(&self.n, &self.nn) + .map_err(|_| CryptoError::InvalidOperation)?; + + let mul: Integer = c1.value * c2; + Ok(Ciphertext::new(mul % &self.nn)) + } + // C1 * C2 mod n^2 pub fn add(&self, c1: &Ciphertext, c2: &Ciphertext) -> Result { if (!c1.is_valid(self)) || (!c2.is_valid(self)) { @@ -278,6 +295,18 @@ mod tests { assert_eq!(value, decrypted); } + #[test] + fn test_blind() { + let key = _generate_private_key(); + let value = 10u64; + + let ciphertext = key.get_public_key().encrypt(value).unwrap(); + let blinded = key.get_public_key().blind(ciphertext).unwrap(); + + let decrypted = key.decrypt(blinded).unwrap(); + assert_eq!(value, decrypted); + } + #[test] fn test_homomorphic_add() { let key = _generate_private_key(); From b8c0bac132390aa3214df615e4ea6e980590d69a Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 19 Sep 2023 19:34:29 +0200 Subject: [PATCH 018/160] wallet: add BalanceChanged event --- xelis_common/src/api/wallet.rs | 11 +++++++++++ xelis_wallet/src/network_handler.rs | 19 ++++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/xelis_common/src/api/wallet.rs 
b/xelis_common/src/api/wallet.rs index 82bb305e..535eae50 100644 --- a/xelis_common/src/api/wallet.rs +++ b/xelis_common/src/api/wallet.rs @@ -1,3 +1,5 @@ +use std::borrow::Cow; + use serde::{Deserialize, Serialize}; use crate::{transaction::{TransactionType, Transaction}, crypto::{key::PublicKey, hash::Hash}}; @@ -60,6 +62,12 @@ pub struct GetTransactionParams { pub hash: Hash } +#[derive(Serialize, Deserialize)] +pub struct BalanceChanged<'a> { + pub asset: Cow<'a, Hash>, + pub balance: u64 +} + #[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub enum NotifyEvent { // When a new block is detected by wallet @@ -74,4 +82,7 @@ pub enum NotifyEvent { // When a new transaction is added to wallet // Contains TransactionEntry struct as value NewTransaction, + // When a balance is changed + // Contains a BalanceChanged as value + BalanceChanged, } \ No newline at end of file diff --git a/xelis_wallet/src/network_handler.rs b/xelis_wallet/src/network_handler.rs index 3da4f1cd..e60e6eef 100644 --- a/xelis_wallet/src/network_handler.rs +++ b/xelis_wallet/src/network_handler.rs @@ -3,10 +3,16 @@ use thiserror::Error; use anyhow::Error; use log::{debug, error, info, warn}; use tokio::{task::JoinHandle, sync::Mutex, time::interval}; -use xelis_common::{crypto::{hash::Hash, address::Address}, block::Block, transaction::TransactionType, account::VersionedBalance, api::wallet::NotifyEvent}; +use xelis_common::{crypto::{hash::Hash, address::Address}, block::Block, transaction::TransactionType, account::VersionedBalance}; use crate::{daemon_api::DaemonAPI, wallet::Wallet, entry::{EntryData, Transfer, TransactionEntry}}; +#[cfg(feature = "api_server")] +use { + std::borrow::Cow, + xelis_common::api::wallet::{NotifyEvent, BalanceChanged} +}; + // NetworkHandler must be behind a Arc to be accessed from Wallet (to stop it) or from tokio task pub type SharedNetworkHandler = Arc; @@ -106,6 +112,17 @@ impl NetworkHandler { }; let balance = res.balance; + // Inform the change 
of the balance + #[cfg(feature = "api_server")] + { + if let Some(api_server) = self.wallet.get_api_server().lock().await.as_ref() { + api_server.notify_event(&NotifyEvent::BalanceChanged, &BalanceChanged { + asset: Cow::Borrowed(&asset), + balance: balance.get_balance() + }).await; + } + } + // lets write the final balance let mut storage = self.wallet.get_storage().write().await; storage.set_balance_for(asset, balance.get_balance())?; From a5f12c45718ec1052b8870d72cc5143b5dc6b2c5 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 29 Sep 2023 18:48:10 +0200 Subject: [PATCH 019/160] wallet: fix transaction signing --- xelis_common/src/serializer/mod.rs | 4 ++++ xelis_wallet/src/transaction_builder.rs | 10 ++++++++-- xelis_wallet/src/wallet.rs | 2 ++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/xelis_common/src/serializer/mod.rs b/xelis_common/src/serializer/mod.rs index 566630b3..b1467ae8 100644 --- a/xelis_common/src/serializer/mod.rs +++ b/xelis_common/src/serializer/mod.rs @@ -87,6 +87,10 @@ impl Writer { self.bytes.len() } + pub fn as_bytes(&self) -> &[u8] { + &self.bytes + } + pub fn bytes(self) -> Vec { self.bytes } diff --git a/xelis_wallet/src/transaction_builder.rs b/xelis_wallet/src/transaction_builder.rs index 16801bd9..5a849f31 100644 --- a/xelis_wallet/src/transaction_builder.rs +++ b/xelis_wallet/src/transaction_builder.rs @@ -4,7 +4,7 @@ use xelis_common::{ transaction::{Transaction, TransactionType, EXTRA_DATA_LIMIT_SIZE}, utils::calculate_tx_fee, serializer::{Writer, Serializer}, - crypto::{key::{SIGNATURE_LENGTH, PublicKey, KeyPair}, hash::Hash}, api::wallet::FeeBuilder + crypto::{key::{SIGNATURE_LENGTH, PublicKey, KeyPair}, hash::{Hash, hash}}, api::wallet::FeeBuilder }; use crate::wallet::WalletError; @@ -28,6 +28,7 @@ impl TransactionBuilder { fn serialize(&self) -> Writer { let mut writer = Writer::new(); + writer.write_u8(0); self.owner.write(&mut writer); self.data.write(&mut writer); writer @@ -122,10 +123,15 @@ impl 
TransactionBuilder { let mut writer = self.serialize(); let fee = self.verify_fees_internal(self.estimate_fees_internal(&writer))?; writer.write_u64(&fee); + writer.write_u64(&self.nonce); - let signature = keypair.sign(&writer.bytes()); + let signature = keypair.sign(hash(writer.as_bytes()).as_bytes()); let tx = Transaction::new(self.owner, self.data, fee, self.nonce, signature); + if !tx.verify_signature() { + return Err(WalletError::InvalidSignature) + } + Ok(tx) } } \ No newline at end of file diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index e58ee168..0c45a305 100644 --- a/xelis_wallet/src/wallet.rs +++ b/xelis_wallet/src/wallet.rs @@ -46,6 +46,8 @@ use { pub enum WalletError { #[error("Invalid key pair")] InvalidKeyPair, + #[error("Invalid signature")] + InvalidSignature, #[error("Expected a TX")] ExpectedOneTx, #[error("Too many txs included max is {}", u8::MAX)] From 6ef8678c9d901a05ad1799640a156fe64e45505b Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 29 Sep 2023 18:49:12 +0200 Subject: [PATCH 020/160] daemon: remove Paillier implementation temporarily --- Cargo.lock | 111 +------- xelis_common/Cargo.toml | 1 - xelis_common/src/crypto/mod.rs | 3 +- xelis_common/src/crypto/paillier.rs | 376 ---------------------------- 4 files changed, 14 insertions(+), 477 deletions(-) delete mode 100644 xelis_common/src/crypto/paillier.rs diff --git a/Cargo.lock b/Cargo.lock index 545b99c4..82a3c38d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -408,12 +408,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" -[[package]] -name = "az" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b7e4c2464d97fe331d41de9d5db0def0a96f4d823b8b32a2efd503578988973" - [[package]] name = "backtrace" version = "0.3.68" @@ -862,7 +856,7 @@ checksum = 
"4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a" dependencies = [ "errno-dragonfly", "libc", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] @@ -1020,16 +1014,6 @@ version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e" -[[package]] -name = "gmp-mpfr-sys" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19c5c67d8c29fe87e3266e691dd60948e6e4df4496c53355ef3551142945721b" -dependencies = [ - "libc", - "windows-sys 0.42.0", -] - [[package]] name = "h2" version = "0.3.20" @@ -1247,7 +1231,7 @@ checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.2", "rustix", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] @@ -1389,7 +1373,7 @@ dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] @@ -1781,17 +1765,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "rug" -version = "1.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a118f8296241f6952511e8f9ab5b8a759c0d9ed902159b0aeed82d902e84ca6" -dependencies = [ - "az", - "gmp-mpfr-sys", - "libc", -] - [[package]] name = "rustc-demangle" version = "0.1.23" @@ -1817,7 +1790,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] @@ -1887,7 +1860,7 @@ version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ - "windows-sys 0.48.0", + "windows-sys", ] [[package]] @@ -2243,7 +2216,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys", ] [[package]] @@ -2571,21 +2544,6 @@ dependencies = [ "windows-targets", ] -[[package]] -name = "windows-sys" -version = 
"0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - [[package]] name = "windows-sys" version = "0.48.0" @@ -2601,93 +2559,51 @@ version = "0.48.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" dependencies = [ - "windows_aarch64_gnullvm 0.48.0", - "windows_aarch64_msvc 0.48.0", - "windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm 0.48.0", - "windows_x86_64_msvc 0.48.0", + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", ] -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" - [[package]] name = "windows_aarch64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" - [[package]] name = "windows_aarch64_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" -[[package]] -name = "windows_i686_gnu" -version = "0.42.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" - [[package]] name = "windows_i686_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" -[[package]] -name = "windows_i686_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" - [[package]] name = "windows_i686_msvc" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" - [[package]] name = "windows_x86_64_gnu" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" - [[package]] name = "windows_x86_64_gnullvm" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" - [[package]] name = "windows_x86_64_msvc" version = "0.48.0" @@ -2726,7 +2642,6 @@ dependencies = [ "num-traits", "rand 0.8.5", "reqwest", - "rug", "serde", "serde_json", "sha3", diff --git 
a/xelis_common/Cargo.toml b/xelis_common/Cargo.toml index cb5fcb96..a359c0f5 100644 --- a/xelis_common/Cargo.toml +++ b/xelis_common/Cargo.toml @@ -26,7 +26,6 @@ lazy_static = "1.4.0" clap = { version = "3.1.18", features = ["derive"], optional = true } crossterm = "*" indexmap = { version = "2.0.0", features = ["serde"] } -rug = "1.22" actix-rt = { version = "2.8.0", optional = true } actix-web = { version = "4", optional = true } actix-ws = { version = "0.2.5", optional = true } diff --git a/xelis_common/src/crypto/mod.rs b/xelis_common/src/crypto/mod.rs index 7d5fa125..2db14d4a 100644 --- a/xelis_common/src/crypto/mod.rs +++ b/xelis_common/src/crypto/mod.rs @@ -1,5 +1,4 @@ pub mod hash; pub mod key; pub mod bech32; -pub mod address; -pub mod paillier; \ No newline at end of file +pub mod address; \ No newline at end of file diff --git a/xelis_common/src/crypto/paillier.rs b/xelis_common/src/crypto/paillier.rs deleted file mode 100644 index 126cc365..00000000 --- a/xelis_common/src/crypto/paillier.rs +++ /dev/null @@ -1,376 +0,0 @@ -use rug::{Integer, rand::RandState}; -use thiserror::Error; - -// p & q are two large primes and must have 2048 bits -pub const PRIME_BITS_SIZE: usize = 2048; -// Size is 4 * 2048 / 8 = 1024 bytes -// 4 because p * q = 4096 bits (N) -// and the ciphertext is the size of N^2 -pub const SIZE: usize = PRIME_BITS_SIZE * 4 / 8; - -#[derive(Debug, Error)] -pub enum CryptoError { - #[error("Invalid ciphertext for this Public Key")] - InvalidCiphertext, - #[error("Invalid operation")] - InvalidOperation, - #[error("Invalid plaintext")] - InvalidPlaintext, - #[error("Invalid decrypted value")] - InvalidDecryptedValue, - #[error("Invalid bits size")] - InvalidBitsSize -} - -pub struct PrivateKey { - p: Integer, - q: Integer -} - -impl PrivateKey { - pub fn new(p: Integer, q: Integer) -> Result { - if p.significant_bits() != PRIME_BITS_SIZE as u32 || q.significant_bits() != PRIME_BITS_SIZE as u32 { - return Err(CryptoError::InvalidBitsSize) - } 
- - Ok( - Self { - p, - q - } - ) - } - - // n = p * q - pub fn get_public_key(&self) -> Result { - PublicKey::new((&self.p * &self.q).into()) - } - - pub fn expand(self) -> Result { - let n: Integer = (&self.p * &self.q).into(); - - // lambda = (p-1) * (q-1) - let p_minus: Integer = &self.p - Integer::from(1); - let q_minus: Integer = &self.q - Integer::from(1); - let lambda: Integer = p_minus * q_minus; - - // boost performance, use invert_ref instead of extended GCD - let mu: Integer = lambda.invert_ref(&n).unwrap().into(); - - Ok( - ExpandedPrivateKey { - key: self.get_public_key()?, - _inner: self, - lambda, - mu, - } - ) - } -} - -// Expanded private key -pub struct ExpandedPrivateKey { - _inner: PrivateKey, - key: PublicKey, - lambda: Integer, - mu: Integer, -} - -impl ExpandedPrivateKey { - pub fn decrypt(&self, ciphertext: Ciphertext) -> Result { - if !ciphertext.is_valid(&self.key) { - return Err(CryptoError::InvalidCiphertext) - } - - // c^lambda mod n^2 - let c_lambda: Integer = ciphertext.value.pow_mod_ref(&self.lambda, &self.key.nn) - .ok_or(CryptoError::InvalidOperation)? - .into(); - - // L(x) = (x - 1) / n - let l_ = (c_lambda - Integer::from(1)) / &self.key.n; - // m = L(c^lambda mod n^2) * mu mod n - let plaintext = l_ * &self.mu % &self.key.n; - Ok(plaintext.to_u64().ok_or(CryptoError::InvalidDecryptedValue)?) 
- } - - pub fn get_public_key(&self) -> &PublicKey { - &self.key - } -} - -// Only necessary value is N, others are precomputed -pub struct PublicKey { - // the modulus (used for encryption) - n: Integer, - // n + 1 - g: Integer, - // n^2 - nn: Integer, -} - -impl PublicKey { - pub fn new(n: Integer) -> Result { - if n.significant_bits() != PRIME_BITS_SIZE as u32 * 2 { - return Err(CryptoError::InvalidCiphertext) - } - - Ok( - Self { - g: &n + Integer::from(1), - nn: n.square_ref().into(), - n, - } - ) - } - - // Generate r = 0 < r < n - pub fn generate_random_r(&self) -> Integer { - // Create a random number generator - let mut rng = RandState::new(); - // Generate a random number between 0 and n - let mut r: Integer = self.n.random_below_ref(&mut rng).into(); - // We want a positive value only - if r.is_zero() { - r += 1; - } - - r - } - - pub fn encrypt(&self, value: u64) -> Result { - let plaintext = Integer::from(value); - if plaintext >= self.n { - return Err(CryptoError::InvalidPlaintext) - } - - let r = self.generate_random_r(); - // c = g^value * r^n (mod n^2) - let c1: Integer = self.g.pow_mod_ref(&plaintext, &self.nn) - .ok_or(CryptoError::InvalidOperation)? 
- .into(); - let c2: Integer = r.pow_mod(&self.n, &self.nn) - .map_err(|_| CryptoError::InvalidOperation)?; - - let mul: Integer = c1 * c2; - Ok(Ciphertext::new(mul % &self.nn)) - } - - pub fn blind(&self, c1: Ciphertext) -> Result { - let r = self.generate_random_r(); - self.blind_with(c1, r) - } - - pub fn blind_with(&self, c1: Ciphertext, r: Integer) -> Result { - if !c1.is_valid(self) { - return Err(CryptoError::InvalidCiphertext) - } - - // C = c * r^n mod n^2 - let c2: Integer = r.pow_mod(&self.n, &self.nn) - .map_err(|_| CryptoError::InvalidOperation)?; - - let mul: Integer = c1.value * c2; - Ok(Ciphertext::new(mul % &self.nn)) - } - - // C1 * C2 mod n^2 - pub fn add(&self, c1: &Ciphertext, c2: &Ciphertext) -> Result { - if (!c1.is_valid(self)) || (!c2.is_valid(self)) { - return Err(CryptoError::InvalidCiphertext) - } - - let mul: Integer = (&c1.value * &c2.value).into(); - Ok(Ciphertext::new(mul % &self.nn)) - } - - // C1 * g^value mod N^2 - pub fn add_plaintext(&self, c1: &Ciphertext, value: u64) -> Result { - if !c1.is_valid(self) { - return Err(CryptoError::InvalidCiphertext) - } - - let plaintext = Integer::from(value); - let c2: Integer = self.g.pow_mod_ref(&plaintext, &self.nn) - .ok_or(CryptoError::InvalidOperation)? - .into(); - - let mul: Integer = (&c1.value * c2).into(); - Ok(Ciphertext::new(mul % &self.nn)) - } - - pub fn sub(&self, c1: &Ciphertext, c2: &Ciphertext) -> Result { - if !c1.is_valid(self) || !c2.is_valid(self) { - return Err(CryptoError::InvalidCiphertext) - } - - // Instead of searching bezout coefficients and GCD we can just invert the value - let negative_c2: Integer = c2.value.invert_ref(&self.nn) - .ok_or(CryptoError::InvalidOperation)? 
- .into(); - - Ok(Ciphertext::new(&c1.value * negative_c2 % &self.nn)) - } - - pub fn mul_plaintext(&self, c1: &Ciphertext, value: u64) -> Result { - if !c1.is_valid(self) { - return Err(CryptoError::InvalidCiphertext) - } - - let plaintext = Integer::from(value); - let mul = c1.value.pow_mod_ref(&plaintext, &self.nn) - .ok_or(CryptoError::InvalidOperation)? - .into(); - - Ok(Ciphertext::new(mul)) - } - - pub fn div_plaintext(&self, c1: &Ciphertext, value: u64) -> Result { - if !c1.is_valid(self) { - return Err(CryptoError::InvalidCiphertext) - } - - let plaintext = Integer::from(value); - let inverse = plaintext.invert(&self.nn).map_err(|_| CryptoError::InvalidOperation)?; - - let mul = c1.value.pow_mod_ref(&inverse, &self.nn) - .ok_or(CryptoError::InvalidOperation)? - .into(); - - Ok(Ciphertext::new(mul)) - } -} - -// Represents an encrypted value -pub struct Ciphertext { - value: Integer, -} - -impl Ciphertext { - pub fn new(value: Integer) -> Ciphertext { - Self { - value, - } - } - - // 0 < C < n^2 - pub fn is_valid(&self, key: &PublicKey) -> bool { - key.nn > self.value && self.value > Integer::from(0) - } -} - -mod tests { - use std::str::FromStr; - use super::*; - - fn _generate_private_key() -> ExpandedPrivateKey { - let p = Integer::from_str("26946565058508556335703057678479193452304038415320320612739026385225298610008864186185248157667939692602914497266158802716790474833947772826137352516209983737629258254217925182069688200921824682629208537057830159202300700254744398401385317004557290421622059016544387100633064484394429299712612387988787656113893086893594807335060378763142902668584121938589954668585758578121584153647867617579207136469100271575899315110489594116527521092010000583127405316221856395802750870474485516597674185947739156275281462539159055254987599109169478119201211066791295912114221003467197211019730323321923834862781706821839382425319").unwrap(); - let q = 
Integer::from_str("30285103848165032371432135057580005479137385975250075866315362110663210942596615960809988401619020086330330323690859032150264976037456961162655919684888298622597867407709454379915077961482177205641007860316172930122789053649106796228331050588480104621044323245329249654789956970860084725229793041508008076837900555099704375472732833392770407190572998528495204954650991713220053319696501576522725356507569592271456467055934422479932228786490254699513808991388789871837682571567374631101622153747215563532592329904419750104317088696095242472742008866975771374389004813336895149595148338528131027712001071213942813066383").unwrap(); - - PrivateKey::new(p, q).unwrap().expand().unwrap() - } - - #[test] - fn test_generate_private_key() { - let key = _generate_private_key(); - assert!(key._inner.p != key._inner.q); - assert!(key.key.n.significant_bits() == key._inner.p.significant_bits() + key._inner.q.significant_bits()); - assert!(key._inner.p * key._inner.q == key.key.n); - } - - #[test] - fn test_public_key_gcd_g_n_equal_one() { - let private_key = _generate_private_key(); - let public_key = private_key.get_public_key(); - let gcd: Integer = public_key.g.gcd_ref(&public_key.n).into(); - assert!(gcd == 1); - } - - #[test] - fn test_encrypt_decrypt() { - let key = _generate_private_key(); - let value = 10u64; - let ciphertext = key.get_public_key().encrypt(value).unwrap(); - let decrypted = key.decrypt(ciphertext).unwrap(); - assert_eq!(value, decrypted); - } - - #[test] - fn test_blind() { - let key = _generate_private_key(); - let value = 10u64; - - let ciphertext = key.get_public_key().encrypt(value).unwrap(); - let blinded = key.get_public_key().blind(ciphertext).unwrap(); - - let decrypted = key.decrypt(blinded).unwrap(); - assert_eq!(value, decrypted); - } - - #[test] - fn test_homomorphic_add() { - let key = _generate_private_key(); - let left = 2500u64; - let right = 500u64; - - let c1 = key.get_public_key().encrypt(left).unwrap(); - let c2 = 
key.get_public_key().encrypt(right).unwrap(); - - let sum = key.get_public_key().add(&c1, &c2).unwrap(); - let decrypted = key.decrypt(sum).unwrap(); - assert_eq!(left + right, decrypted); - } - - #[test] - fn test_homomorphic_add_plaintext() { - let key = _generate_private_key(); - let left = 2500u64; - let right = 500u64; - - let c1 = key.get_public_key().encrypt(left).unwrap(); - - let sum = key.get_public_key().add_plaintext(&c1, right).unwrap(); - let decrypted = key.decrypt(sum).unwrap(); - assert_eq!(left + right, decrypted); - } - - #[test] - fn test_homomorphic_sub() { - let key = _generate_private_key(); - let left = 2500u64; - let right = 500u64; - - let c1 = key.get_public_key().encrypt(left).unwrap(); - let c2 = key.get_public_key().encrypt(right).unwrap(); - - let sum = key.get_public_key().sub(&c1, &c2).unwrap(); - let decrypted = key.decrypt(sum).unwrap(); - assert_eq!(left - right, decrypted); - } - - #[test] - fn test_homomorphic_mul() { - let key = _generate_private_key(); - let left = 2500u64; - let right = 17u64; - - let c1 = key.get_public_key().encrypt(left).unwrap(); - - let sum = key.get_public_key().mul_plaintext(&c1, right).unwrap(); - let decrypted = key.decrypt(sum).unwrap(); - assert_eq!(left * right, decrypted); - } - - #[test] - fn test_homomorphic_div() { - let key = _generate_private_key(); - let left = 2500u64; - let right = 10u64; - - let c1 = key.get_public_key().encrypt(left).unwrap(); - - let sum = key.get_public_key().div_plaintext(&c1, right).unwrap(); - let decrypted = key.decrypt(sum).unwrap(); - assert_eq!(left / right, decrypted); - } -} \ No newline at end of file From d1b5e65fa9529e5c145aea44957c2ccaddb37afa Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 29 Sep 2023 19:43:37 +0200 Subject: [PATCH 021/160] daemon: improve log --- xelis_daemon/src/p2p/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index e182f047..5676071f 100644 --- 
a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1538,7 +1538,7 @@ impl P2pServer { let pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); // verify that the topoheight asked is above the PRUNE_SAFETY_LIMIT if topoheight < PRUNE_SAFETY_LIMIT || pruned_topoheight + PRUNE_SAFETY_LIMIT > topoheight || our_topoheight < PRUNE_SAFETY_LIMIT { - warn!("Invalid begin topoheight (received {}, our is {}) received from {}", topoheight, our_topoheight, peer); + warn!("Invalid begin topoheight (received {}, our is {}, pruned: {}) received from {}", topoheight, our_topoheight, pruned_topoheight, peer); return Err(P2pError::InvalidPacket.into()) } } From d807a2f1a9e89fb3e307784fe384d9664f6568b1 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 29 Sep 2023 19:59:13 +0200 Subject: [PATCH 022/160] daemon: add check for fast sync requests --- xelis_daemon/src/p2p/mod.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 5676071f..81dacd01 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1537,7 +1537,7 @@ impl P2pServer { let our_topoheight = self.blockchain.get_topo_height(); let pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); // verify that the topoheight asked is above the PRUNE_SAFETY_LIMIT - if topoheight < PRUNE_SAFETY_LIMIT || pruned_topoheight + PRUNE_SAFETY_LIMIT > topoheight || our_topoheight < PRUNE_SAFETY_LIMIT { + if pruned_topoheight >= topoheight || topoheight > our_topoheight || topoheight < PRUNE_SAFETY_LIMIT || our_topoheight < PRUNE_SAFETY_LIMIT { warn!("Invalid begin topoheight (received {}, our is {}, pruned: {}) received from {}", topoheight, our_topoheight, pruned_topoheight, peer); return Err(P2pError::InvalidPacket.into()) } @@ -1551,6 +1551,11 @@ impl P2pServer { StepResponse::ChainInfo(stable_topo, height, hash) }, StepRequest::Assets(min, max, page) => { + if min > max { + warn!("Invalid 
range for assets"); + return Err(P2pError::InvalidPacket.into()) + } + let page = page.unwrap_or(0); let assets = storage.get_partial_assets(MAX_ITEMS_PER_PAGE, page as usize * MAX_ITEMS_PER_PAGE, min, max).await?; let page = if assets.len() == MAX_ITEMS_PER_PAGE { @@ -1573,6 +1578,11 @@ impl P2pServer { StepResponse::Nonces(nonces) }, StepRequest::Keys(min, max, page) => { + if min > max { + warn!("Invalid range for assets"); + return Err(P2pError::InvalidPacket.into()) + } + let page = page.unwrap_or(0); let keys = storage.get_partial_keys(MAX_ITEMS_PER_PAGE, page as usize * MAX_ITEMS_PER_PAGE, min, max).await?; let page = if keys.len() == MAX_ITEMS_PER_PAGE { From ab5708f285cac1e06ad0795ed1b589065d2c8910 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 29 Sep 2023 20:36:44 +0200 Subject: [PATCH 023/160] daemon: fix fast sync block metadata --- xelis_daemon/src/p2p/mod.rs | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 81dacd01..bda75d63 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1533,11 +1533,16 @@ impl P2pServer { debug!("Handle bootstrap chain request {:?} from {}", request_kind, peer); let storage = self.blockchain.get_storage().read().await; + let pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); if let Some(topoheight) = request.get_requested_topoheight() { let our_topoheight = self.blockchain.get_topo_height(); - let pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); // verify that the topoheight asked is above the PRUNE_SAFETY_LIMIT - if pruned_topoheight >= topoheight || topoheight > our_topoheight || topoheight < PRUNE_SAFETY_LIMIT || our_topoheight < PRUNE_SAFETY_LIMIT { + // TODO check that the block is stable + if + pruned_topoheight >= topoheight + || topoheight > our_topoheight + || topoheight < PRUNE_SAFETY_LIMIT + { warn!("Invalid begin topoheight (received {}, our is {}, 
pruned: {}) received from {}", topoheight, our_topoheight, pruned_topoheight, peer); return Err(P2pError::InvalidPacket.into()) } @@ -1594,8 +1599,14 @@ impl P2pServer { }, StepRequest::BlocksMetadata(topoheight) => { let mut blocks = Vec::with_capacity(PRUNE_SAFETY_LIMIT as usize); - // go until the requested stable topoheight - for topoheight in (topoheight-PRUNE_SAFETY_LIMIT..=topoheight).rev() { + // go from the lowest available point until the requested stable topoheight + let lower = if topoheight - PRUNE_SAFETY_LIMIT <= pruned_topoheight { + pruned_topoheight + 1 + } else { + topoheight - PRUNE_SAFETY_LIMIT + }; + + for topoheight in (lower..=topoheight).rev() { let hash = storage.get_hash_at_topo_height(topoheight).await?; let supply = storage.get_supply_for_block_hash(&hash)?; let reward = storage.get_block_reward(&hash)?; From 5d0faab06db3fcd6407d49970d7174a1827f74d3 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 29 Sep 2023 20:52:53 +0200 Subject: [PATCH 024/160] daemon: request node inventory at new connection --- xelis_daemon/src/p2p/mod.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index bda75d63..8da6e186 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -317,7 +317,7 @@ impl P2pServer { peer_list.add_peer(peer_id, peer) }; - self.handle_connection(peer.clone()).await + self.handle_connection(peer).await } // Connect to a specific peer address @@ -655,6 +655,7 @@ impl P2pServer { // task for reading from peer { let zelf = Arc::clone(&self); + let peer = Arc::clone(&peer); tokio::spawn(async move { if let Err(e) = zelf.handle_connection_read_side(&peer, write_task).await { debug!("Error while running read part from peer {}: {}", peer, e); @@ -667,6 +668,11 @@ impl P2pServer { }); } + // Request its inventory to be sure to be synced + if let Err(e) = self.request_inventory_of(&peer).await { + warn!("Error while requesting inventory 
of {}: {}", peer, e); + } + Ok(()) } @@ -1118,7 +1124,6 @@ impl P2pServer { }); } - // request the next page if next_page.is_some() { trace!("Requesting next page of inventory from {}", peer); @@ -1127,7 +1132,6 @@ impl P2pServer { peer.set_requested_inventory(true); peer.send_packet(Packet::NotifyInventoryRequest(PacketWrapper::new(packet, ping))).await?; } - }, Packet::BootstrapChainRequest(request) => { self.handle_bootstrap_chain_request(peer, request.step()).await?; From 1a490df44bd1966d1ee9868ab7ca0abd92f197a2 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 29 Sep 2023 21:11:08 +0200 Subject: [PATCH 025/160] daemon: support zero nonces for txs --- xelis_daemon/src/core/blockchain.rs | 5 ++++- xelis_daemon/src/rpc/getwork_server.rs | 6 +++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index cebd30de..53e79d89 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1042,10 +1042,13 @@ impl Blockchain { let transaction = sorted_tx.get_tx(); let account_nonce = if let Some(nonce) = nonces.get(transaction.get_owner()) { *nonce - } else { + } else if storage.has_nonce(transaction.get_owner()).await? 
{ let (_, version) = storage.get_last_nonce(transaction.get_owner()).await?; nonces.insert(transaction.get_owner(), version.get_nonce()); version.get_nonce() + } else { // This sender has no nonce, we start at 0 + nonces.insert(transaction.get_owner(), 0); + 0 }; if account_nonce < transaction.get_nonce() { diff --git a/xelis_daemon/src/rpc/getwork_server.rs b/xelis_daemon/src/rpc/getwork_server.rs index d4ea7d9f..48d138eb 100644 --- a/xelis_daemon/src/rpc/getwork_server.rs +++ b/xelis_daemon/src/rpc/getwork_server.rs @@ -209,7 +209,11 @@ impl GetWorkServer { // notify the new miner so he can work ASAP let zelf = Arc::clone(&self); - tokio::spawn(zelf.send_new_job(addr, key)); + tokio::spawn(async move { + if let Err(e) = zelf.send_new_job(addr, key).await { + error!("Error while sending new job to miner: {}", e); + } + }); } pub async fn delete_miner(&self, addr: &Addr>) { From e2f7d3b6f1467447fe05f0b2079305198438f0fd Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 29 Sep 2023 22:30:11 +0200 Subject: [PATCH 026/160] wallet: fix rescan --- xelis_wallet/src/wallet.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index 0c45a305..466eb743 100644 --- a/xelis_wallet/src/wallet.rs +++ b/xelis_wallet/src/wallet.rs @@ -464,8 +464,12 @@ impl Wallet { storage.delete_top_block_hash()?; // balances will be re-fetched from daemon storage.delete_balances()?; - let nonce_result = network_handler.get_api().get_last_nonce(&self.get_address()).await?; - storage.set_nonce(nonce_result.version.get_nonce())?; + let nonce_result = network_handler.get_api() + .get_last_nonce(&self.get_address()).await + // User has no transactions/balances yet, set its nonce to 0 + .map(|v| v.version.get_nonce()).unwrap_or(0); + + storage.set_nonce(nonce_result)?; if topoheight == 0 { storage.delete_transactions()?; From a51bf36b0aae9c615c683c26b35fe98fbed52ac0 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 29 Sep 2023 
22:59:05 +0200 Subject: [PATCH 027/160] daemon: add 'first_seen' for mempool txs in TransactionResponse --- xelis_common/src/api/daemon.rs | 4 ++++ xelis_daemon/src/core/blockchain.rs | 2 +- xelis_daemon/src/core/mempool.rs | 7 +++++++ xelis_daemon/src/rpc/rpc.rs | 18 ++++++++---------- 4 files changed, 20 insertions(+), 11 deletions(-) diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index a4662196..e5d6f8a4 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -174,6 +174,10 @@ pub struct TransactionResponse<'a, T: Clone> { pub executed_in_block: Option, // if it is in mempool pub in_mempool: bool, + // if its a mempool tx, we add the timestamp when it was added + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(default)] + pub first_seen: Option, #[serde(flatten)] pub data: DataHash<'a, T> } diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 53e79d89..2af9494a 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1050,7 +1050,7 @@ impl Blockchain { nonces.insert(transaction.get_owner(), 0); 0 }; - + if account_nonce < transaction.get_nonce() { trace!("Skipping {} with {} fees because another TX should be selected first due to nonce", hash, format_coin(fee.0)); } else if account_nonce == transaction.get_nonce() { diff --git a/xelis_daemon/src/core/mempool.rs b/xelis_daemon/src/core/mempool.rs index 98098b0e..94b76563 100644 --- a/xelis_daemon/src/core/mempool.rs +++ b/xelis_daemon/src/core/mempool.rs @@ -3,6 +3,7 @@ use std::cmp::Reverse; use std::collections::{HashMap, BTreeMap, HashSet}; use std::sync::Arc; use log::{trace, debug}; +use xelis_common::utils::get_current_time; use xelis_common::{ crypto::{ hash::Hash, @@ -15,6 +16,7 @@ use xelis_common::{ #[derive(serde::Serialize)] pub struct SortedTx { tx: Arc, + first_seen: u64, // timestamp when the tx was added size: usize } @@ -80,6 +82,7 @@ impl Mempool { 
let sorted_tx = SortedTx { size: tx.size(), + first_seen: get_current_time(), tx }; @@ -221,6 +224,10 @@ impl SortedTx { self.size } + pub fn get_first_seen(&self) -> u64 { + self.first_seen + } + pub fn consume(self) -> Arc { self.tx } diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 0d1947cc..18654208 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -84,7 +84,7 @@ pub async fn get_block_response_for_hash(blockchain: &Blockchain, Ok(value) } -pub async fn get_transaction_response(storage: &S, tx: &Arc, hash: &Hash, in_mempool: bool) -> Result { +pub async fn get_transaction_response(storage: &S, tx: &Arc, hash: &Hash, in_mempool: bool, first_seen: Option) -> Result { let blocks = if storage.has_tx_blocks(hash).context("Error while checking if tx in included in blocks")? { Some(storage.get_blocks_for_tx(hash).context("Error while retrieving in which blocks its included")?) } else { @@ -93,20 +93,18 @@ pub async fn get_transaction_response(storage: &S, tx: &Arc> = DataHash { hash: Cow::Borrowed(&hash), data: Cow::Borrowed(tx) }; let executed_in_block = storage.get_block_executer_for_tx(hash).ok(); - Ok(json!(TransactionResponse { blocks, executed_in_block, data, in_mempool })) + Ok(json!(TransactionResponse { blocks, executed_in_block, data, in_mempool, first_seen })) } // first check on disk, then check in mempool pub async fn get_transaction_response_for_hash(storage: &S, mempool: &Mempool, hash: &Hash) -> Result { - let (transaction, in_mempool) = match storage.get_transaction(hash).await { - Ok(tx) => (tx, false), + match storage.get_transaction(hash).await { + Ok(tx) => get_transaction_response(storage, &tx, hash, false, None).await, Err(_) => { - let tx = mempool.get_tx(hash).context("Error while retrieving transaction from disk and mempool")?; - (tx, true) + let tx = mempool.get_sorted_tx(hash).context("Error while retrieving transaction from disk and mempool")?; + get_transaction_response(storage, 
&tx.get_tx(), hash, true, Some(tx.get_first_seen())).await } - }; - - get_transaction_response(storage, &transaction, hash, in_mempool).await + } } pub fn register_methods(handler: &mut RPCHandler>>) { @@ -389,7 +387,7 @@ async fn get_mempool(blockchain: Arc>, body: Value) -> let storage = blockchain.get_storage().read().await; let mut transactions: Vec = Vec::new(); for (hash, sorted_tx) in mempool.get_txs() { - transactions.push(get_transaction_response(&*storage, sorted_tx.get_tx(), hash, true).await?); + transactions.push(get_transaction_response(&*storage, sorted_tx.get_tx(), hash, true, Some(sorted_tx.get_first_seen())).await?); } Ok(json!(transactions)) From aff43f03da996b6282c183add4d9b7a927c21804 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 30 Sep 2023 02:27:48 +0200 Subject: [PATCH 028/160] daemon: maybe I should take some rest? --- xelis_daemon/src/core/storage/sled.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index 3ad6194c..96ba9a6e 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -648,7 +648,6 @@ impl Storage for SledStorage { fn remove_tx_executed(&mut self, tx: &Hash) -> Result<(), BlockchainError> { self.txs_executed.remove(tx.as_bytes())?; - self.remove_tx_executed(tx)?; Ok(()) } From 9f4d94a739e0dae0cd1a71954df80f8a608f4120 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 30 Sep 2023 14:16:18 +0200 Subject: [PATCH 029/160] daemon: better tx propagation --- xelis_daemon/src/core/blockchain.rs | 22 ++++--- xelis_daemon/src/core/storage/sled.rs | 1 - xelis_daemon/src/p2p/mod.rs | 79 ++++++++++++++---------- xelis_daemon/src/p2p/packet/inventory.rs | 8 ++- xelis_daemon/src/p2p/tracker.rs | 8 +-- 5 files changed, 68 insertions(+), 50 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 2af9494a..3e232d69 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ 
b/xelis_daemon/src/core/blockchain.rs @@ -1022,6 +1022,7 @@ impl Blockchain { // txs are sorted in descending order thanks to Reverse { + let mut balances = HashMap::new(); 'main: for (fee, hashes) in txs { let mut transactions: Vec<(&Arc, &SortedTx)> = Vec::with_capacity(hashes.len()); // prepare TXs by sorting them by nonce @@ -1054,12 +1055,17 @@ impl Blockchain { if account_nonce < transaction.get_nonce() { trace!("Skipping {} with {} fees because another TX should be selected first due to nonce", hash, format_coin(fee.0)); } else if account_nonce == transaction.get_nonce() { - trace!("Selected {} (nonce: {}, account nonce: {}, fees: {}) for mining", hash, transaction.get_nonce(), account_nonce, format_coin(fee.0)); - // TODO no clone - block.txs_hashes.push(hash.as_ref().clone()); - total_txs_size += sorted_tx.get_size(); - // we use unwrap because above we insert it - *nonces.get_mut(transaction.get_owner()).unwrap() += 1; + // Check if it can be added to the block + if self.verify_transaction_with_hash(&storage, sorted_tx.get_tx(), hash, &mut balances, None, true).await.is_ok() { + trace!("Selected {} (nonce: {}, account nonce: {}, fees: {}) for mining", hash, transaction.get_nonce(), account_nonce, format_coin(fee.0)); + // TODO no clone + block.txs_hashes.push(hash.as_ref().clone()); + total_txs_size += sorted_tx.get_size(); + // we use unwrap because above we insert it + *nonces.get_mut(transaction.get_owner()).unwrap() += 1; + } else { + warn!("This TX {} is not suitable for this block (nonce: {}, account nonce: {}, fees: {})", hash, transaction.get_nonce(), account_nonce, format_coin(fee.0)); + } } else { warn!("This TX in mempool {} is in advance (nonce: {}, account nonce: {}, fees: {}), it should be removed from mempool", hash, transaction.get_nonce(), account_nonce, format_coin(fee.0)); } @@ -1825,7 +1831,7 @@ impl Blockchain { if let Some(value) = balance.checked_sub(output.amount) { *balance = value; } else { - warn!("Overflow detected with 
transaction {}", hash); + warn!("Overflow detected with transaction transfer {}", hash); return Err(BlockchainError::Overflow) } } @@ -1850,7 +1856,7 @@ impl Blockchain { if let Some(value) = balance.checked_sub(*amount) { *balance = value; } else { - warn!("Overflow detected with transaction {}", hash); + warn!("Overflow detected with transaction burn {}", hash); return Err(BlockchainError::Overflow) } }, diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index 96ba9a6e..58b55c06 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -648,7 +648,6 @@ impl Storage for SledStorage { fn remove_tx_executed(&mut self, tx: &Hash) -> Result<(), BlockchainError> { self.txs_executed.remove(tx.as_bytes())?; - Ok(()) } diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 8da6e186..52a32f2c 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -14,7 +14,7 @@ use xelis_common::{ block::{BlockHeader, Block}, utils::get_current_time, immutable::Immutable, account::VersionedNonce }; -use crate::{core::{blockchain::Blockchain, storage::Storage}, p2p::{chain_validator::ChainValidator, packet::{bootstrap_chain::{StepRequest, StepResponse, BootstrapChainResponse, MAX_ITEMS_PER_PAGE, BlockMetadata}, inventory::{NOTIFY_MAX_LEN, NotifyInventoryRequest, NotifyInventoryResponse}}, tracker::WaiterResponse}}; +use crate::{core::{blockchain::Blockchain, storage::Storage}, p2p::{chain_validator::ChainValidator, packet::{bootstrap_chain::{StepRequest, StepResponse, BootstrapChainResponse, MAX_ITEMS_PER_PAGE, BlockMetadata}, inventory::{NOTIFY_MAX_LEN, NotifyInventoryRequest, NotifyInventoryResponse}}}}; use crate::core::error::BlockchainError; use crate::p2p::connection::ConnectionMessage; use crate::p2p::packet::chain::CommonPoint; @@ -668,18 +668,13 @@ impl P2pServer { }); } - // Request its inventory to be sure to be synced - if let Err(e) = 
self.request_inventory_of(&peer).await { - warn!("Error while requesting inventory of {}: {}", peer, e); - } - - Ok(()) - } - - async fn handle_transaction_propagation_response(self: Arc, waiter: WaiterResponse) -> Result<(), P2pError> { - let response = waiter.await??; - if let OwnedObjectResponse::Transaction(tx, hash) = response { - self.blockchain.add_tx_with_hash_to_mempool(tx, hash, true).await?; + // verify that we are synced with him to receive all TXs correctly + let our_topoheight = self.blockchain.get_topo_height(); + let peer_topoheight = peer.get_topoheight(); + if peer_topoheight == our_topoheight { + if let Err(e) = self.request_inventory_of(&peer).await { + warn!("Error while requesting inventory of {}: {}", peer, e); + } } Ok(()) @@ -708,14 +703,28 @@ impl P2pServer { ping.into_owned().update_peer(peer).await?; if !self.blockchain.has_tx(&hash).await? && !self.object_tracker.has_requested_object(&hash).await { - let waiter: WaiterResponse = self.object_tracker.request_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash))?; let peer = Arc::clone(peer); let zelf = Arc::clone(&self); tokio::spawn(async move { - if let Err(e) = zelf.handle_transaction_propagation_response(waiter).await { - peer.increment_fail_count(); - error!("Error while handling transaction propagation response: {}", e); + let response = match zelf.object_tracker.request_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash)).await { + Ok(response) => response, + Err(e) => { + error!("Error while requesting TX to {} using ObjectTracker: {}", peer, e); + peer.increment_fail_count(); + return; + } }; + + if let OwnedObjectResponse::Transaction(tx, hash) = response { + if let Err(e) = zelf.blockchain.add_tx_with_hash_to_mempool(tx, hash, true).await { + if let BlockchainError::TxAlreadyInMempool(_) = e { + debug!("TX is already in mempool finally, another peer was faster"); + } else { + error!("Error while adding new requested tx to mempool: {}", e); + 
peer.increment_fail_count(); + } + } + } }); } }, @@ -1024,21 +1033,22 @@ impl P2pServer { ping.into_owned().update_peer(peer).await?; let request = request.into_owned(); - let mut hashes = IndexSet::new(); + + let page_id = request.page().unwrap_or(0); + let skip = page_id as usize * NOTIFY_MAX_LEN; let mempool = self.blockchain.get_mempool().read().await; let nonces_cache = mempool.get_nonces_cache(); - let all_txs = nonces_cache.values().flat_map(|v| v.get_txs().values()).collect::>(); - let next_page = { - let page_id = request.page().unwrap_or(0); - let skip = page_id as usize * NOTIFY_MAX_LEN; + let all_txs = nonces_cache.values() + .flat_map(|v| v.get_txs().values()) + .skip(skip).take(NOTIFY_MAX_LEN) + .map(|tx| Cow::Borrowed(tx.as_ref())) + .collect::>(); - let all_txs_size = all_txs.len(); + let next_page = { + let all_txs_size = mempool.size(); if skip < all_txs_size { - for tx_hash in all_txs.into_iter().skip(skip).take(NOTIFY_MAX_LEN) { - hashes.insert(Cow::Borrowed(tx_hash.as_ref())); - } - let left = all_txs_size - (hashes.len() + skip); + let left = all_txs_size - (all_txs.len() + skip); if left > 0 { Some(page_id + 1) } else { @@ -1049,7 +1059,7 @@ impl P2pServer { } }; - let packet = NotifyInventoryResponse::new(next_page, Cow::Owned(hashes)); + let packet = NotifyInventoryResponse::new(next_page, Cow::Owned(all_txs)); peer.send_packet(Packet::NotifyInventoryResponse(packet)).await? }, Packet::NotifyInventoryResponse(inventory) => { @@ -1081,7 +1091,7 @@ impl P2pServer { let mempool = self.blockchain.get_mempool().read().await; let storage = self.blockchain.get_storage().read().await; for hash in txs.into_owned() { - if !mempool.contains_tx(&hash) && !storage.has_transaction(&hash).await? { + if !mempool.contains_tx(&hash) && !storage.has_transaction(&hash).await? 
&& !self.object_tracker.has_requested_object(&hash).await { missing_txs.push(hash.into_owned()); } } @@ -1089,22 +1099,23 @@ impl P2pServer { // second part is to retrieve all txs we don't have concurrently // we don't want to block the peer and others locks for too long so we do it in a separate task - { + if !missing_txs.is_empty() { let peer = Arc::clone(&peer); - let blockchain = Arc::clone(&self.blockchain); + let zelf = Arc::clone(&self); tokio::spawn(async move { for hash in missing_txs { - let response = match peer.request_blocking_object(ObjectRequest::Transaction(hash)).await { + let response = match zelf.object_tracker.request_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash)).await { Ok(response) => response, Err(e) => { - error!("Error while retrieving tx from {} inventory: {}", peer, e); + error!("Error while requesting TX to {} using ObjectTracker: {}", peer, e); peer.increment_fail_count(); return; } }; if let OwnedObjectResponse::Transaction(tx, hash) = response { - if let Err(e) = blockchain.add_tx_with_hash_to_mempool(tx, hash, false).await { + debug!("Received {} with nonce {}", hash, tx.get_nonce()); + if let Err(e) = zelf.blockchain.add_tx_with_hash_to_mempool(tx, hash, false).await { match e { BlockchainError::TxAlreadyInMempool(hash) | BlockchainError::TxAlreadyInBlockchain(hash) => { // ignore because maybe another peer send us this same tx diff --git a/xelis_daemon/src/p2p/packet/inventory.rs b/xelis_daemon/src/p2p/packet/inventory.rs index bf5e5269..da953568 100644 --- a/xelis_daemon/src/p2p/packet/inventory.rs +++ b/xelis_daemon/src/p2p/packet/inventory.rs @@ -1,9 +1,10 @@ use std::borrow::Cow; use indexmap::IndexSet; +use log::debug; use xelis_common::{crypto::hash::Hash, serializer::{Serializer, ReaderError, Reader, Writer}}; -pub const NOTIFY_MAX_LEN: usize = 1024; // 1024 * 32 bytes = 32KB +pub const NOTIFY_MAX_LEN: usize = 16384; // 16384 * 32 bytes = 512 KiB #[derive(Debug, Clone)] pub struct 
NotifyInventoryRequest { @@ -66,7 +67,10 @@ impl<'a> Serializer for NotifyInventoryResponse<'a> { let mut txs = IndexSet::with_capacity(count as usize); for _ in 0..count { - txs.insert(Cow::Owned(reader.read_hash()?)); + if !txs.insert(Cow::Owned(reader.read_hash()?)) { + debug!("Duplicate transaction in NotifyInventoryResponse"); + return Err(ReaderError::InvalidValue) + } } Ok(Self::new(next, Cow::Owned(txs))) } diff --git a/xelis_daemon/src/p2p/tracker.rs b/xelis_daemon/src/p2p/tracker.rs index 4e98800c..6cbab342 100644 --- a/xelis_daemon/src/p2p/tracker.rs +++ b/xelis_daemon/src/p2p/tracker.rs @@ -9,9 +9,7 @@ use super::{packet::{object::{ObjectRequest, OwnedObjectResponse}, Packet}, erro pub type SharedObjectTracker = Arc; -pub type WaiterResponse = oneshot::Receiver>; - -// this sender allows to create a queue system in one task only +// this ObjectTracker is a unique sender allows to create a queue system in one task only // currently used to fetch in order all txs propagated by the network pub struct ObjectTracker { request_sender: UnboundedSender, @@ -106,11 +104,11 @@ impl ObjectTracker { Ok(()) } - pub fn request_object_from_peer(&self, peer: Arc, request: ObjectRequest) -> Result { + pub async fn request_object_from_peer(&self, peer: Arc, request: ObjectRequest) -> Result { let (sender, receiver) = oneshot::channel(); self.request_sender.send(Message::Request(peer, request, sender))?; - Ok(receiver) + Ok(receiver.await??) 
} async fn request_object_from_peer_internal(&self, peer: &Peer, request: ObjectRequest) -> Result<(), P2pError> { From 164e6979623e015dda947ccd91930d248c5138be Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 1 Oct 2023 01:25:34 +0200 Subject: [PATCH 030/160] daemon: better tx propagation --- xelis_daemon/src/core/blockchain.rs | 6 +-- xelis_daemon/src/p2p/mod.rs | 80 ++++++++++++----------------- xelis_daemon/src/p2p/queue.rs | 48 +++++++++++++++++ xelis_daemon/src/p2p/tracker.rs | 8 ++- 4 files changed, 87 insertions(+), 55 deletions(-) create mode 100644 xelis_daemon/src/p2p/queue.rs diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 3e232d69..f3a107f7 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -952,11 +952,7 @@ impl Blockchain { if broadcast { // P2p broadcast to others peers if let Some(p2p) = self.p2p.lock().await.as_ref() { - let p2p = Arc::clone(p2p); - let hash = hash.clone(); - tokio::spawn(async move { - p2p.broadcast_tx_hash(&hash).await; - }); + p2p.broadcast_tx_hash(&storage, hash.clone()).await; } // broadcast to websocket this tx diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 52a32f2c..8ff6ba12 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -5,6 +5,7 @@ pub mod packet; pub mod peer_list; pub mod chain_validator; mod tracker; +mod queue; use indexmap::IndexSet; use xelis_common::{ @@ -18,7 +19,7 @@ use crate::{core::{blockchain::Blockchain, storage::Storage}, p2p::{chain_valida use crate::core::error::BlockchainError; use crate::p2p::connection::ConnectionMessage; use crate::p2p::packet::chain::CommonPoint; -use self::{packet::chain::{BlockId, ChainRequest, ChainResponse}, tracker::{ObjectTracker, SharedObjectTracker}}; +use self::{packet::chain::{BlockId, ChainRequest, ChainResponse}, tracker::{ObjectTracker, SharedObjectTracker}, queue::QueuedFetcher}; use self::packet::object::{ObjectRequest, 
ObjectResponse, OwnedObjectResponse}; use self::peer_list::{SharedPeerList, PeerList}; use self::connection::{State, Connection}; @@ -59,8 +60,8 @@ pub struct P2pServer { verify_syncing_time_out: AtomicBool, // chain sync timeout check last_sync_request_sent: AtomicU64, // used to check if we are already syncing with one peer or not object_tracker: SharedObjectTracker, // used to requests objects to peers and avoid requesting the same object to multiple peers - broadcast_semaphore: Semaphore, // used to limit the number of broadcast messages - propagation_semaphore: Semaphore, // used to limit the number of block propagations common peers check + queued_fetcher: QueuedFetcher, // used to requests all propagated txs in one task only + block_propagation_semaphore: Semaphore, // used to limit the number of block propagations common peers check is_running: AtomicBool // used to check if the server is running or not in tasks } @@ -76,6 +77,10 @@ impl P2pServer { let addr: SocketAddr = bind_address.parse()?; // parse the bind address // create mspc channel let (connections_sender, receiver) = mpsc::unbounded_channel(); + + let object_tracker = ObjectTracker::new(); + let queued_fetcher = QueuedFetcher::new(Arc::clone(&blockchain), Arc::clone(&object_tracker)); + let server = Self { peer_id, tag, @@ -87,9 +92,9 @@ impl P2pServer { syncing: AtomicBool::new(false), verify_syncing_time_out: AtomicBool::new(false), last_sync_request_sent: AtomicU64::new(0), - object_tracker: ObjectTracker::new(), - broadcast_semaphore: Semaphore::new(1), - propagation_semaphore: Semaphore::new(1), + object_tracker, + queued_fetcher, + block_propagation_semaphore: Semaphore::new(1), is_running: AtomicBool::new(true) }; @@ -354,9 +359,8 @@ impl P2pServer { // build a ping packet with the current state of the blockchain // if a peer is given, we will check and update the peers list - async fn build_generic_ping_packet(&self) -> Ping<'_> { + async fn build_generic_ping_packet_with_storage(&self, 
storage: &S) -> Ping<'_> { let (cumulative_difficulty, block_top_hash, pruned_topoheight) = { - let storage = self.blockchain.get_storage().read().await; let pruned_topoheight = match storage.get_pruned_topoheight() { Ok(pruned_topoheight) => pruned_topoheight, Err(e) => { @@ -379,6 +383,11 @@ impl P2pServer { Ping::new(Cow::Owned(block_top_hash), highest_topo_height, highest_height, pruned_topoheight, cumulative_difficulty, new_peers) } + async fn build_generic_ping_packet(&self) -> Ping<'_> { + let storage = self.blockchain.get_storage().read().await; + self.build_generic_ping_packet_with_storage(&*storage).await + } + // select a random peer which is greater than us to sync chain // candidate peer should have a greater topoheight or a higher block height than us // if we are not in fast sync mode, we must verify its pruned topoheight to be sure @@ -694,38 +703,18 @@ impl P2pServer { // peer should not send us twice the same transaction debug!("Received tx hash {} from {}", hash, peer); - let mut txs_cache = peer.get_txs_cache().lock().await; - if txs_cache.contains(&hash) { - warn!("{} send us a transaction ({}) already tracked by him", peer, hash); - return Err(P2pError::InvalidProtocolRules) + { + let mut txs_cache = peer.get_txs_cache().lock().await; + if txs_cache.contains(&hash) { + warn!("{} send us a transaction ({}) already tracked by him", peer, hash); + return Err(P2pError::InvalidProtocolRules) + } + txs_cache.put(hash.clone(), ()); } - txs_cache.put(hash.clone(), ()); ping.into_owned().update_peer(peer).await?; if !self.blockchain.has_tx(&hash).await? 
&& !self.object_tracker.has_requested_object(&hash).await { - let peer = Arc::clone(peer); - let zelf = Arc::clone(&self); - tokio::spawn(async move { - let response = match zelf.object_tracker.request_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash)).await { - Ok(response) => response, - Err(e) => { - error!("Error while requesting TX to {} using ObjectTracker: {}", peer, e); - peer.increment_fail_count(); - return; - } - }; - - if let OwnedObjectResponse::Transaction(tx, hash) = response { - if let Err(e) = zelf.blockchain.add_tx_with_hash_to_mempool(tx, hash, true).await { - if let BlockchainError::TxAlreadyInMempool(_) = e { - debug!("TX is already in mempool finally, another peer was faster"); - } else { - error!("Error while adding new requested tx to mempool: {}", e); - peer.increment_fail_count(); - } - } - } - }); + self.queued_fetcher.fetch(Arc::clone(peer), ObjectRequest::Transaction(hash)); } }, Packet::BlockPropagation(packet_wrapper) => { @@ -767,7 +756,7 @@ impl P2pServer { // because we track peerlist of each peers, we can try to determinate it { // semaphore allows to prevent any deadlock because of loop lock - let _permit = self.propagation_semaphore.acquire().await?; + let _permit = self.block_propagation_semaphore.acquire().await?; let peer_list = self.peer_list.read().await; let peer_peers = peer.get_peers(false).lock().await; // iterate over all peers of this peer broadcaster @@ -1104,7 +1093,7 @@ impl P2pServer { let zelf = Arc::clone(&self); tokio::spawn(async move { for hash in missing_txs { - let response = match zelf.object_tracker.request_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash)).await { + let response = match zelf.object_tracker.fetch_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash)).await { Ok(response) => response, Err(e) => { error!("Error while requesting TX to {} using ObjectTracker: {}", peer, e); @@ -1464,16 +1453,11 @@ impl P2pServer { &self.peer_list } - pub 
async fn broadcast_tx_hash(&self, tx: &Hash) { - if let Err(e) = self.broadcast_semaphore.acquire().await { - error!("Error while acquiring semaphore: {}", e); - return; - } - - debug!("Broadcasting tx hash {}", tx); - let ping = self.build_generic_ping_packet().await; + pub async fn broadcast_tx_hash(&self, storage: &S, tx: Hash) { + info!("Broadcasting tx hash {}", tx); + let ping = self.build_generic_ping_packet_with_storage(storage).await; let current_topoheight = ping.get_topoheight(); - let packet = Packet::TransactionPropagation(PacketWrapper::new(Cow::Borrowed(tx), Cow::Owned(ping))); + let packet = Packet::TransactionPropagation(PacketWrapper::new(Cow::Borrowed(&tx), Cow::Owned(ping))); // transform packet to bytes (so we don't need to transform it for each peer) let bytes = Bytes::from(packet.to_bytes()); let peer_list = self.peer_list.read().await; @@ -1484,7 +1468,7 @@ impl P2pServer { trace!("Peer {} is not too far from us, checking cache for tx hash {}", peer, tx); let mut txs_cache = peer.get_txs_cache().lock().await; // check that we didn't already send this tx to this peer or that he don't already have it - if !txs_cache.contains(tx) { + if !txs_cache.contains(&tx) { trace!("Broadcasting tx hash {} to {}", tx, peer); if let Err(e) = peer.send_bytes(bytes.clone()).await { error!("Error while broadcasting tx hash {} to {}: {}", tx, peer, e); diff --git a/xelis_daemon/src/p2p/queue.rs b/xelis_daemon/src/p2p/queue.rs new file mode 100644 index 00000000..157d5244 --- /dev/null +++ b/xelis_daemon/src/p2p/queue.rs @@ -0,0 +1,48 @@ +use std::sync::Arc; +use log::{error, debug}; +use tokio::sync::mpsc::{UnboundedSender, unbounded_channel}; +use crate::core::{blockchain::Blockchain, storage::Storage}; +use super::{peer::Peer, packet::object::{ObjectRequest, OwnedObjectResponse}, tracker::SharedObjectTracker}; + +pub struct QueuedFetcher { + sender: UnboundedSender<(Arc, ObjectRequest)> +} + +impl QueuedFetcher { + pub fn new(blockchain: Arc>, tracker: 
SharedObjectTracker) -> Self { + let (sender, mut receiver) = unbounded_channel(); + let fetcher = Self { + sender + }; + + tokio::spawn(async move { + while let Some((peer, request)) = receiver.recv().await { + match tracker.fetch_object_from_peer(peer.clone(), request).await { + Ok(response) => { + if let OwnedObjectResponse::Transaction(tx, hash) = response { + debug!("Adding {} to mempool from {}", hash, peer); + if let Err(e) = blockchain.add_tx_to_mempool(tx, true).await { + error!("Error while adding tx {} to mempool: {}", hash, e); + peer.increment_fail_count(); + } + } else { + error!("Received non tx object from peer"); + peer.increment_fail_count(); + } + }, + Err(e) => { + error!("Error while fetching object from peer: {}", e); + } + }; + } + }); + + fetcher + } + + pub fn fetch(&self, peer: Arc, request: ObjectRequest) { + if let Err(e) = self.sender.send((peer, request)) { + error!("Error while sending get_data to fetcher: {}", e); + } + } +} \ No newline at end of file diff --git a/xelis_daemon/src/p2p/tracker.rs b/xelis_daemon/src/p2p/tracker.rs index 6cbab342..333758fd 100644 --- a/xelis_daemon/src/p2p/tracker.rs +++ b/xelis_daemon/src/p2p/tracker.rs @@ -7,6 +7,7 @@ use log::{error, debug}; use super::{packet::{object::{ObjectRequest, OwnedObjectResponse}, Packet}, error::P2pError, peer::Peer}; +pub type WaiterResponse = oneshot::Receiver>; pub type SharedObjectTracker = Arc; // this ObjectTracker is a unique sender allows to create a queue system in one task only @@ -104,11 +105,14 @@ impl ObjectTracker { Ok(()) } - pub async fn request_object_from_peer(&self, peer: Arc, request: ObjectRequest) -> Result { + pub fn request_object_from_peer(&self, peer: Arc, request: ObjectRequest) -> Result { let (sender, receiver) = oneshot::channel(); self.request_sender.send(Message::Request(peer, request, sender))?; + Ok(receiver) + } - Ok(receiver.await??) 
+ pub async fn fetch_object_from_peer(&self, peer: Arc, request: ObjectRequest) -> Result { + Ok(self.request_object_from_peer(peer, request)?.await??) } async fn request_object_from_peer_internal(&self, peer: &Peer, request: ObjectRequest) -> Result<(), P2pError> { From 557293585371fa53ed2cd10df17fd45b6a8a8d43 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 1 Oct 2023 01:55:30 +0200 Subject: [PATCH 031/160] daemon: add 'get_accounts' RPC method --- API.md | 40 ++++++++++++++++++++++++++++- xelis_common/src/api/daemon.rs | 8 ++++++ xelis_daemon/src/rpc/rpc.rs | 46 +++++++++++++++++++++++++++++++++- 3 files changed, 92 insertions(+), 2 deletions(-) diff --git a/API.md b/API.md index f096ff45..02652b0c 100644 --- a/API.md +++ b/API.md @@ -1016,7 +1016,6 @@ Retrieve a specific range of blocks (up to 20 maximum) based on topoheight } ``` - #### Get Blocks Range By Height Retrieve a specific range of blocks (up to 20 maximum) based on height @@ -1110,4 +1109,43 @@ Retrieve a specific range of blocks (up to 20 maximum) based on height } ] } +``` + +#### Get Accounts +Retrieve a list of available accounts (each account returned had at least one interaction on-chain) + +##### Method `get_accounts` + +##### Parameters +| Name | Type | Required | Note | +|:------------------:|:-------:|:--------:|:-------------------------------------------------:| +| skip | Integer | Optional | How many accounts to skip | +| maximum | Integer | Optional | Maximum accounts to fetch (limited to 100) | +| minimum_topoheight | Integer | Optional | Minimum topoheight for first on-chain interaction | +| maximum_topoheight | Integer | Optional | Maximum topoheight for first on-chain interaction | + +##### Request +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "get_accounts", + "params": {} +} +``` + +##### Response +```json +{ + "id": 1, + "jsonrpc": "2.0", + "result": [ + "xet1qqq9rrdy6s2zy4yavp59094jzlm66n33vy0datvv900yls8pugvyvmqn46pvl", + 
"xet1qqqgpk6n5klceg9gg9tcw0xa8r3e7zd3gc5mzv2v4m48knxd0y9wadg3mdp9t", + "xet1qqqvpwf9qprl6hzysg0zycm3y56ygys32wukxnl7yezqc7ydudy3azcxq6nwv", + "xet1qqqvltq9dsmvdsvapr6y0742sv477766g9vpvp2expe5v7x7fadvftc9h2vyw", + "xet1qqqd9ur03xahtts6q00t8z8ya2gxm39qx43ljz32vmv8p7j9ccxn6zccrfnxp", + "xet1qqqd2jtz9f2u3z6uznpx8mqdkh6llt3yn3eg3a5tpsfn8jcsthufg5qmwwl2j" + ] +} ``` \ No newline at end of file diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index e5d6f8a4..85426325 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -190,6 +190,14 @@ pub struct GetAssetsParams { pub maximum_topoheight: Option } +#[derive(Serialize, Deserialize)] +pub struct GetAccountsParams { + pub skip: Option, + pub maximum: Option, + pub minimum_topoheight: Option, + pub maximum_topoheight: Option +} + #[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub enum NotifyEvent { // When a new block is accepted by chain diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 18654208..2f9ee53a 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -17,7 +17,7 @@ use xelis_common::{ GetTransactionParams, P2pStatusResult, GetBlocksAtHeightParams, - GetTopoHeightRangeParams, GetBalanceAtTopoHeightParams, GetLastBalanceResult, GetInfoResult, GetTopBlockParams, GetTransactionsParams, TransactionResponse, GetHeightRangeParams, GetNonceResult, GetAssetsParams + GetTopoHeightRangeParams, GetBalanceAtTopoHeightParams, GetLastBalanceResult, GetInfoResult, GetTopBlockParams, GetTransactionsParams, TransactionResponse, GetHeightRangeParams, GetNonceResult, GetAssetsParams, GetAccountsParams }, DataHash}, async_handler, serializer::Serializer, @@ -135,6 +135,7 @@ pub fn register_methods(handler: &mut RPCHandler>> handler.register_method("get_blocks_range_by_topoheight", async_handler!(get_blocks_range_by_topoheight)); handler.register_method("get_blocks_range_by_height", 
async_handler!(get_blocks_range_by_height)); handler.register_method("get_transactions", async_handler!(get_transactions)); + handler.register_method("get_accounts", async_handler!(get_accounts)); } async fn version(_: Arc>, body: Value) -> Result { @@ -525,4 +526,47 @@ async fn get_transactions(blockchain: Arc>, body: Valu } Ok(json!(transactions)) +} + +const MAX_ACCOUNTS: usize = 100; +// retrieve all available accounts (each account got at least one interaction on chain) +async fn get_accounts(blockchain: Arc>, body: Value) -> Result { + let params: GetAccountsParams = parse_params(body)?; + + let topoheight = blockchain.get_topo_height(); + let maximum = if let Some(maximum) = params.maximum { + if maximum > MAX_ACCOUNTS { + return Err(InternalRpcError::InvalidRequest).context(format!("Maximum accounts requested cannot be greater than {}", MAX_ACCOUNTS))? + } + maximum + } else { + MAX_ACCOUNTS + }; + let skip = params.skip.unwrap_or(0); + let minimum_topoheight = if let Some(minimum) = params.minimum_topoheight { + if minimum > topoheight { + return Err(InternalRpcError::InvalidRequest).context(format!("Minimum topoheight requested cannot be greater than {}", topoheight))? + } + + minimum + } else { + 0 + }; + let maximum_topoheight = if let Some(maximum) = params.maximum_topoheight { + if maximum > topoheight { + return Err(InternalRpcError::InvalidRequest).context(format!("Maximum topoheight requested cannot be greater than {}", topoheight))? + } + + if maximum < minimum_topoheight { + return Err(InternalRpcError::InvalidRequest).context(format!("Maximum topoheight requested must be greater or equal to {}", minimum_topoheight))? 
+ } + maximum + } else { + topoheight + }; + + let storage = blockchain.get_storage().read().await; + let accounts = storage.get_partial_keys(maximum, skip, minimum_topoheight, maximum_topoheight).await.context("Error while retrieving accounts")?; + + Ok(json!(accounts)) } \ No newline at end of file From 873fccca94c8a3c90f31c23b7130cdab050e815c Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 1 Oct 2023 12:44:50 +0200 Subject: [PATCH 032/160] daemon: longer wait time between connections try --- xelis_daemon/src/p2p/peer_list.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/xelis_daemon/src/p2p/peer_list.rs b/xelis_daemon/src/p2p/peer_list.rs index 4147c1aa..7715e480 100644 --- a/xelis_daemon/src/p2p/peer_list.rs +++ b/xelis_daemon/src/p2p/peer_list.rs @@ -282,7 +282,7 @@ impl PeerList { // we check that we're not already connected to this peer and that we didn't tried to connect to it recently fn find_peer_to_connect_to_with_state(&mut self, current_time: u64, state: StoredPeerState) -> Option { for (addr, stored_peer) in &mut self.stored_peers { - if *stored_peer.get_state() == state && stored_peer.get_last_connection_try() + P2P_EXTEND_PEERLIST_DELAY <= current_time && Self::internal_get_peer_by_addr(&self.peers, addr).is_none() { + if *stored_peer.get_state() == state && stored_peer.get_last_connection_try() + (stored_peer.get_fail_count() as u64 * P2P_EXTEND_PEERLIST_DELAY) <= current_time && Self::internal_get_peer_by_addr(&self.peers, addr).is_none() { stored_peer.set_last_connection_try(current_time); return Some(addr.clone()); } @@ -294,6 +294,11 @@ impl PeerList { // increase the fail count of a peer pub fn increase_fail_count_for_saved_peer(&mut self, addr: &SocketAddr) { if let Some(stored_peer) = self.stored_peers.get_mut(addr) { + let fail_count = stored_peer.get_fail_count(); + if fail_count == u8::MAX { + // we reached the max value, we can't increase it anymore + return; + } 
stored_peer.set_fail_count(stored_peer.get_fail_count() + 1); } } From 4c24e90b4296fcf06e59b32600e7c5df6da304e9 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 1 Oct 2023 12:49:38 +0200 Subject: [PATCH 033/160] daemon: improve mempool, update API.md --- API.md | 1 + xelis_daemon/src/core/blockchain.rs | 83 ++++++++++++----------------- xelis_daemon/src/core/mempool.rs | 51 ++++-------------- 3 files changed, 45 insertions(+), 90 deletions(-) diff --git a/API.md b/API.md index 02652b0c..58e198dc 100644 --- a/API.md +++ b/API.md @@ -1113,6 +1113,7 @@ Retrieve a specific range of blocks (up to 20 maximum) based on height #### Get Accounts Retrieve a list of available accounts (each account returned had at least one interaction on-chain) +The topoheight range in parameters search for all accounts having a on-chain interaction in this inclusive range. ##### Method `get_accounts` diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index f3a107f7..a92308cd 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -3,7 +3,7 @@ use lru::LruCache; use serde_json::{Value, json}; use xelis_common::{ config::{DEFAULT_P2P_BIND_ADDRESS, P2P_DEFAULT_MAX_PEERS, DEFAULT_RPC_BIND_ADDRESS, DEFAULT_CACHE_SIZE, MAX_BLOCK_SIZE, EMISSION_SPEED_FACTOR, MAX_SUPPLY, DEV_FEE_PERCENT, GENESIS_BLOCK, TIPS_LIMIT, TIMESTAMP_IN_FUTURE_LIMIT, STABLE_LIMIT, GENESIS_BLOCK_HASH, MINIMUM_DIFFICULTY, GENESIS_BLOCK_DIFFICULTY, XELIS_ASSET, SIDE_BLOCK_REWARD_PERCENT, DEV_PUBLIC_KEY, BLOCK_TIME, PRUNE_SAFETY_LIMIT, BLOCK_TIME_MILLIS}, - crypto::{key::PublicKey, hash::{Hashable, Hash}}, + crypto::{key::PublicKey, hash::{Hashable, Hash, HASH_SIZE}}, difficulty::{check_difficulty, calculate_difficulty}, transaction::{Transaction, TransactionType, EXTRA_DATA_LIMIT_SIZE}, utils::{get_current_timestamp, format_coin}, @@ -12,7 +12,7 @@ use xelis_common::{ serializer::Serializer, account::VersionedBalance, api::{daemon::{NotifyEvent, 
BlockOrderedEvent, TransactionExecutedEvent, BlockType, StableHeightChangedEvent}, DataHash}, network::Network }; use crate::{p2p::P2pServer, rpc::{rpc::{get_block_response_for_hash, get_block_type_for_block}, DaemonRpcServer, SharedDaemonRpcServer}}; -use super::{storage::{Storage, DifficultyProvider}, mempool::SortedTx}; +use super::storage::{Storage, DifficultyProvider}; use std::{sync::atomic::{Ordering, AtomicU64}, collections::hash_map::Entry, time::{Duration, Instant}, borrow::Cow}; use std::collections::{HashMap, HashSet}; use async_recursion::async_recursion; @@ -1012,59 +1012,44 @@ impl Blockchain { let mut block = BlockHeader::new(self.get_version_at_height(height), height, get_current_timestamp(), sorted_tips, extra_nonce, address, Vec::new()); let mempool = self.mempool.read().await; - let txs = mempool.get_sorted_txs(); + + // get all availables txs and sort them by fee per size + let mut txs = mempool.get_txs() + .iter() + .map(|(hash, tx)| (tx.get_fee(), tx.get_size(), hash, tx.get_tx())) + .collect::>(); + txs.sort_by(|(a_fee, a_size, _, a_tx), (b_fee, b_size, _, b_tx)| { + // If its the same sender, check the nonce + if a_tx.get_owner() == b_tx.get_owner() { + // Increasing nonces (lower first) + return a_tx.get_nonce().cmp(&b_tx.get_nonce()) + } + + let a = a_fee * *a_size as u64; + let b = b_fee * *b_size as u64; + // Decreasing fees (higher first) + b.cmp(&a) + }); + let mut total_txs_size = 0; let mut nonces: HashMap<&PublicKey, u64> = HashMap::new(); - - // txs are sorted in descending order thanks to Reverse + let mut block_size = block.size(); { let mut balances = HashMap::new(); - 'main: for (fee, hashes) in txs { - let mut transactions: Vec<(&Arc, &SortedTx)> = Vec::with_capacity(hashes.len()); - // prepare TXs by sorting them by nonce - // only txs from same owner who have same fees or decreasing fees with increasing nonce will have - // all its txs in the same block - // maybe we can improve this to support all levels of fees - for hash 
in hashes { - let tx = mempool.get_sorted_tx(hash)?; - transactions.push((hash, tx)); + 'main: for (fee, size, hash, tx) in txs { + if block_size + total_txs_size + size >= MAX_BLOCK_SIZE { + break 'main; } - transactions.sort_by(|(_, a), (_, b)| a.get_tx().get_nonce().cmp(&b.get_tx().get_nonce())); - - for (hash, sorted_tx) in transactions { - if block.size() + total_txs_size + sorted_tx.get_size() >= MAX_BLOCK_SIZE { - break 'main; - } - let transaction = sorted_tx.get_tx(); - let account_nonce = if let Some(nonce) = nonces.get(transaction.get_owner()) { - *nonce - } else if storage.has_nonce(transaction.get_owner()).await? { - let (_, version) = storage.get_last_nonce(transaction.get_owner()).await?; - nonces.insert(transaction.get_owner(), version.get_nonce()); - version.get_nonce() - } else { // This sender has no nonce, we start at 0 - nonces.insert(transaction.get_owner(), 0); - 0 - }; - - if account_nonce < transaction.get_nonce() { - trace!("Skipping {} with {} fees because another TX should be selected first due to nonce", hash, format_coin(fee.0)); - } else if account_nonce == transaction.get_nonce() { - // Check if it can be added to the block - if self.verify_transaction_with_hash(&storage, sorted_tx.get_tx(), hash, &mut balances, None, true).await.is_ok() { - trace!("Selected {} (nonce: {}, account nonce: {}, fees: {}) for mining", hash, transaction.get_nonce(), account_nonce, format_coin(fee.0)); - // TODO no clone - block.txs_hashes.push(hash.as_ref().clone()); - total_txs_size += sorted_tx.get_size(); - // we use unwrap because above we insert it - *nonces.get_mut(transaction.get_owner()).unwrap() += 1; - } else { - warn!("This TX {} is not suitable for this block (nonce: {}, account nonce: {}, fees: {})", hash, transaction.get_nonce(), account_nonce, format_coin(fee.0)); - } - } else { - warn!("This TX in mempool {} is in advance (nonce: {}, account nonce: {}, fees: {}), it should be removed from mempool", hash, transaction.get_nonce(), 
account_nonce, format_coin(fee.0)); - } + // Check if the TX is valid for this potential block + if let Err(e) = self.verify_transaction_with_hash(&storage, tx, hash, &mut balances, Some(&mut nonces), false).await { + warn!("TX {} is not valid for mining: {}", hash, e); + } else { + trace!("Selected {} (nonce: {}, fees: {}) for mining", hash, tx.get_nonce(), format_coin(fee)); + // TODO no clone + block.txs_hashes.push(hash.as_ref().clone()); + block_size += HASH_SIZE; // add the hash size + total_txs_size += size; } } } diff --git a/xelis_daemon/src/core/mempool.rs b/xelis_daemon/src/core/mempool.rs index 94b76563..e9d5fee6 100644 --- a/xelis_daemon/src/core/mempool.rs +++ b/xelis_daemon/src/core/mempool.rs @@ -1,8 +1,7 @@ use super::error::BlockchainError; -use std::cmp::Reverse; -use std::collections::{HashMap, BTreeMap, HashSet}; +use std::collections::{HashMap, BTreeMap}; use std::sync::Arc; -use log::{trace, debug}; +use log::{trace, debug, warn}; use xelis_common::utils::get_current_time; use xelis_common::{ crypto::{ @@ -33,18 +32,14 @@ pub struct Mempool { // store all txs waiting to be included in a block txs: HashMap, SortedTx>, // store all sender's nonce for faster finding - nonces_cache: HashMap, - // binary tree map for sorted txs hash by fees - // keys represents fees, while value represents all txs hash - sorted_txs: BTreeMap, HashSet>>, + nonces_cache: HashMap } impl Mempool { pub fn new() -> Self { Mempool { txs: HashMap::new(), - nonces_cache: HashMap::new(), - sorted_txs: BTreeMap::new() + nonces_cache: HashMap::new() } } @@ -62,7 +57,9 @@ impl Mempool { if let Some(tx_hash) = cache.txs.remove(&nonce) { trace!("TX {} with same nonce found in cache, removing it from sorted txs", tx_hash); // remove the tx hash from sorted txs - Self::delete_tx(&mut self.txs, &mut self.sorted_txs, tx_hash); + if self.txs.remove(&tx_hash).is_none() { + warn!("TX {} not found in mempool while deleting collision with {}", tx_hash, hash); + } } } @@ -86,10 +83,6 @@ 
impl Mempool { tx }; - let entry = self.sorted_txs.entry(Reverse(sorted_tx.get_fee())).or_insert_with(HashSet::new); - // add the tx hash in sorted txs - entry.insert(hash.clone()); - // insert in map self.txs.insert(hash, sorted_tx); @@ -126,10 +119,6 @@ impl Mempool { &self.txs } - pub fn get_sorted_txs(&self) -> &BTreeMap, HashSet>> { - &self.sorted_txs - } - pub fn get_cached_nonce(&self, key: &PublicKey) -> Option<&NonceCache> { self.nonces_cache.get(key) } @@ -140,7 +129,6 @@ impl Mempool { pub fn clear(&mut self) { self.txs.clear(); - self.sorted_txs.clear(); self.nonces_cache.clear(); } @@ -177,7 +165,9 @@ impl Mempool { // now delete all necessary txs for hash in hashes { - Self::delete_tx(&mut self.txs, &mut self.sorted_txs, hash); + if self.txs.remove(&hash).is_none() { + warn!("TX {} not found in mempool while deleting", hash); + } } } } @@ -188,27 +178,6 @@ impl Mempool { } } } - - - fn delete_tx(txs: &mut HashMap, SortedTx>, sorted_txs: &mut BTreeMap, HashSet>>, hash: Arc) { - trace!("Trying to delete {}", hash); - if let Some(sorted_tx) = txs.remove(&hash) { - trace!("Deleted from HashMap: {}", hash); - let fee_reverse = Reverse(sorted_tx.get_fee()); - let mut is_empty = false; - if let Some(hashes) = sorted_txs.get_mut(&fee_reverse) { - trace!("Removing tx hash {} for fee entry {}", hash, fee_reverse.0); - hashes.remove(&hash); - is_empty = hashes.is_empty(); - } - - // don't keep empty data - if is_empty { - trace!("Removing empty fee ({}) entry", fee_reverse.0); - sorted_txs.remove(&fee_reverse); - } - } - } } impl SortedTx { From 88757521f2d5f367f34fa10be78181ef60192f46 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 1 Oct 2023 12:51:04 +0200 Subject: [PATCH 034/160] update API.md --- API.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/API.md b/API.md index 58e198dc..3a1e8bde 100644 --- a/API.md +++ b/API.md @@ -764,7 +764,7 @@ NOTE: result returned in `data` field can changes based on the TransactionType ( ], 
"executed_in_block": "0000073b071e04ce4e79b095f3c44f4aefb65f4e70f8a5591c986cb4b688d692", "data": { - "Transfer": [ + "transfers": [ { "amount": 15000, "asset": "0000000000000000000000000000000000000000000000000000000000000000", From 4202f37235aa779108aec6833b1784c2583188e9 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 1 Oct 2023 13:18:57 +0200 Subject: [PATCH 035/160] update API.md --- API.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/API.md b/API.md index 3a1e8bde..64d28989 100644 --- a/API.md +++ b/API.md @@ -811,7 +811,7 @@ No parameters "blocks": null, "executed_in_block": null, "data": { - "Transfer": [ + "transfers": [ { "amount": 1500, "asset": "0000000000000000000000000000000000000000000000000000000000000000", @@ -869,7 +869,7 @@ Fetch transactions by theirs hashes from database and mempool of daemon and keep ], "executed_in_block": "0000073b071e04ce4e79b095f3c44f4aefb65f4e70f8a5591c986cb4b688d692", "data": { - "Transfer": [ + "transfers": [ { "amount": 15000, "asset": "0000000000000000000000000000000000000000000000000000000000000000", From a456096e8688efcc5733a808e604ed44e8e52db9 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 1 Oct 2023 13:34:21 +0200 Subject: [PATCH 036/160] daemon: optimize mempool --- xelis_daemon/src/core/blockchain.rs | 5 +++- xelis_daemon/src/core/mempool.rs | 44 +++++++++++++++++++++-------- xelis_daemon/src/p2p/mod.rs | 2 +- 3 files changed, 37 insertions(+), 14 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index a92308cd..92e9208b 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -926,7 +926,7 @@ impl Blockchain { } // compute balances of previous pending TXs - let txs_hashes = cache.get_txs().values(); + let txs_hashes = cache.get_txs(); let mut owner_txs = Vec::with_capacity(txs_hashes.len()); for hash in txs_hashes { let tx = mempool.get_tx(hash)?; @@ -936,9 +936,12 @@ impl Blockchain { // we need to 
do it in two times because of the constraint of lifetime on &tx let mut balances = HashMap::new(); let mut nonces = HashMap::new(); + // compute balances and nonces for tx in &owner_txs { self.verify_transaction_with_hash(storage, tx, &hash, &mut balances, Some(&mut nonces), false).await?; } + // Verify original TX + self.verify_transaction_with_hash(storage, &tx, &hash, &mut balances, Some(&mut nonces), false).await?; } else { let mut balances = HashMap::new(); self.verify_transaction_with_hash(&storage, &tx, &hash, &mut balances, None, false).await?; diff --git a/xelis_daemon/src/core/mempool.rs b/xelis_daemon/src/core/mempool.rs index e9d5fee6..9faf9594 100644 --- a/xelis_daemon/src/core/mempool.rs +++ b/xelis_daemon/src/core/mempool.rs @@ -1,6 +1,7 @@ use super::error::BlockchainError; -use std::collections::{HashMap, BTreeMap}; +use std::collections::HashMap; use std::sync::Arc; +use indexmap::IndexSet; use log::{trace, debug, warn}; use xelis_common::utils::get_current_time; use xelis_common::{ @@ -24,7 +25,7 @@ pub struct NonceCache { min: u64, max: u64, // all txs for this user ordered by nonce - txs: BTreeMap>, + txs: IndexSet>, } #[derive(serde::Serialize)] @@ -48,25 +49,34 @@ impl Mempool { let hash = Arc::new(hash); let nonce = tx.get_nonce(); // update the cache for this owner + let mut must_update = true; if let Some(cache) = self.nonces_cache.get_mut(tx.get_owner()) { // delete the TX if its in the range of already tracked nonces trace!("Cache found for owner {} with nonce range {}-{}, nonce = {}", tx.get_owner(), cache.get_min(), cache.get_max(), nonce); if nonce >= cache.get_min() && nonce <= cache.get_max() { trace!("nonce {} is in range {}-{}", nonce, cache.get_min(), cache.get_max()); // because it's based on order and we may have the same order - if let Some(tx_hash) = cache.txs.remove(&nonce) { + let index = ((nonce - cache.get_min()) % (cache.get_max() - cache.get_min())) as usize; + cache.txs.insert(hash.clone()); + must_update = false; + + 
if let Some(tx_hash) = cache.txs.swap_remove_index(index) { trace!("TX {} with same nonce found in cache, removing it from sorted txs", tx_hash); // remove the tx hash from sorted txs if self.txs.remove(&tx_hash).is_none() { warn!("TX {} not found in mempool while deleting collision with {}", tx_hash, hash); } + } else { + warn!("No TX found in cache for nonce {} while adding {}", nonce, hash); } } - cache.update(nonce, hash.clone()); + if must_update { + cache.update(nonce, hash.clone()); + } } else { - let mut txs = BTreeMap::new(); - txs.insert(nonce, hash.clone()); + let mut txs = IndexSet::new(); + txs.insert(hash.clone()); // init the cache let cache = NonceCache { @@ -152,10 +162,15 @@ impl Mempool { // filter all txs hashes which are not found // or where its nonce is smaller than the new nonce // TODO when drain_filter is stable, use it (allow to get all hashes deleted) - cache.txs.retain(|tx_nonce, tx| { - let delete = *tx_nonce < nonce; + cache.txs.retain(|hash| { + let delete = if let Some(tx) = self.txs.get(hash) { + tx.get_tx().get_nonce() < nonce + } else { + true + }; + if delete { - hashes.push(Arc::clone(tx)); + hashes.push(Arc::clone(hash)); } !delete }); @@ -211,13 +226,13 @@ impl NonceCache { self.max } - pub fn get_txs(&self) -> &BTreeMap> { + pub fn get_txs(&self) -> &IndexSet> { &self.txs } fn update(&mut self, nonce: u64, hash: Arc) { self.update_nonce_range(nonce); - self.txs.insert(nonce, hash); + self.txs.insert(hash); } fn update_nonce_range(&mut self, nonce: u64) { @@ -233,6 +248,11 @@ impl NonceCache { } pub fn has_tx_with_same_nonce(&self, nonce: u64) -> Option<&Arc> { - self.txs.get(&nonce) + if nonce < self.min || nonce > self.max { + return None; + } + + let index = ((nonce - self.min) % (self.max - self.min)) as usize; + self.txs.get_index(index) } } \ No newline at end of file diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 8ff6ba12..6ddb224e 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ 
b/xelis_daemon/src/p2p/mod.rs @@ -1029,7 +1029,7 @@ impl P2pServer { let mempool = self.blockchain.get_mempool().read().await; let nonces_cache = mempool.get_nonces_cache(); let all_txs = nonces_cache.values() - .flat_map(|v| v.get_txs().values()) + .flat_map(|v| v.get_txs()) .skip(skip).take(NOTIFY_MAX_LEN) .map(|tx| Cow::Borrowed(tx.as_ref())) .collect::>(); From 7e5a7c7a719108692c30741109b8bd8631397faa Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 1 Oct 2023 13:49:08 +0200 Subject: [PATCH 037/160] daemon: optimize mempool --- xelis_daemon/src/core/blockchain.rs | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 92e9208b..2488331c 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -924,23 +924,14 @@ impl Blockchain { debug!("TX {} nonce is not in the range of the pending TXs for this owner, received: {}, expected between {} and {}", hash, tx.get_nonce(), cache.get_min(), cache.get_max()); return Err(BlockchainError::InvalidTxNonceMempoolCache) } - - // compute balances of previous pending TXs - let txs_hashes = cache.get_txs(); - let mut owner_txs = Vec::with_capacity(txs_hashes.len()); - for hash in txs_hashes { - let tx = mempool.get_tx(hash)?; - owner_txs.push(tx); - } - // we need to do it in two times because of the constraint of lifetime on &tx let mut balances = HashMap::new(); let mut nonces = HashMap::new(); - // compute balances and nonces - for tx in &owner_txs { - self.verify_transaction_with_hash(storage, tx, &hash, &mut balances, Some(&mut nonces), false).await?; - } + // because we already verified the range of nonce + nonces.insert(tx.get_owner(), tx.get_nonce()); + // Verify original TX + // We may have double spending in balances, but it is ok because miner check that all txs included are valid self.verify_transaction_with_hash(storage, &tx, &hash, &mut balances, Some(&mut nonces), 
false).await?; } else { let mut balances = HashMap::new(); @@ -1873,7 +1864,7 @@ impl Blockchain { } // we increment it in case any new tx for same owner is following *nonce += 1; - } else { + } else { // We don't have any cache, compute using chain data // it is possible that a miner has balance but no nonces, so we need to check it let nonce = if storage.has_nonce(tx.get_owner()).await? { let (_, version) = storage.get_last_nonce(tx.get_owner()).await?; From 31a4a4a508bf7be2b1ae791a7ada412ea46d2bcc Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 2 Oct 2023 00:01:17 +0200 Subject: [PATCH 038/160] daemon: find common point in fast sync in case of necessary rewind --- xelis_daemon/src/core/blockchain.rs | 6 +- xelis_daemon/src/main.rs | 2 +- xelis_daemon/src/p2p/mod.rs | 218 ++++++++++-------- .../src/p2p/packet/bootstrap_chain.rs | 40 +++- xelis_daemon/src/p2p/packet/chain.rs | 27 +-- 5 files changed, 161 insertions(+), 132 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 2488331c..5e4386cc 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1691,17 +1691,17 @@ impl Blockchain { Ok(false) } - pub async fn rewind_chain(&self, count: usize) -> Result { + pub async fn rewind_chain(&self, count: u64) -> Result { let mut storage = self.storage.write().await; self.rewind_chain_for_storage(&mut storage, count).await } - pub async fn rewind_chain_for_storage(&self, storage: &mut S, count: usize) -> Result { + pub async fn rewind_chain_for_storage(&self, storage: &mut S, count: u64) -> Result { trace!("rewind chain with count = {}", count); let current_height = self.get_height(); let current_topoheight = self.get_topo_height(); warn!("Rewind chain with count = {}, height = {}, topoheight = {}", count, current_height, current_topoheight); - let (new_height, new_topoheight, txs) = storage.pop_blocks(current_height, current_topoheight, count as u64).await?; + let (new_height, 
new_topoheight, txs) = storage.pop_blocks(current_height, current_topoheight, count).await?; debug!("New topoheight: {} (diff: {})", new_topoheight, current_topoheight - new_topoheight); { diff --git a/xelis_daemon/src/main.rs b/xelis_daemon/src/main.rs index 55e6bc23..93b376fe 100644 --- a/xelis_daemon/src/main.rs +++ b/xelis_daemon/src/main.rs @@ -289,7 +289,7 @@ async fn pop_blocks(manager: &CommandManager>>, mu } info!("Trying to pop {} blocks from chain...", amount); - let topoheight = blockchain.rewind_chain(amount as usize).await.context("Error while rewinding chain")?; + let topoheight = blockchain.rewind_chain(amount).await.context("Error while rewinding chain")?; info!("Chain as been rewinded until topoheight {}", topoheight); Ok(()) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 6ddb224e..740308ca 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1172,6 +1172,22 @@ impl P2pServer { Ok(()) } + async fn find_common_point(&self, storage: &S, blocks: Vec) -> Result, BlockchainError> { + for block_id in blocks { // search a common point + trace!("Searching common point for block {} at topoheight {}", block_id.get_hash(), block_id.get_topoheight()); + if storage.has_block(block_id.get_hash()).await? { + let (hash, topoheight) = block_id.consume(); + trace!("Block {} is common, expected topoheight: {}", hash, topoheight); + // check that the block is ordered like us + if storage.is_block_topological_ordered(&hash).await && storage.get_topo_height_for_hash(&hash).await? 
== topoheight { // common point + debug!("common point found at block {} with same topoheight at {}", hash, topoheight); + return Ok(Some(CommonPoint::new(hash, topoheight))) + } + } + } + Ok(None) + } + // search a common point between our blockchain and the peer's one // when the common point is found, start sending blocks from this point async fn handle_chain_request(self: &Arc, peer: &Arc, blocks: Vec) -> Result<(), BlockchainError> { @@ -1181,59 +1197,48 @@ impl P2pServer { let mut response_blocks = Vec::new(); let mut top_blocks = Vec::new(); // common point used to notify peer if he should rewind or not - let mut common_point = None; - for block_id in blocks { // search a common point - trace!("Searching common point for block {} at topoheight {}", block_id.get_hash(), block_id.get_topoheight()); - if storage.has_block(block_id.get_hash()).await? { - let (hash, mut topoheight) = block_id.consume(); - trace!("Block {} is common, expected topoheight from {}: {}", hash, peer, topoheight); - // check that the block is ordered like us - if storage.is_block_topological_ordered(&hash).await && storage.get_topo_height_for_hash(&hash).await? 
== topoheight { // common point - debug!("common point with {} found at block {} with same topoheight at {}", peer, hash, topoheight); - common_point = Some(CommonPoint::new(hash, topoheight)); - - // lets add all blocks ordered hash - let top_topoheight = self.blockchain.get_topo_height(); - let stable_height = self.blockchain.get_stable_height(); - // used to detect if we find unstable height for alt tips - let mut potential_unstable_height = None; - // check to see if we should search for alt tips (and above unstable height) - let should_search_alt_tips = top_topoheight - topoheight < CHAIN_SYNC_RESPONSE_MAX_BLOCKS as u64; - - // complete ChainResponse blocks until we are full or that we reach the top topheight - while response_blocks.len() < CHAIN_SYNC_RESPONSE_MAX_BLOCKS && topoheight <= top_topoheight { - trace!("looking for hash at topoheight {}", topoheight); - let hash = storage.get_hash_at_topo_height(topoheight).await?; - if should_search_alt_tips && potential_unstable_height.is_none() { - let height = storage.get_height_for_block_hash(&hash).await?; - if height >= stable_height { - debug!("Found unstable height at {}", height); - potential_unstable_height = Some(height); - } - } - trace!("for chain request, adding hash {} at topoheight {}", hash, topoheight); - response_blocks.push(hash); - topoheight += 1; + let common_point = self.find_common_point(&*storage, blocks).await?; + if let Some(common_point) = &common_point { + let mut topoheight = common_point.get_topoheight(); + // lets add all blocks ordered hash + let top_topoheight = self.blockchain.get_topo_height(); + let stable_height = self.blockchain.get_stable_height(); + // used to detect if we find unstable height for alt tips + let mut potential_unstable_height = None; + // check to see if we should search for alt tips (and above unstable height) + let should_search_alt_tips = top_topoheight - topoheight < CHAIN_SYNC_RESPONSE_MAX_BLOCKS as u64; + + // complete ChainResponse blocks until we are 
full or that we reach the top topheight + while response_blocks.len() < CHAIN_SYNC_RESPONSE_MAX_BLOCKS && topoheight <= top_topoheight { + trace!("looking for hash at topoheight {}", topoheight); + let hash = storage.get_hash_at_topo_height(topoheight).await?; + if should_search_alt_tips && potential_unstable_height.is_none() { + let height = storage.get_height_for_block_hash(&hash).await?; + if height >= stable_height { + debug!("Found unstable height at {}", height); + potential_unstable_height = Some(height); } + } + trace!("for chain request, adding hash {} at topoheight {}", hash, topoheight); + response_blocks.push(hash); + topoheight += 1; + } - // now, lets check if peer is near to be synced, and send him alt tips blocks - if let Some(mut height) = potential_unstable_height { - let top_height = self.blockchain.get_height(); - trace!("unstable height: {}, top height: {}", height, top_height); - while height <= top_height && top_blocks.len() < CHAIN_SYNC_TOP_BLOCKS { - trace!("get blocks at height {} for top blocks", height); - for hash in storage.get_blocks_at_height(height).await? { - if !response_blocks.contains(&hash) { - trace!("Adding top block at height {}: {}", height, hash); - top_blocks.push(hash); - } else { - trace!("Top block at height {}: {} was skipped because its already present in response blocks", height, hash); - } - } - height += 1; + // now, lets check if peer is near to be synced, and send him alt tips blocks + if let Some(mut height) = potential_unstable_height { + let top_height = self.blockchain.get_height(); + trace!("unstable height: {}, top height: {}", height, top_height); + while height <= top_height && top_blocks.len() < CHAIN_SYNC_TOP_BLOCKS { + trace!("get blocks at height {} for top blocks", height); + for hash in storage.get_blocks_at_height(height).await? 
{ + if !response_blocks.contains(&hash) { + trace!("Adding top block at height {}: {}", height, hash); + top_blocks.push(hash); + } else { + trace!("Top block at height {}: {} was skipped because its already present in response blocks", height, hash); } } - break; + height += 1; } } } @@ -1267,7 +1272,7 @@ impl P2pServer { // check that if we can trust him if peer.is_priority() { warn!("Rewinding chain without checking because {} is a priority node (pop count: {})", peer, pop_count); - self.blockchain.rewind_chain(pop_count as usize).await?; + self.blockchain.rewind_chain(pop_count).await?; } else { // request all blocks header and verify basic chain structure let mut chain_validator = ChainValidator::new(self.blockchain.clone()); @@ -1291,7 +1296,7 @@ impl P2pServer { } // peer chain looks correct, lets rewind our chain warn!("Rewinding chain because of {} (pop count: {})", peer, pop_count); - self.blockchain.rewind_chain(pop_count as usize).await?; + self.blockchain.rewind_chain(pop_count).await?; // now retrieve all txs from all blocks header and add block in chain for hash in chain_validator.get_order() { @@ -1548,11 +1553,12 @@ impl P2pServer { } let response = match request { - StepRequest::ChainInfo => { + StepRequest::ChainInfo(blocks) => { + let common_point = self.find_common_point(&*storage, blocks).await?; let tips = storage.get_tips().await?; let (hash, height) = self.blockchain.find_common_base(&storage, &tips).await?; let stable_topo = storage.get_topo_height_for_hash(&hash).await?; - StepResponse::ChainInfo(stable_topo, height, hash) + StepResponse::ChainInfo(common_point, stable_topo, height, hash) }, StepRequest::Assets(min, max, page) => { if min > max { @@ -1621,6 +1627,42 @@ impl P2pServer { Ok(()) } + async fn build_list_of_blocks_id(&self, storage: &S) -> Result, BlockchainError> { + let mut blocks = Vec::new(); + let topoheight = self.blockchain.get_topo_height(); + let pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); + let 
mut i = 0; + + // we add 1 for the genesis block added below + while i < topoheight && topoheight - i >= pruned_topoheight && blocks.len() + 1 < CHAIN_SYNC_REQUEST_MAX_BLOCKS { + trace!("Requesting hash at topo {} for ChainInfo", topoheight - i); + let hash = storage.get_hash_at_topo_height(topoheight - i).await?; + blocks.push(BlockId::new(hash, topoheight - i)); + match blocks.len() { + 0..=19 => { + i += 1; + }, + 20..=39 => { + i += 5; + } + 40..=59 => { + i += 50; + }, + 60..=79 => { + i += 500; + } + _ => { + i = i * 2; + } + }; + } + + // add genesis block + let genesis_block = storage.get_hash_at_topo_height(0).await?; + blocks.push(BlockId::new(genesis_block, 0)); + Ok(blocks) + } + // first, retrieve chain info of selected peer // We retrieve all assets through pagination, // then we fetch all keys with its nonces and its balances (also through pagination) @@ -1630,11 +1672,11 @@ impl P2pServer { async fn bootstrap_chain(&self, peer: &Arc) -> Result<(), BlockchainError> { debug!("Starting fast sync with {}", peer); - let our_topoheight = self.blockchain.get_topo_height(); + let mut our_topoheight = self.blockchain.get_topo_height(); let mut stable_topoheight = 0; let mut storage = self.blockchain.get_storage().write().await; - let mut step: Option = Some(StepRequest::ChainInfo); + let mut step: Option = Some(StepRequest::ChainInfo(self.build_list_of_blocks_id(&*storage).await?)); // keep them in memory, we add them when we're syncing // it's done to prevent any sync failure @@ -1650,7 +1692,28 @@ impl P2pServer { }; step = match response { - StepResponse::ChainInfo(topoheight, height, hash) => { + StepResponse::ChainInfo(common_point, topoheight, height, hash) => { + // first, check the common point in case we deviated from the chain + if let Some(common_point) = common_point { + debug!("Unverified common point found at {} with hash {}", common_point.get_topoheight(), common_point.get_hash()); + let hash_at_topo = 
storage.get_hash_at_topo_height(common_point.get_topoheight()).await?; + if hash_at_topo != *common_point.get_hash() { + warn!("Common point is {} while our hash at topoheight {} is {}. Aborting", common_point.get_hash(), common_point.get_topoheight(), storage.get_hash_at_topo_height(common_point.get_topoheight()).await?); + return Err(BlockchainError::Unknown) + } + + let top_block_hash = storage.get_top_block_hash().await?; + if *common_point.get_hash() != top_block_hash { + let deviation = our_topoheight - common_point.get_topoheight(); + warn!("Common point is {} while our top block hash is {} ! Deviation of {} topoheight blocks", common_point.get_hash(), top_block_hash, deviation); + our_topoheight = self.blockchain.rewind_chain_for_storage(&mut *storage, deviation).await?; + debug!("New topoheight after rewind is now {}", our_topoheight); + } + } else { + warn!("No common point with {} ! Not same chain ?", peer); + return Err(BlockchainError::Unknown) + } + top_topoheight = topoheight; top_height = height; top_block_hash = Some(hash); @@ -1789,44 +1852,11 @@ impl P2pServer { // its used to find a common point with the peer to which we ask the chain pub async fn request_sync_chain_for(&self, peer: &Arc) -> Result<(), BlockchainError> { debug!("Requesting chain from {}", peer); - let mut request = ChainRequest::new(); - { - let storage = self.blockchain.get_storage().read().await; - let topoheight = self.blockchain.get_topo_height(); - let pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); - let mut i = 0; - - // we add 1 for the genesis block added below - while i < topoheight && topoheight - i >= pruned_topoheight && request.size() + 1 < CHAIN_SYNC_REQUEST_MAX_BLOCKS { - trace!("Requesting hash at topo {}", topoheight - i); - let hash = storage.get_hash_at_topo_height(topoheight - i).await?; - request.add_block_id(hash, topoheight - i); - match request.size() { - 0..=19 => { - i += 1; - }, - 20..=39 => { - i += 5; - } - 40..=59 => { - i += 50; 
- }, - 60..=79 => { - i += 500; - } - _ => { - i = i * 2; - } - }; - } - - // add genesis block - let genesis_block = storage.get_hash_at_topo_height(0).await?; - request.add_block_id(genesis_block, 0); - trace!("Sending a chain request with {} blocks", request.size()); - peer.set_chain_sync_requested(true); - } - let ping = self.build_generic_ping_packet().await; + let storage = self.blockchain.get_storage().read().await; + let request = ChainRequest::new(self.build_list_of_blocks_id(&*storage).await?); + trace!("Sending a chain request with {} blocks", request.size()); + peer.set_chain_sync_requested(true); + let ping = self.build_generic_ping_packet_with_storage(&*storage).await; peer.send_packet(Packet::ChainRequest(PacketWrapper::new(Cow::Owned(request), Cow::Owned(ping)))).await?; Ok(()) } diff --git a/xelis_daemon/src/p2p/packet/bootstrap_chain.rs b/xelis_daemon/src/p2p/packet/bootstrap_chain.rs index fc041c7f..d3a0c794 100644 --- a/xelis_daemon/src/p2p/packet/bootstrap_chain.rs +++ b/xelis_daemon/src/p2p/packet/bootstrap_chain.rs @@ -1,7 +1,9 @@ use std::borrow::Cow; use indexmap::IndexSet; use log::debug; -use xelis_common::{crypto::{hash::Hash, key::PublicKey}, serializer::{Serializer, ReaderError, Reader, Writer}, block::Difficulty}; +use xelis_common::{crypto::{hash::Hash, key::PublicKey}, serializer::{Serializer, ReaderError, Reader, Writer}, block::Difficulty, config::CHAIN_SYNC_REQUEST_MAX_BLOCKS}; + +use super::chain::{BlockId, CommonPoint}; // this file implements the protocol for the fast sync (bootstrapped chain) // You will have to request through StepRequest::FetchAssets all the registered assets @@ -73,8 +75,8 @@ impl StepKind { #[derive(Debug)] pub enum StepRequest<'a> { - // Request chain info (topoheight, stable height, stable hash) - ChainInfo, + // Request chain info (top topoheight, top height, top hash) + ChainInfo(Vec), // Min topoheight, Max topoheight, Pagination Assets(u64, u64, Option), // Min topoheight, Max topoheight, Asset, 
pagination @@ -90,7 +92,7 @@ pub enum StepRequest<'a> { impl<'a> StepRequest<'a> { pub fn kind(&self) -> StepKind { match self { - Self::ChainInfo => StepKind::ChainInfo, + Self::ChainInfo(_) => StepKind::ChainInfo, Self::Assets(_, _, _) => StepKind::Assets, Self::Keys(_, _, _) => StepKind::Keys, Self::Balances(_, _, _) => StepKind::Balances, @@ -101,7 +103,7 @@ impl<'a> StepRequest<'a> { pub fn get_requested_topoheight(&self) -> Option { Some(*match self { - Self::ChainInfo => return None, + Self::ChainInfo(_) => return None, Self::Assets(_, topo, _) => topo, Self::Keys(_, topo, _) => topo, Self::Balances(topo, _, _) => topo, @@ -115,7 +117,17 @@ impl Serializer for StepRequest<'_> { fn read(reader: &mut Reader) -> Result { Ok(match reader.read_u8()? { 0 => { - Self::ChainInfo + let len = reader.read_u8()?; + if len == 0 || len > CHAIN_SYNC_REQUEST_MAX_BLOCKS as u8 { + debug!("Invalid chain info request length: {}", len); + return Err(ReaderError::InvalidValue) + } + + let mut blocks = Vec::with_capacity(len as usize); + for _ in 0..len { + blocks.push(BlockId::read(reader)?); + } + Self::ChainInfo(blocks) } 1 => { let min_topoheight = reader.read_u64()?; @@ -174,8 +186,12 @@ impl Serializer for StepRequest<'_> { fn write(&self, writer: &mut Writer) { match self { - Self::ChainInfo => { + Self::ChainInfo(blocks) => { writer.write_u8(0); + writer.write_u8(blocks.len() as u8); + for block_id in blocks { + block_id.write(writer); + } }, Self::Assets(min, max, page) => { writer.write_u8(1); @@ -210,7 +226,7 @@ impl Serializer for StepRequest<'_> { #[derive(Debug)] pub enum StepResponse { - ChainInfo(u64, u64, Hash), // topoheight of stable hash, stable height, stable hash + ChainInfo(Option, u64, u64, Hash), // common point, topoheight of stable hash, stable height, stable hash Assets(IndexSet, Option), // Set of assets, pagination Keys(IndexSet, Option), // Set of keys, pagination Balances(Vec>), // Balances requested @@ -221,7 +237,7 @@ pub enum StepResponse { impl 
StepResponse { pub fn kind(&self) -> StepKind { match self { - Self::ChainInfo(_, _, _) => StepKind::ChainInfo, + Self::ChainInfo(_, _, _, _) => StepKind::ChainInfo, Self::Assets(_, _) => StepKind::Assets, Self::Keys(_, _) => StepKind::Keys, Self::Balances(_) => StepKind::Balances, @@ -235,11 +251,12 @@ impl Serializer for StepResponse { fn read(reader: &mut Reader) -> Result { Ok(match reader.read_u8()? { 0 => { + let common_point = Option::read(reader)?; let topoheight = reader.read_u64()?; let stable_height = reader.read_u64()?; let hash = reader.read_hash()?; - Self::ChainInfo(topoheight, stable_height, hash) + Self::ChainInfo(common_point, topoheight, stable_height, hash) }, 1 => { let assets = IndexSet::::read(reader)?; @@ -281,8 +298,9 @@ impl Serializer for StepResponse { fn write(&self, writer: &mut Writer) { match self { - Self::ChainInfo(topoheight, stable_height, hash) => { + Self::ChainInfo(common_point, topoheight, stable_height, hash) => { writer.write_u8(0); + common_point.write(writer); writer.write_u64(topoheight); writer.write_u64(stable_height); writer.write_hash(hash); diff --git a/xelis_daemon/src/p2p/packet/chain.rs b/xelis_daemon/src/p2p/packet/chain.rs index 91da8692..df74e67f 100644 --- a/xelis_daemon/src/p2p/packet/chain.rs +++ b/xelis_daemon/src/p2p/packet/chain.rs @@ -53,19 +53,12 @@ pub struct ChainRequest { } impl ChainRequest { - pub fn new() -> Self { + pub fn new(blocks: Vec) -> Self { Self { - blocks: Vec::new() + blocks } } - pub fn add_block_id(&mut self, hash: Hash, topoheight: u64) { - self.blocks.push(BlockId { - hash, - topoheight - }); - } - pub fn size(&self) -> usize { self.blocks.len() } @@ -165,15 +158,7 @@ impl ChainResponse { impl Serializer for ChainResponse { fn write(&self, writer: &mut Writer) { - match &self.common_point { - None => { - writer.write_bool(false); - }, - Some(point) => { - writer.write_bool(true); - point.write(writer); - } - }; + self.common_point.write(writer); writer.write_u16(self.blocks.len() 
as u16); for hash in &self.blocks { writer.write_hash(hash); @@ -186,11 +171,7 @@ impl Serializer for ChainResponse { } fn read(reader: &mut Reader) -> Result { - let common_point = match reader.read_bool()? { - true => Some(CommonPoint::read(reader)?), - false => None - }; - + let common_point = Option::read(reader)?; let len = reader.read_u16()?; if len > CHAIN_SYNC_RESPONSE_MAX_BLOCKS as u16 { debug!("Invalid chain response length: {}", len); From 2bd8e84de0391382f4e0b4693bd5c97fa4a319cb Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 2 Oct 2023 15:25:28 +0200 Subject: [PATCH 039/160] fix API.md --- API.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/API.md b/API.md index 64d28989..b262da78 100644 --- a/API.md +++ b/API.md @@ -289,10 +289,8 @@ Retrieve all blocks at a specific height } ] } - -NOTE: `total_fees` field is not `null` when TXs are fetched (`include_txs` is at `true`). - ``` +NOTE: `total_fees` field is not `null` when TXs are fetched (`include_txs` is at `true`). 
#### Get Block By Hash Retrieve a block by its hash From bb18d5e4a029c6241a1e818cef289c8af86f4fe2 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 3 Oct 2023 01:11:09 +0200 Subject: [PATCH 040/160] daemon: fix partial fast sync with deviated chains --- xelis_daemon/src/core/blockchain.rs | 25 ++++++++++++++++++++++--- xelis_daemon/src/core/storage/sled.rs | 15 ++++++++++----- xelis_daemon/src/p2p/mod.rs | 12 +++++++++--- 3 files changed, 41 insertions(+), 11 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 5e4386cc..827fc76a 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -514,7 +514,16 @@ impl Blockchain { } #[async_recursion] - async fn find_tip_base(&self, storage: &S, hash: &Hash, height: u64) -> Result<(Hash, u64), BlockchainError> { + async fn find_tip_base(&self, storage: &S, hash: &Hash, height: u64, pruned_topoheight: u64) -> Result<(Hash, u64), BlockchainError> { + if pruned_topoheight > 0 && storage.is_block_topological_ordered(hash).await { + let topoheight = storage.get_topo_height_for_hash(hash).await?; + // Node is pruned, we only prune chain to stable height so we can return the hash + if topoheight <= pruned_topoheight { + debug!("Node is pruned, returns {} at {} as stable tip base", hash, height); + return Ok((hash.clone(), height)) + } + } + let (tips, tips_count) = { // first, check if we have it in cache let mut cache = self.tip_base_cache.lock().await; @@ -535,6 +544,15 @@ impl Blockchain { let mut bases = Vec::with_capacity(tips_count); for hash in tips.iter() { + if pruned_topoheight > 0 && storage.is_block_topological_ordered(hash).await { + let topoheight = storage.get_topo_height_for_hash(hash).await?; + // Node is pruned, we only prune chain to stable height so we can return the hash + if topoheight <= pruned_topoheight { + let block_height = storage.get_height_for_block_hash(hash).await?; + debug!("Node is pruned, returns tip {} at {} 
as stable tip base", hash, block_height); + return Ok((hash.clone(), block_height)) + } + } // if block is sync, it is a tip base if self.is_sync_block_at_height(storage, hash, height).await? { let block_height = storage.get_height_for_block_hash(hash).await?; @@ -546,7 +564,7 @@ impl Blockchain { } // if block is not sync, we need to find its tip base too - bases.push(self.find_tip_base(storage, hash, height).await?); + bases.push(self.find_tip_base(storage, hash, height, pruned_topoheight).await?); } if bases.is_empty() { @@ -579,9 +597,10 @@ impl Blockchain { } } + let pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); let mut bases = Vec::new(); for hash in tips.into_iter() { - bases.push(self.find_tip_base(storage, hash, best_height).await?); + bases.push(self.find_tip_base(storage, hash, best_height, pruned_topoheight).await?); } diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index 58b55c06..9ab1ab6d 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -806,7 +806,7 @@ impl Storage for SledStorage { trace!("has balance {} for {} at exact topoheight {}", asset, key, topoheight); // check first that this address has balance, if no returns if !self.has_balance_for(key, asset).await? { - return Err(BlockchainError::NoBalanceChanges(key.clone())) + return Ok(false) } let tree = self.get_versioned_balance_tree(asset, topoheight).await?; @@ -818,7 +818,7 @@ impl Storage for SledStorage { async fn get_balance_at_exact_topoheight(&self, key: &PublicKey, asset: &Hash, topoheight: u64) -> Result { trace!("get balance {} for {} at exact topoheight {}", asset, key, topoheight); // check first that this address has balance, if no returns - if !self.has_balance_for(key, asset).await? { + if !self.has_balance_at_exact_topoheight(key, asset, topoheight).await? 
{ return Err(BlockchainError::NoBalanceChanges(key.clone())) } @@ -1224,7 +1224,7 @@ impl Storage for SledStorage { while let Some(previous_topoheight) = version.get_previous_topoheight() { if previous_topoheight < topoheight { // we find the new highest version which is under new topoheight - trace!("New highest version for {} is at topoheight {}", pkey, previous_topoheight); + trace!("New highest version nonce for {} is at topoheight {}", pkey, previous_topoheight); self.nonces.insert(&key, &previous_topoheight.to_be_bytes())?; break; } @@ -1244,21 +1244,26 @@ impl Storage for SledStorage { let (key, value) = el?; let highest_topoheight = u64::from_bytes(&value)?; if highest_topoheight > topoheight { - self.nonces.remove(&key)?; // find the first version which is under topoheight let pkey = PublicKey::from_bytes(&key)?; + let mut delete = true; let mut version = self.get_balance_at_exact_topoheight(&pkey, asset, highest_topoheight).await?; while let Some(previous_topoheight) = version.get_previous_topoheight() { if previous_topoheight < topoheight { // we find the new highest version which is under new topoheight - trace!("New highest version for {} is at topoheight {} with asset {}", pkey, previous_topoheight, asset); + trace!("New highest version balance for {} is at topoheight {} with asset {}", pkey, previous_topoheight, asset); tree.insert(&key, &previous_topoheight.to_be_bytes())?; + delete = false; break; } // keep searching version = self.get_balance_at_exact_topoheight(&pkey, asset, previous_topoheight).await?; } + + if delete { + tree.remove(&key)?; + } } else { // nothing to do as its under the rewinded topoheight } diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 740308ca..647dc8f2 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1704,9 +1704,15 @@ impl P2pServer { let top_block_hash = storage.get_top_block_hash().await?; if *common_point.get_hash() != top_block_hash { - let deviation = 
our_topoheight - common_point.get_topoheight(); - warn!("Common point is {} while our top block hash is {} ! Deviation of {} topoheight blocks", common_point.get_hash(), top_block_hash, deviation); - our_topoheight = self.blockchain.rewind_chain_for_storage(&mut *storage, deviation).await?; + let pruned_topoheight = storage.get_pruned_topoheight()?.unwrap_or(0); + + warn!("Common point is {} while our top block hash is {} !", common_point.get_hash(), top_block_hash); + let pop_count = if pruned_topoheight >= common_point.get_topoheight() { + our_topoheight - pruned_topoheight + } else { + our_topoheight - common_point.get_topoheight() + }; + our_topoheight = self.blockchain.rewind_chain_for_storage(&mut *storage, pop_count).await?; debug!("New topoheight after rewind is now {}", our_topoheight); } } else { From 3fc284968a40ac7737aa0c37ec58fa47d44e5642 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 3 Oct 2023 17:04:52 +0200 Subject: [PATCH 041/160] wallet: fix nonce scan --- xelis_wallet/src/network_handler.rs | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/xelis_wallet/src/network_handler.rs b/xelis_wallet/src/network_handler.rs index e60e6eef..1db8ba3b 100644 --- a/xelis_wallet/src/network_handler.rs +++ b/xelis_wallet/src/network_handler.rs @@ -133,9 +133,8 @@ impl NetworkHandler { Ok(Some((topoheight, balance))) } - async fn get_balance_and_transactions(&self, address: &Address<'_>, asset: &Hash, min_topoheight: u64, mut current_topoheight: Option) -> Result<(), Error> { + async fn get_balance_and_transactions(&self, address: &Address<'_>, asset: &Hash, min_topoheight: u64, current_topoheight: Option) -> Result<(), Error> { let mut res = self.get_versioned_balance_and_topoheight(address, asset, current_topoheight).await?; - let mut is_highest_nonce = true; while let Some((topoheight, balance)) = res.take() { // don't sync already synced blocks if min_topoheight > topoheight { @@ -166,7 +165,6 @@ impl 
NetworkHandler { } } - let mut latest_nonce_sent = None; let (block, txs) = block.split(); // TODO check only executed txs in this block for (tx_hash, tx) in block.into_owned().take_txs_hashes().into_iter().zip(txs) { @@ -222,21 +220,6 @@ impl NetworkHandler { storage.save_transaction(entry.get_hash(), &entry)?; } } - - if is_owner { - latest_nonce_sent = nonce; - } - } - - // check that we have a outgoing tx (in case of same wallets used in differents places at same time) - if is_highest_nonce { - if let (Some(last_nonce), None) = (latest_nonce_sent, current_topoheight.take()) { - // don't keep the lock in case of a request - debug!("Detected a nonce changes for balance at topoheight {} with last nonce {} current topoheight {:?}", topoheight, last_nonce, current_topoheight); - let mut storage = self.wallet.get_storage().write().await; - storage.set_nonce(last_nonce + 1)?; - is_highest_nonce = false; - } } if let Some(previous_topo) = balance.get_previous_topoheight() { @@ -355,6 +338,16 @@ impl NetworkHandler { } } + // Retrieve the highest nonce (in one call, in case of assets/txs not tracked correctly) + { + let res = self.api.get_last_nonce(&address).await?; + let nonce = res.version.get_nonce(); + debug!("New nonce found is {}", nonce); + let mut storage = self.wallet.get_storage().write().await; + storage.set_nonce(nonce)?; + } + + // get balance and transactions for each asset for asset in assets { debug!("calling get balances and transactions {}", current_topoheight); if let Err(e) = self.get_balance_and_transactions(&address, &asset, current_topoheight, None).await { From 5f9fd530e4539f5785d0b5c85cb8375c36a61d17 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 3 Oct 2023 22:06:59 +0200 Subject: [PATCH 042/160] daemon: check if block exist --- xelis_daemon/src/rpc/rpc.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 2f9ee53a..41918e4a 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ 
b/xelis_daemon/src/rpc/rpc.rs @@ -41,6 +41,10 @@ pub async fn get_block_type_for_block(blockchain: &Blockchain, st } pub async fn get_block_response_for_hash(blockchain: &Blockchain, storage: &S, hash: Hash, include_txs: bool) -> Result { + if !storage.has_block(&hash).await.context("Error while checking if block exist")? { + return Err(InternalRpcError::AnyError(BlockchainError::BlockNotFound(hash).into())) + } + let (topoheight, supply, reward) = if storage.is_block_topological_ordered(&hash).await { ( Some(storage.get_topo_height_for_hash(&hash).await.context("Error while retrieving topo height")?), From cfe3c19149b342a7cd2c8106e85573f8cb1ef5be Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 3 Oct 2023 22:25:42 +0200 Subject: [PATCH 043/160] daemon: delete TX from storage on rewind --- xelis_daemon/src/core/storage/sled.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index 9ab1ab6d..a1d97b04 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -1149,6 +1149,9 @@ impl Storage for SledStorage { self.remove_tx_executed(&tx_hash)?; } + trace!("Deleting TX {} in block {}", tx_hash, hash); + self.delete_data(&self.transactions, &self.transactions_cache, tx_hash).await?; + txs.push((tx_hash.clone(), tx)); } @@ -1172,10 +1175,7 @@ impl Storage for SledStorage { // generate new tips trace!("Removing {} from {} tips", hash, tips.len()); tips.remove(&hash); - trace!("Tips: {}", tips.len()); - for tip in &tips { - trace!("Tip {}", tip); - } + trace!("Tips: {}", tips.iter().map(|b| b.to_string()).collect::>().join(", ")); for hash in block.get_tips() { trace!("Adding {} to {} tips", hash, tips.len()); @@ -1189,7 +1189,7 @@ impl Storage for SledStorage { } debug!("Blocks processed {}, new topoheight: {}, tips: {}", done, topoheight, tips.len()); for hash in &tips { - trace!("hash {} at height {}", hash, 
self.get_height_for_block_hash(&hash).await?); + trace!("tip {} at height {}", hash, self.get_height_for_block_hash(&hash).await?); } // clean all assets From fe901f6f06babff6c9d5b1ff6f29e95eb902afb3 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 3 Oct 2023 22:46:55 +0200 Subject: [PATCH 044/160] daemon: handle multiple packets at same tim --- xelis_daemon/src/p2p/connection.rs | 11 ++++++++++- xelis_daemon/src/p2p/mod.rs | 26 +++++++++++++++++++++----- 2 files changed, 31 insertions(+), 6 deletions(-) diff --git a/xelis_daemon/src/p2p/connection.rs b/xelis_daemon/src/p2p/connection.rs index d6ab5fd2..c548cd8c 100644 --- a/xelis_daemon/src/p2p/connection.rs +++ b/xelis_daemon/src/p2p/connection.rs @@ -81,7 +81,7 @@ impl Connection { Ok(()) } - pub async fn read_packet(&self, buf: &mut [u8], max_size: u32) -> P2pResult> { + pub async fn read_packet_bytes(&self, buf: &mut [u8], max_size: u32) -> P2pResult> { let mut stream = self.read.lock().await; let size = self.read_packet_size(&mut stream, buf).await?; if size == 0 || size > max_size { @@ -91,6 +91,10 @@ impl Connection { trace!("Size received: {}", size); let bytes = self.read_all_bytes(&mut stream, buf, size).await?; + Ok(bytes) + } + + pub async fn read_packet_from_bytes(&self, bytes: &[u8]) -> P2pResult> { let mut reader = Reader::new(&bytes); let packet = Packet::read(&mut reader)?; if reader.total_read() != bytes.len() { @@ -100,6 +104,11 @@ impl Connection { Ok(packet) } + pub async fn read_packet(&self, buf: &mut [u8], max_size: u32) -> P2pResult> { + let bytes = self.read_packet_bytes(buf, max_size).await?; + self.read_packet_from_bytes(&bytes).await + } + async fn read_packet_size(&self, stream: &mut OwnedReadHalf, buf: &mut [u8]) -> P2pResult { let read = self.read_bytes_from_stream(stream, &mut buf[0..4]).await?; if read != 4 { diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 647dc8f2..6fdd03aa 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ 
-1163,12 +1163,28 @@ impl P2pServer { } // Listen to incoming packets from a connection + // Packet is read from the same task always, while its handling is delegated to a unique task async fn listen_connection(self: &Arc, buf: &mut [u8], peer: &Arc) -> Result<(), P2pError> { - let packet = peer.get_connection().read_packet(buf, MAX_BLOCK_SIZE as u32).await?; - if let Err(e) = self.handle_incoming_packet(peer, packet).await { - error!("Error occured while handling incoming packet from {}: {}", peer, e); - peer.increment_fail_count(); - } + let bytes = peer.get_connection().read_packet_bytes(buf, MAX_BLOCK_SIZE as u32).await?; + let zelf = Arc::clone(self); + let peer = Arc::clone(peer); + tokio::spawn(async move { + // Parse the packet + let packet = match peer.get_connection().read_packet_from_bytes(&bytes).await { + Ok(packet) => packet, + Err(e) => { + error!("Error while parsing packet from bytes: {}", e); + peer.increment_fail_count(); + return; + } + }; + + // Handle the packet + if let Err(e) = zelf.handle_incoming_packet(&peer, packet).await { + error!("Error occured while handling incoming packet from {}: {}", peer, e); + peer.increment_fail_count(); + } + }); Ok(()) } From 9612d83ebaeba5a1ae92e10e84f6628e15a65c75 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 4 Oct 2023 22:43:27 +0200 Subject: [PATCH 045/160] daemon: fix pagination in inventory --- xelis_common/src/serializer/mod.rs | 7 +++-- xelis_daemon/src/p2p/mod.rs | 42 +----------------------------- xelis_daemon/src/p2p/queue.rs | 1 + 3 files changed, 5 insertions(+), 45 deletions(-) diff --git a/xelis_common/src/serializer/mod.rs b/xelis_common/src/serializer/mod.rs index b1467ae8..9b85e272 100644 --- a/xelis_common/src/serializer/mod.rs +++ b/xelis_common/src/serializer/mod.rs @@ -73,11 +73,10 @@ impl Writer { pub fn write_optional_non_zero_u8(&mut self, opt: Option) { match opt { - Some(v) => { - self.bytes.push(1); - self.write_u8(v); + Some(v) if v != 0 => { + self.bytes.push(v); }, - None => 
{ + _ => { self.bytes.push(0); } }; diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 6fdd03aa..17a0494a 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1062,8 +1062,6 @@ impl P2pServer { peer.set_requested_inventory(false); peer.set_last_inventory(get_current_time()); - // check and add if we are missing a TX in our mempool or storage - let mut missing_txs: Vec = Vec::new(); let next_page = inventory.next(); { let txs = inventory.get_txs(); @@ -1081,49 +1079,11 @@ impl P2pServer { let storage = self.blockchain.get_storage().read().await; for hash in txs.into_owned() { if !mempool.contains_tx(&hash) && !storage.has_transaction(&hash).await? && !self.object_tracker.has_requested_object(&hash).await { - missing_txs.push(hash.into_owned()); + self.queued_fetcher.fetch(Arc::clone(peer), ObjectRequest::Transaction(hash.into_owned())); } } } - // second part is to retrieve all txs we don't have concurrently - // we don't want to block the peer and others locks for too long so we do it in a separate task - if !missing_txs.is_empty() { - let peer = Arc::clone(&peer); - let zelf = Arc::clone(&self); - tokio::spawn(async move { - for hash in missing_txs { - let response = match zelf.object_tracker.fetch_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash)).await { - Ok(response) => response, - Err(e) => { - error!("Error while requesting TX to {} using ObjectTracker: {}", peer, e); - peer.increment_fail_count(); - return; - } - }; - - if let OwnedObjectResponse::Transaction(tx, hash) = response { - debug!("Received {} with nonce {}", hash, tx.get_nonce()); - if let Err(e) = zelf.blockchain.add_tx_with_hash_to_mempool(tx, hash, false).await { - match e { - BlockchainError::TxAlreadyInMempool(hash) | BlockchainError::TxAlreadyInBlockchain(hash) => { - // ignore because maybe another peer send us this same tx - trace!("Received a tx we already have in mempool: {}", hash); - }, - _ => { - error!("Error 
while adding tx to mempool from {} inventory: {}", peer, e); - peer.increment_fail_count(); - } - } - } - } else { - error!("Error while retrieving tx from {} inventory, got an invalid type, we should ban this peer", peer); - peer.increment_fail_count(); - } - } - }); - } - // request the next page if next_page.is_some() { trace!("Requesting next page of inventory from {}", peer); diff --git a/xelis_daemon/src/p2p/queue.rs b/xelis_daemon/src/p2p/queue.rs index 157d5244..b1d4f1e4 100644 --- a/xelis_daemon/src/p2p/queue.rs +++ b/xelis_daemon/src/p2p/queue.rs @@ -4,6 +4,7 @@ use tokio::sync::mpsc::{UnboundedSender, unbounded_channel}; use crate::core::{blockchain::Blockchain, storage::Storage}; use super::{peer::Peer, packet::object::{ObjectRequest, OwnedObjectResponse}, tracker::SharedObjectTracker}; +// TODO optimize to request the data but only handle in good order pub struct QueuedFetcher { sender: UnboundedSender<(Arc, ObjectRequest)> } From 972787d6a6dfe7ed925a0a455ab67faa01365063 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 5 Oct 2023 22:22:28 +0200 Subject: [PATCH 046/160] daemon: remove the one packet one task feat --- xelis_daemon/src/p2p/mod.rs | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 17a0494a..52d94085 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1125,26 +1125,14 @@ impl P2pServer { // Listen to incoming packets from a connection // Packet is read from the same task always, while its handling is delegated to a unique task async fn listen_connection(self: &Arc, buf: &mut [u8], peer: &Arc) -> Result<(), P2pError> { - let bytes = peer.get_connection().read_packet_bytes(buf, MAX_BLOCK_SIZE as u32).await?; - let zelf = Arc::clone(self); - let peer = Arc::clone(peer); - tokio::spawn(async move { - // Parse the packet - let packet = match peer.get_connection().read_packet_from_bytes(&bytes).await { - Ok(packet) 
=> packet, - Err(e) => { - error!("Error while parsing packet from bytes: {}", e); - peer.increment_fail_count(); - return; - } - }; + // Read & parse the packet + let packet = peer.get_connection().read_packet(buf, MAX_BLOCK_SIZE as u32).await?; + // Handle the packet + if let Err(e) = self.handle_incoming_packet(&peer, packet).await { + error!("Error occured while handling incoming packet from {}: {}", peer, e); + peer.increment_fail_count(); + } - // Handle the packet - if let Err(e) = zelf.handle_incoming_packet(&peer, packet).await { - error!("Error occured while handling incoming packet from {}: {}", peer, e); - peer.increment_fail_count(); - } - }); Ok(()) } From bcafaf83af430fbb8342a45b9ade0a1cbbaa66e9 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 8 Oct 2023 20:21:37 +0200 Subject: [PATCH 047/160] daemon: add rpc method 'has_nonce', and fix 'get_nonce' --- xelis_common/src/api/daemon.rs | 13 ++++++++++++ xelis_daemon/src/rpc/rpc.rs | 39 ++++++++++++++++++++++++++++++++-- 2 files changed, 50 insertions(+), 2 deletions(-) diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index 85426325..947a5ed8 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -98,6 +98,13 @@ pub struct GetNonceParams<'a> { pub topoheight: Option } +#[derive(Serialize, Deserialize)] +pub struct HasNonceParams<'a> { + pub address: Cow<'a, Address<'a>>, + #[serde(default)] + pub topoheight: Option +} + #[derive(Serialize, Deserialize)] pub struct GetNonceResult { pub topoheight: u64, @@ -105,6 +112,12 @@ pub struct GetNonceResult { pub version: VersionedNonce } +#[derive(Serialize, Deserialize)] +pub struct HasNonceResult { + #[serde(flatten)] + pub exist: bool +} + #[derive(Serialize, Deserialize)] pub struct GetLastBalanceResult { pub balance: VersionedBalance, diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 41918e4a..082b149f 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs 
@@ -17,7 +17,18 @@ use xelis_common::{ GetTransactionParams, P2pStatusResult, GetBlocksAtHeightParams, - GetTopoHeightRangeParams, GetBalanceAtTopoHeightParams, GetLastBalanceResult, GetInfoResult, GetTopBlockParams, GetTransactionsParams, TransactionResponse, GetHeightRangeParams, GetNonceResult, GetAssetsParams, GetAccountsParams + GetTopoHeightRangeParams, + GetBalanceAtTopoHeightParams, + GetLastBalanceResult, + GetInfoResult, + GetTopBlockParams, + GetTransactionsParams, + TransactionResponse, + GetHeightRangeParams, + GetNonceResult, + GetAssetsParams, + GetAccountsParams, + HasNonceResult }, DataHash}, async_handler, serializer::Serializer, @@ -127,6 +138,7 @@ pub fn register_methods(handler: &mut RPCHandler>> handler.register_method("get_balance_at_topoheight", async_handler!(get_balance_at_topoheight)); handler.register_method("get_info", async_handler!(get_info)); handler.register_method("get_nonce", async_handler!(get_nonce)); + handler.register_method("has_nonce", async_handler!(has_nonce)); handler.register_method("get_assets", async_handler!(get_assets)); handler.register_method("count_assets", async_handler!(count_assets)); handler.register_method("count_transactions", async_handler!(count_transactions)); @@ -287,6 +299,22 @@ async fn get_balance_at_topoheight(blockchain: Arc>, b Ok(json!(balance)) } +async fn has_nonce(blockchain: Arc>, body: Value) -> Result { + let params: GetNonceParams = parse_params(body)?; + if params.address.is_mainnet() != blockchain.get_network().is_mainnet() { + return Err(InternalRpcError::AnyError(BlockchainError::InvalidNetwork.into())) + } + + let storage = blockchain.get_storage().read().await; + let exist = if let Some(topoheight) = params.topoheight { + storage.has_nonce_at_exact_topoheight(params.address.get_public_key(), topoheight).await.context("Error while checking nonce at topo for account")? 
+ } else { + storage.has_nonce(params.address.get_public_key()).await.context("Error while checking nonce for account")? + }; + + Ok(json!(HasNonceResult { exist })) +} + async fn get_nonce(blockchain: Arc>, body: Value) -> Result { let params: GetNonceParams = parse_params(body)?; if params.address.is_mainnet() != blockchain.get_network().is_mainnet() { @@ -294,7 +322,14 @@ async fn get_nonce(blockchain: Arc>, body: Value) -> R } let storage = blockchain.get_storage().read().await; - let (topoheight, version) = storage.get_last_nonce(params.address.get_public_key()).await.context("Error while retrieving nonce for account")?; + let (topoheight, version) = if let Some(topoheight) = params.topoheight { + (topoheight, storage.get_nonce_at_exact_topoheight(params.address.get_public_key(), topoheight).await + .context("Error while retrieving nonce at topo for account")?) + } else { + storage.get_last_nonce(params.address.get_public_key()).await + .context("Error while retrieving nonce for account")? + }; + Ok(json!(GetNonceResult { topoheight, version })) } From 3f61be4087687d8d939e0fb7c9b6e2d14d9b491f Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 8 Oct 2023 20:42:08 +0200 Subject: [PATCH 048/160] update API.md --- API.md | 51 ++++++++++++++++++++++++++++++---- xelis_common/src/api/daemon.rs | 1 - 2 files changed, 45 insertions(+), 7 deletions(-) diff --git a/API.md b/API.md index b262da78..f6cfa608 100644 --- a/API.md +++ b/API.md @@ -400,15 +400,16 @@ NOTE: `total_fees` field is not `null` when TXs are fetched (`include_txs` is at #### Get Nonce Retrieve the nonce for address in request params. -If no nonce is found for this address and its valid, value start at 0. -Each nonce represents how many TX has been made by this address. +If no nonce is found for this address and its a valid one, it is safe to assume its nonce start at 0. +Each nonce represents how many TX has been made by this address and prevent replay attacks. 
##### Method `get_nonce` ##### Parameters -| Name | Type | Required | Note | -|:-------:|:-------:|:--------:|:---------------------------------:| -| address | Address | Required | Valid address registered on chain | +| Name | Type | Required | Note | +|:----------:|:-------:|:--------:|:------------------------------------------:| +| address | Address | Required | Valid address registered on chain | +| topoheight | Integer | Optional | nonce at specified topoheight | ##### Request ```json @@ -427,7 +428,45 @@ Each nonce represents how many TX has been made by this address. { "id": 1, "jsonrpc": "2.0", - "result": 17 + "result": { + "nonce": 6216, + "previous_topoheight": 454254, + "topoheight": 454352 + } +} +``` + +#### Has Nonce +Verify if address has a nonce on-chain registered. + +##### Method `has_nonce` + +##### Parameters +| Name | Type | Required | Note | +|:----------:|:-------:|:--------:|:------------------------------------------:| +| address | Address | Required | Valid address registered on chain | +| topoheight | Integer | Optional | nonce at specified topoheight | + +##### Request +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "has_nonce", + "params": { + "address": "xel1qyqxcfxdc8ywarcz3wx2leahnfn2pyp0ymvfm42waluq408j2x5680g05xfx5" + } +} +``` + +##### Response +```json +{ + "id": 1, + "jsonrpc": "2.0", + "result": { + "exist": true + } } ``` diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index 947a5ed8..7a1e2482 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -114,7 +114,6 @@ pub struct GetNonceResult { #[derive(Serialize, Deserialize)] pub struct HasNonceResult { - #[serde(flatten)] pub exist: bool } From dcb6ca052931fb5138da44c2d9786de0af3ea459 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 8 Oct 2023 20:43:09 +0200 Subject: [PATCH 049/160] daemon: use right parameter --- xelis_daemon/src/rpc/rpc.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git 
a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 082b149f..4186abb7 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -28,7 +28,8 @@ use xelis_common::{ GetNonceResult, GetAssetsParams, GetAccountsParams, - HasNonceResult + HasNonceResult, + HasNonceParams }, DataHash}, async_handler, serializer::Serializer, @@ -300,7 +301,7 @@ async fn get_balance_at_topoheight(blockchain: Arc>, b } async fn has_nonce(blockchain: Arc>, body: Value) -> Result { - let params: GetNonceParams = parse_params(body)?; + let params: HasNonceParams = parse_params(body)?; if params.address.is_mainnet() != blockchain.get_network().is_mainnet() { return Err(InternalRpcError::AnyError(BlockchainError::InvalidNetwork.into())) } From 1f8f9c9b6775c802d1a39e2c23ffc4284854c729 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 9 Oct 2023 16:50:50 +0200 Subject: [PATCH 050/160] daemon: asset standard for decimals, update fastsync, get_assets, get_asset rpc method --- API.md | 42 +++++++- xelis_common/src/api/daemon.rs | 5 + xelis_common/src/asset.rs | 100 ++++++++++++++++++ xelis_common/src/config.rs | 3 +- xelis_common/src/lib.rs | 1 + xelis_daemon/src/core/blockchain.rs | 6 +- xelis_daemon/src/core/storage/mod.rs | 8 +- xelis_daemon/src/core/storage/sled.rs | 21 ++-- xelis_daemon/src/p2p/mod.rs | 3 +- .../src/p2p/packet/bootstrap_chain.rs | 6 +- xelis_daemon/src/rpc/rpc.rs | 14 ++- xelis_wallet/src/daemon_api.rs | 4 +- xelis_wallet/src/network_handler.rs | 13 +-- 13 files changed, 191 insertions(+), 35 deletions(-) create mode 100644 xelis_common/src/asset.rs diff --git a/API.md b/API.md index f6cfa608..65fc29ae 100644 --- a/API.md +++ b/API.md @@ -550,7 +550,7 @@ NOTE: Balance is returned in atomic units ``` #### Get Assets -Get all assets available on network with its registered topoheight. +Get all assets available on network with its registered topoheight and necessary decimals for a full coin. 
##### Method `get_assets` @@ -576,11 +576,49 @@ Get all assets available on network with its registered topoheight. "id": 1, "jsonrpc": "2.0", "result": [ - "0000000000000000000000000000000000000000000000000000000000000000" + { + "asset": "0000000000000000000000000000000000000000000000000000000000000000", + "decimals": 5, + "topoheight": 0 + } ] } ``` +#### Get Asset +Get registered topoheight and decimals data from a specific asset. + +##### Method `get_asset` + +##### Parameters +| Name | Type | Required | Note | +|:-----:|:----:|:--------:|:------------------:| +| asset | Hash | Required | Asset ID requested | + +##### Request +```json +{ + "jsonrpc": "2.0", + "method": "get_asset", + "id": 1, + "params": { + "asset": "0000000000000000000000000000000000000000000000000000000000000000" + } +} +``` + +##### Response +```json +{ + "id": 1, + "jsonrpc": "2.0", + "result": { + "decimals": 5, + "topoheight": 0 + } +} +``` + #### Count Assets Counts the number of assets saved on disk diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index 7a1e2482..6cf8851e 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -194,6 +194,11 @@ pub struct TransactionResponse<'a, T: Clone> { pub data: DataHash<'a, T> } +#[derive(Serialize, Deserialize)] +pub struct GetAssetParams { + pub asset: Hash +} + #[derive(Serialize, Deserialize)] pub struct GetAssetsParams { pub skip: Option, diff --git a/xelis_common/src/asset.rs b/xelis_common/src/asset.rs new file mode 100644 index 00000000..0ecd079c --- /dev/null +++ b/xelis_common/src/asset.rs @@ -0,0 +1,100 @@ +use std::hash::{Hash as StdHash, Hasher}; + +use crate::{serializer::{Serializer, Writer, Reader, ReaderError}, crypto::hash::Hash}; + +#[derive(serde::Serialize, serde::Deserialize, Debug)] +pub struct AssetData { + // At which topoheight this asset is registered + topoheight: u64, + // How many atomic units is needed for a full coin + decimals: u8, +} + +impl AssetData { + 
pub fn new(topoheight: u64, decimals: u8) -> Self { + Self { + topoheight, + decimals + } + } + + pub fn get_topoheight(&self) -> u64 { + self.topoheight + } + + pub fn get_decimals(&self) -> u8 { + self.decimals + } +} + +impl Serializer for AssetData { + fn write(&self, writer: &mut Writer) { + writer.write_u64(&self.topoheight); + writer.write_u8(self.decimals); + } + + fn read(reader: &mut Reader) -> Result { + Ok( + Self::new(reader.read_u64()?, reader.read_u8()?) + ) + } +} + +#[derive(serde::Serialize, serde::Deserialize, Debug)] +pub struct AssetWithData { + asset: Hash, + #[serde(flatten)] + data: AssetData +} + +impl AssetWithData { + pub fn new(asset: Hash, data: AssetData) -> Self { + Self { + asset, + data + } + } + + pub fn get_asset(&self) -> &Hash { + &self.asset + } + + pub fn get_data(&self) -> &AssetData { + &self.data + } + + pub fn to_asset(self) -> Hash { + self.asset + } + + pub fn consume(self) -> (Hash, AssetData) { + (self.asset, self.data) + } +} + +impl Serializer for AssetWithData { + fn write(&self, writer: &mut Writer) { + self.asset.write(writer); + self.data.write(writer); + } + + fn read(reader: &mut Reader) -> Result { + Ok( + Self::new(reader.read_hash()?, AssetData::read(reader)?) 
+ ) + } +} + +impl StdHash for AssetWithData { + fn hash(&self, state: &mut H) { + self.asset.hash(state); + } +} + +impl PartialEq for AssetWithData { + fn eq(&self, other: &Self) -> bool { + self.asset == other.asset + } +} + +impl Eq for AssetWithData {} \ No newline at end of file diff --git a/xelis_common/src/config.rs b/xelis_common/src/config.rs index a9bcbe5a..dca6050d 100644 --- a/xelis_common/src/config.rs +++ b/xelis_common/src/config.rs @@ -25,7 +25,8 @@ pub const TIMESTAMP_IN_FUTURE_LIMIT: u128 = 2 * 1000; // 2 seconds maximum in fu pub const PREFIX_ADDRESS: &str = "xel"; // mainnet prefix address pub const TESTNET_PREFIX_ADDRESS: &str = "xet"; // testnet prefix address -pub const COIN_VALUE: u64 = 100_000; // 5 decimals for a full coin +pub const COIN_DECIMALS: u8 = 5; // 5 decimals numbers +pub const COIN_VALUE: u64 = 10u64.pow(COIN_DECIMALS as u32); // 100 000 pub const MAX_SUPPLY: u64 = 18_400_000 * COIN_VALUE; // 18.4M full coin pub const EMISSION_SPEED_FACTOR: u64 = 21; diff --git a/xelis_common/src/lib.rs b/xelis_common/src/lib.rs index 79f3b0cd..3d7b1119 100644 --- a/xelis_common/src/lib.rs +++ b/xelis_common/src/lib.rs @@ -10,6 +10,7 @@ pub mod config; pub mod immutable; pub mod difficulty; pub mod network; +pub mod asset; #[cfg(feature = "json_rpc")] pub mod json_rpc; diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 827fc76a..65b7ba8c 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -2,14 +2,14 @@ use anyhow::Error; use lru::LruCache; use serde_json::{Value, json}; use xelis_common::{ - config::{DEFAULT_P2P_BIND_ADDRESS, P2P_DEFAULT_MAX_PEERS, DEFAULT_RPC_BIND_ADDRESS, DEFAULT_CACHE_SIZE, MAX_BLOCK_SIZE, EMISSION_SPEED_FACTOR, MAX_SUPPLY, DEV_FEE_PERCENT, GENESIS_BLOCK, TIPS_LIMIT, TIMESTAMP_IN_FUTURE_LIMIT, STABLE_LIMIT, GENESIS_BLOCK_HASH, MINIMUM_DIFFICULTY, GENESIS_BLOCK_DIFFICULTY, XELIS_ASSET, SIDE_BLOCK_REWARD_PERCENT, DEV_PUBLIC_KEY, BLOCK_TIME, 
PRUNE_SAFETY_LIMIT, BLOCK_TIME_MILLIS}, + config::{DEFAULT_P2P_BIND_ADDRESS, P2P_DEFAULT_MAX_PEERS, DEFAULT_RPC_BIND_ADDRESS, DEFAULT_CACHE_SIZE, MAX_BLOCK_SIZE, EMISSION_SPEED_FACTOR, MAX_SUPPLY, DEV_FEE_PERCENT, GENESIS_BLOCK, TIPS_LIMIT, TIMESTAMP_IN_FUTURE_LIMIT, STABLE_LIMIT, GENESIS_BLOCK_HASH, MINIMUM_DIFFICULTY, GENESIS_BLOCK_DIFFICULTY, XELIS_ASSET, SIDE_BLOCK_REWARD_PERCENT, DEV_PUBLIC_KEY, BLOCK_TIME, PRUNE_SAFETY_LIMIT, BLOCK_TIME_MILLIS, COIN_DECIMALS}, crypto::{key::PublicKey, hash::{Hashable, Hash, HASH_SIZE}}, difficulty::{check_difficulty, calculate_difficulty}, transaction::{Transaction, TransactionType, EXTRA_DATA_LIMIT_SIZE}, utils::{get_current_timestamp, format_coin}, block::{Block, BlockHeader, EXTRA_NONCE_SIZE, Difficulty}, immutable::Immutable, - serializer::Serializer, account::VersionedBalance, api::{daemon::{NotifyEvent, BlockOrderedEvent, TransactionExecutedEvent, BlockType, StableHeightChangedEvent}, DataHash}, network::Network + serializer::Serializer, account::VersionedBalance, api::{daemon::{NotifyEvent, BlockOrderedEvent, TransactionExecutedEvent, BlockType, StableHeightChangedEvent}, DataHash}, network::Network, asset::AssetData }; use crate::{p2p::P2pServer, rpc::{rpc::{get_block_response_for_hash, get_block_type_for_block}, DaemonRpcServer, SharedDaemonRpcServer}}; use super::storage::{Storage, DifficultyProvider}; @@ -268,7 +268,7 @@ impl Blockchain { // register XELIS asset debug!("Registering XELIS asset: {} at topoheight 0", XELIS_ASSET); - storage.add_asset(&XELIS_ASSET, 0).await?; + storage.add_asset(&XELIS_ASSET, AssetData::new(0, COIN_DECIMALS)).await?; let genesis_block = if GENESIS_BLOCK.len() != 0 { info!("De-serializing genesis block..."); diff --git a/xelis_daemon/src/core/storage/mod.rs b/xelis_daemon/src/core/storage/mod.rs index f77e740f..4724e0ea 100644 --- a/xelis_daemon/src/core/storage/mod.rs +++ b/xelis_daemon/src/core/storage/mod.rs @@ -9,7 +9,7 @@ use xelis_common::{ transaction::Transaction, 
block::{Block, BlockHeader, Difficulty}, account::{VersionedBalance, VersionedNonce}, immutable::Immutable, - network::Network, + network::Network, asset::{AssetData, AssetWithData}, }; use crate::core::error::BlockchainError; @@ -46,7 +46,7 @@ pub trait Storage: DifficultyProvider + Sync + Send + 'static { // same as above but for nonces async fn create_snapshot_nonces_at_topoheight(&mut self, topoheight: u64) -> Result<(), BlockchainError>; - async fn get_partial_assets(&self, maximum: usize, skip: usize, minimum_topoheight: u64, maximum_topoheight: u64) -> Result, BlockchainError>; + async fn get_partial_assets(&self, maximum: usize, skip: usize, minimum_topoheight: u64, maximum_topoheight: u64) -> Result, BlockchainError>; async fn get_partial_keys(&self, maximum: usize, skip: usize, minimum_topoheight: u64, maximum_topoheight: u64) -> Result, BlockchainError>; async fn has_key_updated_in_range(&self, key: &PublicKey, minimum_topoheight: u64, maximum_topoheight: u64) -> Result; @@ -64,11 +64,11 @@ pub trait Storage: DifficultyProvider + Sync + Send + 'static { fn has_network(&self) -> Result; async fn asset_exist(&self, asset: &Hash) -> Result; - async fn add_asset(&mut self, asset: &Hash, topoheight: u64) -> Result<(), BlockchainError>; + async fn add_asset(&mut self, asset: &Hash, data: AssetData) -> Result<(), BlockchainError>; async fn get_assets(&self) -> Result, BlockchainError>; fn count_assets(&self) -> usize; - fn get_asset_registration_topoheight(&self, asset: &Hash) -> Result; + fn get_asset_data(&self, asset: &Hash) -> Result; fn has_tx_blocks(&self, hash: &Hash) -> Result; fn has_block_linked_to_tx(&self, tx: &Hash, block: &Hash) -> Result; diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index a1d97b04..0e76f005 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -8,7 +8,7 @@ use xelis_common::{ transaction::Transaction, block::{BlockHeader, Block, Difficulty}, 
account::{VersionedBalance, VersionedNonce}, - network::Network, + network::Network, asset::{AssetData, AssetWithData}, }; use std::{ collections::HashSet, @@ -490,18 +490,19 @@ impl Storage for SledStorage { Ok(()) } - async fn get_partial_assets(&self, maximum: usize, skip: usize, minimum_topoheight: u64, maximum_topoheight: u64) -> Result, BlockchainError> { - let mut assets: IndexSet = IndexSet::new(); + async fn get_partial_assets(&self, maximum: usize, skip: usize, minimum_topoheight: u64, maximum_topoheight: u64) -> Result, BlockchainError> { + let mut assets = IndexSet::new(); let mut skip_count = 0; for el in self.assets.iter() { let (key, value) = el?; - let registered_at_topo = u64::from_bytes(&value)?; + let data = AssetData::from_bytes(&value)?; // check that we have a registered asset before the maximum topoheight - if registered_at_topo >= minimum_topoheight && registered_at_topo <= maximum_topoheight { + if data.get_topoheight() >= minimum_topoheight && data.get_topoheight() <= maximum_topoheight { if skip_count < skip { skip_count += 1; } else { - assets.insert(Hash::from_bytes(&key)?); + let asset = Hash::from_bytes(&key)?; + assets.insert(AssetWithData::new(asset, data)); if assets.len() == maximum { break; @@ -685,9 +686,9 @@ impl Storage for SledStorage { self.contains_data(&self.assets, &self.assets_cache, asset).await } - async fn add_asset(&mut self, asset: &Hash, topoheight: u64) -> Result<(), BlockchainError> { - trace!("add asset {} at topoheight {}", asset, topoheight); - self.assets.insert(asset.as_bytes(), &topoheight.to_be_bytes())?; + async fn add_asset(&mut self, asset: &Hash, data: AssetData) -> Result<(), BlockchainError> { + trace!("add asset {} at topoheight {}", asset, data.get_topoheight()); + self.assets.insert(asset.as_bytes(), data.to_bytes())?; if let Some(cache) = &self.assets_cache { let mut cache = cache.lock().await; cache.put(asset.clone(), ()); @@ -714,7 +715,7 @@ impl Storage for SledStorage { self.assets.len() } - 
fn get_asset_registration_topoheight(&self, asset: &Hash) -> Result { + fn get_asset_data(&self, asset: &Hash) -> Result { trace!("get asset registration topoheight {}", asset); self.load_from_disk(&self.assets, asset.as_bytes()) } diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 52d94085..1c8391f4 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1694,8 +1694,9 @@ impl P2pServer { // fetch all assets from peer StepResponse::Assets(assets, next_page) => { for asset in assets { + let (asset, data) = asset.consume(); debug!("Saving asset {} at topoheight {}", asset, stable_topoheight); - storage.add_asset(&asset, stable_topoheight).await?; + storage.add_asset(&asset, data).await?; } if next_page.is_some() { diff --git a/xelis_daemon/src/p2p/packet/bootstrap_chain.rs b/xelis_daemon/src/p2p/packet/bootstrap_chain.rs index d3a0c794..26de2dd5 100644 --- a/xelis_daemon/src/p2p/packet/bootstrap_chain.rs +++ b/xelis_daemon/src/p2p/packet/bootstrap_chain.rs @@ -1,7 +1,7 @@ use std::borrow::Cow; use indexmap::IndexSet; use log::debug; -use xelis_common::{crypto::{hash::Hash, key::PublicKey}, serializer::{Serializer, ReaderError, Reader, Writer}, block::Difficulty, config::CHAIN_SYNC_REQUEST_MAX_BLOCKS}; +use xelis_common::{crypto::{hash::Hash, key::PublicKey}, serializer::{Serializer, ReaderError, Reader, Writer}, block::Difficulty, config::CHAIN_SYNC_REQUEST_MAX_BLOCKS, asset::AssetWithData}; use super::chain::{BlockId, CommonPoint}; @@ -227,7 +227,7 @@ impl Serializer for StepRequest<'_> { #[derive(Debug)] pub enum StepResponse { ChainInfo(Option, u64, u64, Hash), // common point, topoheight of stable hash, stable height, stable hash - Assets(IndexSet, Option), // Set of assets, pagination + Assets(IndexSet, Option), // Set of assets, pagination Keys(IndexSet, Option), // Set of keys, pagination Balances(Vec>), // Balances requested Nonces(Vec), // Nonces for requested accounts @@ -259,7 +259,7 @@ impl Serializer for 
StepResponse { Self::ChainInfo(common_point, topoheight, stable_height, hash) }, 1 => { - let assets = IndexSet::::read(reader)?; + let assets = IndexSet::::read(reader)?; let page = Option::read(reader)?; if let Some(page_number) = &page { if *page_number == 0 { diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 4186abb7..b2af03b1 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -29,7 +29,7 @@ use xelis_common::{ GetAssetsParams, GetAccountsParams, HasNonceResult, - HasNonceParams + HasNonceParams, GetAssetParams }, DataHash}, async_handler, serializer::Serializer, @@ -140,6 +140,7 @@ pub fn register_methods(handler: &mut RPCHandler>> handler.register_method("get_info", async_handler!(get_info)); handler.register_method("get_nonce", async_handler!(get_nonce)); handler.register_method("has_nonce", async_handler!(has_nonce)); + handler.register_method("get_asset", async_handler!(get_asset)); handler.register_method("get_assets", async_handler!(get_assets)); handler.register_method("count_assets", async_handler!(count_assets)); handler.register_method("count_transactions", async_handler!(count_transactions)); @@ -334,6 +335,13 @@ async fn get_nonce(blockchain: Arc>, body: Value) -> R Ok(json!(GetNonceResult { topoheight, version })) } +async fn get_asset(blockchain: Arc>, body: Value) -> Result { + let params: GetAssetParams = parse_params(body)?; + let storage = blockchain.get_storage().read().await; + let asset = storage.get_asset_data(¶ms.asset).context("Asset was not found")?; + Ok(json!(asset)) +} + const MAX_ASSETS: usize = 100; async fn get_assets(blockchain: Arc>, body: Value) -> Result { @@ -347,11 +355,11 @@ async fn get_assets(blockchain: Arc>, body: Value) -> MAX_ASSETS }; let skip = params.skip.unwrap_or(0); - let storage = blockchain.get_storage().read().await; let min = params.minimum_topoheight.unwrap_or(0); let max = params.maximum_topoheight.unwrap_or_else(|| blockchain.get_topo_height()); - let 
assets = storage.get_partial_assets(maximum, skip, min, max).await.context("Error while retrieving registered assets")?; + let assets = storage.get_partial_assets(maximum, skip, min, max).await + .context("Error while retrieving registered assets")?; Ok(json!(assets)) } diff --git a/xelis_wallet/src/daemon_api.rs b/xelis_wallet/src/daemon_api.rs index 2759bd38..4d76ebff 100644 --- a/xelis_wallet/src/daemon_api.rs +++ b/xelis_wallet/src/daemon_api.rs @@ -1,7 +1,7 @@ use std::borrow::Cow; use anyhow::{Context, Result}; -use xelis_common::{json_rpc::JsonRPCClient, api::daemon::{GetLastBalanceResult, GetBalanceAtTopoHeightParams, GetBalanceParams, GetInfoResult, SubmitTransactionParams, BlockResponse, GetBlockAtTopoHeightParams, GetTransactionParams, GetNonceParams, GetNonceResult, GetAssetsParams}, account::VersionedBalance, crypto::{address::Address, hash::Hash}, transaction::Transaction, serializer::Serializer, block::{BlockHeader, Block}}; +use xelis_common::{json_rpc::JsonRPCClient, api::daemon::{GetLastBalanceResult, GetBalanceAtTopoHeightParams, GetBalanceParams, GetInfoResult, SubmitTransactionParams, BlockResponse, GetBlockAtTopoHeightParams, GetTransactionParams, GetNonceParams, GetNonceResult, GetAssetsParams}, account::VersionedBalance, crypto::{address::Address, hash::Hash}, transaction::Transaction, serializer::Serializer, block::{BlockHeader, Block}, asset::AssetWithData}; pub struct DaemonAPI { client: JsonRPCClient, @@ -29,7 +29,7 @@ impl DaemonAPI { Ok(count) } - pub async fn get_assets(&self, skip: Option, maximum: Option, minimum_topoheight: Option, maximum_topoheight: Option) -> Result> { + pub async fn get_assets(&self, skip: Option, maximum: Option, minimum_topoheight: Option, maximum_topoheight: Option) -> Result> { let assets = self.client.call_with("get_assets", &GetAssetsParams { maximum, skip, diff --git a/xelis_wallet/src/network_handler.rs b/xelis_wallet/src/network_handler.rs index 1db8ba3b..98f13b10 100644 --- 
a/xelis_wallet/src/network_handler.rs +++ b/xelis_wallet/src/network_handler.rs @@ -3,7 +3,7 @@ use thiserror::Error; use anyhow::Error; use log::{debug, error, info, warn}; use tokio::{task::JoinHandle, sync::Mutex, time::interval}; -use xelis_common::{crypto::{hash::Hash, address::Address}, block::Block, transaction::TransactionType, account::VersionedBalance}; +use xelis_common::{crypto::{hash::Hash, address::Address}, block::Block, transaction::TransactionType, account::VersionedBalance, asset::AssetWithData}; use crate::{daemon_api::DaemonAPI, wallet::Wallet, entry::{EntryData, Transfer, TransactionEntry}}; @@ -320,21 +320,22 @@ impl NetworkHandler { skip += response.len(); let mut storage = self.wallet.get_storage().write().await; - for asset in &response { - if !storage.contains_asset(asset)? { + for asset_data in &response { + if !storage.contains_asset(asset_data.get_asset())? { // New asset added to the wallet, inform listeners #[cfg(feature = "api_server")] { if let Some(api_server) = self.wallet.get_api_server().lock().await.as_ref() { - api_server.notify_event(&NotifyEvent::NewAsset, &asset).await; + api_server.notify_event(&NotifyEvent::NewAsset, asset_data.get_asset()).await; } } - storage.add_asset(asset)?; + // TODO save decimals + storage.add_asset(asset_data.get_asset())?; } } - assets.extend(response); + assets.extend(response.into_iter().map(AssetWithData::to_asset).collect::>()); } } From 29e74fe2eb699475c946f29da0a89ef54bcb0f47 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 9 Oct 2023 17:02:51 +0200 Subject: [PATCH 051/160] wallet: delete duplicated rpc method --- xelis_common/src/api/wallet.rs | 4 +++- xelis_wallet/src/api/rpc.rs | 12 ++---------- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/xelis_common/src/api/wallet.rs b/xelis_common/src/api/wallet.rs index 535eae50..50d6ade5 100644 --- a/xelis_common/src/api/wallet.rs +++ b/xelis_common/src/api/wallet.rs @@ -49,7 +49,9 @@ pub struct TransactionResponse<'a> { 
#[derive(Serialize, Deserialize)] pub struct GetAddressParams { - pub data: Option + // Data to use for creating an integrated address + // Returned address will contains all the data provided here + pub integrated_data: Option } #[derive(Serialize, Deserialize)] diff --git a/xelis_wallet/src/api/rpc.rs b/xelis_wallet/src/api/rpc.rs index 3290eea2..e4eeda4e 100644 --- a/xelis_wallet/src/api/rpc.rs +++ b/xelis_wallet/src/api/rpc.rs @@ -2,7 +2,7 @@ use std::{sync::Arc, borrow::Cow}; use anyhow::Context; use log::info; -use xelis_common::{rpc_server::{RPCHandler, InternalRpcError, parse_params}, config::{VERSION, XELIS_ASSET}, async_handler, api::{wallet::{BuildTransactionParams, FeeBuilder, TransactionResponse, ListTransactionsParams, GetAddressParams, GetBalanceParams, GetTransactionParams}, DataHash, DataElement}, crypto::hash::Hashable}; +use xelis_common::{rpc_server::{RPCHandler, InternalRpcError, parse_params}, config::{VERSION, XELIS_ASSET}, async_handler, api::{wallet::{BuildTransactionParams, FeeBuilder, TransactionResponse, ListTransactionsParams, GetAddressParams, GetBalanceParams, GetTransactionParams}, DataHash}, crypto::hash::Hashable}; use serde_json::{Value, json}; use crate::{wallet::{Wallet, WalletError}, entry::{EntryData, TransactionEntry}}; @@ -18,7 +18,6 @@ pub fn register_methods(handler: &mut RPCHandler>) { handler.register_method("get_transaction", async_handler!(get_transaction)); handler.register_method("build_transaction", async_handler!(build_transaction)); handler.register_method("list_transactions", async_handler!(list_transactions)); - handler.register_method("make_integrated_address", async_handler!(make_integrated_address)); } async fn version(_: Arc, body: Value) -> Result { @@ -60,7 +59,7 @@ async fn get_topoheight(wallet: Arc, body: Value) -> Result, body: Value) -> Result { let params: GetAddressParams = parse_params(body)?; - let address = if let Some(data) = params.data { + let address = if let Some(data) = 
params.integrated_data { wallet.get_address_with(data) } else { wallet.get_address() @@ -162,11 +161,4 @@ async fn list_transactions(wallet: Arc, body: Value) -> Result, body: Value) -> Result { - let params: DataElement = parse_params(body)?; - - let integrated_address = wallet.get_address_with(params); - Ok(json!(integrated_address)) } \ No newline at end of file From 7e9ea6f10613b41e618e998586ee10689d3f8881 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 9 Oct 2023 17:12:17 +0200 Subject: [PATCH 052/160] wallet: add 'split_address' rpc method --- xelis_common/src/api/wallet.rs | 16 +++++++++++++++- xelis_wallet/src/api/rpc.rs | 23 ++++++++++++++++++++--- 2 files changed, 35 insertions(+), 4 deletions(-) diff --git a/xelis_common/src/api/wallet.rs b/xelis_common/src/api/wallet.rs index 50d6ade5..7bced265 100644 --- a/xelis_common/src/api/wallet.rs +++ b/xelis_common/src/api/wallet.rs @@ -2,7 +2,7 @@ use std::borrow::Cow; use serde::{Deserialize, Serialize}; -use crate::{transaction::{TransactionType, Transaction}, crypto::{key::PublicKey, hash::Hash}}; +use crate::{transaction::{TransactionType, Transaction}, crypto::{key::PublicKey, hash::Hash, address::Address}}; use super::{DataHash, DataElement}; @@ -54,6 +54,20 @@ pub struct GetAddressParams { pub integrated_data: Option } +#[derive(Serialize, Deserialize)] +pub struct SplitAddressParams<'a> { + // address which must be in integrated form + pub address: Address<'a> +} + +#[derive(Serialize, Deserialize)] +pub struct SplitAddressResult { + // Normal address + pub address: PublicKey, + // Encoded data from address + pub integrated_data: DataElement +} + #[derive(Serialize, Deserialize)] pub struct GetBalanceParams { pub asset: Option diff --git a/xelis_wallet/src/api/rpc.rs b/xelis_wallet/src/api/rpc.rs index e4eeda4e..ecf63bdc 100644 --- a/xelis_wallet/src/api/rpc.rs +++ b/xelis_wallet/src/api/rpc.rs @@ -2,17 +2,18 @@ use std::{sync::Arc, borrow::Cow}; use anyhow::Context; use log::info; -use 
xelis_common::{rpc_server::{RPCHandler, InternalRpcError, parse_params}, config::{VERSION, XELIS_ASSET}, async_handler, api::{wallet::{BuildTransactionParams, FeeBuilder, TransactionResponse, ListTransactionsParams, GetAddressParams, GetBalanceParams, GetTransactionParams}, DataHash}, crypto::hash::Hashable}; +use xelis_common::{rpc_server::{RPCHandler, InternalRpcError, parse_params}, config::{VERSION, XELIS_ASSET}, async_handler, api::{wallet::{BuildTransactionParams, FeeBuilder, TransactionResponse, ListTransactionsParams, GetAddressParams, GetBalanceParams, GetTransactionParams, SplitAddressParams, SplitAddressResult}, DataHash}, crypto::{hash::Hashable, address::AddressType}}; use serde_json::{Value, json}; use crate::{wallet::{Wallet, WalletError}, entry::{EntryData, TransactionEntry}}; pub fn register_methods(handler: &mut RPCHandler>) { info!("Registering RPC methods..."); - handler.register_method("version", async_handler!(version)); + handler.register_method("get_version", async_handler!(get_version)); handler.register_method("get_network", async_handler!(get_network)); handler.register_method("get_nonce", async_handler!(get_nonce)); handler.register_method("get_topoheight", async_handler!(get_topoheight)); handler.register_method("get_address", async_handler!(get_address)); + handler.register_method("split_address", async_handler!(split_address)); handler.register_method("get_balance", async_handler!(get_balance)); handler.register_method("get_tracked_assets", async_handler!(get_tracked_assets)); handler.register_method("get_transaction", async_handler!(get_transaction)); @@ -20,7 +21,7 @@ pub fn register_methods(handler: &mut RPCHandler>) { handler.register_method("list_transactions", async_handler!(list_transactions)); } -async fn version(_: Arc, body: Value) -> Result { +async fn get_version(_: Arc, body: Value) -> Result { if body != Value::Null { return Err(InternalRpcError::UnexpectedParams) } @@ -68,6 +69,22 @@ async fn get_address(wallet: Arc, 
body: Value) -> Result, body: Value) -> Result { + let params: SplitAddressParams<'_> = parse_params(body)?; + let address = params.address; + + let (address, addr_type) = address.split(); + let integrated_data = match addr_type { + AddressType::Data(data) => data, + AddressType::Normal => return Err(InternalRpcError::CustomStr("Address is not an integrated address")) + }; + + Ok(json!(SplitAddressResult { + address, + integrated_data + })) +} + async fn get_balance(wallet: Arc, body: Value) -> Result { let params: GetBalanceParams = parse_params(body)?; let asset = params.asset.unwrap_or(XELIS_ASSET); From 3bfd8284e74fbf593c7e0becf6d2bcbcd7b02ba8 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 9 Oct 2023 22:23:37 +0200 Subject: [PATCH 053/160] daemon: try fix deadlock --- xelis_daemon/src/p2p/mod.rs | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 1c8391f4..400c361d 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -763,14 +763,9 @@ impl P2pServer { for peer_peer in peer_peers.iter() { // if we have a common peer with him if let Some(peer_peer) = peer_list.get_peer_by_addr(peer_peer) { - let peers_sent = peer_peer.get_peers(true).lock().await; - let peers = peer_peer.get_peers(false).lock().await; - // verify that we already know that he his connected to it and that we informed him we are connected too to prevent any desync - let predicate = |addr: &&SocketAddr| *addr == peer.get_outgoing_address(); - if peers.iter().find(predicate).is_some() && peers_sent.iter().find(predicate).is_some() { - debug!("{} is a common peer with {}, adding block {} to its propagation cache", peer_peer, peer, block_hash); - let mut peer_propagation = peer.get_blocks_propagation().lock().await; - peer_propagation.put(block_hash.clone(), ()); + { + let mut peers = peer_peer.get_blocks_propagation().lock().await; + peers.put(block_hash.clone(), ()); } } } From 
fcebbc349187a811cf52251c869bbc462c1514ce Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 9 Oct 2023 23:46:08 +0200 Subject: [PATCH 054/160] common: implement ElGamal homomorphic properties using Ristretto255 --- Cargo.lock | 20 ++ xelis_common/Cargo.toml | 1 + xelis_common/src/crypto/elgamal/ciphertext.rs | 219 ++++++++++++++++++ xelis_common/src/crypto/elgamal/key.rs | 65 ++++++ xelis_common/src/crypto/elgamal/mod.rs | 107 +++++++++ xelis_common/src/crypto/mod.rs | 3 +- 6 files changed, 414 insertions(+), 1 deletion(-) create mode 100644 xelis_common/src/crypto/elgamal/ciphertext.rs create mode 100644 xelis_common/src/crypto/elgamal/key.rs create mode 100644 xelis_common/src/crypto/elgamal/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 82a3c38d..ffe4252a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -775,6 +775,19 @@ dependencies = [ "zeroize", ] +[[package]] +name = "curve25519-dalek-ng" +version = "4.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c359b7249347e46fb28804470d071c921156ad62b3eef5d34e2ba867533dec8" +dependencies = [ + "byteorder", + "digest 0.9.0", + "rand_core 0.6.4", + "subtle-ng", + "zeroize", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -2089,6 +2102,12 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +[[package]] +name = "subtle-ng" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" + [[package]] name = "syn" version = "1.0.109" @@ -2631,6 +2650,7 @@ dependencies = [ "chrono", "clap", "crossterm", + "curve25519-dalek-ng", "ed25519-dalek", "fern", "futures-util", diff --git a/xelis_common/Cargo.toml b/xelis_common/Cargo.toml index a359c0f5..75b6b84c 100644 --- a/xelis_common/Cargo.toml +++ b/xelis_common/Cargo.toml @@ -15,6 +15,7 @@ serde = { version = "1", features = 
["derive", "rc"] } serde_json = "1" rand = "0.8.4" ed25519-dalek = { version = "1.0.1", features = ["serde"] } +curve25519-dalek = { package = "curve25519-dalek-ng", version = "4", default_features = false, features = ["alloc", "u64_backend"] } thiserror = "1.0.30" anyhow = "1.0.57" log = "0.4" diff --git a/xelis_common/src/crypto/elgamal/ciphertext.rs b/xelis_common/src/crypto/elgamal/ciphertext.rs new file mode 100644 index 00000000..4e7abe62 --- /dev/null +++ b/xelis_common/src/crypto/elgamal/ciphertext.rs @@ -0,0 +1,219 @@ +use curve25519_dalek::{ristretto::{RistrettoPoint, CompressedRistretto}, scalar::Scalar}; +use core::ops::{Add, Neg, Mul, Sub}; + +use crate::serializer::{Serializer, Writer, ReaderError, Reader}; + +// Each ciphertext has a size of 64 bytes in compressed form. +// Homomorphic properties can be used to add, subtract, and multiply ciphertexts. +pub struct Ciphertext { + left: RistrettoPoint, + right: RistrettoPoint, +} + +impl Ciphertext { + pub fn new(left: RistrettoPoint, right: RistrettoPoint) -> Self { + Self { + left, + right, + } + } + + pub fn points(&self) -> (RistrettoPoint, RistrettoPoint) { + (self.left, self.right) + } +} + +impl Serializer for Ciphertext { + fn write(&self, writer: &mut Writer) { + writer.write_bytes(self.left.compress().as_bytes()); + writer.write_bytes(self.right.compress().as_bytes()); + } + + fn read(reader: &mut Reader) -> Result { + let left_bytes = reader.read_bytes_32()?; + let right_bytes = reader.read_bytes_32()?; + + Ok(Self { + left: CompressedRistretto::from_slice(&left_bytes).decompress().ok_or(ReaderError::InvalidValue)?, + right: CompressedRistretto::from_slice(&right_bytes).decompress().ok_or(ReaderError::InvalidValue)?, + }) + } +} + +impl Add for Ciphertext { + type Output = Self; + + fn add(mut self, other: Self) -> Self::Output { + self.left += other.left; + self.right += other.right; + self + } +} + +impl Add<&Ciphertext> for Ciphertext { + type Output = Self; + + fn add(mut self, other: 
&Self) -> Self::Output { + self.left += other.left; + self.right += other.right; + self + } +} + +impl Add<&Ciphertext> for &Ciphertext { + type Output = Ciphertext; + + fn add(self, other: &Ciphertext) -> Self::Output { + Ciphertext::new(self.left + other.left, self.right + other.right) + } +} + +impl Add for Ciphertext { + type Output = Self; + + fn add(mut self, other: RistrettoPoint) -> Self::Output { + self.right += other; + self + } +} + +impl Add<&RistrettoPoint> for &Ciphertext { + type Output = Ciphertext; + + fn add(self, other: &RistrettoPoint) -> Self::Output { + Ciphertext::new(self.left, self.right + other) + } +} + +impl Add<&RistrettoPoint> for Ciphertext { + type Output = Self; + + fn add(mut self, other: &RistrettoPoint) -> Self::Output { + self.right += other; + self + } +} + +impl Add for &Ciphertext { + type Output = Ciphertext; + + fn add(self, other: RistrettoPoint) -> Self::Output { + Ciphertext::new(self.left, self.right + other) + } +} + +impl Sub for Ciphertext { + type Output = Self; + + fn sub(mut self, other: Self) -> Self::Output { + self.left -= other.left; + self.right -= other.right; + self + } +} + +impl Sub<&Ciphertext> for Ciphertext { + type Output = Self; + + fn sub(mut self, other: &Self) -> Self::Output { + self.left -= other.left; + self.right -= other.right; + self + } +} + +impl Sub<&Ciphertext> for &Ciphertext { + type Output = Ciphertext; + + fn sub(self, other: &Ciphertext) -> Self::Output { + Ciphertext::new(self.left - other.left, self.right - other.right) + } +} + +impl Sub for Ciphertext { + type Output = Self; + + fn sub(mut self, other: RistrettoPoint) -> Self::Output { + self.right -= other; + self + } +} + +impl Sub<&RistrettoPoint> for &Ciphertext { + type Output = Ciphertext; + + fn sub(self, other: &RistrettoPoint) -> Self::Output { + Ciphertext::new(self.left, self.right - other) + } +} + +impl Sub<&RistrettoPoint> for Ciphertext { + type Output = Self; + + fn sub(mut self, other: &RistrettoPoint) -> 
Self::Output { + self.right -= other; + self + } +} + +impl Sub for &Ciphertext { + type Output = Ciphertext; + + fn sub(self, other: RistrettoPoint) -> Self::Output { + Ciphertext::new(self.left, self.right - other) + } +} + +impl Neg for Ciphertext { + type Output = Self; + + fn neg(mut self) -> Self::Output { + self.left = -self.left; + self.right = -self.right; + self + } +} + +impl Neg for &Ciphertext { + type Output = Ciphertext; + + fn neg(self) -> Self::Output { + Ciphertext::new(-self.left, -self.right) + } +} + +impl Mul for Ciphertext { + type Output = Self; + + fn mul(mut self, other: Scalar) -> Self::Output { + self.left *= other; + self.right *= other; + self + } +} + +impl Mul for &Ciphertext { + type Output = Ciphertext; + + fn mul(self, other: Scalar) -> Self::Output { + Ciphertext::new(self.left * other, self.right * other) + } +} + +impl Mul<&Scalar> for Ciphertext { + type Output = Self; + + fn mul(mut self, other: &Scalar) -> Self::Output { + self.left *= other; + self.right *= other; + self + } +} + +impl Mul<&Scalar> for &Ciphertext { + type Output = Ciphertext; + + fn mul(self, other: &Scalar) -> Self::Output { + Ciphertext::new(self.left * other, self.right * other) + } +} \ No newline at end of file diff --git a/xelis_common/src/crypto/elgamal/key.rs b/xelis_common/src/crypto/elgamal/key.rs new file mode 100644 index 00000000..d68042da --- /dev/null +++ b/xelis_common/src/crypto/elgamal/key.rs @@ -0,0 +1,65 @@ +use curve25519_dalek::{ristretto::RistrettoPoint, scalar::Scalar, constants::RISTRETTO_BASEPOINT_TABLE}; +use rand::rngs::OsRng; + +use super::Ciphertext; + +pub struct PrivateKey { + secret: Scalar +} + +impl PrivateKey { + pub fn new(secret: Scalar) -> Self { + Self { + secret + } + } + + pub fn to_public_key(&self) -> PublicKey { + PublicKey::new(&self.secret * &RISTRETTO_BASEPOINT_TABLE) + } + + pub fn decrypt_to_point(&self, ciphertext: &Ciphertext) -> RistrettoPoint { + let (left, right) = ciphertext.points(); + right - left * 
&self.secret + } +} + +pub struct PublicKey { + point: RistrettoPoint +} + +impl PublicKey { + pub fn new(point: RistrettoPoint) -> Self { + Self { + point + } + } + + pub fn point(&self) -> &RistrettoPoint { + &self.point + } + + // Generate a random Scalar to be used as blinding factor for encryption + pub fn generate_random_r(&self) -> Scalar { + // Create a random number generator + let mut rng = OsRng; + let r = Scalar::random(&mut rng); + r + } + + pub fn encrypt(&self, value: u64) -> Ciphertext { + let m = &Scalar::from(value) * &RISTRETTO_BASEPOINT_TABLE; + self.encrypt_point(m) + } + + pub fn encrypt_point(&self, m: RistrettoPoint) -> Ciphertext { + let r = self.generate_random_r(); + self.encrypt_with(m, r) + } + + pub fn encrypt_with(&self, m: RistrettoPoint, r: Scalar) -> Ciphertext { + let c1 = &r * &RISTRETTO_BASEPOINT_TABLE; + let c2 = m + r * &self.point; + Ciphertext::new(c1, c2) + } +} \ No newline at end of file diff --git a/xelis_common/src/crypto/elgamal/mod.rs b/xelis_common/src/crypto/elgamal/mod.rs new file mode 100644 index 00000000..ad2b56d6 --- /dev/null +++ b/xelis_common/src/crypto/elgamal/mod.rs @@ -0,0 +1,107 @@ +mod ciphertext; +mod key; + +pub use self::{ + ciphertext::Ciphertext, + key::{PrivateKey, PublicKey}, +}; + +mod tests { + use curve25519_dalek::{scalar::Scalar, constants::RISTRETTO_BASEPOINT_TABLE, ristretto::RistrettoPoint}; + use rand::rngs::OsRng; + use super::{PrivateKey, PublicKey}; + + fn _generate_key_pair() -> (PrivateKey, PublicKey) { + let private_key = PrivateKey::new(Scalar::random(&mut OsRng)); + let public_key = private_key.to_public_key(); + (private_key, public_key) + } + + fn _generate_point(value: u64) -> RistrettoPoint { + &Scalar::from(value) * &RISTRETTO_BASEPOINT_TABLE + } + + #[test] + fn test_encrypt_decrypt() { + let (private_key, public_key) = _generate_key_pair(); + + let m = _generate_point(10); + let c = public_key.encrypt_point(m); + let m2 = private_key.decrypt_to_point(&c); + assert_eq!(m, 
m2); + } + + #[test] + fn test_homomorphic_add() { + let (private_key, public_key) = _generate_key_pair(); + + let m1 = _generate_point(50); + let m2 = _generate_point(100); + + let c1 = public_key.encrypt_point(m1); + let c2 = public_key.encrypt_point(m2); + let c3 = c1 + c2; + + let m3 = private_key.decrypt_to_point(&c3); + assert_eq!(m1 + m2, m3); + } + + #[test] + fn test_homomorphic_add_plaintext() { + let (private_key, public_key) = _generate_key_pair(); + + let m1 = _generate_point(50); + let m2 = _generate_point(100); + + // Enc(m1) + m2 = Enc(m1 + m2) + let c1 = public_key.encrypt_point(m1); + let c2 = c1 + m2; + + let m3 = private_key.decrypt_to_point(&c2); + assert_eq!(m1 + m2, m3); + } + + #[test] + fn test_homomorphic_sub() { + let (private_key, public_key) = _generate_key_pair(); + + let m1 = _generate_point(50); + let m2 = _generate_point(100); + + let c1 = public_key.encrypt_point(m1); + let c2 = public_key.encrypt_point(m2); + let c3 = c2 - c1; + + let m3 = private_key.decrypt_to_point(&c3); + assert_eq!(m2 - m1, m3); + } + + #[test] + fn test_homomorphic_sub_plaintext() { + let (private_key, public_key) = _generate_key_pair(); + + let m1 = _generate_point(50); + let m2 = _generate_point(100); + + // Enc(m1) + m2 = Enc(m1 + m2) + let c1 = public_key.encrypt_point(m1); + let c2 = c1 - m2; + + let m3 = private_key.decrypt_to_point(&c2); + assert_eq!(m1 - m2, m3); + } + + #[test] + fn test_homomorphic_mul() { + let (private_key, public_key) = _generate_key_pair(); + + let m1 = _generate_point(50); + let m2 = Scalar::from(100u64); + + let c1 = public_key.encrypt_point(m1); + let c2 = c1 * m2; + + let m3 = private_key.decrypt_to_point(&c2); + assert_eq!(m3, m1 * m2); + } +} \ No newline at end of file diff --git a/xelis_common/src/crypto/mod.rs b/xelis_common/src/crypto/mod.rs index 2db14d4a..8f531017 100644 --- a/xelis_common/src/crypto/mod.rs +++ b/xelis_common/src/crypto/mod.rs @@ -1,4 +1,5 @@ pub mod hash; pub mod key; pub mod bech32; -pub mod 
address; \ No newline at end of file +pub mod address; +pub mod elgamal; \ No newline at end of file From d384c347e67ec87b4302beb7b17c84c4dfd72c7a Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 9 Oct 2023 23:50:14 +0200 Subject: [PATCH 055/160] common: keep default features, set version to 4.1.1 --- xelis_common/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xelis_common/Cargo.toml b/xelis_common/Cargo.toml index 75b6b84c..a1576d2c 100644 --- a/xelis_common/Cargo.toml +++ b/xelis_common/Cargo.toml @@ -15,7 +15,7 @@ serde = { version = "1", features = ["derive", "rc"] } serde_json = "1" rand = "0.8.4" ed25519-dalek = { version = "1.0.1", features = ["serde"] } -curve25519-dalek = { package = "curve25519-dalek-ng", version = "4", default_features = false, features = ["alloc", "u64_backend"] } +curve25519-dalek = { package = "curve25519-dalek-ng", version = "4.1.1" } thiserror = "1.0.30" anyhow = "1.0.57" log = "0.4" From 4c984b4e1135936db7035c394cc58edf507bd28c Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 9 Oct 2023 23:53:09 +0200 Subject: [PATCH 056/160] common: fix comment --- xelis_common/src/crypto/elgamal/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xelis_common/src/crypto/elgamal/mod.rs b/xelis_common/src/crypto/elgamal/mod.rs index ad2b56d6..d998d315 100644 --- a/xelis_common/src/crypto/elgamal/mod.rs +++ b/xelis_common/src/crypto/elgamal/mod.rs @@ -83,7 +83,7 @@ mod tests { let m1 = _generate_point(50); let m2 = _generate_point(100); - // Enc(m1) + m2 = Enc(m1 + m2) + // Enc(m1) - m2 = Enc(m1 - m2) let c1 = public_key.encrypt_point(m1); let c2 = c1 - m2; From 68f3a4941116bc91377c8c162d467b1b59d38b91 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 10 Oct 2023 12:18:40 +0200 Subject: [PATCH 057/160] daemon: delete unnecessary semaphore --- xelis_daemon/src/p2p/mod.rs | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs 
b/xelis_daemon/src/p2p/mod.rs index 400c361d..542422bd 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -28,7 +28,7 @@ use self::packet::ping::Ping; use self::error::P2pError; use self::packet::{Packet, PacketWrapper}; use self::peer::Peer; -use tokio::{net::{TcpListener, TcpStream}, sync::{mpsc::{self, UnboundedSender, UnboundedReceiver}, Semaphore}, select, task::JoinHandle}; +use tokio::{net::{TcpListener, TcpStream}, sync::mpsc::{self, UnboundedSender, UnboundedReceiver}, select, task::JoinHandle}; use log::{info, warn, error, debug, trace}; use tokio::io::AsyncWriteExt; use tokio::time::{interval, timeout, sleep}; @@ -61,7 +61,6 @@ pub struct P2pServer { last_sync_request_sent: AtomicU64, // used to check if we are already syncing with one peer or not object_tracker: SharedObjectTracker, // used to requests objects to peers and avoid requesting the same object to multiple peers queued_fetcher: QueuedFetcher, // used to requests all propagated txs in one task only - block_propagation_semaphore: Semaphore, // used to limit the number of block propagations common peers check is_running: AtomicBool // used to check if the server is running or not in tasks } @@ -94,7 +93,6 @@ impl P2pServer { last_sync_request_sent: AtomicU64::new(0), object_tracker, queued_fetcher, - block_propagation_semaphore: Semaphore::new(1), is_running: AtomicBool::new(true) }; @@ -755,18 +753,14 @@ impl P2pServer { // Avoid sending the same block to a common peer // because we track peerlist of each peers, we can try to determinate it { - // semaphore allows to prevent any deadlock because of loop lock - let _permit = self.block_propagation_semaphore.acquire().await?; let peer_list = self.peer_list.read().await; let peer_peers = peer.get_peers(false).lock().await; // iterate over all peers of this peer broadcaster for peer_peer in peer_peers.iter() { // if we have a common peer with him if let Some(peer_peer) = peer_list.get_peer_by_addr(peer_peer) { - { - let 
mut peers = peer_peer.get_blocks_propagation().lock().await; - peers.put(block_hash.clone(), ()); - } + let mut blocks_propagation = peer_peer.get_blocks_propagation().lock().await; + blocks_propagation.put(block_hash.clone(), ()); } } } From 417c35d6288158b7589baab3549f67b08b3b753f Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 10 Oct 2023 16:14:45 +0200 Subject: [PATCH 058/160] wallet: fix error for nonce missing --- xelis_wallet/src/network_handler.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/xelis_wallet/src/network_handler.rs b/xelis_wallet/src/network_handler.rs index 98f13b10..0d603f57 100644 --- a/xelis_wallet/src/network_handler.rs +++ b/xelis_wallet/src/network_handler.rs @@ -341,8 +341,7 @@ impl NetworkHandler { // Retrieve the highest nonce (in one call, in case of assets/txs not tracked correctly) { - let res = self.api.get_last_nonce(&address).await?; - let nonce = res.version.get_nonce(); + let nonce = self.api.get_last_nonce(&address).await.map(|v| v.version.get_nonce()).unwrap_or(0); debug!("New nonce found is {}", nonce); let mut storage = self.wallet.get_storage().write().await; storage.set_nonce(nonce)?; From 746b0a40106fbe17d251479ddf8bb1738215bcca Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 10 Oct 2023 17:39:47 +0200 Subject: [PATCH 059/160] common: fix blocked prompt on exit --- xelis_common/src/prompt/mod.rs | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/xelis_common/src/prompt/mod.rs b/xelis_common/src/prompt/mod.rs index f72d6141..162d6ef9 100644 --- a/xelis_common/src/prompt/mod.rs +++ b/xelis_common/src/prompt/mod.rs @@ -17,7 +17,7 @@ use fern::colors::{ColoredLevelConfig, Color}; use tokio::sync::mpsc::{self, UnboundedSender, UnboundedReceiver}; use tokio::sync::oneshot; use std::sync::{PoisonError, Arc, Mutex}; -use log::{info, error, Level, debug, LevelFilter}; +use log::{info, error, Level, debug, LevelFilter, warn}; use tokio::time::interval; use 
std::future::Future; use std::time::Duration; @@ -260,6 +260,16 @@ impl State { } info!("ioloop thread is now stopped"); + let mut readers = self.readers.lock()?; + let mut values = Vec::with_capacity(readers.len()); + std::mem::swap(&mut *readers, &mut values); + + for reader in values { + if let Err(e) = reader.send(String::new()) { + warn!("Error while sending empty string to reader: {}", e); + } + } + Ok(()) } @@ -271,7 +281,7 @@ impl State { let lines_count = prompt.lines().count(); let previous_lines_count = self.previous_prompt_line.swap(lines_count, Ordering::SeqCst); let lines_eraser = if previous_lines_count > 1 { - format!("{}", "\x1B[A".repeat(previous_lines_count - 1)) + format!("{}", "\x1B[A".repeat(previous_lines_count)) } else { String::new() }; @@ -522,6 +532,10 @@ impl Prompt { // read a message from the user and apply the input mask if necessary pub async fn read_input(&self, prompt: String, apply_mask: bool) -> Result { + if self.state.has_exited.load(Ordering::SeqCst) { + return Err(PromptError::NotRunning) + } + // register our reader let receiver = { let mut readers = self.state.readers.lock()?; From 081fc7ecc856ca2dc8d34f4e3b3e93e3436e6c1e Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 10 Oct 2023 17:43:20 +0200 Subject: [PATCH 060/160] common: add ciphertext variant --- xelis_common/src/crypto/elgamal/ciphertext.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/xelis_common/src/crypto/elgamal/ciphertext.rs b/xelis_common/src/crypto/elgamal/ciphertext.rs index 4e7abe62..7a71c29d 100644 --- a/xelis_common/src/crypto/elgamal/ciphertext.rs +++ b/xelis_common/src/crypto/elgamal/ciphertext.rs @@ -68,6 +68,16 @@ impl Add<&Ciphertext> for &Ciphertext { } } +impl Add for &Ciphertext { + type Output = Ciphertext; + + fn add(self, mut other: Ciphertext) -> Self::Output { + other.left += self.left; + other.right += self.right; + other + } +} + impl Add for Ciphertext { type Output = Self; From 7c0dc3b66b02a2027545763eb3d12953e23e3081 
Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 10 Oct 2023 23:27:17 +0200 Subject: [PATCH 061/160] common: better prompt system --- Cargo.lock | 1 + xelis_common/Cargo.toml | 1 + xelis_common/src/prompt/mod.rs | 48 +++++++++++++++++++++++++++++----- 3 files changed, 44 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ffe4252a..1146c6cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2661,6 +2661,7 @@ dependencies = [ "num-bigint", "num-traits", "rand 0.8.5", + "regex", "reqwest", "serde", "serde_json", diff --git a/xelis_common/Cargo.toml b/xelis_common/Cargo.toml index a1576d2c..f32bb944 100644 --- a/xelis_common/Cargo.toml +++ b/xelis_common/Cargo.toml @@ -32,6 +32,7 @@ actix-web = { version = "4", optional = true } actix-ws = { version = "0.2.5", optional = true } futures-util = { version = "0.3.28", optional = true } async-trait = { version = "0.1.64", optional = true } +regex = "1" [features] json_rpc = ["dep:reqwest"] diff --git a/xelis_common/src/prompt/mod.rs b/xelis_common/src/prompt/mod.rs index 162d6ef9..9eb060ba 100644 --- a/xelis_common/src/prompt/mod.rs +++ b/xelis_common/src/prompt/mod.rs @@ -10,10 +10,11 @@ use std::fmt::{Display, Formatter, self}; use std::io::{Write, stdout, Error as IOError}; use std::num::ParseFloatError; use std::str::FromStr; -use std::sync::atomic::{AtomicBool, Ordering, AtomicUsize}; +use std::sync::atomic::{AtomicBool, Ordering, AtomicUsize, AtomicU16}; use crossterm::event::{self, Event, KeyCode, KeyModifiers, KeyEventKind}; use crossterm::terminal; use fern::colors::{ColoredLevelConfig, Color}; +use regex::Regex; use tokio::sync::mpsc::{self, UnboundedSender, UnboundedReceiver}; use tokio::sync::oneshot; use std::sync::{PoisonError, Arc, Mutex}; @@ -108,22 +109,26 @@ impl From> for PromptError { // State used to be shared between stdin thread and Prompt instance struct State { prompt: Mutex>, + width: AtomicU16, previous_prompt_line: AtomicUsize, user_input: Mutex, mask_input: AtomicBool, readers: 
Mutex>>, has_exited: AtomicBool, + ascii_escape_regex: Regex, } impl State { fn new() -> Self { Self { prompt: Mutex::new(None), + width: AtomicU16::new(crossterm::terminal::size().unwrap_or((80, 0)).0), previous_prompt_line: AtomicUsize::new(0), user_input: Mutex::new(String::new()), mask_input: AtomicBool::new(false), readers: Mutex::new(Vec::new()), has_exited: AtomicBool::new(false), + ascii_escape_regex: Regex::new("\x1B\\[[0-9;]*[A-Za-z]").unwrap() } } @@ -148,7 +153,8 @@ impl State { match event::read() { Ok(event) => { match event { - Event::Resize(_, _) => { + Event::Resize(width, _) => { + self.width.store(width, Ordering::SeqCst); self.show()?; } Event::Paste(s) => { @@ -277,11 +283,36 @@ impl State { self.mask_input.load(Ordering::SeqCst) } + fn count_lines(&self, value: &String) -> usize { + let width = self.width.load(Ordering::SeqCst); + + let mut lines = 0; + let mut current_line_width = 0; + let input = self.ascii_escape_regex.replace_all(value, ""); + + for c in input.chars() { + if c == '\n' || current_line_width >= width { + lines += 1; + current_line_width = 0; + } else { + current_line_width += 1; + } + } + + if current_line_width > 0 { + lines += 1; + } + + lines + } + fn show_with_prompt_and_input(&self, prompt: &String, input: &String) -> Result<(), PromptError> { - let lines_count = prompt.lines().count(); - let previous_lines_count = self.previous_prompt_line.swap(lines_count, Ordering::SeqCst); - let lines_eraser = if previous_lines_count > 1 { - format!("{}", "\x1B[A".repeat(previous_lines_count)) + let lines_count = self.count_lines(&format!("\r{}{}", prompt, input)); + let count = self.previous_prompt_line.swap(lines_count, Ordering::SeqCst); + + // > 1 because prompt line is already counted + let lines_eraser: String = if count > 1 { + format!("\x1B[{}A", count - 1) } else { String::new() }; @@ -290,6 +321,11 @@ impl State { print!("\r\x1B[2K{}{}{}", lines_eraser, prompt, "*".repeat(input.len())); } else { print!("\r\x1B[2K{}{}{}", 
lines_eraser, prompt, input); + } + + // Scroll up if we a empty line + if lines_count < count { + // TODO } stdout().flush()?; From b2bb8f6a8053796451baf3f6601d806ba8273cb6 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 10 Oct 2023 23:35:19 +0200 Subject: [PATCH 062/160] daemon: set nonce to 0 on newly used keys --- xelis_daemon/src/core/blockchain.rs | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 65b7ba8c..0f33ca24 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1448,6 +1448,23 @@ impl Blockchain { // reward the miner self.reward_miner(storage, &block, block_reward, total_fees, &mut balances, highest_topo).await?; + // save balances for each topoheight + for (key, assets) in balances { + for (asset, balance) in assets { + trace!("Saving balance {} for {} at topo {}, previous: {:?}", asset, key, highest_topo, balance.get_previous_topoheight()); + storage.set_balance_to(key, asset, highest_topo, &balance).await?; + } + + // No nonce update for this key + if !local_nonces.contains_key(key) { + // Check if its a known account, otherwise set nonce to 0 + if !storage.has_nonce(key).await? 
{ + // This public key is new, register it by setting 0 + storage.set_nonce_at_topoheight(key, 0, highest_topo).await?; + } + } + } + // save nonces for each pubkey for new topoheight for (key, nonce) in local_nonces { trace!("Saving nonce {} for {} at topoheight {}", nonce, key, highest_topo); @@ -1457,14 +1474,6 @@ impl Blockchain { nonces.insert(key, nonce); } - // save balances for each topoheight - for (key, assets) in balances { - for (asset, balance) in assets { - trace!("Saving balance {} for {} at topo {}, previous: {:?}", asset, key, highest_topo, balance.get_previous_topoheight()); - storage.set_balance_to(key, asset, highest_topo, &balance).await?; - } - } - if should_track_events.contains(&NotifyEvent::BlockOrdered) { let value = json!(BlockOrderedEvent { block_hash: Cow::Borrowed(&hash), From b04995c15303b94799e61552dd18f9b50f5c62b2 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 11 Oct 2023 19:09:05 +0200 Subject: [PATCH 063/160] daemon: 'count_accounts' rpc method --- API.md | 26 ++++++++++++++++++++++++++ xelis_daemon/src/core/storage/mod.rs | 1 + xelis_daemon/src/core/storage/sled.rs | 4 ++++ xelis_daemon/src/rpc/rpc.rs | 11 +++++++++++ 4 files changed, 42 insertions(+) diff --git a/API.md b/API.md index 65fc29ae..45212dd4 100644 --- a/API.md +++ b/API.md @@ -645,6 +645,32 @@ No parameters } ``` +#### Count Accounts +Counts the number of accounts saved on disk + +##### Method `count_assets` + +##### Parameters +No parameters + +##### Request +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "count_accounts" +} +``` + +##### Response +```json +{ + "id": 1, + "jsonrpc": "2.0", + "result": 1271 +} +``` + #### Count Transactions Counts the number of transactions saved on disk diff --git a/xelis_daemon/src/core/storage/mod.rs b/xelis_daemon/src/core/storage/mod.rs index 4724e0ea..97649c0f 100644 --- a/xelis_daemon/src/core/storage/mod.rs +++ b/xelis_daemon/src/core/storage/mod.rs @@ -51,6 +51,7 @@ pub trait Storage: DifficultyProvider + Sync + 
Send + 'static { async fn has_key_updated_in_range(&self, key: &PublicKey, minimum_topoheight: u64, maximum_topoheight: u64) -> Result; async fn get_balances<'a, I: Iterator + Send>(&self, asset: &Hash, keys: I, maximum_topoheight: u64) -> Result>, BlockchainError>; + fn count_accounts(&self) -> usize; fn get_block_executer_for_tx(&self, tx: &Hash) -> Result; fn set_tx_executed_in_block(&mut self, tx: &Hash, block: &Hash) -> Result<(), BlockchainError>; diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index 0e76f005..690a17c2 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -638,6 +638,10 @@ impl Storage for SledStorage { Ok(balances) } + fn count_accounts(&self) -> usize { + self.nonces.len() + } + fn get_block_executer_for_tx(&self, tx: &Hash) -> Result { self.load_from_disk(&self.txs_executed, tx.as_bytes()) } diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index b2af03b1..40877a39 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -143,6 +143,7 @@ pub fn register_methods(handler: &mut RPCHandler>> handler.register_method("get_asset", async_handler!(get_asset)); handler.register_method("get_assets", async_handler!(get_assets)); handler.register_method("count_assets", async_handler!(count_assets)); + handler.register_method("count_accounts", async_handler!(count_accounts)); handler.register_method("count_transactions", async_handler!(count_transactions)); handler.register_method("submit_transaction", async_handler!(submit_transaction)); handler.register_method("get_transaction", async_handler!(get_transaction)); @@ -374,6 +375,16 @@ async fn count_assets(blockchain: Arc>, body: Value) - Ok(json!(storage.count_assets())) } +// TODO Rate limiter +async fn count_accounts(blockchain: Arc>, body: Value) -> Result { + if body != Value::Null { + return Err(InternalRpcError::UnexpectedParams) + } + + let storage = 
blockchain.get_storage().read().await; + Ok(json!(storage.count_accounts())) +} + // TODO Rate limiter async fn count_transactions(blockchain: Arc>, body: Value) -> Result { if body != Value::Null { From 0a333843c5160efb410665dff2f8ac427a76b72a Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 11 Oct 2023 19:33:17 +0200 Subject: [PATCH 064/160] daemon: check if TX is already in storage --- xelis_daemon/src/p2p/mod.rs | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 542422bd..63f798e3 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -773,8 +773,23 @@ impl P2pServer { tokio::spawn(async move { for hash in header.get_txs_hashes() { let contains = { // we don't lock one time because we may wait on p2p response - let mempool = zelf.blockchain.get_mempool().read().await; - mempool.contains_tx(hash) + let in_mempool = { + let mempool = zelf.blockchain.get_mempool().read().await; + mempool.contains_tx(hash) + }; + + if in_mempool { + true + } else { + let storage = zelf.blockchain.get_storage().read().await; + match storage.has_transaction(hash).await { + Ok(contains) => contains, + Err(e) => { + warn!("Error while checking if we have tx {} in storage: {}", hash, e); + false + } + } + } }; if !contains { // retrieve one by one to prevent acquiring the lock for nothing From 919302be8a3637ffdaff78b177e53243a4925350 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 11 Oct 2023 19:35:35 +0200 Subject: [PATCH 065/160] daemon: build block from header using stored tx --- xelis_daemon/src/core/blockchain.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 0f33ca24..2311f4e4 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1072,8 +1072,15 @@ impl Blockchain { pub async fn build_block_from_header(&self, 
header: Immutable) -> Result { let mut transactions: Vec> = Vec::with_capacity(header.get_txs_count()); let mempool = self.mempool.read().await; + let storage = self.storage.read().await; for hash in header.get_txs_hashes() { - let tx = mempool.get_tx(hash)?; // at this point, we don't want to lose/remove any tx, we clone it only + // at this point, we don't want to lose/remove any tx, we clone it only + let tx = if mempool.contains_tx(hash) { + mempool.get_tx(hash)? + } else { + storage.get_transaction(hash).await? + }; + transactions.push(Immutable::Arc(tx)); } let block = Block::new(header, transactions); From 9d9aa440f8bd1bb22f316806bc57bf0e126532ed Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 11 Oct 2023 22:22:50 +0200 Subject: [PATCH 066/160] daemon: better check/optimize block propagation on TXs not found --- xelis_daemon/src/p2p/mod.rs | 49 +++++++++++++++----- xelis_daemon/src/p2p/queue.rs | 4 +- xelis_daemon/src/p2p/tracker.rs | 79 +++++++++++++++++++++++++++++---- 3 files changed, 112 insertions(+), 20 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 63f798e3..406c4f7a 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -15,7 +15,7 @@ use xelis_common::{ block::{BlockHeader, Block}, utils::get_current_time, immutable::Immutable, account::VersionedNonce }; -use crate::{core::{blockchain::Blockchain, storage::Storage}, p2p::{chain_validator::ChainValidator, packet::{bootstrap_chain::{StepRequest, StepResponse, BootstrapChainResponse, MAX_ITEMS_PER_PAGE, BlockMetadata}, inventory::{NOTIFY_MAX_LEN, NotifyInventoryRequest, NotifyInventoryResponse}}}}; +use crate::{core::{blockchain::Blockchain, storage::Storage}, p2p::{chain_validator::ChainValidator, packet::{bootstrap_chain::{StepRequest, StepResponse, BootstrapChainResponse, MAX_ITEMS_PER_PAGE, BlockMetadata}, inventory::{NOTIFY_MAX_LEN, NotifyInventoryRequest, NotifyInventoryResponse}}, tracker::ResponseBlocker}}; use 
crate::core::error::BlockchainError; use crate::p2p::connection::ConnectionMessage; use crate::p2p::packet::chain::CommonPoint; @@ -771,33 +771,51 @@ impl P2pServer { let peer = Arc::clone(peer); // verify that we have all txs in local or ask peer to get missing txs tokio::spawn(async move { + let mut response_blockers: Vec = Vec::new(); for hash in header.get_txs_hashes() { let contains = { // we don't lock one time because we may wait on p2p response - let in_mempool = { + // Check in mempool first + let mut found = { let mempool = zelf.blockchain.get_mempool().read().await; mempool.contains_tx(hash) }; - if in_mempool { - true - } else { + // Check in ObjectTracker + if !found { + if let Some(response_blocker) = zelf.object_tracker.get_response_blocker_for_requested_object(hash).await { + response_blockers.push(response_blocker); + found = true; + } + } + + // Check on chain directly + if !found { let storage = zelf.blockchain.get_storage().read().await; - match storage.has_transaction(hash).await { + found = match storage.has_transaction(hash).await { Ok(contains) => contains, Err(e) => { warn!("Error while checking if we have tx {} in storage: {}", hash, e); false } - } + }; } + + found }; if !contains { // retrieve one by one to prevent acquiring the lock for nothing debug!("Requesting TX {} to {} for block {}", hash, peer, block_hash); - let response = match peer.request_blocking_object(ObjectRequest::Transaction(hash.clone())).await { - Ok(response) => response, + let (response, listener) = match zelf.object_tracker.request_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash.clone())) { + Ok(response) => match response.await { + Ok(Ok(response)) => response, + _ => { + error!("Error while handling response for TX {} from {}", hash, peer); + peer.increment_fail_count(); + return; + } + }, Err(e) => { - error!("Error while requesting TX {} to peer {}: {}", hash, peer, e); + error!("Error while requesting TX {} from {}: {}", hash, peer, e); 
peer.increment_fail_count(); return; } @@ -816,6 +834,17 @@ impl P2pServer { peer.increment_fail_count(); return; } + // if listener is dropped before it is ok, receivers will stop listening + listener.notify(); + } + } + + // Wait on all already requested txs + for mut blocker in response_blockers { + if let Err(e) = blocker.recv().await { + error!("Error while waiting on response blocker: {}", e); + peer.increment_fail_count(); + return; } } diff --git a/xelis_daemon/src/p2p/queue.rs b/xelis_daemon/src/p2p/queue.rs index b1d4f1e4..16d84e94 100644 --- a/xelis_daemon/src/p2p/queue.rs +++ b/xelis_daemon/src/p2p/queue.rs @@ -5,6 +5,7 @@ use crate::core::{blockchain::Blockchain, storage::Storage}; use super::{peer::Peer, packet::object::{ObjectRequest, OwnedObjectResponse}, tracker::SharedObjectTracker}; // TODO optimize to request the data but only handle in good order +// This allow to have a special queue for this and to not block/flood the other queue pub struct QueuedFetcher { sender: UnboundedSender<(Arc, ObjectRequest)> } @@ -19,7 +20,7 @@ impl QueuedFetcher { tokio::spawn(async move { while let Some((peer, request)) = receiver.recv().await { match tracker.fetch_object_from_peer(peer.clone(), request).await { - Ok(response) => { + Ok((response, listener)) => { if let OwnedObjectResponse::Transaction(tx, hash) = response { debug!("Adding {} to mempool from {}", hash, peer); if let Err(e) = blockchain.add_tx_to_mempool(tx, true).await { @@ -30,6 +31,7 @@ impl QueuedFetcher { error!("Received non tx object from peer"); peer.increment_fail_count(); } + listener.notify(); }, Err(e) => { error!("Error while fetching object from peer: {}", e); diff --git a/xelis_daemon/src/p2p/tracker.rs b/xelis_daemon/src/p2p/tracker.rs index 333758fd..d916231c 100644 --- a/xelis_daemon/src/p2p/tracker.rs +++ b/xelis_daemon/src/p2p/tracker.rs @@ -7,19 +7,74 @@ use log::{error, debug}; use super::{packet::{object::{ObjectRequest, OwnedObjectResponse}, Packet}, error::P2pError, 
peer::Peer}; -pub type WaiterResponse = oneshot::Receiver>; +pub type WaiterResponse = oneshot::Receiver>; pub type SharedObjectTracker = Arc; +pub type ResponseBlocker = tokio::sync::broadcast::Receiver<()>; + +pub struct Listener { + sender: Option> +} + +impl Listener { + pub fn new(sender: Option>) -> Self { + Self { + sender + } + } + + pub fn notify(self) { + if let Some(sender) = self.sender { + if sender.send(()).is_err() { + error!("Error while sending notification to ObjectTracker"); + } + } + } +} + +struct Request { + request: ObjectRequest, + sender: Option> +} + + +impl Request { + pub fn new(request: ObjectRequest) -> Self { + Self { + request, + sender: None + } + } + + pub fn get_hash(&self) -> &Hash { + self.request.get_hash() + } + + pub fn get_response_blocker(&mut self) -> ResponseBlocker { + if let Some(sender) = &self.sender { + sender.subscribe() + } else { + let (sender, receiver) = tokio::sync::broadcast::channel(1); + self.sender = Some(sender); + receiver + } + } + + pub fn to_listener(self) -> Listener { + Listener::new(self.sender) + } +} + // this ObjectTracker is a unique sender allows to create a queue system in one task only // currently used to fetch in order all txs propagated by the network pub struct ObjectTracker { request_sender: UnboundedSender, - response_sender: UnboundedSender>, - queue: RwLock> + response_sender: UnboundedSender>, + queue: RwLock> } enum Message { - Request(Arc, ObjectRequest, oneshot::Sender>), + Request(Arc, ObjectRequest, oneshot::Sender>), Exit } @@ -51,7 +106,7 @@ impl ObjectTracker { } } - async fn requester_loop(&self, mut request_receiver: UnboundedReceiver, mut response_receiver: UnboundedReceiver>) { + async fn requester_loop(&self, mut request_receiver: UnboundedReceiver, mut response_receiver: UnboundedReceiver>) { debug!("Starting requester loop..."); while let Some(msg) = request_receiver.recv().await { match msg { @@ -61,7 +116,7 @@ impl ObjectTracker { error!("Error while sending error 
response from ObjectTracker"); } } else { - let res: Result = timeout(Duration::from_millis(PEER_TIMEOUT_REQUEST_OBJECT), response_receiver.recv()).await + let res: Result<(OwnedObjectResponse, Listener), P2pError> = timeout(Duration::from_millis(PEER_TIMEOUT_REQUEST_OBJECT), response_receiver.recv()).await .map_err(|e| P2pError::AsyncTimeOut(e)) .and_then(|res| res.ok_or(P2pError::NoResponse)) .and_then(|res| res); @@ -81,6 +136,12 @@ impl ObjectTracker { queue.contains_key(object_hash) } + pub async fn get_response_blocker_for_requested_object(&self, object_hash: &Hash) -> Option { + let mut queue = self.queue.write().await; + let request = queue.get_mut(object_hash)?; + Some(request.get_response_blocker()) + } + pub async fn handle_object_response(&self, response: OwnedObjectResponse) -> Result<(), P2pError> { let request = { let mut queue = self.queue.write().await; @@ -98,7 +159,7 @@ impl ObjectTracker { return Err(P2pError::InvalidObjectHash(request.get_hash().clone(), response.get_hash().clone())); } - if self.response_sender.send(Ok(response)).is_err() { + if self.response_sender.send(Ok((response, request.to_listener()))).is_err() { error!("Error while sending object response in ObjectTracker"); } @@ -111,7 +172,7 @@ impl ObjectTracker { Ok(receiver) } - pub async fn fetch_object_from_peer(&self, peer: Arc, request: ObjectRequest) -> Result { + pub async fn fetch_object_from_peer(&self, peer: Arc, request: ObjectRequest) -> Result<(OwnedObjectResponse, Listener), P2pError> { Ok(self.request_object_from_peer(peer, request)?.await??) 
} @@ -125,7 +186,7 @@ impl ObjectTracker { return Err(P2pError::ObjectAlreadyRequested(request)) } - queue.insert(request.get_hash().clone(), request); + queue.insert(request.get_hash().clone(), Request::new(request)); } // send the packet to the Peer From c98761e2efe56927df1b94ac5d30b3a576f0138f Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 11 Oct 2023 22:24:28 +0200 Subject: [PATCH 067/160] fix API.md --- API.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/API.md b/API.md index 45212dd4..0ba5b39b 100644 --- a/API.md +++ b/API.md @@ -648,7 +648,7 @@ No parameters #### Count Accounts Counts the number of accounts saved on disk -##### Method `count_assets` +##### Method `count_accounts` ##### Parameters No parameters From 2c733d0211dcf0675d151f77ca9631605b91d3fd Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 11 Oct 2023 23:37:54 +0200 Subject: [PATCH 068/160] daemon: emission fixed, add block reward and supply in 'status' command --- xelis_common/src/config.rs | 2 +- xelis_daemon/src/core/blockchain.rs | 6 +++++- xelis_daemon/src/main.rs | 8 ++++++-- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/xelis_common/src/config.rs b/xelis_common/src/config.rs index dca6050d..4d4cc00f 100644 --- a/xelis_common/src/config.rs +++ b/xelis_common/src/config.rs @@ -28,7 +28,7 @@ pub const TESTNET_PREFIX_ADDRESS: &str = "xet"; // testnet prefix address pub const COIN_DECIMALS: u8 = 5; // 5 decimals numbers pub const COIN_VALUE: u64 = 10u64.pow(COIN_DECIMALS as u32); // 100 000 pub const MAX_SUPPLY: u64 = 18_400_000 * COIN_VALUE; // 18.4M full coin -pub const EMISSION_SPEED_FACTOR: u64 = 21; +pub const EMISSION_SPEED_FACTOR: u64 = 20; pub const GENESIS_BLOCK: &str = "0000000000000000000000000000000000000001872f3e0c02000000000000000000000000000000000000000000000000000000000000000000000000000000000000006c24cdc1c8ee8f028b8cafe7b79a66a0902f26d89dd54eeff80abcf251a9a3bd"; // Genesis block in hexadecimal format pub const DEV_ADDRESS: &str = 
"xel1qyqxcfxdc8ywarcz3wx2leahnfn2pyp0ymvfm42waluq408j2x5680g05xfx5"; // Dev address diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 2311f4e4..87c7734a 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -433,6 +433,10 @@ impl Blockchain { &self.network } + pub async fn get_supply(&self) -> Result { + self.storage.read().await.get_supply_at_topo_height(self.get_topo_height()).await + } + pub async fn get_mempool_size(&self) -> usize { self.mempool.read().await.size() } @@ -2038,5 +2042,5 @@ impl Blockchain { pub fn get_block_reward(supply: u64) -> u64 { let base_reward = (MAX_SUPPLY - supply) >> EMISSION_SPEED_FACTOR; - base_reward + base_reward * BLOCK_TIME / 180 } \ No newline at end of file diff --git a/xelis_daemon/src/main.rs b/xelis_daemon/src/main.rs index 93b376fe..c56ada37 100644 --- a/xelis_daemon/src/main.rs +++ b/xelis_daemon/src/main.rs @@ -9,12 +9,13 @@ use p2p::P2pServer; use rpc::{getwork_server::SharedGetWorkServer, rpc::get_block_response_for_hash}; use xelis_common::{ prompt::{Prompt, command::{CommandManager, CommandError, Command, CommandHandler}, PromptError, argument::{ArgumentManager, Arg, ArgType}, LogLevel, self, ShareablePrompt}, - config::{VERSION, BLOCK_TIME}, utils::{format_hashrate, set_network_to}, async_handler, crypto::{address::Address, hash::Hashable}, network::Network, transaction::Transaction, serializer::Serializer + config::{VERSION, BLOCK_TIME}, utils::{format_hashrate, set_network_to, format_coin}, async_handler, crypto::{address::Address, hash::Hashable}, network::Network, transaction::Transaction, serializer::Serializer }; use crate::core::{ blockchain::{Config, Blockchain}, storage::{Storage, SledStorage} }; +use core::blockchain::get_block_reward; use std::{sync::Arc, net::SocketAddr}; use std::time::Duration; use clap::Parser; @@ -350,6 +351,7 @@ async fn status(manager: &CommandManager>>, _: Arg let tips = 
storage.get_tips().await.context("Error while retrieving tips")?; let top_block_hash = blockchain.get_top_block_hash().await.context("Error while retrieving top block hash")?; let avg_block_time = blockchain.get_average_block_time_for_storage(&storage).await.context("Error while retrieving average block time")?; + let supply = blockchain.get_supply().await.context("Error while retrieving supply")?; manager.message(format!("Height: {}", height)); manager.message(format!("Stable Height: {}", stableheight)); @@ -357,7 +359,9 @@ async fn status(manager: &CommandManager>>, _: Arg manager.message(format!("Difficulty: {}", difficulty)); manager.message(format!("Top block hash: {}", top_block_hash)); manager.message(format!("Average Block Time: {:.2}s", avg_block_time as f64 / 1000f64)); - manager.message(format!("Target Block Time: {}s", BLOCK_TIME)); + manager.message(format!("Target Block Time: {:.2}s", BLOCK_TIME as f64)); + manager.message(format!("Current Supply: {} XELIS", format_coin(supply))); + manager.message(format!("Current Block Reward: {} XELIS", format_coin(get_block_reward(supply)))); manager.message(format!("Tips ({}):", tips.len())); for hash in tips { From 389a557057c2c119a9e304335aaf90a757298e91 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 11 Oct 2023 23:41:21 +0200 Subject: [PATCH 069/160] daemon: fix import --- xelis_daemon/src/main.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/xelis_daemon/src/main.rs b/xelis_daemon/src/main.rs index c56ada37..346bcf24 100644 --- a/xelis_daemon/src/main.rs +++ b/xelis_daemon/src/main.rs @@ -12,10 +12,9 @@ use xelis_common::{ config::{VERSION, BLOCK_TIME}, utils::{format_hashrate, set_network_to, format_coin}, async_handler, crypto::{address::Address, hash::Hashable}, network::Network, transaction::Transaction, serializer::Serializer }; use crate::core::{ - blockchain::{Config, Blockchain}, + blockchain::{Config, Blockchain, get_block_reward}, storage::{Storage, SledStorage} }; -use 
core::blockchain::get_block_reward; use std::{sync::Arc, net::SocketAddr}; use std::time::Duration; use clap::Parser; From cbe179e4db404898a2ca03fbd2844796cbd0beb8 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 12 Oct 2023 11:55:14 +0200 Subject: [PATCH 070/160] daemon: prevent any overflow in emission --- xelis_daemon/src/core/blockchain.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 87c7734a..0847a541 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -2041,6 +2041,12 @@ impl Blockchain { } pub fn get_block_reward(supply: u64) -> u64 { + // Prevent any overflow + if supply >= MAX_SUPPLY { + // Max supply reached, do we want to generate small fixed amount of coins? + return 0 + } + let base_reward = (MAX_SUPPLY - supply) >> EMISSION_SPEED_FACTOR; base_reward * BLOCK_TIME / 180 } \ No newline at end of file From 195422c59afaf94fe9bd6619c7a9dc3b0e0e649c Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 12 Oct 2023 23:56:31 +0200 Subject: [PATCH 071/160] common: optimized LookupTable for decryption usage --- xelis_common/src/crypto/elgamal/lookup.rs | 133 ++++++++++++++++++++++ xelis_common/src/crypto/elgamal/mod.rs | 2 + 2 files changed, 135 insertions(+) create mode 100644 xelis_common/src/crypto/elgamal/lookup.rs diff --git a/xelis_common/src/crypto/elgamal/lookup.rs b/xelis_common/src/crypto/elgamal/lookup.rs new file mode 100644 index 00000000..3d9a941a --- /dev/null +++ b/xelis_common/src/crypto/elgamal/lookup.rs @@ -0,0 +1,133 @@ +use curve25519_dalek::{ristretto::{RistrettoPoint, CompressedRistretto}, scalar::Scalar, constants::RISTRETTO_BASEPOINT_TABLE, traits::Identity}; + +// Number of elements present in the lookup table +// memory usage is following: +// 2^N * BYTES_PER_POINT +// current implementation: +// 2^24 * 5 = 80MiB +const TABLE_SIZE: usize = 2usize.pow(24); + +// number of bytes to use for the lookup table +// 
memory can be reduced by using less bytes +// but it will increase the number of HE operations +// Recommended values are 4 or 5 +const BYTES_PER_POINT: usize = 5; + +// elements per batch when generating the table +const ELEMENTS_PER_BATCH: usize = 256; + +// Simple structure to store the last B bytes of a compressed point +#[derive(PartialEq, Debug, Clone, Copy)] +struct C([u8; BYTES_PER_POINT]); + +// LookupTable generate a Precomputed table of TABLE_SIZE elements +// Each element is a B bytes value of the last B bytes of the compressed point +// The table is used to speed up the decryption +// And use the HE subtraction operation for values higher than the computed table +// It is generic and can be used by several wallets at same time +pub struct LookupTable { + table: Vec +} + +fn compressed_to_c(compressed: CompressedRistretto) -> C { + let bytes = compressed.to_bytes(); + let mut data: [u8; BYTES_PER_POINT] = [0; BYTES_PER_POINT]; + for i in 0..BYTES_PER_POINT { + data[i] = bytes[bytes.len() - i - 1]; + } + + C(data) +} + +fn double_point(point: &RistrettoPoint) -> C { + compressed_to_c((point + point).compress()) +} + +impl LookupTable { + pub fn new() -> Self { + let mut table = Vec::with_capacity(TABLE_SIZE); + let mut val = &Scalar::from(0u64) * &RISTRETTO_BASEPOINT_TABLE; + // Register 0 value + table.push(double_point(&val)); + + let one = &Scalar::from(1u64) * &RISTRETTO_BASEPOINT_TABLE; + for _ in 0..TABLE_SIZE / ELEMENTS_PER_BATCH { + let mut tmp = [RistrettoPoint::identity(); ELEMENTS_PER_BATCH]; + for i in 0..tmp.len() { + val += one; + tmp[i] = val; + } + + RistrettoPoint::double_and_compress_batch(&tmp) + .into_iter() + .map(compressed_to_c) + .for_each(|c| { + table.push(c); + }); + } + + LookupTable { + table + } + } + + // Decode a Ristretto Point to a u64 value by searching in the table + // which value is the closest to the given point and its index + // Even if the real value is not found, we use HE subtraction to reduce the + // value to 
search and try again + pub fn lookup(&self, value: &RistrettoPoint) -> u64 { + let table_size = self.table.len() as u64; + // amount to subtract to the value to search at each iteration + let sub = &Scalar::from(table_size) * &RISTRETTO_BASEPOINT_TABLE; + + let mut local_value = value.clone(); + let mut plaintext = 0; + loop { + let c = double_point(&local_value); + if let Some(part) = self.table.iter().position(|v| *v == c) { + let total = plaintext + part as u64; + if &Scalar::from(total) * &RISTRETTO_BASEPOINT_TABLE == *value { + return total; + } + } + + // Value to search is bigger than table, use HE to reduce it + local_value -= sub; + plaintext += table_size; + } + } +} + +impl Default for LookupTable { + fn default() -> Self { + Self::new() + } +} + +mod tests { + use lazy_static::lazy_static; + + lazy_static!( + static ref TABLE: super::LookupTable = super::LookupTable::default(); + ); + + fn _assert_value(value: u64) { + let m = &super::Scalar::from(value) * &super::RISTRETTO_BASEPOINT_TABLE; + assert_eq!(TABLE.lookup(&m), value); + } + + #[test] + fn test_lookup_find_0() { + _assert_value(0); + } + + #[test] + fn test_lookup_find_1_000_000() { + _assert_value(1_000_000); + } + + #[test] + fn test_lookup_find_100_000_00000() { + _assert_value(100_000_00000); + } +} \ No newline at end of file diff --git a/xelis_common/src/crypto/elgamal/mod.rs b/xelis_common/src/crypto/elgamal/mod.rs index d998d315..c512b5e0 100644 --- a/xelis_common/src/crypto/elgamal/mod.rs +++ b/xelis_common/src/crypto/elgamal/mod.rs @@ -1,9 +1,11 @@ mod ciphertext; mod key; +mod lookup; pub use self::{ ciphertext::Ciphertext, key::{PrivateKey, PublicKey}, + lookup::LookupTable }; mod tests { From eb129752a1fff6b91518217fbf62afa67d0a6997 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 13 Oct 2023 15:47:43 +0200 Subject: [PATCH 072/160] daemon: add mempool debug messages --- xelis_daemon/src/core/blockchain.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git 
a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 0847a541..6673f5e8 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1305,12 +1305,16 @@ impl Blockchain { cumulative_difficulty }; + debug!("Locking mempool write mode"); let mut mempool = self.mempool.write().await; + debug!("mempool write mode ok"); + let mut tips = storage.get_tips().await?; tips.insert(block_hash.clone()); for hash in block.get_tips() { tips.remove(hash); } + debug!("New tips: {}", tips.iter().map(|v| v.to_string()).collect::>().join(",")); let (base_hash, base_height) = self.find_common_base(storage, &tips).await?; let best_tip = self.find_best_tip(storage, &tips, &base_hash, base_height).await?; From 7f1e974cc3dec1d09e6dc821a685c956b1a64627 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 14 Oct 2023 15:39:32 +0200 Subject: [PATCH 073/160] wallet: support assets specific decimals --- README.md | 4 +- xelis_common/src/serializer/defaults.rs | 10 ++++ xelis_common/src/utils.rs | 10 +++- xelis_daemon/src/core/blockchain.rs | 4 +- xelis_daemon/src/main.rs | 30 +++++++--- xelis_wallet/src/entry.rs | 11 ++-- xelis_wallet/src/main.rs | 52 ++++++++++-------- xelis_wallet/src/network_handler.rs | 3 +- xelis_wallet/src/storage.rs | 73 ++++++++++++++++++++----- xelis_wallet/src/wallet.rs | 23 ++++---- 10 files changed, 151 insertions(+), 69 deletions(-) diff --git a/README.md b/README.md index 23116b29..badf9662 100644 --- a/README.md +++ b/README.md @@ -249,7 +249,9 @@ Password hashing algorithm used is Argon2id with a configuration of 15 MB and 16 Wallet implement a fully-encrypted storage system with following features: - Tree names are hashed with generated salt - Keys data are hashed with generated salt -- Values are encrypted using XChaCha20Poly1305 and a random newly generated Nonce each time its saved. +- Values are encrypted using XChaCha20Poly1305 and a random newly generated nonce each time its saved. 
+ +Exception for assets list which has its key encrypted to be able to retrieve them later. Hash algorithm used is Keccak-256 for keys / tree names. The random salt generated is a 64 bytes length. diff --git a/xelis_common/src/serializer/defaults.rs b/xelis_common/src/serializer/defaults.rs index a2a234fa..fdcce96d 100644 --- a/xelis_common/src/serializer/defaults.rs +++ b/xelis_common/src/serializer/defaults.rs @@ -46,6 +46,16 @@ impl Serializer for u64 { } } +// Implement Serializer for u8 +impl Serializer for u8 { + fn write(&self, writer: &mut Writer) { + writer.write_u8(*self); + } + + fn read(reader: &mut Reader) -> Result { + Ok(reader.read_u8()?) + } +} const MAX_ITEMS: usize = 1024; diff --git a/xelis_common/src/utils.rs b/xelis_common/src/utils.rs index eb9914f1..6027ae0e 100644 --- a/xelis_common/src/utils.rs +++ b/xelis_common/src/utils.rs @@ -1,7 +1,7 @@ use crate::block::Difficulty; use crate::network::Network; use crate::serializer::{Reader, ReaderError}; -use crate::config::{COIN_VALUE, FEE_PER_KB}; +use crate::config::{FEE_PER_KB, COIN_DECIMALS}; use std::sync::Mutex; use std::time::{SystemTime, UNIX_EPOCH}; use std::net::{SocketAddr, IpAddr, Ipv4Addr, Ipv6Addr}; @@ -29,8 +29,12 @@ pub fn get_current_timestamp() -> u128 { time.as_millis() } -pub fn format_coin(value: u64) -> String { - format!("{:.5}", value as f64 / COIN_VALUE as f64) +pub fn format_coin(value: u64, decimals: u8) -> String { + format!("{:.1$}", value as f64 / 10usize.pow(decimals as u32) as f64, decimals as usize) +} + +pub fn format_xelis(value: u64) -> String { + format_coin(value, COIN_DECIMALS) } // format a IP:port to byte format diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 6673f5e8..5abcdecc 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -6,7 +6,7 @@ use xelis_common::{ crypto::{key::PublicKey, hash::{Hashable, Hash, HASH_SIZE}}, difficulty::{check_difficulty, 
calculate_difficulty}, transaction::{Transaction, TransactionType, EXTRA_DATA_LIMIT_SIZE}, - utils::{get_current_timestamp, format_coin}, + utils::{get_current_timestamp, format_xelis}, block::{Block, BlockHeader, EXTRA_NONCE_SIZE, Difficulty}, immutable::Immutable, serializer::Serializer, account::VersionedBalance, api::{daemon::{NotifyEvent, BlockOrderedEvent, TransactionExecutedEvent, BlockType, StableHeightChangedEvent}, DataHash}, network::Network, asset::AssetData @@ -1062,7 +1062,7 @@ impl Blockchain { if let Err(e) = self.verify_transaction_with_hash(&storage, tx, hash, &mut balances, Some(&mut nonces), false).await { warn!("TX {} is not valid for mining: {}", hash, e); } else { - trace!("Selected {} (nonce: {}, fees: {}) for mining", hash, tx.get_nonce(), format_coin(fee)); + trace!("Selected {} (nonce: {}, fees: {}) for mining", hash, tx.get_nonce(), format_xelis(fee)); // TODO no clone block.txs_hashes.push(hash.as_ref().clone()); block_size += HASH_SIZE; // add the hash size diff --git a/xelis_daemon/src/main.rs b/xelis_daemon/src/main.rs index 346bcf24..8cad3cf5 100644 --- a/xelis_daemon/src/main.rs +++ b/xelis_daemon/src/main.rs @@ -9,7 +9,7 @@ use p2p::P2pServer; use rpc::{getwork_server::SharedGetWorkServer, rpc::get_block_response_for_hash}; use xelis_common::{ prompt::{Prompt, command::{CommandManager, CommandError, Command, CommandHandler}, PromptError, argument::{ArgumentManager, Arg, ArgType}, LogLevel, self, ShareablePrompt}, - config::{VERSION, BLOCK_TIME}, utils::{format_hashrate, set_network_to, format_coin}, async_handler, crypto::{address::Address, hash::Hashable}, network::Network, transaction::Transaction, serializer::Serializer + config::{VERSION, BLOCK_TIME, XELIS_ASSET}, utils::{format_hashrate, set_network_to, format_xelis, format_coin}, async_handler, crypto::{address::Address, hash::Hashable}, network::Network, transaction::Transaction, serializer::Serializer }; use crate::core::{ blockchain::{Config, Blockchain, 
get_block_reward}, @@ -86,7 +86,7 @@ async fn run_prompt(prompt: ShareablePrompt>>, blo // Register all our commands command_manager.add_command(Command::new("list_peers", "List all peers connected", CommandHandler::Async(async_handler!(list_peers)))); command_manager.add_command(Command::new("list_assets", "List all assets registered on chain", CommandHandler::Async(async_handler!(list_assets)))); - command_manager.add_command(Command::with_arguments("show_balance", "Show balance of an address", vec![Arg::new("address", ArgType::String), Arg::new("asset", ArgType::Hash)], vec![Arg::new("history", ArgType::Number)], CommandHandler::Async(async_handler!(show_balance)))); + command_manager.add_command(Command::with_arguments("show_balance", "Show balance of an address", vec![], vec![Arg::new("history", ArgType::Number)], CommandHandler::Async(async_handler!(show_balance)))); command_manager.add_command(Command::with_required_arguments("print_block", "Print block in json format", vec![Arg::new("hash", ArgType::Hash)], CommandHandler::Async(async_handler!(print_block)))); command_manager.add_command(Command::new("top_block", "Print top block", CommandHandler::Async(async_handler!(top_block)))); command_manager.add_command(Command::with_required_arguments("pop_blocks", "Delete last N blocks", vec![Arg::new("amount", ArgType::Number)], CommandHandler::Async(async_handler!(pop_blocks)))); @@ -224,8 +224,21 @@ async fn list_assets(manager: &CommandManager>>, _ } async fn show_balance(manager: &CommandManager>>, mut arguments: ArgumentManager) -> Result<(), CommandError> { - let address = arguments.get_value("address")?.to_string_value()?; - let asset = arguments.get_value("asset")?.to_hash()?; + let prompt = manager.get_prompt()?; + // read address + let str_address = prompt.read_input( + prompt::colorize_str(Color::Green, "Address: "), + false + ).await.context("Error while reading address")?; + let address = Address::from_string(&str_address).context("Invalid address")?; 
+ + // Read asset + let asset = prompt.read_hash( + prompt::colorize_str(Color::Green, "Asset (default XELIS): ") + ).await.ok(); + + let asset = asset.unwrap_or(XELIS_ASSET); + let mut history = if arguments.has_argument("history") { let value = arguments.get_value("history")?.to_number()?; if value == 0 { @@ -236,15 +249,14 @@ async fn show_balance(manager: &CommandManager>>, 1 }; - let address = Address::from_string(&address)?; let key = address.to_public_key(); - let blockchain = manager.get_data()?; let storage = blockchain.get_storage().read().await; + let asset_data = storage.get_asset_data(&asset).context("Error while retrieving asset data")?; let (mut topo, mut version) = storage.get_last_balance(&key, &asset).await.context("Error while retrieving last balance")?; loop { history -= 1; - manager.message(format!("Balance found at topoheight {}: {}", topo, version.get_balance())); + manager.message(format!("Balance found at topoheight {}: {}", topo, format_coin(version.get_balance(), asset_data.get_decimals()))); if history == 0 || topo == 0 { break; @@ -359,8 +371,8 @@ async fn status(manager: &CommandManager>>, _: Arg manager.message(format!("Top block hash: {}", top_block_hash)); manager.message(format!("Average Block Time: {:.2}s", avg_block_time as f64 / 1000f64)); manager.message(format!("Target Block Time: {:.2}s", BLOCK_TIME as f64)); - manager.message(format!("Current Supply: {} XELIS", format_coin(supply))); - manager.message(format!("Current Block Reward: {} XELIS", format_coin(get_block_reward(supply)))); + manager.message(format!("Current Supply: {} XELIS", format_xelis(supply))); + manager.message(format!("Current Block Reward: {} XELIS", format_xelis(get_block_reward(supply)))); manager.message(format!("Tips ({}):", tips.len())); for hash in tips { diff --git a/xelis_wallet/src/entry.rs b/xelis_wallet/src/entry.rs index a64b346c..e620aaa0 100644 --- a/xelis_wallet/src/entry.rs +++ b/xelis_wallet/src/entry.rs @@ -1,7 +1,7 @@ use std::fmt::{self, 
Display, Formatter}; use serde::Serialize; -use xelis_common::{crypto::{hash::Hash, key::PublicKey}, serializer::{Serializer, ReaderError, Reader, Writer}, transaction::EXTRA_DATA_LIMIT_SIZE, utils::format_coin}; +use xelis_common::{crypto::{hash::Hash, key::PublicKey}, serializer::{Serializer, ReaderError, Reader, Writer}, transaction::EXTRA_DATA_LIMIT_SIZE, utils::format_xelis}; #[derive(Serialize, Clone)] pub struct Transfer { @@ -242,21 +242,22 @@ impl Serializer for TransactionEntry { } } +// TODO display values with correct decimals from asset impl Display for TransactionEntry { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { let entry_str = match self.get_entry() { - EntryData::Coinbase(reward) => format!("Coinbase {} XELIS", format_coin(*reward)), + EntryData::Coinbase(reward) => format!("Coinbase {} XELIS", format_xelis(*reward)), EntryData::Burn { asset, amount } => format!("Burn {} of {}", amount, asset), EntryData::Incoming(sender, txs) => { if txs.len() == 1 { - format!("Received from {} {} {}", sender, format_coin(txs[0].amount), txs[0].asset) + format!("Received from {} {} {}", sender, format_xelis(txs[0].amount), txs[0].asset) } else { format!("Incoming from {} {} transfers", sender, txs.len()) } }, EntryData::Outgoing(txs) => { if txs.len() == 1 { - format!("Sent to {} {} {}", txs[0].key, format_coin(txs[0].amount), txs[0].asset) + format!("Sent to {} {} {}", txs[0].key, format_xelis(txs[0].amount), txs[0].asset) } else { format!("{} differents transfers", txs.len()) } @@ -264,7 +265,7 @@ impl Display for TransactionEntry { }; if let (Some(fee), Some(nonce)) = (self.fee, self.nonce) { - write!(f, "Hash {} at TopoHeight {}, Nonce {}, Fee: {}, Data: {}", self.hash, self.topoheight, nonce, format_coin(fee), entry_str) + write!(f, "Hash {} at TopoHeight {}, Nonce {}, Fee: {}, Data: {}", self.hash, self.topoheight, nonce, format_xelis(fee), entry_str) } else { // mostly coinbase write!(f, "Hash {} at TopoHeight {}: {}", self.hash, self.topoheight, 
entry_str) } diff --git a/xelis_wallet/src/main.rs b/xelis_wallet/src/main.rs index 3184371d..ff62cbf4 100644 --- a/xelis_wallet/src/main.rs +++ b/xelis_wallet/src/main.rs @@ -7,8 +7,8 @@ use log::{error, info}; use clap::Parser; use xelis_common::{config::{ DEFAULT_DAEMON_ADDRESS, - VERSION, XELIS_ASSET, COIN_VALUE -}, prompt::{Prompt, command::{CommandManager, Command, CommandHandler, CommandError}, argument::{Arg, ArgType, ArgumentManager}, LogLevel, self, ShareablePrompt, PromptError}, async_handler, crypto::{address::{Address, AddressType}, hash::Hashable}, transaction::{TransactionType, Transaction}, utils::{format_coin, set_network_to, get_network}, serializer::Serializer, network::Network, api::wallet::FeeBuilder}; + VERSION, XELIS_ASSET, COIN_DECIMALS +}, prompt::{Prompt, command::{CommandManager, Command, CommandHandler, CommandError}, argument::{Arg, ArgType, ArgumentManager}, LogLevel, self, ShareablePrompt, PromptError}, async_handler, crypto::{address::{Address, AddressType}, hash::Hashable}, transaction::{TransactionType, Transaction}, utils::{format_xelis, set_network_to, get_network, format_coin}, serializer::Serializer, network::Network, api::wallet::FeeBuilder}; use xelis_wallet::wallet::Wallet; #[cfg(feature = "api_server")] @@ -189,7 +189,7 @@ async fn setup_wallet_command_manager(wallet: Arc, prompt: ShareableProm command_manager.add_command(Command::with_optional_arguments("transfer", "Send asset to a specified address", vec![Arg::new("asset", ArgType::Hash)], CommandHandler::Async(async_handler!(transfer)))); command_manager.add_command(Command::with_required_arguments("burn", "Burn amount of asset", vec![Arg::new("asset", ArgType::Hash), Arg::new("amount", ArgType::Number)], CommandHandler::Async(async_handler!(burn)))); command_manager.add_command(Command::new("display_address", "Show your wallet address", CommandHandler::Async(async_handler!(display_address)))); - command_manager.add_command(Command::with_optional_arguments("balance", 
"Show your current balance", vec![Arg::new("asset", ArgType::Hash)], CommandHandler::Async(async_handler!(balance)))); + command_manager.add_command(Command::with_optional_arguments("balance", "List all non-zero balances or show the selected one", vec![Arg::new("asset", ArgType::Hash)], CommandHandler::Async(async_handler!(balance)))); command_manager.add_command(Command::with_optional_arguments("history", "Show all your transactions", vec![Arg::new("page", ArgType::Number)], CommandHandler::Async(async_handler!(history)))); command_manager.add_command(Command::with_optional_arguments("online_mode", "Set your wallet in online mode", vec![Arg::new("daemon_address", ArgType::String)], CommandHandler::Async(async_handler!(online_mode)))); command_manager.add_command(Command::new("offline_mode", "Set your wallet in offline mode", CommandHandler::Async(async_handler!(offline_mode)))); @@ -248,7 +248,7 @@ async fn prompt_message_builder(prompt: &Prompt>) -> Result>, _: ArgumentManager) -> let asset = asset.unwrap_or(XELIS_ASSET); - let max_balance = { + let (max_balance, decimals) = { let storage = wallet.get_storage().read().await; - storage.get_balance_for(&asset).unwrap_or(0) + let balance = storage.get_balance_for(&asset).unwrap_or(0); + let decimals = storage.get_asset_decimals(&asset).unwrap_or(COIN_DECIMALS); + (balance, decimals) }; // read amount let float_amount = prompt.read_f64( - prompt::colorize_string(Color::Green, &format!("Amount (max: {}): ", format_coin(max_balance))) + prompt::colorize_string(Color::Green, &format!("Amount (max: {}): ", format_coin(max_balance, decimals))) ).await.context("Error while reading amount")?; - // TODO digit token standard - let amount = (float_amount * COIN_VALUE as f64) as u64; - - manager.message(format!("Sending {} of {} to {}", float_amount, asset, address.to_string())); + let amount = (float_amount * 10u32.pow(decimals as u32) as f64) as u64; + manager.message(format!("Sending {} of {} to {}", format_coin(amount, 
decimals), asset, address.to_string())); if !prompt.ask_confirmation().await.context("Error while confirming action")? { manager.message("Transaction has been aborted"); @@ -486,12 +486,14 @@ async fn burn(manager: &CommandManager>, mut arguments: ArgumentMana let amount = arguments.get_value("amount")?.to_number()?; let asset = arguments.get_value("asset")?.to_hash()?; let wallet = manager.get_data()?; - manager.message(format!("Burning {} of {}", format_coin(amount), asset)); - let tx = { let storage = wallet.get_storage().read().await; + let decimals = storage.get_asset_decimals(&asset).unwrap_or(COIN_DECIMALS); + + manager.message(format!("Burning {} of {}", format_coin(amount, decimals), asset)); wallet.create_transaction(&storage, TransactionType::Burn { asset, amount }, FeeBuilder::Multiplier(1f64))? }; + broadcast_tx(wallet, manager, tx).await; Ok(()) } @@ -503,18 +505,24 @@ async fn display_address(manager: &CommandManager>, _: ArgumentManag Ok(()) } -// Show current balance for specified asset +// Show current balance for specified asset or list all non-zero balances async fn balance(manager: &CommandManager>, mut arguments: ArgumentManager) -> Result<(), CommandError> { - let asset = if arguments.has_argument("asset") { - arguments.get_value("asset")?.to_hash()? - } else { - XELIS_ASSET // default asset selected is XELIS - }; - let wallet = manager.get_data()?; let storage = wallet.get_storage().read().await; - let balance = storage.get_balance_for(&asset).unwrap_or(0); - manager.message(format!("Balance for asset {}: {}", asset, balance)); + + if arguments.has_argument("asset") { + let asset = arguments.get_value("asset")?.to_hash()?; + let balance = storage.get_balance_for(&asset).unwrap_or(0); + let decimals = storage.get_asset_decimals(&asset).unwrap_or(0); + manager.message(format!("Balance for asset {}: {}", asset, format_coin(balance, decimals))); + } else { + for (asset, decimals) in storage.get_assets_with_decimals()? 
{ + let balance = storage.get_balance_for(&asset).unwrap_or(0); + if balance > 0 { + manager.message(format!("Balance for asset {}: {}", asset, format_coin(balance, decimals))); + } + } + } Ok(()) } diff --git a/xelis_wallet/src/network_handler.rs b/xelis_wallet/src/network_handler.rs index 0d603f57..92311ad5 100644 --- a/xelis_wallet/src/network_handler.rs +++ b/xelis_wallet/src/network_handler.rs @@ -330,8 +330,7 @@ impl NetworkHandler { } } - // TODO save decimals - storage.add_asset(asset_data.get_asset())?; + storage.add_asset(asset_data.get_asset(), asset_data.get_data().get_decimals())?; } } diff --git a/xelis_wallet/src/storage.rs b/xelis_wallet/src/storage.rs index 0989717b..00651e1c 100644 --- a/xelis_wallet/src/storage.rs +++ b/xelis_wallet/src/storage.rs @@ -63,15 +63,26 @@ impl EncryptedStorage { Ok(storage) } - // load from disk, decrypt the value and deserialize it - fn load_from_disk(&self, tree: &Tree, key: &[u8]) -> Result { - let hashed_key = self.cipher.hash_key(key); - let data = tree.get(hashed_key)?.context(format!("load from disk: tree = {:?}, key = {}", tree.name(), String::from_utf8_lossy(key)))?; + // Key must be hashed or encrypted before calling this function + fn internal_load(&self, tree: &Tree, key: &[u8]) -> Result { + let data = tree.get(key)?.context(format!("load from disk: tree = {:?}, key = {}", tree.name(), String::from_utf8_lossy(key)))?; let bytes = self.cipher.decrypt_value(&data).context("Error while decrypting value from disk")?; let mut reader = Reader::new(&bytes); Ok(V::read(&mut reader).context("Error while de-serializing value from disk")?) 
} + // load from disk using a hashed key, decrypt the value and deserialize it + fn load_from_disk(&self, tree: &Tree, key: &[u8]) -> Result { + let hashed_key = self.cipher.hash_key(key); + self.internal_load(tree, &hashed_key) + } + + // load from disk using an encrypted key, decrypt the value and deserialize it + fn load_from_disk_with_encrypted_key(&self, tree: &Tree, key: &[u8]) -> Result { + let encrypted_key = self.cipher.encrypt_value(key)?; + self.internal_load(tree, &encrypted_key) + } + // hash key, encrypt data and then save to disk fn save_to_disk(&self, tree: &Tree, key: &[u8], value: &[u8]) -> Result<()> { let hashed_key = self.cipher.hash_key(key); @@ -79,6 +90,14 @@ impl EncryptedStorage { Ok(()) } + // Encrypt key, encrypt data and then save to disk + // We encrypt instead of hashing to be able to retrieve the key + fn save_to_disk_with_encrypted_key(&self, tree: &Tree, key: &[u8], value: &[u8]) -> Result<()> { + let encrypted_key = self.cipher.encrypt_value(key)?; + tree.insert(encrypted_key, self.cipher.encrypt_value(value)?)?; + Ok(()) + } + // hash key, encrypt data and then save to disk fn delete_from_disk(&self, tree: &Tree, key: &[u8]) -> Result<()> { let hashed_key = self.cipher.hash_key(key); @@ -86,10 +105,18 @@ impl EncryptedStorage { Ok(()) } + // Search if the data is present in the tree using hashed key fn contains_data(&self, tree: &Tree, key: &[u8]) -> Result { let hashed_key = self.cipher.hash_key(key); Ok(tree.contains_key(hashed_key)?) } + + // Encrypt instead of hash the key to recover it later + fn contains_encrypted_data(&self, tree: &Tree, key: &[u8]) -> Result { + let encrypted_key = self.cipher.encrypt_value(key)?; + Ok(tree.contains_key(encrypted_key)?) 
+ } + // this function is specific because we save the key in encrypted form (and not hashed as others) // returns all saved assets pub fn get_assets(&self) -> Result> { @@ -105,24 +132,37 @@ impl EncryptedStorage { Ok(assets) } - // we can't use a simple Tree#contains_key because of the encrypted form - // and we can't encrypt it first because of the random nonce generated each time - // so we currently read the whole tree - // TODO build a cache instead of read the whole tree each time - // will be necessary when we will have a lot of assets registered on chain + // Retrieve all assets with their decimals + pub fn get_assets_with_decimals(&self) -> Result> { + let mut assets = Vec::new(); + for res in self.assets.iter() { + let (key, value) = res?; + let asset = Hash::from_bytes(&self.cipher.decrypt_value(&key)?)?; + let decimals = u8::from_bytes(&self.cipher.decrypt_value(&value)?)?; + + assets.push((asset, decimals)); + } + + Ok(assets) + } + + // Check if the asset is already registered pub fn contains_asset(&self, asset: &Hash) -> Result { - Ok(self.get_assets()?.contains(asset)) + self.contains_encrypted_data(&self.assets, asset.as_bytes()) } - // save asset in encrypted form - pub fn add_asset(&mut self, asset: &Hash) -> Result<()> { + // save asset with its corresponding decimals + pub fn add_asset(&mut self, asset: &Hash, decimals: u8) -> Result<()> { if self.contains_asset(asset)? 
{ return Err(WalletError::AssetAlreadyRegistered.into()); } - let encrypted_asset = self.cipher.encrypt_value(asset.as_bytes())?; - self.assets.insert(encrypted_asset, &[])?; - Ok(()) + self.save_to_disk_with_encrypted_key(&self.assets, asset.as_bytes(), &decimals.to_be_bytes()) + } + + // Retrieve the stored decimals for this asset for better display + pub fn get_asset_decimals(&self, asset: &Hash) -> Result { + self.load_from_disk_with_encrypted_key(&self.assets, asset.as_bytes()) } pub fn get_balance_for(&self, asset: &Hash) -> Result { @@ -166,6 +206,9 @@ impl EncryptedStorage { Ok(()) } + // Save the transaction with its TX hash as key + // We hash the hash of the TX to use it as a key to not let anyone being able to see txs saved on disk + // with no access to the decrypted master key pub fn save_transaction(&mut self, hash: &Hash, transaction: &TransactionEntry) -> Result<()> { self.save_to_disk(&self.transactions, hash.as_bytes(), &transaction.to_bytes()) } diff --git a/xelis_wallet/src/wallet.rs b/xelis_wallet/src/wallet.rs index 466eb743..beb35018 100644 --- a/xelis_wallet/src/wallet.rs +++ b/xelis_wallet/src/wallet.rs @@ -5,11 +5,11 @@ use anyhow::{Error, Context}; use tokio::sync::{Mutex, RwLock}; use xelis_common::api::DataElement; use xelis_common::api::wallet::FeeBuilder; -use xelis_common::config::XELIS_ASSET; +use xelis_common::config::{XELIS_ASSET, COIN_DECIMALS}; use xelis_common::crypto::address::Address; use xelis_common::crypto::hash::Hash; use xelis_common::crypto::key::{KeyPair, PublicKey}; -use xelis_common::utils::format_coin; +use xelis_common::utils::{format_xelis, format_coin}; use xelis_common::network::Network; use xelis_common::serializer::{Serializer, Writer}; use xelis_common::transaction::{TransactionType, Transfer, Transaction, EXTRA_DATA_LIMIT_SIZE}; @@ -70,9 +70,9 @@ pub enum WalletError { InvalidSaltSize, #[error("Error while fetching password salt from DB")] NoSaltFound, - #[error("Your wallet contains only {} instead of {} 
for asset {}", format_coin(*_0), format_coin(*_1), _2)] - NotEnoughFunds(u64, u64, Hash), - #[error("Your wallet don't have enough funds to pay fees: expected {} but have only {}", format_coin(*_0), format_coin(*_1))] + #[error("Your wallet contains only {} instead of {} for asset {}", format_coin(*_0, *_2), format_coin(*_1, *_2), _3)] + NotEnoughFunds(u64, u64, u8, Hash), + #[error("Your wallet don't have enough funds to pay fees: expected {} but have only {}", format_xelis(*_0), format_xelis(*_1))] NotEnoughFundsForFee(u64, u64), #[error("Invalid address params")] InvalidAddressParams, @@ -94,7 +94,7 @@ pub enum WalletError { RPCServerNotRunning, #[error("RPC Server is already running")] RPCServerAlreadyRunning, - #[error("Invalid fees provided, minimum fees calculated: {}, provided: {}", format_coin(*_0), format_coin(*_1))] + #[error("Invalid fees provided, minimum fees calculated: {}, provided: {}", format_xelis(*_0), format_xelis(*_1))] InvalidFeeProvided(u64, u64), #[error("Wallet name cannot be empty")] EmptyName, @@ -102,7 +102,7 @@ pub enum WalletError { #[error("No handler available for this request")] NoHandlerAvailable, #[error(transparent)] - NetworkError(#[from] NetworkError) + NetworkError(#[from] NetworkError), } pub struct Wallet { @@ -348,7 +348,8 @@ impl Wallet { let balance = storage.get_balance_for(&asset).unwrap_or(0); // check if we have enough funds for this asset if amount > balance { - return Err(WalletError::NotEnoughFunds(balance, amount, asset).into()) + let decimals = storage.get_asset_decimals(&asset).unwrap_or(COIN_DECIMALS); + return Err(WalletError::NotEnoughFunds(balance, amount, decimals, asset).into()) } // include all extra data in the TX @@ -362,8 +363,9 @@ impl Wallet { // NOTE: We must be sure to have a different key each time if writer.total_write() > EXTRA_DATA_LIMIT_SIZE { - return Err(WalletError::InvalidAddressParams.into()) + return Err(WalletError::ExtraDataTooBig(EXTRA_DATA_LIMIT_SIZE, writer.total_write()).into()) } 
+ Some(writer.bytes()) } else { None @@ -390,7 +392,8 @@ impl Wallet { let asset: &Hash = *asset; let balance = storage.get_balance_for(asset).unwrap_or(0); if balance < *amount { - return Err(WalletError::NotEnoughFunds(balance, *amount, asset.clone()).into()) + let decimals = storage.get_asset_decimals(asset).unwrap_or(COIN_DECIMALS); + return Err(WalletError::NotEnoughFunds(balance, *amount, decimals, asset.clone()).into()) } } From 45347a8a2a3f77eea84672378b2bf51740d4fe1a Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 16 Oct 2023 22:49:23 +0200 Subject: [PATCH 074/160] wallet: better filter transactions, add query system for extra data --- xelis_common/src/api/mod.rs | 24 +++++++-- xelis_common/src/api/wallet.rs | 12 ++++- xelis_common/src/crypto/address.rs | 2 +- xelis_wallet/src/api/rpc.rs | 39 ++------------ xelis_wallet/src/entry.rs | 28 ++++------ xelis_wallet/src/network_handler.rs | 9 +++- xelis_wallet/src/storage.rs | 80 ++++++++++++++++++++++++++--- 7 files changed, 125 insertions(+), 69 deletions(-) diff --git a/xelis_common/src/api/mod.rs b/xelis_common/src/api/mod.rs index a8520ae1..2e20e9e2 100644 --- a/xelis_common/src/api/mod.rs +++ b/xelis_common/src/api/mod.rs @@ -7,7 +7,7 @@ pub mod wallet; pub mod daemon; // All types availables -#[derive(Serialize, Deserialize, Eq, PartialEq, Hash, Clone)] +#[derive(Serialize, Deserialize, Eq, PartialEq, Hash, Clone, Copy)] pub enum DataType { Bool, String, @@ -31,12 +31,20 @@ pub enum DataElement { } impl DataElement { - pub fn get_value(&self, name: String, data_type: DataType) -> Option<&DataValue> { + pub fn has_key(&self, key: &DataValue) -> bool { + let Self::Fields(fields) = &self else { + return false + }; + + fields.contains_key(key) + } + + pub fn get_value_by_key(&self, key: &DataValue, data_type: Option) -> Option<&DataValue> { let Self::Fields(data) = &self else { return None }; - let Self::Value(value) = data.get(&DataValue::String(name))? else { + let Self::Value(value) = data.get(key)? 
else { return None; }; @@ -44,12 +52,18 @@ impl DataElement { return None; }; - if unwrapped.kind() != data_type { - return None + if let Some(data_type) = data_type { + if unwrapped.kind() != data_type { + return None + } } value.as_ref() } + + pub fn get_value_by_string_key(&self, name: String, data_type: DataType) -> Option<&DataValue> { + self.get_value_by_key(&DataValue::String(name), Some(data_type)) + } } impl Serializer for DataElement { diff --git a/xelis_common/src/api/wallet.rs b/xelis_common/src/api/wallet.rs index 7bced265..1ed48888 100644 --- a/xelis_common/src/api/wallet.rs +++ b/xelis_common/src/api/wallet.rs @@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize}; use crate::{transaction::{TransactionType, Transaction}, crypto::{key::PublicKey, hash::Hash, address::Address}}; -use super::{DataHash, DataElement}; +use super::{DataHash, DataElement, DataValue, DataType}; #[derive(Serialize, Deserialize)] @@ -39,6 +39,16 @@ pub struct ListTransactionsParams { pub accept_coinbase: bool, #[serde(default = "default_filter_value")] pub accept_burn: bool, + // Filter by extra data + pub query: Option +} + +// Structure to allow for searching a precise key/value pair in extra data +// Value is nullable to allow for searching only by key also +#[derive(Serialize, Deserialize)] +pub enum QuerySearcher { + KeyValue { key: DataValue, value: Option }, + KeyType { key: DataValue, kind: DataType } } #[derive(Serialize, Deserialize)] diff --git a/xelis_common/src/crypto/address.rs b/xelis_common/src/crypto/address.rs index e241de54..aa87340a 100644 --- a/xelis_common/src/crypto/address.rs +++ b/xelis_common/src/crypto/address.rs @@ -66,7 +66,7 @@ impl<'a> Address<'a> { pub fn get_data(&self, name: String, data_type: DataType) -> Option<&DataValue> { match &self.addr_type { AddressType::Normal => None, - AddressType::Data(data) => data.get_value(name, data_type) + AddressType::Data(data) => data.get_value_by_string_key(name, data_type) } } diff --git 
a/xelis_wallet/src/api/rpc.rs b/xelis_wallet/src/api/rpc.rs index ecf63bdc..1744428a 100644 --- a/xelis_wallet/src/api/rpc.rs +++ b/xelis_wallet/src/api/rpc.rs @@ -4,7 +4,7 @@ use anyhow::Context; use log::info; use xelis_common::{rpc_server::{RPCHandler, InternalRpcError, parse_params}, config::{VERSION, XELIS_ASSET}, async_handler, api::{wallet::{BuildTransactionParams, FeeBuilder, TransactionResponse, ListTransactionsParams, GetAddressParams, GetBalanceParams, GetTransactionParams, SplitAddressParams, SplitAddressResult}, DataHash}, crypto::{hash::Hashable, address::AddressType}}; use serde_json::{Value, json}; -use crate::{wallet::{Wallet, WalletError}, entry::{EntryData, TransactionEntry}}; +use crate::{wallet::{Wallet, WalletError}, entry::TransactionEntry}; pub fn register_methods(handler: &mut RPCHandler>) { info!("Registering RPC methods..."); @@ -143,39 +143,6 @@ async fn build_transaction(wallet: Arc, body: Value) -> Result, body: Value) -> Result { let params: ListTransactionsParams = parse_params(body)?; let wallet = wallet.get_storage().read().await; - let txs = wallet.get_transactions()?; - let response: Vec> = txs.iter().filter(|e| { - if let Some(topoheight) = ¶ms.min_topoheight { - if e.get_topoheight() < *topoheight { - return false - } - } - - if let Some(topoheight) = ¶ms.max_topoheight { - if e.get_topoheight() > *topoheight { - return false - } - } - - match e.get_entry() { - EntryData::Coinbase(_) if params.accept_coinbase => true, - EntryData::Burn { .. 
} if params.accept_burn => true, - EntryData::Incoming(sender, _) if params.accept_incoming => match ¶ms.address { - Some(key) => *key == *sender, - None => true - }, - EntryData::Outgoing(txs) if params.accept_outgoing => match ¶ms.address { - Some(filter_key) => txs.iter().find(|tx| { - *tx.get_key() == *filter_key - }).is_some(), - None => true, - }, - _ => false - } - }).map(|e| { - let hash = e.get_hash(); - DataHash { hash: Cow::Borrowed(hash), data: Cow::Borrowed(e) } - }).collect(); - - Ok(json!(response)) + let txs = wallet.get_filtered_transactions(params.address.as_ref(), params.min_topoheight, params.max_topoheight, params.accept_incoming, params.accept_outgoing, params.accept_coinbase, params.accept_burn, params.query.as_ref())?; + Ok(json!(txs)) } \ No newline at end of file diff --git a/xelis_wallet/src/entry.rs b/xelis_wallet/src/entry.rs index e620aaa0..fcb91067 100644 --- a/xelis_wallet/src/entry.rs +++ b/xelis_wallet/src/entry.rs @@ -1,7 +1,7 @@ use std::fmt::{self, Display, Formatter}; use serde::Serialize; -use xelis_common::{crypto::{hash::Hash, key::PublicKey}, serializer::{Serializer, ReaderError, Reader, Writer}, transaction::EXTRA_DATA_LIMIT_SIZE, utils::format_xelis}; +use xelis_common::{crypto::{hash::Hash, key::PublicKey}, serializer::{Serializer, ReaderError, Reader, Writer}, utils::format_xelis, api::DataElement}; #[derive(Serialize, Clone)] pub struct Transfer { @@ -9,11 +9,11 @@ pub struct Transfer { asset: Hash, amount: u64, // raw (plain text) extra data if build by this wallet - extra_data: Option> + extra_data: Option } impl Transfer { - pub fn new(key: PublicKey, asset: Hash, amount: u64, extra_data: Option>) -> Self { + pub fn new(key: PublicKey, asset: Hash, amount: u64, extra_data: Option) -> Self { Self { key, asset, @@ -34,7 +34,7 @@ impl Transfer { self.amount } - pub fn get_extra_data(&self) -> &Option> { + pub fn get_extra_data(&self) -> &Option { &self.extra_data } } @@ -45,16 +45,7 @@ impl Serializer for Transfer { 
let asset = reader.read_hash()?; let amount = reader.read_u64()?; - let extra_data = if reader.read_bool()? { - let extra_data_size = reader.read_u16()? as usize; - if extra_data_size > EXTRA_DATA_LIMIT_SIZE { - return Err(ReaderError::InvalidSize) - } - - Some(reader.read_bytes(extra_data_size)?) - } else { - None - }; + let extra_data = Option::read(reader)?; Ok(Self { key, @@ -70,10 +61,7 @@ impl Serializer for Transfer { writer.write_u64(&self.amount); writer.write_bool(self.extra_data.is_some()); - if let Some(extra_data) = &self.extra_data { - writer.write_u16(extra_data.len() as u16); - writer.write_bytes(extra_data); - } + self.extra_data.write(writer); } } @@ -195,6 +183,10 @@ impl TransactionEntry { pub fn get_entry(&self) -> &EntryData { &self.entry } + + pub fn get_mut_entry(&mut self) -> &mut EntryData { + &mut self.entry + } } impl Serializer for TransactionEntry { diff --git a/xelis_wallet/src/network_handler.rs b/xelis_wallet/src/network_handler.rs index 92311ad5..981bfe1b 100644 --- a/xelis_wallet/src/network_handler.rs +++ b/xelis_wallet/src/network_handler.rs @@ -3,7 +3,7 @@ use thiserror::Error; use anyhow::Error; use log::{debug, error, info, warn}; use tokio::{task::JoinHandle, sync::Mutex, time::interval}; -use xelis_common::{crypto::{hash::Hash, address::Address}, block::Block, transaction::TransactionType, account::VersionedBalance, asset::AssetWithData}; +use xelis_common::{crypto::{hash::Hash, address::Address}, block::Block, transaction::TransactionType, account::VersionedBalance, asset::AssetWithData, serializer::Serializer}; use crate::{daemon_api::DaemonAPI, wallet::Wallet, entry::{EntryData, Transfer, TransactionEntry}}; @@ -185,7 +185,12 @@ impl NetworkHandler { let mut transfers: Vec = Vec::new(); for tx in txs { if is_owner || tx.to == *address.get_public_key() { - let transfer = Transfer::new(tx.to, tx.asset, tx.amount, tx.extra_data); + let extra_data = if let Some(bytes) = tx.extra_data { + Option::from_bytes(&bytes)? 
+ } else { + None + }; + let transfer = Transfer::new(tx.to, tx.asset, tx.amount, extra_data); transfers.push(transfer); } } diff --git a/xelis_wallet/src/storage.rs b/xelis_wallet/src/storage.rs index 00651e1c..0a602e8c 100644 --- a/xelis_wallet/src/storage.rs +++ b/xelis_wallet/src/storage.rs @@ -2,11 +2,13 @@ use std::collections::HashSet; use sled::{Tree, Db}; use xelis_common::{ - crypto::{hash::Hash, key::KeyPair}, - serializer::{Reader, Serializer}, network::Network, + crypto::{hash::Hash, key::{KeyPair, PublicKey}}, + serializer::{Reader, Serializer}, + network::Network, + api::wallet::QuerySearcher, }; use anyhow::{Context, Result, anyhow}; -use crate::{config::SALT_SIZE, cipher::Cipher, wallet::WalletError, entry::TransactionEntry}; +use crate::{config::SALT_SIZE, cipher::Cipher, wallet::WalletError, entry::{TransactionEntry, EntryData}}; // keys used to retrieve from storage const NONCE_KEY: &[u8] = b"NONCE"; @@ -179,13 +181,79 @@ impl EncryptedStorage { // read whole disk and returns all transactions pub fn get_transactions(&self) -> Result> { + self.get_filtered_transactions(None, None, None, true, true, true, true, None) + } + + // Filter when the data is deserialized to not load all transactions in memory + pub fn get_filtered_transactions(&self, address: Option<&PublicKey>, min_topoheight: Option, max_topoheight: Option, accept_incoming: bool, accept_outgoing: bool, accept_coinbase: bool, accept_burn: bool, key_value: Option<&QuerySearcher>) -> Result> { let mut transactions = Vec::new(); for res in self.transactions.iter() { let (_, value) = res?; let raw_value = &self.cipher.decrypt_value(&value)?; - let mut reader = Reader::new(raw_value); - let transaction = TransactionEntry::read(&mut reader)?; - transactions.push(transaction); + let mut e = TransactionEntry::from_bytes(raw_value)?; + if let Some(topoheight) = min_topoheight { + if e.get_topoheight() < topoheight { + continue; + } + } + + if let Some(topoheight) = &max_topoheight { + if 
e.get_topoheight() > *topoheight { + continue; + } + } + + let (save, mut transfers) = match e.get_mut_entry() { + EntryData::Coinbase(_) if accept_coinbase => (true, None), + EntryData::Burn { .. } if accept_burn => (true, None), + EntryData::Incoming(sender, transfers) if accept_incoming => match address { + Some(key) => (*key == *sender, Some(transfers)), + None => (true, None) + }, + EntryData::Outgoing(txs) if accept_outgoing => match address { + Some(filter_key) => (txs.iter().find(|tx| { + *tx.get_key() == *filter_key + }).is_some(), Some(txs)), + None => (true, None), + }, + _ => (false, None) + }; + + if save { + // Check if it has requested extra data + if let Some(key_value) = key_value { + if let Some(transfers) = transfers.as_mut() { + transfers.retain(|transfer| { + if let Some(element) = transfer.get_extra_data() { + match key_value { + QuerySearcher::KeyValue { key, value: Some(v) } => { + element.get_value_by_key(key, Some(v.kind())) == Some(v) + }, + QuerySearcher::KeyValue { key, value: None } => { + element.has_key(key) + }, + QuerySearcher::KeyType { key, kind } => { + element.get_value_by_key(key, Some(*kind)) != None + } + } + } else { + false + } + }); + } else { + // Coinbase, burn, etc will be discarded always with such filter + continue; + } + } + + // Keep only transactions entries that have one transfer at least + match transfers { + Some(transfers) if !transfers.is_empty() => { + transactions.push(e); + }, + _ => {} + } + } } Ok(transactions) From 0cda184523897dbbb5e624d70b175fd38c8a9fbf Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 16 Oct 2023 23:50:15 +0200 Subject: [PATCH 075/160] daemon: add rpc method 'get_account_history' --- xelis_common/src/api/daemon.rs | 38 ++++++++++-- xelis_daemon/src/rpc/rpc.rs | 110 ++++++++++++++++++++++++++++++++- 2 files changed, 139 insertions(+), 9 deletions(-) diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index 6cf8851e..cc826630 100644 --- 
a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -72,12 +72,6 @@ pub struct SubmitBlockParams { pub block_template: String, // hex: represent the BlockHeader (Block) } -#[derive(Serialize, Deserialize)] -pub struct GetMessagesParams<'a> { - pub address: Address<'a>, - pub from: Option> -} - #[derive(Serialize, Deserialize)] pub struct GetBalanceParams<'a> { pub address: Cow<'a, Address<'a>>, @@ -194,6 +188,38 @@ pub struct TransactionResponse<'a, T: Clone> { pub data: DataHash<'a, T> } +fn default_xelis_asset() -> Hash { + crate::config::XELIS_ASSET +} + +#[derive(Serialize, Deserialize)] +pub struct GetAccountHistoryParams<'a> { + pub address: Address<'a>, + #[serde(default = "default_xelis_asset")] + pub asset: Hash, + pub minimum_topoheight: Option, + pub maximum_topoheight: Option +} + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum AccountHistoryType { + Mining { reward: u64 }, + Burn { amount: u64 }, + // TODO delete those two fields with upcoming privacy layer + Outgoing { amount: u64 }, + Incoming { amount: u64 }, +} + +#[derive(Serialize, Deserialize)] +pub struct AccountHistoryEntry { + pub topoheight: u64, + pub hash: Hash, + #[serde(flatten)] + pub history_type: AccountHistoryType, + pub block_timestamp: u128 +} + #[derive(Serialize, Deserialize)] pub struct GetAssetParams { pub asset: Hash diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 40877a39..56da342e 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -29,13 +29,17 @@ use xelis_common::{ GetAssetsParams, GetAccountsParams, HasNonceResult, - HasNonceParams, GetAssetParams + HasNonceParams, + GetAssetParams, + GetAccountHistoryParams, + AccountHistoryEntry, + AccountHistoryType }, DataHash}, async_handler, serializer::Serializer, - transaction::Transaction, + transaction::{Transaction, TransactionType}, crypto::hash::Hash, - block::{BlockHeader, Block}, config::{BLOCK_TIME_MILLIS, 
VERSION}, immutable::Immutable, rpc_server::{RPCHandler, parse_params}, + block::{BlockHeader, Block}, config::{BLOCK_TIME_MILLIS, VERSION, XELIS_ASSET}, immutable::Immutable, rpc_server::{RPCHandler, parse_params}, }; use std::{sync::Arc, borrow::Cow}; use log::{info, debug}; @@ -154,6 +158,7 @@ pub fn register_methods(handler: &mut RPCHandler>> handler.register_method("get_blocks_range_by_topoheight", async_handler!(get_blocks_range_by_topoheight)); handler.register_method("get_blocks_range_by_height", async_handler!(get_blocks_range_by_height)); handler.register_method("get_transactions", async_handler!(get_transactions)); + handler.register_method("get_account_history", async_handler!(get_account_history)); handler.register_method("get_accounts", async_handler!(get_accounts)); } @@ -587,6 +592,105 @@ async fn get_transactions(blockchain: Arc>, body: Valu Ok(json!(transactions)) } +const MAX_HISTORY: usize = 20; +// retrieve all history changes for an account on an asset +async fn get_account_history(blockchain: Arc>, body: Value) -> Result { + let params: GetAccountHistoryParams = parse_params(body)?; + if params.address.is_mainnet() != blockchain.get_network().is_mainnet() { + return Err(InternalRpcError::AnyError(BlockchainError::InvalidNetwork.into())) + } + + let key = params.address.get_public_key(); + let minimum_topoheight = params.minimum_topoheight.unwrap_or(0); + let storage = blockchain.get_storage().read().await; + let (topo, versioned_balance) = if let Some(topo) = params.maximum_topoheight { + (topo, storage.get_balance_at_exact_topoheight(key, ¶ms.asset, topo).await.context(format!("Error while retrieving balance at topo height {topo}"))?) + } else { + storage.get_last_balance(key, ¶ms.asset).await.context("Error while retrieving last balance")? 
+ }; + + let mut history_count = 0; + let mut history = Vec::new(); + let mut version = Some((topo, versioned_balance)); + loop { + if let Some((topo, versioned_balance)) = version { + if topo < minimum_topoheight { + break; + } + + { + let (hash, block_header) = storage.get_block_header_at_topoheight(topo).await.context(format!("Error while retrieving block header at topo height {topo}"))?; + if params.asset == XELIS_ASSET && *block_header.get_miner() == *key { + let reward = storage.get_block_reward(&hash).context(format!("Error while retrieving reward at topo height {topo}"))?; + let history_type = AccountHistoryType::Mining { reward }; + history.push(AccountHistoryEntry { + topoheight: topo, + hash: hash.clone(), + history_type, + block_timestamp: block_header.get_timestamp() + }); + } + + for tx in block_header.get_transactions() { + let tx = storage.get_transaction(tx).await.context(format!("Error while retrieving transaction {tx} at topo height {topo}"))?; + let is_sender = *tx.get_owner() == *key; + match tx.get_data() { + TransactionType::Transfer(transfers) => { + for transfer in transfers { + if transfer.asset == params.asset { + if transfer.to == *key { + history.push(AccountHistoryEntry { + topoheight: topo, + hash: hash.clone(), + history_type: AccountHistoryType::Incoming { amount: transfer.amount }, + block_timestamp: block_header.get_timestamp() + }); + } + + if is_sender { + history.push(AccountHistoryEntry { + topoheight: topo, + hash: hash.clone(), + history_type: AccountHistoryType::Outgoing { amount: transfer.amount }, + block_timestamp: block_header.get_timestamp() + }); + } + } + } + } + TransactionType::Burn { asset, amount } => { + if *asset == params.asset { + if is_sender { + history.push(AccountHistoryEntry { + topoheight: topo, + hash: hash.clone(), + history_type: AccountHistoryType::Burn { amount: *amount }, + block_timestamp: block_header.get_timestamp() + }); + } + } + }, + _ => {} + } + } + } + + history_count += 1; + if 
history_count >= MAX_HISTORY { + break; + } + + if let Some(previous) = versioned_balance.get_previous_topoheight() { + version = Some((previous, storage.get_balance_at_exact_topoheight(key, ¶ms.asset, previous).await.context(format!("Error while retrieving previous balance at topo height {previous}"))?)); + } + } else { + break; + } + } + + Ok(json!(history)) +} + const MAX_ACCOUNTS: usize = 100; // retrieve all available accounts (each account got at least one interaction on chain) async fn get_accounts(blockchain: Arc>, body: Value) -> Result { From 56316c35e9e0057bb110a48334bd39cb9a0f5e76 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 16 Oct 2023 23:52:15 +0200 Subject: [PATCH 076/160] daemon: fix hash, give tx hash for transfers --- xelis_daemon/src/rpc/rpc.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 56da342e..5eb3e2da 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -631,8 +631,8 @@ async fn get_account_history(blockchain: Arc>, body: V }); } - for tx in block_header.get_transactions() { - let tx = storage.get_transaction(tx).await.context(format!("Error while retrieving transaction {tx} at topo height {topo}"))?; + for hash in block_header.get_transactions() { + let tx = storage.get_transaction(hash).await.context(format!("Error while retrieving transaction {hash} at topo height {topo}"))?; let is_sender = *tx.get_owner() == *key; match tx.get_data() { TransactionType::Transfer(transfers) => { From 3a59ba8d98acf655bb0a828f632152f41d3c5c56 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 17 Oct 2023 00:00:00 +0200 Subject: [PATCH 077/160] update API.md --- API.md | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/API.md b/API.md index 0ba5b39b..7d080af0 100644 --- a/API.md +++ b/API.md @@ -991,6 +991,57 @@ Fetch transactions by theirs hashes from database and mempool of daemon and keep } 
``` +#### Get Account History +Fetch up to 20 history events for an account on a specific asset + +##### Method `get_account_history` + +##### Parameters +| Name | Type | Required | Note | +|:------------------:|:-------:|:--------:|:---------------------------------:| +| address | Address | Required | Valid address registered on chain | +| asset | Hash | Optional | Asset to track | +| minimum_topoheight | Integer | Optional | minimum topoheight for history | +| maximum_topoheight | Integer | Optional | Maximum topoheight for history | + +##### Request +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "get_account_history", + "params": { + "address": "xet1qqqyvh9vgkcurtj2la0e4jspnfsq7vkaqm863zcfdnej92xg4mpzz3suf96k4" + } +} +``` + +##### Response +```json +{ + "id": 1, + "jsonrpc": "2.0", + "result": [ + { + "block_timestamp": 1697492997128, + "hash": "0000006f160df7d7aaa5d519f341136ae95fce1324280546070fecd8efe93751", + "mining": { + "reward": 117059 + }, + "topoheight": 485818 + }, + { + "block_timestamp": 1697492967931, + "hash": "0000001f62cc170349de2475a7f2338513f5340481c73af9e94c35aa2805d9cf", + "mining": { + "reward": 117059 + }, + "topoheight": 485817 + } + ] +} +``` + #### Submit Block Submit a block to the daemon From 36f39979a4639f5cc2d7498ac88b8f6b2c6c1d29 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 17 Oct 2023 00:09:03 +0200 Subject: [PATCH 078/160] daemon: add 'get_account_assets' rpc method --- xelis_common/src/api/daemon.rs | 5 +++++ xelis_daemon/src/rpc/rpc.rs | 16 +++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index cc826630..152fcd38 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -220,6 +220,11 @@ pub struct AccountHistoryEntry { pub block_timestamp: u128 } +#[derive(Serialize, Deserialize)] +pub struct GetAccountAssetsParams<'a> { + pub address: Address<'a>, +} + #[derive(Serialize, Deserialize)] pub struct 
GetAssetParams { pub asset: Hash diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 5eb3e2da..c2eac571 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -33,7 +33,8 @@ use xelis_common::{ GetAssetParams, GetAccountHistoryParams, AccountHistoryEntry, - AccountHistoryType + AccountHistoryType, + GetAccountAssetsParams }, DataHash}, async_handler, serializer::Serializer, @@ -159,6 +160,7 @@ pub fn register_methods(handler: &mut RPCHandler>> handler.register_method("get_blocks_range_by_height", async_handler!(get_blocks_range_by_height)); handler.register_method("get_transactions", async_handler!(get_transactions)); handler.register_method("get_account_history", async_handler!(get_account_history)); + handler.register_method("get_account_assets", async_handler!(get_account_assets)); handler.register_method("get_accounts", async_handler!(get_accounts)); } @@ -691,6 +693,18 @@ async fn get_account_history(blockchain: Arc>, body: V Ok(json!(history)) } +async fn get_account_assets(blockchain: Arc>, body: Value) -> Result { + let params: GetAccountAssetsParams = parse_params(body)?; + if params.address.is_mainnet() != blockchain.get_network().is_mainnet() { + return Err(InternalRpcError::AnyError(BlockchainError::InvalidNetwork.into())) + } + + let key = params.address.get_public_key(); + let storage = blockchain.get_storage().read().await; + let assets = storage.get_assets_for(key).await.context("Error while retrieving assets for account")?; + Ok(json!(assets)) +} + const MAX_ACCOUNTS: usize = 100; // retrieve all available accounts (each account got at least one interaction on chain) async fn get_accounts(blockchain: Arc>, body: Value) -> Result { From e0354d40203699bc750ac3d8f2e749be28827b38 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 17 Oct 2023 00:12:41 +0200 Subject: [PATCH 079/160] update API.md --- API.md | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/API.md b/API.md index 
7d080af0..d2fd187b 100644
--- a/API.md
+++ b/API.md
@@ -1042,6 +1042,32 @@ Fetch up to 20 history events for an account on a specific asset
 }
 ```
 
+#### Get Account Assets
+Retrieve all assets for an account
+
+##### Method `get_account_assets`
+
+##### Parameters
+| Name | Type | Required | Note |
+|:-------:|:-------:|:--------:|:---------------------------------:|
+| address | Address | Required | Valid address registered on chain |
+
+##### Request
+```json
+{"jsonrpc": "2.0", "id": 1, "method": "get_account_assets", "params": {"address": "xet1qqqyvh9vgkcurtj2la0e4jspnfsq7vkaqm863zcfdnej92xg4mpzz3suf96k4"}}
+```
+
+##### Response
+```json
+{
+    "id": 1,
+    "jsonrpc": "2.0",
+    "result": [
+        "0000000000000000000000000000000000000000000000000000000000000000"
+    ]
+}
+```
+
 #### Submit Block
 Submit a block to the daemon
 
From ab1ad6faf7eb0c2172ba8793926abb1a7b4447b3 Mon Sep 17 00:00:00 2001
From: Slixe
Date: Tue, 17 Oct 2023 12:26:43 +0200
Subject: [PATCH 080/160] all: reworks config/imports

---
 Cargo.lock | 2 +-
 xelis_common/Cargo.toml | 1 -
 xelis_common/src/config.rs | 57 ++-----------
 xelis_common/src/crypto/elgamal/lookup.rs | 9 +-
 xelis_common/src/difficulty.rs | 24 ------
 xelis_daemon/Cargo.toml | 1 +
 xelis_daemon/src/config.rs | 79 +++++++++++++++++
 xelis_daemon/src/core/blockchain.rs | 21 ++++-
 xelis_daemon/src/core/difficulty.rs | 25 ++++++
 xelis_daemon/src/core/mod.rs | 3 +-
 xelis_daemon/src/main.rs | 12 ++-
 xelis_daemon/src/p2p/chain_validator.rs | 11 ++-
 xelis_daemon/src/p2p/mod.rs | 84 ++++++++++++++-----
 .../src/p2p/packet/bootstrap_chain.rs | 9 +-
 xelis_daemon/src/p2p/packet/chain.rs | 3 +-
 xelis_daemon/src/p2p/packet/ping.rs | 6 +-
 xelis_daemon/src/p2p/peer.rs | 5 +-
 xelis_daemon/src/p2p/peer_list.rs | 8 +-
 xelis_daemon/src/p2p/tracker.rs | 3 +-
 xelis_daemon/src/rpc/getwork_server.rs | 15 +++-
 xelis_daemon/src/rpc/rpc.rs | 6 +-
 xelis_miner/src/main.rs | 11 +--
 xelis_wallet/src/config.rs | 3 +
 xelis_wallet/src/main.rs | 6 +-
 24 files changed, 264 insertions(+), 140 deletions(-)
 create mode 100644 xelis_daemon/src/config.rs
 create mode 100644 xelis_daemon/src/core/difficulty.rs

diff --git a/Cargo.lock 
b/Cargo.lock index 1146c6cd..a659dfa2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2656,7 +2656,6 @@ dependencies = [ "futures-util", "hex", "indexmap 2.0.0", - "lazy_static", "log", "num-bigint", "num-traits", @@ -2688,6 +2687,7 @@ dependencies = [ "human_bytes", "humantime", "indexmap 2.0.0", + "lazy_static", "log", "lru", "rand 0.8.5", diff --git a/xelis_common/Cargo.toml b/xelis_common/Cargo.toml index f32bb944..039440aa 100644 --- a/xelis_common/Cargo.toml +++ b/xelis_common/Cargo.toml @@ -23,7 +23,6 @@ fern = { version = "0.6", features = ["colored"] } chrono = "0.4.19" tokio = { version = "1.25", features = ["macros", "signal", "time", "sync"], optional = true } reqwest = { version = "0.11.10", default-features = false, features = ["json", "rustls"], optional = true } -lazy_static = "1.4.0" clap = { version = "3.1.18", features = ["derive"], optional = true } crossterm = "*" indexmap = { version = "2.0.0", features = ["serde"] } diff --git a/xelis_common/src/config.rs b/xelis_common/src/config.rs index 4d4cc00f..863b1ce8 100644 --- a/xelis_common/src/config.rs +++ b/xelis_common/src/config.rs @@ -1,57 +1,12 @@ -use lazy_static::lazy_static; - -use crate::{crypto::{hash::{Hash, Hashable}, key::PublicKey, address::Address}, serializer::Serializer, block::{BlockHeader, Difficulty}}; -pub const NETWORK_ID_SIZE: usize = 16; +use crate::crypto::hash::Hash; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); -pub const NETWORK_ID: [u8; NETWORK_ID_SIZE] = [0x73, 0x6c, 0x69, 0x78, 0x65, 0x5f, 0x78, 0x65, 0x6c, 0x69, 0x73, 0x5f, 0x62, 0x6c, 0x6f, 0x63]; -pub const SEED_NODES: [&str; 2] = ["74.208.251.149:2125", "217.160.96.80:2125"]; -pub const DEFAULT_P2P_BIND_ADDRESS: &str = "0.0.0.0:2125"; -pub const DEFAULT_RPC_BIND_ADDRESS: &str = "0.0.0.0:8080"; -pub const DEFAULT_CACHE_SIZE: usize = 1024; pub const XELIS_ASSET: Hash = Hash::zero(); -pub const SIDE_BLOCK_REWARD_PERCENT: u64 = 30; // only 30% of reward for side block -pub const BLOCK_TIME: u64 = 15; // Block 
Time in seconds -pub const BLOCK_TIME_MILLIS: u64 = BLOCK_TIME * 1000; // Block Time in milliseconds -pub const MINIMUM_DIFFICULTY: Difficulty = BLOCK_TIME_MILLIS as Difficulty * 10; -pub const GENESIS_BLOCK_DIFFICULTY: Difficulty = 1; -pub const MAX_BLOCK_SIZE: usize = (1024 * 1024) + (256 * 1024); // 1.25 MB -pub const FEE_PER_KB: u64 = 1000; // 0.01000 XLS per KB -pub const DEV_FEE_PERCENT: u64 = 5; // 5% per block going to dev address -pub const TIPS_LIMIT: usize = 3; // maximum 3 previous blocks -pub const STABLE_LIMIT: u64 = 8; // in how many height we consider the block stable -pub const PRUNE_SAFETY_LIMIT: u64 = STABLE_LIMIT * 10; // keep at least last N blocks until top topoheight -pub const TIMESTAMP_IN_FUTURE_LIMIT: u128 = 2 * 1000; // 2 seconds maximum in future - -pub const PREFIX_ADDRESS: &str = "xel"; // mainnet prefix address -pub const TESTNET_PREFIX_ADDRESS: &str = "xet"; // testnet prefix address +// 0.01000 XLS per KB +pub const FEE_PER_KB: u64 = 1000; pub const COIN_DECIMALS: u8 = 5; // 5 decimals numbers pub const COIN_VALUE: u64 = 10u64.pow(COIN_DECIMALS as u32); // 100 000 -pub const MAX_SUPPLY: u64 = 18_400_000 * COIN_VALUE; // 18.4M full coin -pub const EMISSION_SPEED_FACTOR: u64 = 20; - -pub const GENESIS_BLOCK: &str = "0000000000000000000000000000000000000001872f3e0c02000000000000000000000000000000000000000000000000000000000000000000000000000000000000006c24cdc1c8ee8f028b8cafe7b79a66a0902f26d89dd54eeff80abcf251a9a3bd"; // Genesis block in hexadecimal format -pub const DEV_ADDRESS: &str = "xel1qyqxcfxdc8ywarcz3wx2leahnfn2pyp0ymvfm42waluq408j2x5680g05xfx5"; // Dev address -pub const CHAIN_SYNC_TIMEOUT_SECS: u64 = 3; // wait maximum between each chain sync request to peers -pub const CHAIN_SYNC_DELAY: u64 = 5; // minimum X seconds between each chain sync request per peer -pub const CHAIN_SYNC_REQUEST_MAX_BLOCKS: usize = 64; // allows up to X blocks id (hash + height) sent for request -pub const CHAIN_SYNC_RESPONSE_MAX_BLOCKS: usize = 512; // 
allows up to X blocks hashes sent for response -pub const CHAIN_SYNC_TOP_BLOCKS: usize = 10; // send last 10 heights -pub const P2P_PING_DELAY: u64 = 10; // time between each ping -pub const P2P_PING_PEER_LIST_DELAY: u64 = 60 * 5; // time in seconds between each update of peerlist -pub const P2P_PING_PEER_LIST_LIMIT: usize = 16; // maximum number of addresses to be send -pub const P2P_DEFAULT_MAX_PEERS: usize = 32; // default number of maximum peers -pub const PEER_FAIL_TIME_RESET: u64 = 60 * 5; // number of seconds to reset the counter -pub const PEER_FAIL_LIMIT: u8 = 20; // number of fail to disconnect the peer -pub const PEER_TIMEOUT_REQUEST_OBJECT: u64 = 3000; // millis until we timeout -pub const PEER_TIMEOUT_BOOTSTRAP_STEP: u64 = 10000; // millis until we timeout -pub const P2P_EXTEND_PEERLIST_DELAY: u64 = 60; // time in seconds between each time we try to connect to a new peer - -// Wallet config -pub const DEFAULT_DAEMON_ADDRESS: &str = "http://127.0.0.1:8080"; - -lazy_static! { - pub static ref DEV_PUBLIC_KEY: PublicKey = Address::from_string(&DEV_ADDRESS.to_owned()).unwrap().to_public_key(); - pub static ref GENESIS_BLOCK_HASH: Hash = BlockHeader::from_hex(GENESIS_BLOCK.to_owned()).unwrap().hash(); -} \ No newline at end of file +// Addresses format +pub const PREFIX_ADDRESS: &str = "xel"; // mainnet prefix address +pub const TESTNET_PREFIX_ADDRESS: &str = "xet"; // testnet prefix address \ No newline at end of file diff --git a/xelis_common/src/crypto/elgamal/lookup.rs b/xelis_common/src/crypto/elgamal/lookup.rs index 3d9a941a..5996fd56 100644 --- a/xelis_common/src/crypto/elgamal/lookup.rs +++ b/xelis_common/src/crypto/elgamal/lookup.rs @@ -105,15 +105,10 @@ impl Default for LookupTable { } mod tests { - use lazy_static::lazy_static; - - lazy_static!( - static ref TABLE: super::LookupTable = super::LookupTable::default(); - ); - fn _assert_value(value: u64) { let m = &super::Scalar::from(value) * &super::RISTRETTO_BASEPOINT_TABLE; - 
assert_eq!(TABLE.lookup(&m), value); + let table = super::LookupTable::default(); + assert_eq!(table.lookup(&m), value); } #[test] diff --git a/xelis_common/src/difficulty.rs b/xelis_common/src/difficulty.rs index 4e8bc27f..a693eb4b 100644 --- a/xelis_common/src/difficulty.rs +++ b/xelis_common/src/difficulty.rs @@ -1,15 +1,8 @@ -use std::f64::consts::E; - use crate::block::Difficulty; -use crate::config::{MINIMUM_DIFFICULTY, BLOCK_TIME_MILLIS, STABLE_LIMIT}; use crate::crypto::hash::Hash; use num_bigint::{BigUint, ToBigUint}; use thiserror::Error; use num_traits::One; -use log::trace; - -const M: f64 = STABLE_LIMIT as f64; -const BLOCK_TIME: f64 = BLOCK_TIME_MILLIS as f64; #[derive(Error, Debug)] pub enum DifficultyError { @@ -41,21 +34,4 @@ pub fn difficulty_to_big(difficulty: Difficulty) -> Result BigUint { BigUint::from_bytes_be(hash.as_bytes()) -} - -pub fn calculate_difficulty(parent_timestamp: u128, new_timestamp: u128, previous_difficulty: Difficulty) -> Difficulty { - let mut solve_time = (new_timestamp - parent_timestamp) as f64; - if solve_time > BLOCK_TIME * 2f64 { - solve_time = BLOCK_TIME * 2f64; - } - - let easypart = (E.powf((1f64 - solve_time as f64 / BLOCK_TIME) / M) * 10000f64) as i64; - let diff = ((previous_difficulty as i64 * easypart) / 10000) as Difficulty; - trace!("Difficulty calculated, easypart: {}, previous diff: {}, diff: {}", easypart, previous_difficulty, diff); - - if diff < MINIMUM_DIFFICULTY { - return MINIMUM_DIFFICULTY - } - - diff } \ No newline at end of file diff --git a/xelis_daemon/Cargo.toml b/xelis_daemon/Cargo.toml index 04b1b373..f7bf9a65 100644 --- a/xelis_daemon/Cargo.toml +++ b/xelis_daemon/Cargo.toml @@ -19,6 +19,7 @@ async-recursion = "1" async-trait = "0.1.64" humantime = "2.1.0" human_bytes = "0.4.2" +lazy_static = "1.4.0" # Common dependencies tokio = { version = "1", features = ["rt-multi-thread", "io-util", "io-std", "time", "macros", "sync", "net"] } diff --git a/xelis_daemon/src/config.rs 
b/xelis_daemon/src/config.rs new file mode 100644 index 00000000..641802b2 --- /dev/null +++ b/xelis_daemon/src/config.rs @@ -0,0 +1,79 @@ +use lazy_static::lazy_static; +use xelis_common::{ + block::{Difficulty, BlockHeader}, + config::COIN_VALUE, + crypto::{ + key::PublicKey, address::Address, hash::{Hash, Hashable} + }, + serializer::Serializer +}; + +// In case of potential forks, have a unique network id to not connect to others compatible chains +pub const NETWORK_ID_SIZE: usize = 16; +pub const NETWORK_ID: [u8; NETWORK_ID_SIZE] = [0x73, 0x6c, 0x69, 0x78, 0x65, 0x5f, 0x78, 0x65, 0x6c, 0x69, 0x73, 0x5f, 0x62, 0x6c, 0x6f, 0x63]; +pub const SEED_NODES: [&str; 2] = ["74.208.251.149:2125", "217.160.96.80:2125"]; + +// bind addresses +pub const DEFAULT_P2P_BIND_ADDRESS: &str = "0.0.0.0:2125"; +pub const DEFAULT_RPC_BIND_ADDRESS: &str = "0.0.0.0:8080"; + +// Default cache size for storage DB +pub const DEFAULT_CACHE_SIZE: usize = 1024; + +// Block rules +// Block Time in seconds +pub const BLOCK_TIME: u64 = 15; +// Block Time in milliseconds +pub const BLOCK_TIME_MILLIS: u64 = BLOCK_TIME * 1000; +// Minimum difficulty (each difficulty point is in H/s) +// Current: 15 000 * 10 = 150 KH/s minimum +pub const MINIMUM_DIFFICULTY: Difficulty = BLOCK_TIME_MILLIS as Difficulty * 10; +pub const GENESIS_BLOCK_DIFFICULTY: Difficulty = 1; +// 1024 * 1024 + (256 * 1024) bytes = 1.25 MB maximum size per block with txs +pub const MAX_BLOCK_SIZE: usize = (1024 * 1024) + (256 * 1024); +// 2 seconds maximum in future (prevent any attack on reducing difficulty but keep margin for unsynced devices) +pub const TIMESTAMP_IN_FUTURE_LIMIT: u128 = 2 * 1000; + +// keep at least last N blocks until top topoheight when pruning the chain +pub const PRUNE_SAFETY_LIMIT: u64 = STABLE_LIMIT * 10; + +// BlockDAG rules +pub const TIPS_LIMIT: usize = 3; // maximum 3 previous blocks +pub const STABLE_LIMIT: u64 = 8; // in how many height we consider the block stable + +// Emission rules +pub const 
DEV_FEE_PERCENT: u64 = 5; // 5% per block going to dev address +pub const SIDE_BLOCK_REWARD_PERCENT: u64 = 30; // only 30% of reward for side block +pub const EMISSION_SPEED_FACTOR: u64 = 20; +pub const MAX_SUPPLY: u64 = 18_400_000 * COIN_VALUE; // 18.4M full coin + +// Genesis block to have the same starting point for every nodes +pub const GENESIS_BLOCK: &str = "0000000000000000000000000000000000000001872f3e0c02000000000000000000000000000000000000000000000000000000000000000000000000000000000000006c24cdc1c8ee8f028b8cafe7b79a66a0902f26d89dd54eeff80abcf251a9a3bd"; // Genesis block in hexadecimal format +// Developer address for paying dev fees until Smart Contracts integration +// (testnet/mainnet format is converted lazily later) +pub const DEV_ADDRESS: &str = "xel1qyqxcfxdc8ywarcz3wx2leahnfn2pyp0ymvfm42waluq408j2x5680g05xfx5"; + +// Chain sync config +pub const CHAIN_SYNC_TIMEOUT_SECS: u64 = 3; // wait maximum between each chain sync request to peers +pub const CHAIN_SYNC_DELAY: u64 = 5; // minimum X seconds between each chain sync request per peer +pub const CHAIN_SYNC_REQUEST_MAX_BLOCKS: usize = 64; // allows up to X blocks id (hash + height) sent for request +pub const CHAIN_SYNC_RESPONSE_MAX_BLOCKS: usize = 512; // allows up to X blocks hashes sent for response +pub const CHAIN_SYNC_TOP_BLOCKS: usize = 10; // send last 10 heights + +// P2p rules +pub const P2P_PING_DELAY: u64 = 10; // time between each ping +pub const P2P_PING_PEER_LIST_DELAY: u64 = 60 * 5; // time in seconds between each update of peerlist +pub const P2P_PING_PEER_LIST_LIMIT: usize = 16; // maximum number of addresses to be send +pub const P2P_DEFAULT_MAX_PEERS: usize = 32; // default number of maximum peers +pub const P2P_EXTEND_PEERLIST_DELAY: u64 = 60; // time in seconds between each time we try to connect to a new peer + +// Peer rules +pub const PEER_FAIL_TIME_RESET: u64 = 60 * 5; // number of seconds to reset the counter +pub const PEER_FAIL_LIMIT: u8 = 20; // number of fail to 
disconnect the peer +pub const PEER_TIMEOUT_REQUEST_OBJECT: u64 = 3000; // millis until we timeout +pub const PEER_TIMEOUT_BOOTSTRAP_STEP: u64 = 10000; // millis until we timeout + +lazy_static! { + pub static ref DEV_PUBLIC_KEY: PublicKey = Address::from_string(&DEV_ADDRESS.to_owned()).unwrap().to_public_key(); + pub static ref GENESIS_BLOCK_HASH: Hash = BlockHeader::from_hex(GENESIS_BLOCK.to_owned()).unwrap().hash(); +} \ No newline at end of file diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 5abcdecc..99586944 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -2,16 +2,31 @@ use anyhow::Error; use lru::LruCache; use serde_json::{Value, json}; use xelis_common::{ - config::{DEFAULT_P2P_BIND_ADDRESS, P2P_DEFAULT_MAX_PEERS, DEFAULT_RPC_BIND_ADDRESS, DEFAULT_CACHE_SIZE, MAX_BLOCK_SIZE, EMISSION_SPEED_FACTOR, MAX_SUPPLY, DEV_FEE_PERCENT, GENESIS_BLOCK, TIPS_LIMIT, TIMESTAMP_IN_FUTURE_LIMIT, STABLE_LIMIT, GENESIS_BLOCK_HASH, MINIMUM_DIFFICULTY, GENESIS_BLOCK_DIFFICULTY, XELIS_ASSET, SIDE_BLOCK_REWARD_PERCENT, DEV_PUBLIC_KEY, BLOCK_TIME, PRUNE_SAFETY_LIMIT, BLOCK_TIME_MILLIS, COIN_DECIMALS}, + config::{XELIS_ASSET, COIN_DECIMALS}, crypto::{key::PublicKey, hash::{Hashable, Hash, HASH_SIZE}}, - difficulty::{check_difficulty, calculate_difficulty}, + difficulty::check_difficulty, transaction::{Transaction, TransactionType, EXTRA_DATA_LIMIT_SIZE}, utils::{get_current_timestamp, format_xelis}, block::{Block, BlockHeader, EXTRA_NONCE_SIZE, Difficulty}, immutable::Immutable, serializer::Serializer, account::VersionedBalance, api::{daemon::{NotifyEvent, BlockOrderedEvent, TransactionExecutedEvent, BlockType, StableHeightChangedEvent}, DataHash}, network::Network, asset::AssetData }; -use crate::{p2p::P2pServer, rpc::{rpc::{get_block_response_for_hash, get_block_type_for_block}, DaemonRpcServer, SharedDaemonRpcServer}}; +use crate::{ + config::{ + DEFAULT_P2P_BIND_ADDRESS, 
P2P_DEFAULT_MAX_PEERS, DEFAULT_RPC_BIND_ADDRESS, DEFAULT_CACHE_SIZE, MAX_BLOCK_SIZE, + EMISSION_SPEED_FACTOR, MAX_SUPPLY, DEV_FEE_PERCENT, GENESIS_BLOCK, TIPS_LIMIT, TIMESTAMP_IN_FUTURE_LIMIT, + STABLE_LIMIT, GENESIS_BLOCK_HASH, MINIMUM_DIFFICULTY, GENESIS_BLOCK_DIFFICULTY, SIDE_BLOCK_REWARD_PERCENT, + DEV_PUBLIC_KEY, BLOCK_TIME, PRUNE_SAFETY_LIMIT, BLOCK_TIME_MILLIS, + }, + core::difficulty::calculate_difficulty, + p2p::P2pServer, + rpc::{ + rpc::{ + get_block_response_for_hash, get_block_type_for_block + }, + DaemonRpcServer, SharedDaemonRpcServer + } +}; use super::storage::{Storage, DifficultyProvider}; use std::{sync::atomic::{Ordering, AtomicU64}, collections::hash_map::Entry, time::{Duration, Instant}, borrow::Cow}; use std::collections::{HashMap, HashSet}; diff --git a/xelis_daemon/src/core/difficulty.rs b/xelis_daemon/src/core/difficulty.rs new file mode 100644 index 00000000..5578b9a3 --- /dev/null +++ b/xelis_daemon/src/core/difficulty.rs @@ -0,0 +1,25 @@ +use std::f64::consts::E; + +use log::trace; +use xelis_common::block::Difficulty; +use crate::config::{STABLE_LIMIT, BLOCK_TIME_MILLIS, MINIMUM_DIFFICULTY}; + +const M: f64 = STABLE_LIMIT as f64; +const BLOCK_TIME: f64 = BLOCK_TIME_MILLIS as f64; + +pub fn calculate_difficulty(parent_timestamp: u128, new_timestamp: u128, previous_difficulty: Difficulty) -> Difficulty { + let mut solve_time = (new_timestamp - parent_timestamp) as f64; + if solve_time > BLOCK_TIME * 2f64 { + solve_time = BLOCK_TIME * 2f64; + } + + let easypart = (E.powf((1f64 - solve_time as f64 / BLOCK_TIME) / M) * 10000f64) as i64; + let diff = ((previous_difficulty as i64 * easypart) / 10000) as Difficulty; + trace!("Difficulty calculated, easypart: {}, previous diff: {}, diff: {}", easypart, previous_difficulty, diff); + + if diff < MINIMUM_DIFFICULTY { + return MINIMUM_DIFFICULTY + } + + diff +} \ No newline at end of file diff --git a/xelis_daemon/src/core/mod.rs b/xelis_daemon/src/core/mod.rs index 37888277..e523e3e7 100644 --- 
a/xelis_daemon/src/core/mod.rs +++ b/xelis_daemon/src/core/mod.rs @@ -2,4 +2,5 @@ pub mod blockchain; pub mod mempool; pub mod error; pub mod blockdag; -pub mod storage; \ No newline at end of file +pub mod storage; +pub mod difficulty; \ No newline at end of file diff --git a/xelis_daemon/src/main.rs b/xelis_daemon/src/main.rs index 8cad3cf5..ddd67f37 100644 --- a/xelis_daemon/src/main.rs +++ b/xelis_daemon/src/main.rs @@ -1,6 +1,7 @@ pub mod rpc; pub mod p2p; pub mod core; +pub mod config; use fern::colors::Color; use humantime::format_duration; @@ -9,11 +10,14 @@ use p2p::P2pServer; use rpc::{getwork_server::SharedGetWorkServer, rpc::get_block_response_for_hash}; use xelis_common::{ prompt::{Prompt, command::{CommandManager, CommandError, Command, CommandHandler}, PromptError, argument::{ArgumentManager, Arg, ArgType}, LogLevel, self, ShareablePrompt}, - config::{VERSION, BLOCK_TIME, XELIS_ASSET}, utils::{format_hashrate, set_network_to, format_xelis, format_coin}, async_handler, crypto::{address::Address, hash::Hashable}, network::Network, transaction::Transaction, serializer::Serializer + config::{VERSION, XELIS_ASSET}, utils::{format_hashrate, set_network_to, format_xelis, format_coin}, async_handler, crypto::{address::Address, hash::Hashable}, network::Network, transaction::Transaction, serializer::Serializer }; -use crate::core::{ - blockchain::{Config, Blockchain, get_block_reward}, - storage::{Storage, SledStorage} +use crate::{ + core::{ + blockchain::{Config, Blockchain, get_block_reward}, + storage::{Storage, SledStorage} + }, + config::BLOCK_TIME }; use std::{sync::Arc, net::SocketAddr}; use std::time::Duration; diff --git a/xelis_daemon/src/p2p/chain_validator.rs b/xelis_daemon/src/p2p/chain_validator.rs index b46313b7..6f91ea8d 100644 --- a/xelis_daemon/src/p2p/chain_validator.rs +++ b/xelis_daemon/src/p2p/chain_validator.rs @@ -1,7 +1,14 @@ use std::{collections::{HashMap, HashSet}, sync::Arc}; use async_trait::async_trait; -use 
xelis_common::{crypto::hash::Hash, block::{BlockHeader, Difficulty}, config::TIPS_LIMIT}; -use crate::core::{error::BlockchainError, blockchain::Blockchain, storage::{DifficultyProvider, Storage}}; +use xelis_common::{crypto::hash::Hash, block::{BlockHeader, Difficulty}}; +use crate::{ + core::{ + error::BlockchainError, + blockchain::Blockchain, + storage::{DifficultyProvider, Storage} + }, + config::TIPS_LIMIT +}; use log::{error, trace}; struct Data { diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 406c4f7a..c66eacea 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -9,34 +9,74 @@ mod queue; use indexmap::IndexSet; use xelis_common::{ - config::{VERSION, NETWORK_ID, SEED_NODES, MAX_BLOCK_SIZE, CHAIN_SYNC_DELAY, P2P_PING_DELAY, CHAIN_SYNC_REQUEST_MAX_BLOCKS, P2P_PING_PEER_LIST_DELAY, P2P_PING_PEER_LIST_LIMIT, STABLE_LIMIT, PEER_FAIL_LIMIT, CHAIN_SYNC_RESPONSE_MAX_BLOCKS, CHAIN_SYNC_TOP_BLOCKS, GENESIS_BLOCK_HASH, PRUNE_SAFETY_LIMIT, CHAIN_SYNC_TIMEOUT_SECS, P2P_EXTEND_PEERLIST_DELAY}, + config::VERSION, serializer::Serializer, crypto::hash::{Hashable, Hash}, block::{BlockHeader, Block}, utils::get_current_time, immutable::Immutable, account::VersionedNonce }; -use crate::{core::{blockchain::Blockchain, storage::Storage}, p2p::{chain_validator::ChainValidator, packet::{bootstrap_chain::{StepRequest, StepResponse, BootstrapChainResponse, MAX_ITEMS_PER_PAGE, BlockMetadata}, inventory::{NOTIFY_MAX_LEN, NotifyInventoryRequest, NotifyInventoryResponse}}, tracker::ResponseBlocker}}; -use crate::core::error::BlockchainError; -use crate::p2p::connection::ConnectionMessage; -use crate::p2p::packet::chain::CommonPoint; -use self::{packet::chain::{BlockId, ChainRequest, ChainResponse}, tracker::{ObjectTracker, SharedObjectTracker}, queue::QueuedFetcher}; -use self::packet::object::{ObjectRequest, ObjectResponse, OwnedObjectResponse}; -use self::peer_list::{SharedPeerList, PeerList}; -use self::connection::{State, 
Connection}; -use self::packet::handshake::Handshake; -use self::packet::ping::Ping; -use self::error::P2pError; -use self::packet::{Packet, PacketWrapper}; -use self::peer::Peer; -use tokio::{net::{TcpListener, TcpStream}, sync::mpsc::{self, UnboundedSender, UnboundedReceiver}, select, task::JoinHandle}; +use crate::{ + core::{ + blockchain::Blockchain, + storage::Storage, + error::BlockchainError + }, + p2p::{ + chain_validator::ChainValidator, + packet::{ + bootstrap_chain::{ + StepRequest, StepResponse, BootstrapChainResponse, MAX_ITEMS_PER_PAGE, BlockMetadata + }, + inventory::{ + NOTIFY_MAX_LEN, NotifyInventoryRequest, NotifyInventoryResponse + }, + chain::CommonPoint + }, + tracker::ResponseBlocker, + connection::ConnectionMessage, + }, + config::{ + NETWORK_ID, SEED_NODES, MAX_BLOCK_SIZE, CHAIN_SYNC_DELAY, P2P_PING_DELAY, CHAIN_SYNC_REQUEST_MAX_BLOCKS, + P2P_PING_PEER_LIST_DELAY, P2P_PING_PEER_LIST_LIMIT, STABLE_LIMIT, PEER_FAIL_LIMIT, + CHAIN_SYNC_RESPONSE_MAX_BLOCKS, CHAIN_SYNC_TOP_BLOCKS, GENESIS_BLOCK_HASH, PRUNE_SAFETY_LIMIT, + CHAIN_SYNC_TIMEOUT_SECS, P2P_EXTEND_PEERLIST_DELAY + } +}; +use self::{ + packet::{ + chain::{BlockId, ChainRequest, ChainResponse}, + object::{ObjectRequest, ObjectResponse, OwnedObjectResponse}, + handshake::Handshake, + ping::Ping, + {Packet, PacketWrapper} + }, + peer::Peer, + tracker::{ObjectTracker, SharedObjectTracker}, + queue::QueuedFetcher, + peer_list::{SharedPeerList, PeerList}, + connection::{State, Connection}, + error::P2pError +}; +use tokio::{ + net::{TcpListener, TcpStream}, + sync::mpsc::{self, UnboundedSender, UnboundedReceiver}, + select, + task::JoinHandle, + io::AsyncWriteExt, + time::{interval, timeout, sleep} +}; use log::{info, warn, error, debug, trace}; -use tokio::io::AsyncWriteExt; -use tokio::time::{interval, timeout, sleep}; -use std::{borrow::Cow, sync::atomic::{AtomicBool, Ordering, AtomicU64}, collections::HashSet}; -use std::convert::TryInto; -use std::net::SocketAddr; -use 
std::time::Duration; -use std::sync::Arc; +use std::{ + borrow::Cow, + sync::{ + Arc, + atomic::{AtomicBool, Ordering, AtomicU64} + }, + collections::HashSet, + convert::TryInto, + net::SocketAddr, + time::Duration, +}; use bytes::Bytes; use rand::Rng; diff --git a/xelis_daemon/src/p2p/packet/bootstrap_chain.rs b/xelis_daemon/src/p2p/packet/bootstrap_chain.rs index 26de2dd5..fcfbd055 100644 --- a/xelis_daemon/src/p2p/packet/bootstrap_chain.rs +++ b/xelis_daemon/src/p2p/packet/bootstrap_chain.rs @@ -1,9 +1,14 @@ use std::borrow::Cow; use indexmap::IndexSet; use log::debug; -use xelis_common::{crypto::{hash::Hash, key::PublicKey}, serializer::{Serializer, ReaderError, Reader, Writer}, block::Difficulty, config::CHAIN_SYNC_REQUEST_MAX_BLOCKS, asset::AssetWithData}; - +use xelis_common::{ + crypto::{hash::Hash, key::PublicKey}, + serializer::{Serializer, ReaderError, Reader, Writer}, + block::Difficulty, + asset::AssetWithData +}; use super::chain::{BlockId, CommonPoint}; +use crate::config::CHAIN_SYNC_REQUEST_MAX_BLOCKS; // this file implements the protocol for the fast sync (bootstrapped chain) // You will have to request through StepRequest::FetchAssets all the registered assets diff --git a/xelis_daemon/src/p2p/packet/chain.rs b/xelis_daemon/src/p2p/packet/chain.rs index df74e67f..9c3d1d98 100644 --- a/xelis_daemon/src/p2p/packet/chain.rs +++ b/xelis_daemon/src/p2p/packet/chain.rs @@ -6,8 +6,9 @@ use xelis_common::{ Writer, ReaderError, Reader - }, config::{CHAIN_SYNC_REQUEST_MAX_BLOCKS, CHAIN_SYNC_RESPONSE_MAX_BLOCKS, CHAIN_SYNC_TOP_BLOCKS, TIPS_LIMIT} + }, }; +use crate::config::{CHAIN_SYNC_REQUEST_MAX_BLOCKS, CHAIN_SYNC_RESPONSE_MAX_BLOCKS, CHAIN_SYNC_TOP_BLOCKS, TIPS_LIMIT}; #[derive(Clone, Debug)] pub struct BlockId { diff --git a/xelis_daemon/src/p2p/packet/ping.rs b/xelis_daemon/src/p2p/packet/ping.rs index 2728f09a..b5d906b5 100644 --- a/xelis_daemon/src/p2p/packet/ping.rs +++ b/xelis_daemon/src/p2p/packet/ping.rs @@ -1,5 +1,4 @@ use xelis_common::{ - 
config::P2P_PING_PEER_LIST_LIMIT, crypto::hash::Hash, serializer::{ Writer, @@ -12,7 +11,10 @@ use xelis_common::{ ip_from_bytes }, block::Difficulty }; -use crate::p2p::{peer::Peer, error::P2pError}; +use crate::{ + p2p::{peer::Peer, error::P2pError}, + config::P2P_PING_PEER_LIST_LIMIT +}; use std::{ fmt::Display, borrow::Cow, diff --git a/xelis_daemon/src/p2p/peer.rs b/xelis_daemon/src/p2p/peer.rs index 23d77c1d..89afb38f 100644 --- a/xelis_daemon/src/p2p/peer.rs +++ b/xelis_daemon/src/p2p/peer.rs @@ -1,9 +1,10 @@ use lru::LruCache; -use xelis_common::config::{PEER_FAIL_TIME_RESET, STABLE_LIMIT, TIPS_LIMIT, PEER_TIMEOUT_BOOTSTRAP_STEP}; +use crate::config::{ + PEER_FAIL_TIME_RESET, STABLE_LIMIT, TIPS_LIMIT, PEER_TIMEOUT_BOOTSTRAP_STEP, PEER_TIMEOUT_REQUEST_OBJECT +}; use xelis_common::utils::get_current_time; use xelis_common::{ crypto::hash::Hash, - config::PEER_TIMEOUT_REQUEST_OBJECT, serializer::Serializer }; use super::packet::bootstrap_chain::{StepRequest, BootstrapChainRequest, StepResponse}; diff --git a/xelis_daemon/src/p2p/peer_list.rs b/xelis_daemon/src/p2p/peer_list.rs index 7715e480..8043213e 100644 --- a/xelis_daemon/src/p2p/peer_list.rs +++ b/xelis_daemon/src/p2p/peer_list.rs @@ -1,10 +1,12 @@ -use crate::p2p::packet::peer_disconnected::PacketPeerDisconnected; - +use crate::{ + p2p::packet::peer_disconnected::PacketPeerDisconnected, + config::{P2P_EXTEND_PEERLIST_DELAY, PEER_FAIL_LIMIT} +}; use super::{peer::Peer, packet::Packet, error::P2pError}; use std::{collections::HashMap, net::SocketAddr, fs}; use serde::{Serialize, Deserialize}; use tokio::sync::RwLock; -use xelis_common::{serializer::Serializer, utils::get_current_time, config::{P2P_EXTEND_PEERLIST_DELAY, PEER_FAIL_LIMIT}}; +use xelis_common::{serializer::Serializer, utils::get_current_time}; use std::sync::Arc; use bytes::Bytes; use log::{info, debug, trace, error, warn}; diff --git a/xelis_daemon/src/p2p/tracker.rs b/xelis_daemon/src/p2p/tracker.rs index d916231c..1edb04e5 100644 --- 
a/xelis_daemon/src/p2p/tracker.rs +++ b/xelis_daemon/src/p2p/tracker.rs @@ -2,7 +2,8 @@ use std::{borrow::Cow, collections::HashMap, time::Duration, sync::Arc}; use bytes::Bytes; use tokio::{sync::{mpsc::{UnboundedSender, UnboundedReceiver}, RwLock, oneshot}, time::timeout}; -use xelis_common::{crypto::hash::Hash, config::PEER_TIMEOUT_REQUEST_OBJECT, serializer::Serializer}; +use xelis_common::{crypto::hash::Hash, serializer::Serializer}; +use crate::config::PEER_TIMEOUT_REQUEST_OBJECT; use log::{error, debug}; use super::{packet::{object::{ObjectRequest, OwnedObjectResponse}, Packet}, error::P2pError, peer::Peer}; diff --git a/xelis_daemon/src/rpc/getwork_server.rs b/xelis_daemon/src/rpc/getwork_server.rs index 48d138eb..e8663458 100644 --- a/xelis_daemon/src/rpc/getwork_server.rs +++ b/xelis_daemon/src/rpc/getwork_server.rs @@ -8,8 +8,19 @@ use rand::{rngs::OsRng, RngCore}; use serde::Serialize; use serde_json::json; use tokio::sync::Mutex; -use xelis_common::{crypto::{key::PublicKey, hash::Hash}, utils::get_current_timestamp, api::daemon::{GetBlockTemplateResult, SubmitBlockParams}, serializer::Serializer, block::{BlockHeader, BlockMiner, Difficulty}, config::{DEV_PUBLIC_KEY, STABLE_LIMIT}, immutable::Immutable, rpc_server::{RpcResponseError, InternalRpcError}}; -use crate::core::{blockchain::Blockchain, storage::Storage}; +use xelis_common::{ + crypto::{key::PublicKey, hash::Hash}, + utils::get_current_timestamp, + api::daemon::{GetBlockTemplateResult, SubmitBlockParams}, + serializer::Serializer, + block::{BlockHeader, BlockMiner, Difficulty}, + immutable::Immutable, + rpc_server::{RpcResponseError, InternalRpcError} +}; +use crate::{ + core::{blockchain::Blockchain, storage::Storage}, + config::{DEV_PUBLIC_KEY, STABLE_LIMIT} +}; pub type SharedGetWorkServer = Arc>; diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index c2eac571..edf8897b 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -40,8 +40,12 @@ use 
xelis_common::{ serializer::Serializer, transaction::{Transaction, TransactionType}, crypto::hash::Hash, - block::{BlockHeader, Block}, config::{BLOCK_TIME_MILLIS, VERSION, XELIS_ASSET}, immutable::Immutable, rpc_server::{RPCHandler, parse_params}, + block::{BlockHeader, Block}, + config::{XELIS_ASSET, VERSION}, + immutable::Immutable, + rpc_server::{RPCHandler, parse_params}, }; +use crate::config::BLOCK_TIME_MILLIS; use std::{sync::Arc, borrow::Cow}; use log::{info, debug}; diff --git a/xelis_miner/src/main.rs b/xelis_miner/src/main.rs index 37fd7304..efd6c464 100644 --- a/xelis_miner/src/main.rs +++ b/xelis_miner/src/main.rs @@ -11,7 +11,7 @@ use xelis_common::{ block::{BlockMiner, BLOCK_WORK_SIZE}, serializer::Serializer, difficulty::check_difficulty, - config::{VERSION, DEV_ADDRESS}, + config::VERSION, utils::{get_current_timestamp, format_hashrate, format_difficulty}, crypto::{hash::{Hashable, Hash, hash}, address::Address}, api::daemon::{GetBlockTemplateResult, SubmitBlockParams}, prompt::{Prompt, command::CommandManager, LogLevel, ShareablePrompt, self}, @@ -25,7 +25,7 @@ use lazy_static::lazy_static; #[clap(version = VERSION, about = "XELIS Miner")] pub struct MinerConfig { /// Wallet address to mine and receive block rewards on - #[clap(short, long, default_value_t = String::from(DEV_ADDRESS))] + #[clap(short, long)] miner_address: String, /// Daemon address to connect to for mining #[clap(short = 'a', long, default_value_t = String::from(DEFAULT_DAEMON_ADDRESS))] @@ -110,12 +110,7 @@ async fn main() -> Result<()> { return Ok(()) } - info!("Miner address: {}", address); - if address.to_string() == *DEV_ADDRESS { - warn!("You are using the default developer address. 
Please consider using your own address."); - } - - + info!("Miner address: {}", address); if config.num_threads != 0 && threads as usize != threads_count { warn!("Attention, the number of threads used may not be optimal, recommended is: {}", threads_count); } diff --git a/xelis_wallet/src/config.rs b/xelis_wallet/src/config.rs index ed9f14ea..731478e5 100644 --- a/xelis_wallet/src/config.rs +++ b/xelis_wallet/src/config.rs @@ -7,6 +7,9 @@ pub const PASSWORD_HASH_SIZE: usize = 32; pub const SALT_SIZE: usize = 32; pub const KEY_SIZE: usize = 32; +// daemon address by default when no specified +pub const DEFAULT_DAEMON_ADDRESS: &str = "http://127.0.0.1:8080"; + lazy_static! { pub static ref PASSWORD_ALGORITHM: Argon2<'static> = { // 15 MB, 16 iterations diff --git a/xelis_wallet/src/main.rs b/xelis_wallet/src/main.rs index ff62cbf4..f89cba8b 100644 --- a/xelis_wallet/src/main.rs +++ b/xelis_wallet/src/main.rs @@ -6,10 +6,12 @@ use fern::colors::Color; use log::{error, info}; use clap::Parser; use xelis_common::{config::{ - DEFAULT_DAEMON_ADDRESS, VERSION, XELIS_ASSET, COIN_DECIMALS }, prompt::{Prompt, command::{CommandManager, Command, CommandHandler, CommandError}, argument::{Arg, ArgType, ArgumentManager}, LogLevel, self, ShareablePrompt, PromptError}, async_handler, crypto::{address::{Address, AddressType}, hash::Hashable}, transaction::{TransactionType, Transaction}, utils::{format_xelis, set_network_to, get_network, format_coin}, serializer::Serializer, network::Network, api::wallet::FeeBuilder}; -use xelis_wallet::wallet::Wallet; +use xelis_wallet::{ + wallet::Wallet, + config::DEFAULT_DAEMON_ADDRESS +}; #[cfg(feature = "api_server")] use xelis_wallet::api::AuthConfig; From 6a16ff37196a64c3f40c392250f637f2725cba0e Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 17 Oct 2023 14:08:51 +0200 Subject: [PATCH 081/160] daemon: add peers in `p2p_status` --- xelis_common/src/api/daemon.rs | 18 ++++++++++++++++-- xelis_daemon/src/p2p/mod.rs | 10 +++++----- 
xelis_daemon/src/rpc/rpc.rs | 26 +++++++++++++++++++++++--- 3 files changed, 44 insertions(+), 10 deletions(-) diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index 152fcd38..8e716130 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -1,4 +1,4 @@ -use std::{borrow::Cow, collections::HashSet}; +use std::{borrow::Cow, collections::HashSet, net::SocketAddr}; use serde::{Deserialize, Serialize}; @@ -145,9 +145,23 @@ pub struct GetTransactionParams<'a> { pub hash: Cow<'a, Hash> } +#[derive(Serialize, Deserialize)] +pub struct PeerEntry<'a> { + pub id: u64, + pub addr: Cow<'a, SocketAddr>, + pub tag: Cow<'a, Option>, + pub version: Cow<'a, String>, + pub top_block_hash: Hash, + pub topoheight: u64, + pub height: u64, + pub last_ping: u64, + pub pruned_topoheight: Option, + pub cumulative_difficulty: Difficulty +} + #[derive(Serialize, Deserialize)] pub struct P2pStatusResult<'a> { - pub peer_count: usize, + pub peers: Vec>, pub max_peers: usize, pub tag: Cow<'a, Option>, pub our_topoheight: u64, diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index c66eacea..70033885 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1450,13 +1450,13 @@ impl P2pServer { } pub async fn get_best_topoheight(&self) -> u64 { - let our_height = self.blockchain.get_topo_height(); + let our = self.blockchain.get_topo_height(); let peer_list = self.peer_list.read().await; - let best_height = peer_list.get_best_topoheight(); - if best_height > our_height { - best_height + let best = peer_list.get_best_topoheight(); + if best > our { + best } else { - our_height + our } } diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index edf8897b..f6532f30 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -34,7 +34,8 @@ use xelis_common::{ GetAccountHistoryParams, AccountHistoryEntry, AccountHistoryType, - GetAccountAssetsParams + 
GetAccountAssetsParams, + PeerEntry }, DataHash}, async_handler, serializer::Serializer, @@ -429,15 +430,34 @@ async fn p2p_status(blockchain: Arc>, body: Value) -> let p2p = blockchain.get_p2p().lock().await; match p2p.as_ref() { Some(p2p) => { - let peer_count = p2p.get_peer_count().await; let tag = p2p.get_tag(); let peer_id = p2p.get_peer_id(); let best_topoheight = p2p.get_best_topoheight().await; let max_peers = p2p.get_max_peers(); let our_topoheight = blockchain.get_topo_height(); + let peer_list = p2p.get_peer_list().read().await; + let peers_values = peer_list.get_peers().values(); + let mut peers = Vec::new(); + for p in peers_values { + let top_block_hash = p.get_top_block_hash().lock().await.clone(); + peers.push( + PeerEntry { + id: p.get_id(), + addr: Cow::Borrowed(p.get_outgoing_address()), + tag: Cow::Borrowed(p.get_node_tag()), + version: Cow::Borrowed(p.get_version()), + top_block_hash, + topoheight: p.get_topoheight(), + height: p.get_height(), + last_ping: p.get_last_ping(), + pruned_topoheight: p.get_pruned_topoheight(), + cumulative_difficulty: p.get_cumulative_difficulty() + } + ); + } Ok(json!(P2pStatusResult { - peer_count, + peers, tag: Cow::Borrowed(tag), peer_id, our_topoheight, From d8f8ce3f1fee5e6dfb7773376682b64283317380 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 17 Oct 2023 14:11:58 +0200 Subject: [PATCH 082/160] fix comments --- xelis_common/src/config.rs | 2 +- xelis_miner/src/config.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/xelis_common/src/config.rs b/xelis_common/src/config.rs index 863b1ce8..ea7a87e9 100644 --- a/xelis_common/src/config.rs +++ b/xelis_common/src/config.rs @@ -2,7 +2,7 @@ use crate::crypto::hash::Hash; pub const VERSION: &str = env!("CARGO_PKG_VERSION"); pub const XELIS_ASSET: Hash = Hash::zero(); -// 0.01000 XLS per KB +// 0.01000 XEL per KB pub const FEE_PER_KB: u64 = 1000; pub const COIN_DECIMALS: u8 = 5; // 5 decimals numbers pub const COIN_VALUE: u64 = 
10u64.pow(COIN_DECIMALS as u32); // 100 000 diff --git a/xelis_miner/src/config.rs b/xelis_miner/src/config.rs index ff1c7ea9..d32a5c09 100644 --- a/xelis_miner/src/config.rs +++ b/xelis_miner/src/config.rs @@ -1 +1,2 @@ +// daemon address by default when no specified pub const DEFAULT_DAEMON_ADDRESS: &str = "127.0.0.1:8080"; \ No newline at end of file From fa60ae2528d2a59aba7feb6d1fc278284c76941e Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 17 Oct 2023 16:14:06 +0200 Subject: [PATCH 083/160] daemon: 'get_peers' rpc method --- xelis_common/src/api/daemon.rs | 2 +- xelis_common/src/config.rs | 12 +++++++---- xelis_daemon/src/rpc/rpc.rs | 37 ++++++++++++++++++++++++---------- 3 files changed, 35 insertions(+), 16 deletions(-) diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index 8e716130..7ae54660 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -161,7 +161,7 @@ pub struct PeerEntry<'a> { #[derive(Serialize, Deserialize)] pub struct P2pStatusResult<'a> { - pub peers: Vec>, + pub peer_count: usize, pub max_peers: usize, pub tag: Cow<'a, Option>, pub our_topoheight: u64, diff --git a/xelis_common/src/config.rs b/xelis_common/src/config.rs index ea7a87e9..cab5d09e 100644 --- a/xelis_common/src/config.rs +++ b/xelis_common/src/config.rs @@ -4,9 +4,13 @@ pub const VERSION: &str = env!("CARGO_PKG_VERSION"); pub const XELIS_ASSET: Hash = Hash::zero(); // 0.01000 XEL per KB pub const FEE_PER_KB: u64 = 1000; -pub const COIN_DECIMALS: u8 = 5; // 5 decimals numbers -pub const COIN_VALUE: u64 = 10u64.pow(COIN_DECIMALS as u32); // 100 000 +// 5 decimals numbers +pub const COIN_DECIMALS: u8 = 5; +// 100 000 to represent 1 XEL +pub const COIN_VALUE: u64 = 10u64.pow(COIN_DECIMALS as u32); // Addresses format -pub const PREFIX_ADDRESS: &str = "xel"; // mainnet prefix address -pub const TESTNET_PREFIX_ADDRESS: &str = "xet"; // testnet prefix address \ No newline at end of file +// mainnet prefix address +pub const 
PREFIX_ADDRESS: &str = "xel"; +// testnet prefix address +pub const TESTNET_PREFIX_ADDRESS: &str = "xet"; \ No newline at end of file diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index f6532f30..8e927d8c 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -158,6 +158,7 @@ pub fn register_methods(handler: &mut RPCHandler>> handler.register_method("submit_transaction", async_handler!(submit_transaction)); handler.register_method("get_transaction", async_handler!(get_transaction)); handler.register_method("p2p_status", async_handler!(p2p_status)); + handler.register_method("get_peers", async_handler!(get_peers)); handler.register_method("get_mempool", async_handler!(get_mempool)); handler.register_method("get_tips", async_handler!(get_tips)); handler.register_method("get_dag_order", async_handler!(get_dag_order)); @@ -435,10 +436,32 @@ async fn p2p_status(blockchain: Arc>, body: Value) -> let best_topoheight = p2p.get_best_topoheight().await; let max_peers = p2p.get_max_peers(); let our_topoheight = blockchain.get_topo_height(); + let peer_count = p2p.get_peer_count().await; + + Ok(json!(P2pStatusResult { + peer_count, + tag: Cow::Borrowed(tag), + peer_id, + our_topoheight, + best_topoheight, + max_peers + })) + }, + None => Err(InternalRpcError::AnyError(ApiError::NoP2p.into())) + } +} + +async fn get_peers(blockchain: Arc>, body: Value) -> Result { + if body != Value::Null { + return Err(InternalRpcError::UnexpectedParams) + } + + let p2p = blockchain.get_p2p().lock().await; + match p2p.as_ref() { + Some(p2p) => { let peer_list = p2p.get_peer_list().read().await; - let peers_values = peer_list.get_peers().values(); let mut peers = Vec::new(); - for p in peers_values { + for p in peer_list.get_peers().values() { let top_block_hash = p.get_top_block_hash().lock().await.clone(); peers.push( PeerEntry { @@ -455,15 +478,7 @@ async fn p2p_status(blockchain: Arc>, body: Value) -> } ); } - - Ok(json!(P2pStatusResult { - 
peers, - tag: Cow::Borrowed(tag), - peer_id, - our_topoheight, - best_topoheight, - max_peers - })) + Ok(json!(peers)) }, None => Err(InternalRpcError::AnyError(ApiError::NoP2p.into())) } From b255ed3819f23ca5d70c0d95685e2b57de47e5d5 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 17 Oct 2023 18:26:43 +0200 Subject: [PATCH 084/160] update API.md --- API.md | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 54 insertions(+), 3 deletions(-) diff --git a/API.md b/API.md index d2fd187b..95f8770e 100644 --- a/API.md +++ b/API.md @@ -234,7 +234,6 @@ Retrieve a block at a specific topo height } } ``` - NOTE: `total_fees` field is not `null` when TXs are fetched (`include_txs` is at `true`). #### Get Blocks At Height @@ -343,7 +342,6 @@ Retrieve a block by its hash } } ``` - NOTE: `total_fees` field is not `null` when TXs are fetched (`include_txs` is at `true`). #### Get Top Block @@ -394,7 +392,6 @@ Retrieve the highest block based on the topological height } } ``` - NOTE: `total_fees` field is not `null` when TXs are fetched (`include_txs` is at `true`). #### Get Nonce @@ -474,6 +471,7 @@ Verify if address has a nonce on-chain registered. 
Get up-to-date asset's balance for a specific address NOTE: Balance is returned in atomic units + ##### Method `get_last_balance` ##### Parameters @@ -514,6 +512,7 @@ NOTE: Balance is returned in atomic units Get asset's balance from address at exact topoheight NOTE: Balance is returned in atomic units + ##### Method `get_balance_at_topoheight` ##### Parameters @@ -758,6 +757,58 @@ No parameters } ``` +#### Get Peers +Retrieve all peers connected + +##### Method `get_peers` + +##### Parameters +No parameters + +##### Request +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "get_peers" +} +``` + +##### Response +```json +{ + "id": 1, + "jsonrpc": "2.0", + "result": [ + { + "addr": "255.255.255.255:2125", + "cumulative_difficulty": 15429361306853, + "height": 488400, + "id": 8185485348476293826, + "last_ping": 1697559833, + "pruned_topoheight": 488000, + "tag": null, + "top_block_hash": "0000006a04cccb82b11e68468be07e4a1da46de8b47dc41d66b2300ff494f80e", + "topoheight": 489291, + "version": "1.5.0" + }, + { + "addr": "192.168.55.43:2125", + "cumulative_difficulty": 15429361306853, + "height": 488400, + "id": 2491091954271682078, + "last_ping": 1697559834, + "pruned_topoheight": 489200, + "tag": null, + "top_block_hash": "0000006a04cccb82b11e68468be07e4a1da46de8b47dc41d66b2300ff494f80e", + "topoheight": 489291, + "version": "1.5.0" + } + ] +} +``` +NOTE: Addresses are not real one and were replaced for privacy reasons. + #### Get DAG Order Retrieve the whole DAG order (all blocks hash ordered by topoheight). If no parameters are set, it will retrieve the last 64 blocks hash ordered descending. 
From e53ed03a857993102c676efc864f13d91ed2ed49 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 17 Oct 2023 23:50:52 +0200 Subject: [PATCH 085/160] daemon: test forcing integration of orphaned blocks --- xelis_daemon/src/core/blockchain.rs | 42 ++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 13 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 99586944..587da364 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1038,9 +1038,25 @@ impl Blockchain { tips.push(hash); } - let mut sorted_tips = blockdag::sort_tips(storage, &tips).await?; + if tips.len() > 1 { + let best_tip = blockdag::find_best_tip_by_cumulative_difficulty(storage, &tips).await?.clone(); + debug!("Best tip selected for this block template is {}", best_tip); + let mut selected_tips = Vec::with_capacity(tips.len()); + for hash in tips { + if best_tip != hash { + if !self.validate_tips(storage, &best_tip, &hash).await? 
{ + debug!("Tip {} is invalid, not selecting it because difficulty can't be less than 91% of {}", hash, best_tip); + continue; + } + } + selected_tips.push(hash); + } + tips = selected_tips; + } + + let mut sorted_tips = blockdag::sort_tips(storage, &tips).await.unwrap(); sorted_tips.truncate(TIPS_LIMIT); // keep only first 3 heavier tips - let height = blockdag::calculate_height_at_tips(storage, &tips).await?; + let height = blockdag::calculate_height_at_tips(storage, &sorted_tips).await?; let mut block = BlockHeader::new(self.get_version_at_height(height), height, get_current_timestamp(), sorted_tips, extra_nonce, address, Vec::new()); let mempool = self.mempool.read().await; @@ -1531,18 +1547,18 @@ impl Blockchain { tips = HashSet::new(); debug!("find best tip by cumulative difficulty"); - let best_tip = blockdag::find_best_tip_by_cumulative_difficulty(storage, &new_tips).await?.clone(); + // let best_tip = blockdag::find_best_tip_by_cumulative_difficulty(storage, &new_tips).await?.clone(); for hash in new_tips { - if best_tip != hash { - if !self.validate_tips(&storage, &best_tip, &hash).await? { - warn!("Rusty TIP {} declared stale", hash); - } else { - debug!("Tip {} is valid, adding to final Tips list", hash); - tips.insert(hash); - } - } - } - tips.insert(best_tip); + // if best_tip != hash { + // if !self.validate_tips(&storage, &best_tip, &hash).await? 
{ + // warn!("Rusty TIP {} declared stale", hash); + // } else { + // debug!("Tip {} is valid, adding to final Tips list", hash); + // } + tips.insert(hash); + // } + } + // tips.insert(best_tip); // save highest topo height debug!("Highest topo height found: {}", highest_topo); From 08eb0b333da3a37dd19f8b4a471dbb521eb76f6f Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 17 Oct 2023 23:57:42 +0200 Subject: [PATCH 086/160] daemon: keep validating tips --- xelis_daemon/src/core/blockchain.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 587da364..77cd949c 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1547,18 +1547,18 @@ impl Blockchain { tips = HashSet::new(); debug!("find best tip by cumulative difficulty"); - // let best_tip = blockdag::find_best_tip_by_cumulative_difficulty(storage, &new_tips).await?.clone(); + let best_tip = blockdag::find_best_tip_by_cumulative_difficulty(storage, &new_tips).await?.clone(); for hash in new_tips { - // if best_tip != hash { - // if !self.validate_tips(&storage, &best_tip, &hash).await? { - // warn!("Rusty TIP {} declared stale", hash); - // } else { - // debug!("Tip {} is valid, adding to final Tips list", hash); - // } - tips.insert(hash); - // } - } - // tips.insert(best_tip); + if best_tip != hash { + if !self.validate_tips(&storage, &best_tip, &hash).await? 
{ + warn!("Rusty TIP {} declared stale", hash); + } else { + debug!("Tip {} is valid, adding to final Tips list", hash); + tips.insert(hash); + } + } + } + tips.insert(best_tip); // save highest topo height debug!("Highest topo height found: {}", highest_topo); From cc24a447fe9c97ff7bd6182560acb8495568b0af Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 18 Oct 2023 00:24:45 +0200 Subject: [PATCH 087/160] daemon: debug assert, debug message on truncate --- xelis_daemon/src/core/blockchain.rs | 8 +++++++- xelis_daemon/src/core/blockdag.rs | 4 ++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 77cd949c..08594981 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1055,7 +1055,13 @@ impl Blockchain { } let mut sorted_tips = blockdag::sort_tips(storage, &tips).await.unwrap(); - sorted_tips.truncate(TIPS_LIMIT); // keep only first 3 heavier tips + if sorted_tips.len() > TIPS_LIMIT { + let dropped_tips = sorted_tips.drain(TIPS_LIMIT..); // keep only first 3 heavier tips + for hash in dropped_tips { + debug!("Dropping tip {} because it is not in the first 3 heavier tips", hash); + } + } + let height = blockdag::calculate_height_at_tips(storage, &sorted_tips).await?; let mut block = BlockHeader::new(self.get_version_at_height(height), height, get_current_timestamp(), sorted_tips, extra_nonce, address, Vec::new()); diff --git a/xelis_daemon/src/core/blockdag.rs b/xelis_daemon/src/core/blockdag.rs index e0041047..d4660a65 100644 --- a/xelis_daemon/src/core/blockdag.rs +++ b/xelis_daemon/src/core/blockdag.rs @@ -12,6 +12,10 @@ pub fn sort_descending_by_cumulative_difficulty(scores: &mut Vec<(&Hash, Difficu b_hash.cmp(a_hash) } }); + + if scores.len() >= 2 { + debug_assert!(scores[0].1 >= scores[1].1); + } } // TODO Refactor From 9f2329335a0b5ed81ed681b1d322c74c35e0d6d0 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 18 Oct 2023 01:03:16 
+0200 Subject: [PATCH 088/160] common: fix buggy terminal on error --- xelis_common/src/prompt/mod.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/xelis_common/src/prompt/mod.rs b/xelis_common/src/prompt/mod.rs index 9eb060ba..5f6a6d85 100644 --- a/xelis_common/src/prompt/mod.rs +++ b/xelis_common/src/prompt/mod.rs @@ -686,6 +686,16 @@ impl Prompt { } } +impl Drop for Prompt { + fn drop(&mut self) { + if let Ok(true) = terminal::is_raw_mode_enabled() { + if let Err(e) = terminal::disable_raw_mode() { + error!("Error while forcing to disable raw mode: {}", e); + } + } + } +} + pub fn colorize_string(color: Color, message: &String) -> String { format!("\x1B[{}m{}\x1B[0m", color.to_fg_str(), message) } From 3a23930207ce46200be478aae302b5121a88e37a Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 18 Oct 2023 11:24:43 +0200 Subject: [PATCH 089/160] daemon: display network hashrate in status command --- xelis_daemon/src/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/xelis_daemon/src/main.rs b/xelis_daemon/src/main.rs index ddd67f37..c2e5db35 100644 --- a/xelis_daemon/src/main.rs +++ b/xelis_daemon/src/main.rs @@ -372,6 +372,7 @@ async fn status(manager: &CommandManager>>, _: Arg manager.message(format!("Stable Height: {}", stableheight)); manager.message(format!("Topo Height: {}", topoheight)); manager.message(format!("Difficulty: {}", difficulty)); + manager.message(format!("Network Hashrate: {}", format_hashrate((difficulty / BLOCK_TIME) as f64))); manager.message(format!("Top block hash: {}", top_block_hash)); manager.message(format!("Average Block Time: {:.2}s", avg_block_time as f64 / 1000f64)); manager.message(format!("Target Block Time: {:.2}s", BLOCK_TIME as f64)); From 73ecd6b80653111bedfa4ca2791d1e528e50ab3b Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 18 Oct 2023 11:30:56 +0200 Subject: [PATCH 090/160] update API.md --- API.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/API.md b/API.md index 
95f8770e..59910c40 100644 --- a/API.md +++ b/API.md @@ -807,7 +807,7 @@ No parameters ] } ``` -NOTE: Addresses are not real one and were replaced for privacy reasons. +NOTE: Addresses displayed in this example are not real one and were replaced for privacy reasons. #### Get DAG Order Retrieve the whole DAG order (all blocks hash ordered by topoheight). From c1b2cc331dec9094d94ee46e823609d40de690da Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 18 Oct 2023 14:03:39 +0200 Subject: [PATCH 091/160] daemon: find common peer on block propagation and register them --- xelis_daemon/src/p2p/mod.rs | 22 +++++++++++++++++----- xelis_daemon/src/p2p/peer_list.rs | 2 +- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 70033885..39c16293 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -796,11 +796,23 @@ impl P2pServer { let peer_list = self.peer_list.read().await; let peer_peers = peer.get_peers(false).lock().await; // iterate over all peers of this peer broadcaster - for peer_peer in peer_peers.iter() { + for common_peer_addr in peer_peers.iter() { // if we have a common peer with him - if let Some(peer_peer) = peer_list.get_peer_by_addr(peer_peer) { - let mut blocks_propagation = peer_peer.get_blocks_propagation().lock().await; - blocks_propagation.put(block_hash.clone(), ()); + if let Some(common_peer) = peer_list.get_peer_by_addr(common_peer_addr) { + if peer.get_id() != common_peer.get_id() { + let peers_received = common_peer.get_peers(false).lock().await; + let peers_sent = common_peer.get_peers(true).lock().await; + // verify that we already know that he his connected to it and that we informed him we are connected too to prevent any desync + if peers_received.iter().find( + |addr: &&SocketAddr| *addr == peer.get_outgoing_address() + ).is_some() && peers_sent.iter().find( + |addr: &&SocketAddr| *addr == common_peer.get_outgoing_address() + ).is_some() { + 
debug!("{} is a common peer with {}, adding block {} to its propagation cache", common_peer, peer, block_hash); + let mut blocks_propagation = peer.get_blocks_propagation().lock().await; + blocks_propagation.put(block_hash.clone(), ()); + } + } } } } @@ -1183,8 +1195,8 @@ impl P2pServer { Packet::PeerDisconnected(packet) => { let addr = packet.get_addr(); debug!("{} disconnected from {}", addr, peer); - let mut peer_peers_sent = peer.get_peers(true).lock().await; let mut peer_peers = peer.get_peers(false).lock().await; + let mut peer_peers_sent = peer.get_peers(true).lock().await; // peer should be a common one (we sent it, and received it from him) if !(peer_peers.remove(&addr) && peer_peers_sent.remove(&addr)) { warn!("{} disconnected from {} but we didn't have it in our peer list", addr, peer); diff --git a/xelis_daemon/src/p2p/peer_list.rs b/xelis_daemon/src/p2p/peer_list.rs index 8043213e..7d6c885a 100644 --- a/xelis_daemon/src/p2p/peer_list.rs +++ b/xelis_daemon/src/p2p/peer_list.rs @@ -110,8 +110,8 @@ impl PeerList { let addr = peer.get_outgoing_address(); let packet = Bytes::from(Packet::PeerDisconnected(PacketPeerDisconnected::new(*addr)).to_bytes()); for peer in self.peers.values() { - let peer_peers_sent = peer.get_peers(true).lock().await; let peer_peers = peer.get_peers(false).lock().await; + let peer_peers_sent = peer.get_peers(true).lock().await; // check if it was in common (we sent it and we received it) if peer_peers_sent.contains(addr) && peer_peers.contains(addr) { debug!("Sending PeerDisconnected packet to peer {} for {}", peer, addr); From 3e5473575569983d003b31a99878695b05bf3bc6 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 18 Oct 2023 15:18:48 +0200 Subject: [PATCH 092/160] daemon: better p2p errors --- xelis_daemon/src/p2p/error.rs | 22 ++++++++++- xelis_daemon/src/p2p/mod.rs | 37 ++++++++++--------- .../src/p2p/packet/peer_disconnected.rs | 4 +- 3 files changed, 42 insertions(+), 21 deletions(-) diff --git a/xelis_daemon/src/p2p/error.rs 
b/xelis_daemon/src/p2p/error.rs index 9b30a686..0179502e 100644 --- a/xelis_daemon/src/p2p/error.rs +++ b/xelis_daemon/src/p2p/error.rs @@ -5,7 +5,7 @@ use tokio::sync::oneshot::error::RecvError; use xelis_common::crypto::hash::Hash; use xelis_common::serializer::ReaderError; use std::array::TryFromSliceError; -use std::net::AddrParseError; +use std::net::{AddrParseError, SocketAddr}; use tokio::time::error::Elapsed; use std::sync::mpsc::SendError; use std::io::Error as IOError; @@ -19,6 +19,26 @@ use super::packet::object::ObjectRequest; pub enum P2pError { #[error("Invalid protocol rules")] InvalidProtocolRules, + #[error("Invalid list size in pagination with a next page")] + InvalidInventoryPagination, + #[error("unknown common peer {} received: not found in list", _0)] + UnknownPeerReceived(SocketAddr), + #[error("Block {} at height {} propagated is under our stable height", _0, _1)] + BlockPropagatedUnderStableHeight(Hash, u64), + #[error("Block {} propagated is already tracked", _0)] + AlreadyTrackedBlock(Hash), + #[error("Transaction {} propagated is already tracked", _0)] + AlreadyTrackedTx(Hash), + #[error("Malformed chain request, received {} blocks id", _0)] + MalformedChainRequest(usize), + #[error("Received a unrequested chain response")] + UnrequestedChainResponse, + #[error("Received a unrequested bootstrap chain response")] + UnrequestedBootstrapChainResponse, + #[error("Malformed chain response, received {} blocks id", _0)] + MalformedChainResponse(usize), + #[error("Invalid common point at topoheight {}", _0)] + InvalidCommonPoint(u64), #[error("Peer disconnected")] Disconnected, #[error("Invalid handshake")] diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 39c16293..e6944e0d 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -745,7 +745,7 @@ impl P2pServer { let mut txs_cache = peer.get_txs_cache().lock().await; if txs_cache.contains(&hash) { warn!("{} send us a transaction ({}) already 
tracked by him", peer, hash); - return Err(P2pError::InvalidProtocolRules) + return Err(P2pError::AlreadyTrackedTx(hash)) } txs_cache.put(hash.clone(), ()); } @@ -762,20 +762,19 @@ impl P2pServer { let block_height = header.get_height(); // check that the block height is valid + let header = header.into_owned(); + let block_hash = header.hash(); if block_height < self.blockchain.get_stable_height() { error!("{} send us a block propagation packet which is under stable height (height = {})!", peer, block_height); - return Err(P2pError::InvalidProtocolRules) + return Err(P2pError::BlockPropagatedUnderStableHeight(block_hash, block_height)) } - let header = header.into_owned(); - let block_hash = header.hash(); - // verify that this block wasn't already sent by him { let mut blocks_propagation = peer.get_blocks_propagation().lock().await; if blocks_propagation.contains(&block_hash) { warn!("{} send us a block ({}) already tracked by him", peer, block_hash); - return Err(P2pError::InvalidProtocolRules) + return Err(P2pError::AlreadyTrackedBlock(block_hash)) } debug!("Saving {} in blocks propagation cache for {}", block_hash, peer); blocks_propagation.put(block_hash.clone(), ()); @@ -932,9 +931,10 @@ impl P2pServer { peer.set_last_chain_sync(time); // at least one block necessary (genesis block) - if request.size() == 0 || request.size() > CHAIN_SYNC_REQUEST_MAX_BLOCKS { // allows maximum 64 blocks id (2560 bytes max) - warn!("{} sent us a malformed chain request ({} blocks)!", peer, request.size()); - return Err(P2pError::InvalidProtocolRules) + let request_size = request.size(); + if request_size == 0 || request_size > CHAIN_SYNC_REQUEST_MAX_BLOCKS { // allows maximum 64 blocks id (2560 bytes max) + warn!("{} sent us a malformed chain request ({} blocks)!", peer, request_size); + return Err(P2pError::MalformedChainRequest(request_size)) } let zelf = Arc::clone(self); @@ -953,26 +953,27 @@ impl P2pServer { trace!("Received a chain response from {}", peer); if 
!peer.chain_sync_requested() { warn!("{} sent us a chain response but we haven't requested any.", peer); - return Err(P2pError::InvalidProtocolRules) + return Err(P2pError::UnrequestedChainResponse) } peer.set_chain_sync_requested(false); self.last_sync_request_sent.store(get_current_time(), Ordering::SeqCst); + let response_size = response.size(); if response.size() > CHAIN_SYNC_RESPONSE_MAX_BLOCKS { // peer is trying to spam us warn!("{} is maybe trying to spam us", peer); - return Err(P2pError::InvalidProtocolRules) + return Err(P2pError::MalformedChainResponse(response_size)) } if let Some(common_point) = response.get_common_point() { - debug!("{} found a common point with block {} at {} for sync, received {} blocks", peer, common_point.get_hash(), common_point.get_topoheight(), response.size()); + debug!("{} found a common point with block {} at {} for sync, received {} blocks", peer, common_point.get_hash(), common_point.get_topoheight(), response_size); let pop_count = { let storage = self.blockchain.get_storage().read().await; let block_height = match storage.get_height_for_block_hash(common_point.get_hash()).await { Ok(height) => height, Err(e) => { warn!("{} sent us an invalid common point: {}", peer, e); - return Err(P2pError::InvalidPacket) + return Err(P2pError::InvalidCommonPoint(common_point.get_topoheight())) } }; let topoheight = storage.get_topo_height_for_hash(common_point.get_hash()).await?; @@ -1156,7 +1157,7 @@ impl P2pServer { if next_page.is_some() { if total_count != NOTIFY_MAX_LEN { error!("Received only {} while maximum is {} elements, and tell us that there is another page", total_count, NOTIFY_MAX_LEN); - return Err(P2pError::InvalidProtocolRules) + return Err(P2pError::InvalidInventoryPagination) } } @@ -1188,19 +1189,19 @@ impl P2pServer { error!("Error while sending bootstrap response to channel: {:?}", e); } } else { - error!("{} send us a bootstrap chain response but we didn't asked it", peer); - return 
Err(P2pError::InvalidProtocolRules) + debug!("{} send us a bootstrap chain response but we didn't asked it", peer); + return Err(P2pError::UnrequestedBootstrapChainResponse) } }, Packet::PeerDisconnected(packet) => { - let addr = packet.get_addr(); + let addr = packet.to_addr(); debug!("{} disconnected from {}", addr, peer); let mut peer_peers = peer.get_peers(false).lock().await; let mut peer_peers_sent = peer.get_peers(true).lock().await; // peer should be a common one (we sent it, and received it from him) if !(peer_peers.remove(&addr) && peer_peers_sent.remove(&addr)) { warn!("{} disconnected from {} but we didn't have it in our peer list", addr, peer); - return Err(P2pError::InvalidProtocolRules) + return Err(P2pError::UnknownPeerReceived(addr)) } } }; diff --git a/xelis_daemon/src/p2p/packet/peer_disconnected.rs b/xelis_daemon/src/p2p/packet/peer_disconnected.rs index 32737cbe..0850a6ad 100644 --- a/xelis_daemon/src/p2p/packet/peer_disconnected.rs +++ b/xelis_daemon/src/p2p/packet/peer_disconnected.rs @@ -19,8 +19,8 @@ impl PacketPeerDisconnected { } } - pub fn get_addr(&self) -> &SocketAddr { - &self.addr + pub fn to_addr(self) -> SocketAddr { + self.addr } } From 20ec8e40d0aeeb27a1e8f7bd453b6d3e9675240d Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 18 Oct 2023 17:46:52 +0200 Subject: [PATCH 093/160] daemon: fix potential deadlock --- xelis_daemon/src/core/blockchain.rs | 2 +- xelis_daemon/src/p2p/mod.rs | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 08594981..469a6dd9 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1112,8 +1112,8 @@ impl Blockchain { pub async fn build_block_from_header(&self, header: Immutable) -> Result { let mut transactions: Vec> = Vec::with_capacity(header.get_txs_count()); - let mempool = self.mempool.read().await; let storage = self.storage.read().await; + let mempool = 
self.mempool.read().await; for hash in header.get_txs_hashes() { // at this point, we don't want to lose/remove any tx, we clone it only let tx = if mempool.contains_tx(hash) { diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index e6944e0d..fe233a76 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1100,7 +1100,6 @@ impl P2pServer { error!("Error while sending object response to sender!"); } } - }, Packet::NotifyInventoryRequest(packet_wrapper) => { trace!("Received a inventory request from {}", peer); From 3606dfbdd8fe9b53b73dce874cbd9c3712ce1a2d Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 20 Oct 2023 10:40:44 +0200 Subject: [PATCH 094/160] daemon: rewind until pruned topoheight --- xelis_daemon/src/core/storage/sled.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index 690a17c2..0de70fa4 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -1092,9 +1092,9 @@ impl Storage for SledStorage { trace!("Lowest topoheight for rewind: {}", lowest_topo); let pruned_topoheight = self.get_pruned_topoheight()?.unwrap_or(0); - if lowest_topo <= pruned_topoheight { - warn!("Pruned topoheight is {}, lowest topoheight is {}, rewind only until {}", pruned_topoheight, lowest_topo, pruned_topoheight + 1); - lowest_topo = pruned_topoheight + 1; + if lowest_topo < pruned_topoheight { + warn!("Pruned topoheight is {}, lowest topoheight is {}, rewind only until {}", pruned_topoheight, lowest_topo, pruned_topoheight); + lowest_topo = pruned_topoheight; } // new TIPS for chain From 32f8fe2c15a5b50bdc4c95d28b8695af11e35c7e Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 20 Oct 2023 10:41:34 +0200 Subject: [PATCH 095/160] common: rotate logs based on date --- Cargo.lock | 1 + xelis_common/Cargo.toml | 2 +- xelis_common/src/prompt/mod.rs | 17 +++++++++++++---- 
xelis_daemon/src/core/blockchain.rs | 2 +- 4 files changed, 16 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a659dfa2..5325398b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -888,6 +888,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9f0c14694cbd524c8720dd69b0e3179344f04ebb5f90f2e4a440c6ea3b2f1ee" dependencies = [ + "chrono", "colored", "log", ] diff --git a/xelis_common/Cargo.toml b/xelis_common/Cargo.toml index 039440aa..cce7ddda 100644 --- a/xelis_common/Cargo.toml +++ b/xelis_common/Cargo.toml @@ -19,7 +19,7 @@ curve25519-dalek = { package = "curve25519-dalek-ng", version = "4.1.1" } thiserror = "1.0.30" anyhow = "1.0.57" log = "0.4" -fern = { version = "0.6", features = ["colored"] } +fern = { version = "0.6", features = ["colored", "date-based"] } chrono = "0.4.19" tokio = { version = "1.25", features = ["macros", "signal", "time", "sync"], optional = true } reqwest = { version = "0.11.10", default-features = false, features = ["json", "rustls"], optional = true } diff --git a/xelis_common/src/prompt/mod.rs b/xelis_common/src/prompt/mod.rs index 5f6a6d85..74c7e068 100644 --- a/xelis_common/src/prompt/mod.rs +++ b/xelis_common/src/prompt/mod.rs @@ -7,8 +7,10 @@ use crate::serializer::{Serializer, ReaderError}; use self::command::{CommandManager, CommandError}; use std::collections::VecDeque; use std::fmt::{Display, Formatter, self}; +use std::fs::create_dir; use std::io::{Write, stdout, Error as IOError}; use std::num::ParseFloatError; +use std::path::Path; use std::str::FromStr; use std::sync::atomic::{AtomicBool, Ordering, AtomicUsize, AtomicU16}; use crossterm::event::{self, Event, KeyCode, KeyModifiers, KeyEventKind}; @@ -652,10 +654,19 @@ impl Prompt { error!("Error on prompt refresh: {}", e); } res - }).chain(std::io::stdout()); + }) + .chain(std::io::stdout()) + .level(level.into()); let mut base = base.chain(stdout_log); if !disable_file_logging { + let logs_path = 
Path::new("logs/"); + if !logs_path.exists() { + if let Err(e) = create_dir(logs_path) { + error!("Error while creating logs folder: {}", e); + }; + } + let file_log = fern::Dispatch::new() .format(move |out, message, record| { let pad = " ".repeat((30i16 - record.target().len() as i16).max(0) as usize); @@ -669,12 +680,10 @@ impl Prompt { pad, message )) - }).chain(fern::log_file(filename_log)?); + }).chain(fern::DateBased::new(logs_path, format!("%Y-%m-%d.{filename_log}"))); base = base.chain(file_log); } - base = base.level(level.into()); - base.level_for("sled", log::LevelFilter::Warn) .level_for("actix_server", log::LevelFilter::Warn) .level_for("actix_web", log::LevelFilter::Warn) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 469a6dd9..2ee26436 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1143,7 +1143,7 @@ impl Blockchain { } if block.get_timestamp() > get_current_timestamp() + TIMESTAMP_IN_FUTURE_LIMIT { // accept 2s in future - error!("Block timestamp in too much in future!"); + error!("Block timestamp is too much in future!"); return Err(BlockchainError::TimestampIsInFuture(get_current_timestamp(), block.get_timestamp())); } From 051230a05ebd361d0a5950af3998d476670d5b6d Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 20 Oct 2023 14:06:34 +0200 Subject: [PATCH 096/160] daemon: synced blocks propagation queue --- xelis_daemon/src/p2p/mod.rs | 53 ++++++++++++++++++++----------- xelis_daemon/src/p2p/peer_list.rs | 10 +++--- 2 files changed, 39 insertions(+), 24 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index fe233a76..6b08c05c 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -8,6 +8,7 @@ mod tracker; mod queue; use indexmap::IndexSet; +use lru::LruCache; use xelis_common::{ config::VERSION, serializer::Serializer, @@ -39,7 +40,7 @@ use crate::{ NETWORK_ID, SEED_NODES, MAX_BLOCK_SIZE, 
CHAIN_SYNC_DELAY, P2P_PING_DELAY, CHAIN_SYNC_REQUEST_MAX_BLOCKS, P2P_PING_PEER_LIST_DELAY, P2P_PING_PEER_LIST_LIMIT, STABLE_LIMIT, PEER_FAIL_LIMIT, CHAIN_SYNC_RESPONSE_MAX_BLOCKS, CHAIN_SYNC_TOP_BLOCKS, GENESIS_BLOCK_HASH, PRUNE_SAFETY_LIMIT, - CHAIN_SYNC_TIMEOUT_SECS, P2P_EXTEND_PEERLIST_DELAY + CHAIN_SYNC_TIMEOUT_SECS, P2P_EXTEND_PEERLIST_DELAY, TIPS_LIMIT } }; use self::{ @@ -59,7 +60,7 @@ use self::{ }; use tokio::{ net::{TcpListener, TcpStream}, - sync::mpsc::{self, UnboundedSender, UnboundedReceiver}, + sync::{mpsc::{self, UnboundedSender, UnboundedReceiver}, Mutex}, select, task::JoinHandle, io::AsyncWriteExt, @@ -101,7 +102,8 @@ pub struct P2pServer { last_sync_request_sent: AtomicU64, // used to check if we are already syncing with one peer or not object_tracker: SharedObjectTracker, // used to requests objects to peers and avoid requesting the same object to multiple peers queued_fetcher: QueuedFetcher, // used to requests all propagated txs in one task only - is_running: AtomicBool // used to check if the server is running or not in tasks + is_running: AtomicBool, // used to check if the server is running or not in tasks + blocks_propagation_queue: Mutex> // Synced cache to prevent concurrent tasks adding the block } impl P2pServer { @@ -133,7 +135,8 @@ impl P2pServer { last_sync_request_sent: AtomicU64::new(0), object_tracker, queued_fetcher, - is_running: AtomicBool::new(true) + is_running: AtomicBool::new(true), + blocks_propagation_queue: Mutex::new(LruCache::new(STABLE_LIMIT as usize * TIPS_LIMIT)) }; let arc = Arc::new(server); @@ -550,7 +553,7 @@ impl P2pServer { let mut new_peers = Vec::new(); // all the peers we already sent to this current peer - let mut peer_peers_sent = peer.get_peers(true).lock().await; + let mut peers_sent = peer.get_peers(true).lock().await; // iterate through our peerlist to determinate which peers we have to send for p in peer_list.get_peers().values() { @@ -561,9 +564,9 @@ impl P2pServer { // if we haven't send him this 
peer addr and that he don't have him already, insert it let addr = p.get_outgoing_address(); - if !peer_peers_sent.contains(addr) { + if !peers_sent.contains(addr) { // add it in our side to not re send it again - peer_peers_sent.insert(*addr); + peers_sent.insert(*addr); // add it to new list to send it new_peers.push(*addr); if new_peers.len() >= P2P_PING_PEER_LIST_LIMIT { @@ -780,15 +783,6 @@ impl P2pServer { blocks_propagation.put(block_hash.clone(), ()); } - // check that we don't have this block in our chain - { - let storage = self.blockchain.get_storage().read().await; - if storage.has_block(&block_hash).await? { - debug!("{}: {} with hash {} is already in our chain. Skipping", peer, header, block_hash); - return Ok(()) - } - } - // Avoid sending the same block to a common peer // because we track peerlist of each peers, we can try to determinate it { @@ -816,6 +810,25 @@ impl P2pServer { } } + // check that we don't have this block in our chain + { + let storage = self.blockchain.get_storage().read().await; + if storage.has_block(&block_hash).await? { + debug!("{}: {} with hash {} is already in our chain. 
Skipping", peer, header, block_hash); + return Ok(()) + } + } + + // Check that we are not already waiting on it + { + let mut blocks_propagation_queue = self.blocks_propagation_queue.lock().await; + if blocks_propagation_queue.contains(&block_hash) { + debug!("Block {} propagated is already in processing from another peer", block_hash); + return Ok(()) + } + blocks_propagation_queue.put(block_hash.clone(), ()); + } + let block_height = header.get_height(); debug!("Received block at height {} from {}", block_height, peer); let zelf = Arc::clone(self); @@ -909,7 +922,7 @@ impl P2pServer { } }; - debug!("Adding received block {} to chain", block_hash); + debug!("Adding received block {} from {} to chain", block_hash, peer); if let Err(e) = zelf.blockchain.add_new_block(block, true, false).await { error!("Error while adding new block: {}", e); peer.increment_fail_count(); @@ -1198,8 +1211,10 @@ impl P2pServer { let mut peer_peers = peer.get_peers(false).lock().await; let mut peer_peers_sent = peer.get_peers(true).lock().await; // peer should be a common one (we sent it, and received it from him) - if !(peer_peers.remove(&addr) && peer_peers_sent.remove(&addr)) { - warn!("{} disconnected from {} but we didn't have it in our peer list", addr, peer); + let recv_removed = peer_peers.remove(&addr); + let sent_removed = peer_peers_sent.remove(&addr); + if !recv_removed || !sent_removed { + warn!("{} disconnected from {} but we didn't have it in our peer list: {recv_removed} {sent_removed}", addr, peer); return Err(P2pError::UnknownPeerReceived(addr)) } } diff --git a/xelis_daemon/src/p2p/peer_list.rs b/xelis_daemon/src/p2p/peer_list.rs index 7d6c885a..672631f8 100644 --- a/xelis_daemon/src/p2p/peer_list.rs +++ b/xelis_daemon/src/p2p/peer_list.rs @@ -110,14 +110,14 @@ impl PeerList { let addr = peer.get_outgoing_address(); let packet = Bytes::from(Packet::PeerDisconnected(PacketPeerDisconnected::new(*addr)).to_bytes()); for peer in self.peers.values() { - let peer_peers = 
peer.get_peers(false).lock().await; - let peer_peers_sent = peer.get_peers(true).lock().await; - // check if it was in common (we sent it and we received it) - if peer_peers_sent.contains(addr) && peer_peers.contains(addr) { + let peers_received = peer.get_peers(false).lock().await; + let peers_sent = peer.get_peers(true).lock().await; + // check if it was a common peer (we sent it and we received it) + if peers_sent.contains(addr) && peers_received.contains(addr) { debug!("Sending PeerDisconnected packet to peer {} for {}", peer, addr); // we send the packet to notify the peer that we don't have it in common anymore if let Err(e) = peer.send_bytes(packet.clone()).await { - error!("Error while trying to send RemovePeer packet to peer {}: {}", peer.get_connection().get_address(), e); + error!("Error while trying to send PeerDisconnected packet to peer {}: {}", peer.get_connection().get_address(), e); } } } From 49d829a893a38958ad2d3a74bae2ff074a62f775 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 20 Oct 2023 14:07:04 +0200 Subject: [PATCH 097/160] common: exclude modules not required in logs --- xelis_common/src/prompt/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/xelis_common/src/prompt/mod.rs b/xelis_common/src/prompt/mod.rs index 74c7e068..1da64f6d 100644 --- a/xelis_common/src/prompt/mod.rs +++ b/xelis_common/src/prompt/mod.rs @@ -689,6 +689,8 @@ impl Prompt { .level_for("actix_web", log::LevelFilter::Warn) .level_for("actix_http", log::LevelFilter::Warn) .level_for("mio", log::LevelFilter::Warn) + .level_for("tokio_tungstenite", log::LevelFilter::Warn) + .level_for("tungstenite", log::LevelFilter::Warn) .apply()?; Ok(()) From f396d59bac80ffdeec41f5005e37b0e204353377 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 20 Oct 2023 14:31:02 +0200 Subject: [PATCH 098/160] daemon: update startup name to uppercase --- xelis_daemon/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xelis_daemon/src/main.rs 
b/xelis_daemon/src/main.rs index c2e5db35..59aa9a11 100644 --- a/xelis_daemon/src/main.rs +++ b/xelis_daemon/src/main.rs @@ -48,7 +48,7 @@ async fn main() -> Result<()> { let mut config: NodeConfig = NodeConfig::parse(); let prompt = Prompt::new(config.log_level, config.filename_log, config.disable_file_logging)?; - info!("Xelis Blockchain running version: {}", VERSION); + info!("XELIS Blockchain running version: {}", VERSION); info!("----------------------------------------------"); if config.nested.simulator && config.network != Network::Dev { From b1a5559dd9d549f2ac69a26ed06c59718ea67cd4 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 20 Oct 2023 18:24:36 +0200 Subject: [PATCH 099/160] daemon: fix rewind error --- README.md | 1 + xelis_daemon/src/core/storage/sled.rs | 30 +++++++++++++++------------ 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index badf9662..538b4140 100644 --- a/README.md +++ b/README.md @@ -228,6 +228,7 @@ All theses data are saved in plaintext. **NOTE**: - Balances and nonces are versioned, which means they are stored each time a change happened on disk. - Assets registered have in value their topoheight at which it was registered. +- Supply and block rewards are only stored when the block is topologically ordered The database engine used is sled. It may changes in future. diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index 0de70fa4..bf8f0124 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -169,7 +169,7 @@ impl SledStorage { Ok(value) } - async fn get_cacheable_data(&self, tree: &Tree, cache: &Option>>, key: &K) -> Result { + async fn delete_cacheable_data(&self, tree: &Tree, cache: &Option>>, key: &K) -> Result { let bytes = match tree.remove(key.to_bytes())? 
{ Some(data) => data.to_vec(), None => return Err(BlockchainError::NotFoundOnDisk(DiskContext::DeleteData)) @@ -353,8 +353,8 @@ impl Storage for SledStorage { async fn delete_block_at_topoheight(&mut self, topoheight: u64) -> Result, BlockchainError> { // delete topoheight<->hash pointers - let hash = self.get_cacheable_data(&self.hash_at_topo, &self.hash_at_topo_cache, &topoheight).await?; - self.get_cacheable_data::(&self.topo_by_hash, &self.topo_by_hash_cache, &hash).await?; + let hash = self.delete_cacheable_data(&self.hash_at_topo, &self.hash_at_topo_cache, &topoheight).await?; + self.delete_cacheable_data::(&self.topo_by_hash, &self.topo_by_hash_cache, &hash).await?; let topoheight_bytes = topoheight.to_be_bytes(); // delete block reward @@ -387,7 +387,7 @@ impl Storage for SledStorage { } async fn delete_tx(&mut self, hash: &Hash) -> Result, BlockchainError> { - self.get_cacheable_data::>(&self.tx_blocks, &None, hash).await?; + self.delete_cacheable_data::>(&self.tx_blocks, &None, hash).await?; self.delete_data(&self.transactions, &self.transactions_cache, hash).await } @@ -884,7 +884,7 @@ impl Storage for SledStorage { async fn delete_balance_at_topoheight(&mut self, key: &PublicKey, asset: &Hash, topoheight: u64) -> Result { trace!("delete balance {} for {} at topoheight {}", asset, key, topoheight); let tree = self.get_versioned_balance_tree(asset, topoheight).await?; - self.get_cacheable_data(&tree, &None, key).await.map_err(|_| BlockchainError::NoBalanceChanges(key.clone())) + self.delete_cacheable_data(&tree, &None, key).await.map_err(|_| BlockchainError::NoBalanceChanges(key.clone())) } // returns a new versioned balance with already-set previous topoheight @@ -1121,7 +1121,7 @@ impl Storage for SledStorage { // get all blocks at same height, and delete current block hash from the list trace!("Searching blocks at height {}", height); - let blocks_at_height: Tips = self.get_cacheable_data(&self.blocks_at_height, &None, &height).await?; + let 
blocks_at_height: Tips = self.delete_cacheable_data(&self.blocks_at_height, &None, &height).await?; trace!("Blocks at height {}: {}", height, blocks_at_height.len()); for hash in blocks_at_height { @@ -1129,20 +1129,24 @@ impl Storage for SledStorage { let block = self.delete_data(&self.blocks, &self.blocks_cache, &hash).await?; trace!("block header deleted successfully"); - let _: Difficulty = self.get_cacheable_data(&self.supply, &None, &hash).await?; - let _: Difficulty = self.get_cacheable_data(&self.difficulty, &None, &hash).await?; + if self.is_block_topological_ordered(&hash).await { + trace!("Deleting supply and difficulty"); + let _: u64 = self.delete_cacheable_data(&self.supply, &None, &hash).await?; + let _: Difficulty = self.delete_cacheable_data(&self.difficulty, &None, &hash).await?; + } trace!("Deleting cumulative difficulty"); - let cumulative_difficulty: u64 = self.get_cacheable_data(&self.cumulative_difficulty, &self.cumulative_difficulty_cache, &hash).await?; + let cumulative_difficulty: u64 = self.delete_cacheable_data(&self.cumulative_difficulty, &self.cumulative_difficulty_cache, &hash).await?; trace!("Cumulative difficulty deleted: {}", cumulative_difficulty); - let reward: u64 = self.get_cacheable_data(&self.rewards, &None, &hash).await?; + + let reward: u64 = self.delete_cacheable_data(&self.rewards, &None, &hash).await?; trace!("Reward for block {} was: {}", hash, reward); for tx_hash in block.get_transactions() { let tx: Arc = self.delete_data(&self.transactions, &self.transactions_cache, tx_hash).await?; if self.has_tx_blocks(tx_hash)? 
{ - let mut blocks: Tips = self.get_cacheable_data(&self.tx_blocks, &None, tx_hash).await?; + let mut blocks: Tips = self.delete_cacheable_data(&self.tx_blocks, &None, tx_hash).await?; let blocks_len = blocks.len(); blocks.remove(&hash); self.set_blocks_for_tx(tx_hash, &blocks)?; @@ -1167,12 +1171,12 @@ impl Storage for SledStorage { } trace!("Block was at topoheight {}", topo); - self.get_cacheable_data(&self.topo_by_hash, &self.topo_by_hash_cache, &hash).await?; + self.delete_cacheable_data(&self.topo_by_hash, &self.topo_by_hash_cache, &hash).await?; if let Ok(hash_at_topo) = self.get_hash_at_topo_height(topo).await { if hash_at_topo == hash { trace!("Deleting hash '{}' at topo height '{}'", hash_at_topo, topo); - self.get_cacheable_data(&self.hash_at_topo, &self.hash_at_topo_cache, &topo).await?; + self.delete_cacheable_data(&self.hash_at_topo, &self.hash_at_topo_cache, &topo).await?; } } } From 4a7cf5333d84e1353c6fc92a7f35a47f7b043a55 Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 20 Oct 2023 19:07:20 +0200 Subject: [PATCH 100/160] daemon: optimize disk usage --- README.md | 2 +- xelis_daemon/src/core/blockchain.rs | 10 +++-- xelis_daemon/src/core/storage/mod.rs | 8 ++-- xelis_daemon/src/core/storage/sled.rs | 64 ++++++++++++++------------- xelis_daemon/src/p2p/mod.rs | 12 ++--- xelis_daemon/src/rpc/rpc.rs | 13 +++--- 6 files changed, 56 insertions(+), 53 deletions(-) diff --git a/README.md b/README.md index 538b4140..8190ae6f 100644 --- a/README.md +++ b/README.md @@ -211,7 +211,7 @@ All theses data are saved in plaintext. 
| transactions | Hash | Transaction | Save the whole transaction based on its hash | | blocks | Hash | Block Header | Save the block header only based on its hash | | blocks_at_height | Integer | Array of Hash | Save all blocks hash at a specific height | -| extra | Bytes | No specific type | Actually used to save the highest topo height and TIPS | +| extra | Bytes | No specific type |Save the highest topo height, pruned topoheight and TIPS| | topo_by_hash | Hash | Integer | Save a block hash at a specific topo height | | hash_by_topo | Integer | Hash | Save a topo height for a specific block hash | | cumulative_difficulty | Hash | Integer | Save the cumulative difficulty for each block hash | diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 2ee26436..e31cc456 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1446,10 +1446,12 @@ impl Blockchain { get_block_reward(past_supply) }; - trace!("set block {} reward to {}", hash, block_reward); - storage.set_block_reward(&hash, block_reward)?; - trace!("set block {} supply to {}", hash, past_supply + block_reward); - storage.set_supply_for_block_hash(&hash, past_supply + block_reward)?; + trace!("set block reward to {} at {}", block_reward, highest_topo); + storage.set_block_reward_at_topo_height(highest_topo, block_reward)?; + + let supply = past_supply + block_reward; + trace!("set block supply to {} at {}", supply, highest_topo); + storage.set_supply_at_topo_height(highest_topo, supply)?; // track all changes in balances let mut balances: HashMap<&PublicKey, HashMap<&Hash, VersionedBalance>> = HashMap::new(); diff --git a/xelis_daemon/src/core/storage/mod.rs b/xelis_daemon/src/core/storage/mod.rs index 97649c0f..87daa95b 100644 --- a/xelis_daemon/src/core/storage/mod.rs +++ b/xelis_daemon/src/core/storage/mod.rs @@ -100,8 +100,8 @@ pub trait Storage: DifficultyProvider + Sync + Send + 'static { fn set_last_topoheight_for_nonce(&mut 
self, key: &PublicKey, topoheight: u64) -> Result<(), BlockchainError>; async fn set_nonce_at_topoheight(&mut self, key: &PublicKey, nonce: u64, topoheight: u64) -> Result<(), BlockchainError>; - fn get_block_reward(&self, hash: &Hash) -> Result; - fn set_block_reward(&mut self, hash: &Hash, reward: u64) -> Result<(), BlockchainError>; + fn get_block_reward_at_topo_height(&self, topoheight: u64) -> Result; + fn set_block_reward_at_topo_height(&mut self, topoheight: u64, reward: u64) -> Result<(), BlockchainError>; async fn get_transaction(&self, hash: &Hash) -> Result, BlockchainError>; fn count_transactions(&self) -> usize; @@ -129,9 +129,7 @@ pub trait Storage: DifficultyProvider + Sync + Send + 'static { async fn get_hash_at_topo_height(&self, topoheight: u64) -> Result; async fn get_supply_at_topo_height(&self, topoheight: u64) -> Result; - - fn get_supply_for_block_hash(&self, hash: &Hash) -> Result; - fn set_supply_for_block_hash(&mut self, hash: &Hash, supply: u64) -> Result<(), BlockchainError>; + fn set_supply_at_topo_height(&mut self, topoheight: u64, supply: u64) -> Result<(), BlockchainError>; async fn set_cumulative_difficulty_for_block_hash(&mut self, hash: &Hash, cumulative_difficulty: u64) -> Result<(), BlockchainError>; diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index bf8f0124..ca963fa6 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -40,8 +40,8 @@ pub struct SledStorage { cumulative_difficulty: Tree, // cumulative difficulty for each block hash on disk assets: Tree, // keep tracks of all available assets on network nonces: Tree, // account nonces to prevent TX replay attack - rewards: Tree, // all block rewards for blocks - supply: Tree, // supply for each block hash + rewards: Tree, // block reward for each block topoheight + supply: Tree, // supply for each block topoheight difficulty: Tree, // difficulty for each block hash tx_blocks: Tree, // tree 
to store all blocks hashes where a tx was included in db: sled::Db, // opened DB used for assets to create dynamic assets @@ -351,6 +351,7 @@ impl Storage for SledStorage { Ok(()) } + // Delete the whole block using its topoheight async fn delete_block_at_topoheight(&mut self, topoheight: u64) -> Result, BlockchainError> { // delete topoheight<->hash pointers let hash = self.delete_cacheable_data(&self.hash_at_topo, &self.hash_at_topo_cache, &topoheight).await?; @@ -362,9 +363,10 @@ impl Storage for SledStorage { // delete supply self.supply.remove(topoheight_bytes)?; // delete difficulty - self.difficulty.remove(topoheight_bytes)?; + self.difficulty.remove(hash.as_bytes())?; + // delete cummulative difficulty - self.cumulative_difficulty.remove(topoheight_bytes)?; + self.cumulative_difficulty.remove(hash.as_bytes())?; // delete block header let block_header = self.delete_data(&self.blocks, &self.blocks_cache, &hash).await?; @@ -1013,14 +1015,14 @@ impl Storage for SledStorage { Ok(()) } - fn get_block_reward(&self, hash: &Hash) -> Result { - trace!("get block reward for {}", hash); - Ok(self.load_from_disk(&self.rewards, hash.as_bytes())?) + fn get_block_reward_at_topo_height(&self, topoheight: u64) -> Result { + trace!("get block reward at topo height {}", topoheight); + Ok(self.load_from_disk(&self.rewards, &topoheight.to_be_bytes())?) 
} - fn set_block_reward(&mut self, hash: &Hash, reward: u64) -> Result<(), BlockchainError> { - trace!("set block reward for {} to {}", hash, reward); - self.rewards.insert(hash.as_bytes(), &reward.to_be_bytes())?; + fn set_block_reward_at_topo_height(&mut self, topoheight: u64, reward: u64) -> Result<(), BlockchainError> { + trace!("set block reward to {} at topo height {}", reward, topoheight); + self.rewards.insert(topoheight.to_be_bytes(), &reward.to_be_bytes())?; Ok(()) } @@ -1129,20 +1131,26 @@ impl Storage for SledStorage { let block = self.delete_data(&self.blocks, &self.blocks_cache, &hash).await?; trace!("block header deleted successfully"); - if self.is_block_topological_ordered(&hash).await { - trace!("Deleting supply and difficulty"); - let _: u64 = self.delete_cacheable_data(&self.supply, &None, &hash).await?; - let _: Difficulty = self.delete_cacheable_data(&self.difficulty, &None, &hash).await?; - } + let block_topoheight = if self.is_block_topological_ordered(&hash).await { + let topoheight = self.get_topo_height_for_hash(&hash).await?; + trace!("Deleting supply and block reward"); + let supply: u64 = self.delete_cacheable_data(&self.supply, &None, &topoheight).await?; + trace!("Supply was {}", supply); + + let reward: u64 = self.delete_cacheable_data(&self.rewards, &None, &topoheight).await?; + trace!("Reward for block {} was: {}", hash, reward); + Some(topoheight) + } else { + None + }; + + trace!("Deleting difficulty"); + let _: Difficulty = self.delete_cacheable_data(&self.difficulty, &None, &hash).await?; trace!("Deleting cumulative difficulty"); - let cumulative_difficulty: u64 = self.delete_cacheable_data(&self.cumulative_difficulty, &self.cumulative_difficulty_cache, &hash).await?; + let cumulative_difficulty: Difficulty = self.delete_cacheable_data(&self.cumulative_difficulty, &self.cumulative_difficulty_cache, &hash).await?; trace!("Cumulative difficulty deleted: {}", cumulative_difficulty); - - let reward: u64 = 
self.delete_cacheable_data(&self.rewards, &None, &hash).await?; - trace!("Reward for block {} was: {}", hash, reward); - for tx_hash in block.get_transactions() { let tx: Arc = self.delete_data(&self.transactions, &self.transactions_cache, tx_hash).await?; if self.has_tx_blocks(tx_hash)? { @@ -1165,7 +1173,7 @@ impl Storage for SledStorage { } // if block is ordered, delete data that are linked to it - if let Ok(topo) = self.get_topo_height_for_hash(&hash).await { + if let Some(topo) = block_topoheight { if topo < topoheight { topoheight = topo; } @@ -1483,18 +1491,12 @@ impl Storage for SledStorage { async fn get_supply_at_topo_height(&self, topoheight: u64) -> Result { trace!("get supply at topo height {}", topoheight); - let hash = self.get_hash_at_topo_height(topoheight).await?; - self.get_supply_for_block_hash(&hash) - } - - fn get_supply_for_block_hash(&self, hash: &Hash) -> Result { - trace!("get supply for hash {}", hash); - self.load_from_disk(&self.supply, hash.as_bytes()) + self.load_from_disk(&self.supply, &topoheight.to_be_bytes()) } - fn set_supply_for_block_hash(&mut self, hash: &Hash, supply: u64) -> Result<(), BlockchainError> { - trace!("set supply for hash {}", hash); - self.supply.insert(hash.as_bytes(), &supply.to_be_bytes())?; + fn set_supply_at_topo_height(&mut self, topoheight: u64, supply: u64) -> Result<(), BlockchainError> { + trace!("set supply at topo height {}", topoheight); + self.supply.insert(topoheight.to_be_bytes(), &supply.to_be_bytes())?; Ok(()) } diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 6b08c05c..a7aaf3ec 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1677,8 +1677,8 @@ impl P2pServer { for topoheight in (lower..=topoheight).rev() { let hash = storage.get_hash_at_topo_height(topoheight).await?; - let supply = storage.get_supply_for_block_hash(&hash)?; - let reward = storage.get_block_reward(&hash)?; + let supply = 
storage.get_supply_at_topo_height(topoheight).await?; + let reward = storage.get_block_reward_at_topo_height(topoheight)?; let difficulty = storage.get_difficulty_for_block_hash(&hash).await?; let cumulative_difficulty = storage.get_cumulative_difficulty_for_block_hash(&hash).await?; @@ -1882,12 +1882,12 @@ impl P2pServer { } // save metadata of this block - storage.set_supply_for_block_hash(&hash, metadata.supply)?; - storage.set_cumulative_difficulty_for_block_hash(&hash, metadata.cumulative_difficulty).await?; - storage.set_block_reward(&hash, metadata.reward)?; - + storage.set_supply_at_topo_height(lowest_topoheight, metadata.supply)?; + storage.set_block_reward_at_topo_height(lowest_topoheight, metadata.reward)?; storage.set_topo_height_for_block(&hash, lowest_topoheight).await?; + storage.set_cumulative_difficulty_for_block_hash(&hash, metadata.cumulative_difficulty).await?; + // save the block with its transactions, difficulty storage.add_new_block(Arc::new(header), &txs, metadata.difficulty, hash).await?; } diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 8e927d8c..01befd26 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -67,11 +67,12 @@ pub async fn get_block_response_for_hash(blockchain: &Blockchain, return Err(InternalRpcError::AnyError(BlockchainError::BlockNotFound(hash).into())) } - let (topoheight, supply, reward) = if storage.is_block_topological_ordered(&hash).await { + let (topoheight, supply, reward) = if storage.is_block_topological_ordered(&hash).await { + let topoheight = storage.get_topo_height_for_hash(&hash).await.context("Error while retrieving topo height")?; ( - Some(storage.get_topo_height_for_hash(&hash).await.context("Error while retrieving topo height")?), - Some(storage.get_supply_for_block_hash(&hash).context("Error while retrieving supply")?), - Some(storage.get_block_reward(&hash).context("Error while retrieving block reward")?), + Some(topoheight), + 
Some(storage.get_supply_at_topo_height(topoheight).await.context("Error while retrieving supply")?), + Some(storage.get_block_reward_at_topo_height(topoheight).context("Error while retrieving block reward")?), ) } else { ( @@ -270,7 +271,7 @@ async fn get_info(blockchain: Arc>, body: Value) -> Re let (top_hash, native_supply, pruned_topoheight, average_block_time) = { let storage = blockchain.get_storage().read().await; let top_hash = storage.get_hash_at_topo_height(topoheight).await.context("Error while retrieving hash at topo height")?; - let supply = storage.get_supply_for_block_hash(&top_hash).context("Error while supply for hash")?; + let supply = storage.get_supply_at_topo_height(topoheight).await.context("Error while retrieving supply at topo height")?; let pruned_topoheight = storage.get_pruned_topoheight().context("Error while retrieving pruned topoheight")?; let average_block_time = blockchain.get_average_block_time_for_storage(&storage).await.context("Error while retrieving average block time")?; (top_hash, supply, pruned_topoheight, average_block_time) @@ -662,7 +663,7 @@ async fn get_account_history(blockchain: Arc>, body: V { let (hash, block_header) = storage.get_block_header_at_topoheight(topo).await.context(format!("Error while retrieving block header at topo height {topo}"))?; if params.asset == XELIS_ASSET && *block_header.get_miner() == *key { - let reward = storage.get_block_reward(&hash).context(format!("Error while retrieving reward at topo height {topo}"))?; + let reward = storage.get_block_reward_at_topo_height(topo).context(format!("Error while retrieving reward at topo height {topo}"))?; let history_type = AccountHistoryType::Mining { reward }; history.push(AccountHistoryEntry { topoheight: topo, From 6d2f6e02ebae1bb657013868740fa92ae48867cd Mon Sep 17 00:00:00 2001 From: Slixe Date: Fri, 20 Oct 2023 19:16:37 +0200 Subject: [PATCH 101/160] daemon: add info logs about fast sync --- xelis_daemon/src/p2p/mod.rs | 4 +++- 1 file changed, 3 
insertions(+), 1 deletion(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index a7aaf3ec..719cf681 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1734,7 +1734,7 @@ impl P2pServer { // reload blockchain cache from disk, and we're ready to sync the rest of the chain // NOTE: it could be even faster without retrieving each TXs, but we do it in case user don't enable pruning async fn bootstrap_chain(&self, peer: &Arc) -> Result<(), BlockchainError> { - debug!("Starting fast sync with {}", peer); + info!("Starting fast sync with {}", peer); let mut our_topoheight = self.blockchain.get_topo_height(); @@ -1750,6 +1750,7 @@ impl P2pServer { loop { let response = if let Some(step) = step.take() { + info!("Requesting step {:?}", step.kind()); peer.request_boostrap_chain(step).await? } else { break; @@ -1904,6 +1905,7 @@ impl P2pServer { }; } self.blockchain.reload_from_disk(&storage).await?; + info!("Fast sync done with {}", peer); Ok(()) } From 3b5072b761e504ed18312464c7aaa56a8fad9958 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 21 Oct 2023 16:04:56 +0200 Subject: [PATCH 102/160] daemon: use get_balance_at_maximum_topoheight instead of get_balance_at_exact_topoheight --- xelis_daemon/src/rpc/rpc.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 01befd26..ee96f92b 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -645,15 +645,14 @@ async fn get_account_history(blockchain: Arc>, body: V let key = params.address.get_public_key(); let minimum_topoheight = params.minimum_topoheight.unwrap_or(0); let storage = blockchain.get_storage().read().await; - let (topo, versioned_balance) = if let Some(topo) = params.maximum_topoheight { - (topo, storage.get_balance_at_exact_topoheight(key, ¶ms.asset, topo).await.context(format!("Error while retrieving balance at topo height {topo}"))?) 
+ let mut version = if let Some(topo) = params.maximum_topoheight { + storage.get_balance_at_maximum_topoheight(key, ¶ms.asset, topo).await.context(format!("Error while retrieving balance at topo height {topo}"))? } else { - storage.get_last_balance(key, ¶ms.asset).await.context("Error while retrieving last balance")? + Some(storage.get_last_balance(key, ¶ms.asset).await.context("Error while retrieving last balance")?) }; let mut history_count = 0; let mut history = Vec::new(); - let mut version = Some((topo, versioned_balance)); loop { if let Some((topo, versioned_balance)) = version { if topo < minimum_topoheight { From 16b238f0e91df483bcd8168ab0f91f39c9537710 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 21 Oct 2023 16:12:36 +0200 Subject: [PATCH 103/160] daemon: inclusive maximum topoheight balance search --- xelis_daemon/src/core/storage/sled.rs | 11 +++++++++-- xelis_daemon/src/p2p/mod.rs | 2 +- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index ca963fa6..d5a78477 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -846,6 +846,7 @@ impl Storage for SledStorage { // get the latest balance at maximum specified topoheight // when a DAG re-ordering happens, we need to select the right balance and not the last one // returns None if the key has no balances for this asset + // Maximum topoheight is inclusive async fn get_balance_at_maximum_topoheight(&self, key: &PublicKey, asset: &Hash, topoheight: u64) -> Result, BlockchainError> { trace!("get balance {} for {} at maximum topoheight {}", asset, key, topoheight); // check first that this address has balance for this asset, if no returns None @@ -853,10 +854,16 @@ impl Storage for SledStorage { return Ok(None) } + // Fast path: if the balance is at exact topoheight, return it + if self.has_balance_at_exact_topoheight(key, asset, topoheight).await? 
{ + trace!("Balance version found at exact (maximum) topoheight {}", topoheight); + return Ok(Some((topoheight, self.get_balance_at_exact_topoheight(key, asset, topoheight).await?))) + } + let (topo, mut version) = self.get_last_balance(key, asset).await?; trace!("Last version balance {} for {} is at topoheight {}", asset, key, topo); // if it's the latest and its under the maximum topoheight - if topo < topoheight { + if topo <= topoheight { trace!("Last version balance (valid) found at {} (maximum topoheight = {})", topo, topoheight); return Ok(Some((topo, version))) } @@ -865,7 +872,7 @@ impl Storage for SledStorage { while let Some(previous) = version.get_previous_topoheight() { let previous_version = self.get_balance_at_exact_topoheight(key, asset, previous).await?; trace!("previous version {}", previous); - if previous < topoheight { + if previous <= topoheight { trace!("Highest version balance found at {} (maximum topoheight = {})", topo, topoheight); return Ok(Some((previous, previous_version))) } diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 719cf681..1c0dcd47 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1198,7 +1198,7 @@ impl P2pServer { if let Some(sender) = peer.get_bootstrap_chain_channel().lock().await.take() { let response = response.response(); if let Err(e) = sender.send(response) { - error!("Error while sending bootstrap response to channel: {:?}", e); + error!("Error while sending bootstrap response to channel: {:?}", e.kind()); } } else { debug!("{} send us a bootstrap chain response but we didn't asked it", peer); From 1a4d1b9829adab4bd1942fb0891919fd825d3da1 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 21 Oct 2023 17:43:21 +0200 Subject: [PATCH 104/160] daemon: add peers of peer in get_peers rpc method --- xelis_common/src/api/daemon.rs | 1 + xelis_daemon/src/core/blockchain.rs | 3 +++ xelis_daemon/src/rpc/rpc.rs | 2 ++ 3 files changed, 6 insertions(+) diff --git 
a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index 7ae54660..96f3f22a 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -156,6 +156,7 @@ pub struct PeerEntry<'a> { pub height: u64, pub last_ping: u64, pub pruned_topoheight: Option, + pub peers: HashSet, pub cumulative_difficulty: Difficulty } diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index e31cc456..3f32ba79 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1390,6 +1390,7 @@ impl Blockchain { trace!("Cleaning txs at topoheight {} ({})", topoheight, hash_at_topo); if !is_written { if let Some(order) = full_order.get(0) { + // Verify that the block is still at the same topoheight if storage.is_block_topological_ordered(order).await && *order == hash_at_topo { trace!("Hash {} at topo {} stay the same, skipping cleaning", hash_at_topo, topoheight); // remove the hash from the order because we don't need to recompute it @@ -1399,6 +1400,7 @@ impl Blockchain { continue; } } + // if we are here, it means that the block was re-ordered is_written = true; } @@ -1506,6 +1508,7 @@ impl Blockchain { for (key, assets) in balances { for (asset, balance) in assets { trace!("Saving balance {} for {} at topo {}, previous: {:?}", asset, key, highest_topo, balance.get_previous_topoheight()); + // Save the balance as the latest one storage.set_balance_to(key, asset, highest_topo, &balance).await?; } diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index ee96f92b..355dbb5e 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -464,6 +464,7 @@ async fn get_peers(blockchain: Arc>, body: Value) -> R let mut peers = Vec::new(); for p in peer_list.get_peers().values() { let top_block_hash = p.get_top_block_hash().lock().await.clone(); + let peer_peers = p.get_peers(false).lock().await.clone(); peers.push( PeerEntry { id: p.get_id(), @@ -474,6 +475,7 @@ 
async fn get_peers(blockchain: Arc>, body: Value) -> R topoheight: p.get_topoheight(), height: p.get_height(), last_ping: p.get_last_ping(), + peers: peer_peers, pruned_topoheight: p.get_pruned_topoheight(), cumulative_difficulty: p.get_cumulative_difficulty() } From 5534b6cae7d32a27bfa0c47d901b373ea644b1ff Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 21 Oct 2023 19:01:03 +0200 Subject: [PATCH 105/160] daemon: implement PeerConnected, PeerPeerListUpdated, PeerPeerDisconnected events --- xelis_common/src/api/daemon.rs | 33 +++++++++++++++++++++-- xelis_daemon/src/p2p/mod.rs | 42 ++++++++++++++++++++++++----- xelis_daemon/src/p2p/packet/ping.rs | 27 ++++++++++++++----- xelis_daemon/src/rpc/mod.rs | 8 +++++- xelis_daemon/src/rpc/rpc.rs | 38 +++++++++++++------------- 5 files changed, 114 insertions(+), 34 deletions(-) diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index 96f3f22a..2d65ce47 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -261,7 +261,7 @@ pub struct GetAccountsParams { pub maximum_topoheight: Option } -#[derive(Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] pub enum NotifyEvent { // When a new block is accepted by chain // it contains Block struct as value @@ -283,7 +283,20 @@ pub enum NotifyEvent { TransactionSCResult, // When a new asset has been registered // TODO: Smart Contracts - NewAsset + NewAsset, + // When a new peer has connected to us + // It contains PeerEntry struct as value + PeerConnected, + // When a peer has disconnected from us + // It contains peer id as value + PeerDisconnected, + // Peer peerlist updated, its all its connected peers + // It contains PeerPeerListUpdatedEvent as value + PeerPeerListUpdated, + // When a peer of a peer has disconnected + // and that he notified us + // It contains PeerPeerDisconnectedEvent as value + PeerPeerDisconnected, } #[derive(Serialize, Deserialize)] @@ 
-306,4 +319,20 @@ pub struct TransactionExecutedEvent<'a> { pub block_hash: Cow<'a, Hash>, pub tx_hash: Cow<'a, Hash>, pub topoheight: u64, +} + +#[derive(Serialize, Deserialize)] +pub struct PeerPeerListUpdatedEvent { + // Peer ID of the peer that sent us the new peer list + pub peer_id: u64, + // Peerlist received from this peer + pub peerlist: Vec +} + +#[derive(Serialize, Deserialize)] +pub struct PeerPeerDisconnectedEvent { + // Peer ID of the peer that sent us this notification + pub peer_id: u64, + // address of the peer that disconnected from him + pub peer_addr: SocketAddr } \ No newline at end of file diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 1c0dcd47..41914dbd 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -14,7 +14,10 @@ use xelis_common::{ serializer::Serializer, crypto::hash::{Hashable, Hash}, block::{BlockHeader, Block}, - utils::get_current_time, immutable::Immutable, account::VersionedNonce + utils::get_current_time, + immutable::Immutable, + account::VersionedNonce, + api::daemon::{NotifyEvent, PeerPeerDisconnectedEvent} }; use crate::{ core::{ @@ -41,7 +44,7 @@ use crate::{ P2P_PING_PEER_LIST_DELAY, P2P_PING_PEER_LIST_LIMIT, STABLE_LIMIT, PEER_FAIL_LIMIT, CHAIN_SYNC_RESPONSE_MAX_BLOCKS, CHAIN_SYNC_TOP_BLOCKS, GENESIS_BLOCK_HASH, PRUNE_SAFETY_LIMIT, CHAIN_SYNC_TIMEOUT_SECS, P2P_EXTEND_PEERLIST_DELAY, TIPS_LIMIT - } + }, rpc::rpc::get_peer_entry }; use self::{ packet::{ @@ -363,6 +366,17 @@ impl P2pServer { peer_list.add_peer(peer_id, peer) }; + { + trace!("Locking RPC Server to notify PeerConnected event"); + if let Some(rpc) = self.blockchain.get_rpc().lock().await.as_ref() { + if rpc.is_event_tracked(&NotifyEvent::PeerConnected).await { + debug!("Notifying clients with PeerConnected event"); + rpc.notify_clients_with(&NotifyEvent::PeerConnected, get_peer_entry(&peer).await).await; + } + } + trace!("End locking for PeerConnected event"); + } + self.handle_connection(peer).await } 
@@ -753,7 +767,7 @@ impl P2pServer { txs_cache.put(hash.clone(), ()); } - ping.into_owned().update_peer(peer).await?; + ping.into_owned().update_peer(peer, &self.blockchain).await?; if !self.blockchain.has_tx(&hash).await? && !self.object_tracker.has_requested_object(&hash).await { self.queued_fetcher.fetch(Arc::clone(peer), ObjectRequest::Transaction(hash)); } @@ -761,7 +775,7 @@ impl P2pServer { Packet::BlockPropagation(packet_wrapper) => { trace!("Received a block propagation packet from {}", peer); let (header, ping) = packet_wrapper.consume(); - ping.into_owned().update_peer(peer).await?; + ping.into_owned().update_peer(peer, &self.blockchain).await?; let block_height = header.get_height(); // check that the block height is valid @@ -932,7 +946,7 @@ impl P2pServer { Packet::ChainRequest(packet_wrapper) => { trace!("Received a chain request from {}", peer); let (request, ping) = packet_wrapper.consume(); - ping.into_owned().update_peer(peer).await?; + ping.into_owned().update_peer(peer, &self.blockchain).await?; let request = request.into_owned(); let last_request = peer.get_last_chain_sync(); let time = get_current_time(); @@ -1046,7 +1060,7 @@ impl P2pServer { self.try_to_connect_to_peer(peer, false).await; } } - ping.into_owned().update_peer(peer).await?; + ping.into_owned().update_peer(peer, &self.blockchain).await?; }, Packet::ObjectRequest(request) => { trace!("Received a object request from {}", peer); @@ -1117,7 +1131,7 @@ impl P2pServer { Packet::NotifyInventoryRequest(packet_wrapper) => { trace!("Received a inventory request from {}", peer); let (request, ping) = packet_wrapper.consume(); - ping.into_owned().update_peer(peer).await?; + ping.into_owned().update_peer(peer, &self.blockchain).await?; let request = request.into_owned(); @@ -1217,6 +1231,20 @@ impl P2pServer { warn!("{} disconnected from {} but we didn't have it in our peer list: {recv_removed} {sent_removed}", addr, peer); return Err(P2pError::UnknownPeerReceived(addr)) } + + if 
recv_removed { + trace!("Locking RPC Server to notify PeerDisconnected event"); + if let Some(rpc) = self.blockchain.get_rpc().lock().await.as_ref() { + if rpc.is_event_tracked(&NotifyEvent::PeerDisconnected).await { + let value = PeerPeerDisconnectedEvent { + peer_id: peer.get_id(), + peer_addr: addr + }; + rpc.notify_clients_with(&NotifyEvent::PeerDisconnected, value).await; + } + } + trace!("End locking for PeerDisconnected event"); + } } }; Ok(()) diff --git a/xelis_daemon/src/p2p/packet/ping.rs b/xelis_daemon/src/p2p/packet/ping.rs index b5d906b5..3011cbca 100644 --- a/xelis_daemon/src/p2p/packet/ping.rs +++ b/xelis_daemon/src/p2p/packet/ping.rs @@ -9,11 +9,14 @@ use xelis_common::{ utils::{ ip_to_bytes, ip_from_bytes - }, block::Difficulty + }, + block::Difficulty, + api::daemon::{NotifyEvent, PeerPeerListUpdatedEvent} }; use crate::{ p2p::{peer::Peer, error::P2pError}, - config::P2P_PING_PEER_LIST_LIMIT + config::P2P_PING_PEER_LIST_LIMIT, + core::{blockchain::Blockchain, storage::Storage} }; use std::{ fmt::Display, @@ -46,7 +49,7 @@ impl<'a> Ping<'a> { } } - pub async fn update_peer(self, peer: &Arc) -> Result<(), P2pError> { + pub async fn update_peer(self, peer: &Arc, blockchain: &Arc>) -> Result<(), P2pError> { trace!("Updating {} with {}", peer, self); peer.set_block_top_hash(self.top_hash.into_owned()).await; peer.set_topoheight(self.topoheight); @@ -79,8 +82,8 @@ impl<'a> Ping<'a> { let mut peers = peer.get_peers(false).lock().await; let peer_addr = peer.get_connection().get_address(); let peer_outgoing_addr = peer.get_outgoing_address(); - for addr in self.peer_list { - if *peer_addr == addr || *peer_outgoing_addr == addr { + for addr in &self.peer_list { + if peer_addr == addr || peer_outgoing_addr == addr { error!("Invalid protocol rules: peer {} sent us its own socket address in ping packet", peer); return Err(P2pError::InvalidProtocolRules) } @@ -90,8 +93,20 @@ impl<'a> Ping<'a> { return Err(P2pError::InvalidProtocolRules) } debug!("Adding {} for 
{} in ping packet", addr, peer); - peers.insert(addr); + peers.insert(*addr); + } + + trace!("Locking RPC Server to notify PeerPeerListUpdated event"); + if let Some(rpc) = blockchain.get_rpc().lock().await.as_ref() { + if rpc.is_event_tracked(&NotifyEvent::PeerPeerListUpdated).await { + let value = PeerPeerListUpdatedEvent { + peer_id: peer.get_id(), + peerlist: self.peer_list + }; + rpc.notify_clients_with(&NotifyEvent::PeerPeerListUpdated, value).await; + } } + trace!("End locking for PeerPeerListUpdated event"); } Ok(()) diff --git a/xelis_daemon/src/rpc/mod.rs b/xelis_daemon/src/rpc/mod.rs index 774212e3..6ebef364 100644 --- a/xelis_daemon/src/rpc/mod.rs +++ b/xelis_daemon/src/rpc/mod.rs @@ -12,7 +12,7 @@ use actix_web::{ error::Error }; use actix_web_actors::ws::WsResponseBuilder; -use serde_json::Value; +use serde_json::{Value, json}; use tokio::sync::Mutex; use xelis_common::api::daemon::NotifyEvent; use xelis_common::config; @@ -99,6 +99,12 @@ impl DaemonRpcServer { self.get_websocket().get_handler().is_event_tracked(event).await } + pub async fn notify_clients_with(&self, event: &NotifyEvent, value: V) { + if let Err(e) = self.notify_clients(event, json!(value)).await { + error!("Error while notifying event {:?}: {}", event, e); + } + } + pub async fn notify_clients(&self, event: &NotifyEvent, value: Value) -> Result<(), anyhow::Error> { self.get_websocket().get_handler().notify(event, value).await; Ok(()) diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 355dbb5e..76cbd7a6 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -1,4 +1,4 @@ -use crate::core::{blockchain::{Blockchain, get_block_reward}, storage::Storage, error::BlockchainError, mempool::Mempool}; +use crate::{core::{blockchain::{Blockchain, get_block_reward}, storage::Storage, error::BlockchainError, mempool::Mempool}, p2p::peer::Peer}; use super::{InternalRpcError, ApiError}; use anyhow::Context; use serde_json::{json, Value}; @@ -134,6 
+134,24 @@ pub async fn get_transaction_response_for_hash(storage: &S, mempool: } } +pub async fn get_peer_entry(peer: &Peer) -> PeerEntry { + let top_block_hash = peer.get_top_block_hash().lock().await.clone(); + let peer_peers = peer.get_peers(false).lock().await.clone(); + PeerEntry { + id: peer.get_id(), + addr: Cow::Borrowed(peer.get_outgoing_address()), + tag: Cow::Borrowed(peer.get_node_tag()), + version: Cow::Borrowed(peer.get_version()), + top_block_hash, + topoheight: peer.get_topoheight(), + height: peer.get_height(), + last_ping: peer.get_last_ping(), + peers: peer_peers, + pruned_topoheight: peer.get_pruned_topoheight(), + cumulative_difficulty: peer.get_cumulative_difficulty() + } +} + pub fn register_methods(handler: &mut RPCHandler>>) { info!("Registering RPC methods..."); handler.register_method("get_version", async_handler!(version)); @@ -463,23 +481,7 @@ async fn get_peers(blockchain: Arc>, body: Value) -> R let peer_list = p2p.get_peer_list().read().await; let mut peers = Vec::new(); for p in peer_list.get_peers().values() { - let top_block_hash = p.get_top_block_hash().lock().await.clone(); - let peer_peers = p.get_peers(false).lock().await.clone(); - peers.push( - PeerEntry { - id: p.get_id(), - addr: Cow::Borrowed(p.get_outgoing_address()), - tag: Cow::Borrowed(p.get_node_tag()), - version: Cow::Borrowed(p.get_version()), - top_block_hash, - topoheight: p.get_topoheight(), - height: p.get_height(), - last_ping: p.get_last_ping(), - peers: peer_peers, - pruned_topoheight: p.get_pruned_topoheight(), - cumulative_difficulty: p.get_cumulative_difficulty() - } - ); + peers.push(get_peer_entry(p).await); } Ok(json!(peers)) }, From e56e2111b2784a2edc0541ca8ad14762a566dbcc Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 21 Oct 2023 19:01:27 +0200 Subject: [PATCH 106/160] daemon: add comment about missing event impl --- xelis_common/src/api/daemon.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/xelis_common/src/api/daemon.rs 
b/xelis_common/src/api/daemon.rs index 2d65ce47..b7b1794c 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -289,6 +289,7 @@ pub enum NotifyEvent { PeerConnected, // When a peer has disconnected from us // It contains peer id as value + // TODO not implemented yet PeerDisconnected, // Peer peerlist updated, its all its connected peers // It contains PeerPeerListUpdatedEvent as value From f7a81baf95534b1a9f8a99d4a3342b7baef20040 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sat, 21 Oct 2023 19:02:09 +0200 Subject: [PATCH 107/160] daemon: increase time for bootstrap step time out --- xelis_daemon/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xelis_daemon/src/config.rs b/xelis_daemon/src/config.rs index 641802b2..22a62066 100644 --- a/xelis_daemon/src/config.rs +++ b/xelis_daemon/src/config.rs @@ -71,7 +71,7 @@ pub const P2P_EXTEND_PEERLIST_DELAY: u64 = 60; // time in seconds between each t pub const PEER_FAIL_TIME_RESET: u64 = 60 * 5; // number of seconds to reset the counter pub const PEER_FAIL_LIMIT: u8 = 20; // number of fail to disconnect the peer pub const PEER_TIMEOUT_REQUEST_OBJECT: u64 = 3000; // millis until we timeout -pub const PEER_TIMEOUT_BOOTSTRAP_STEP: u64 = 10000; // millis until we timeout +pub const PEER_TIMEOUT_BOOTSTRAP_STEP: u64 = 20000; // millis until we timeout lazy_static! 
{ pub static ref DEV_PUBLIC_KEY: PublicKey = Address::from_string(&DEV_ADDRESS.to_owned()).unwrap().to_public_key(); From fe6e8fe5e9cf5df80eaee6fc7fe0e09bc58be656 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 22 Oct 2023 12:56:36 +0200 Subject: [PATCH 108/160] daemon: clear caches after rewinding chain --- xelis_daemon/src/core/storage/sled.rs | 55 ++++++++++++++++++++++++++- 1 file changed, 54 insertions(+), 1 deletion(-) diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index d5a78477..951bdc81 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -123,6 +123,53 @@ impl SledStorage { Ok(storage) } + async fn clear_caches(&self) { + if let Some(cache) = self.transactions_cache.as_ref() { + let mut cache = cache.lock().await; + cache.clear(); + } + + if let Some(cache) = self.blocks_cache.as_ref() { + let mut cache = cache.lock().await; + cache.clear(); + } + + if let Some(cache) = self.past_blocks_cache.as_ref() { + let mut cache = cache.lock().await; + cache.clear(); + } + + if let Some(cache) = self.topo_by_hash_cache.as_ref() { + let mut cache = cache.lock().await; + cache.clear(); + } + + if let Some(cache) = self.hash_at_topo_cache.as_ref() { + let mut cache = cache.lock().await; + cache.clear(); + } + + if let Some(cache) = self.cumulative_difficulty_cache.as_ref() { + let mut cache = cache.lock().await; + cache.clear(); + } + + if let Some(cache) = self.assets_cache.as_ref() { + let mut cache = cache.lock().await; + cache.clear(); + } + + if let Some(cache) = self.balances_trees_cache.as_ref() { + let mut cache = cache.lock().await; + cache.clear(); + } + + if let Some(cache) = self.nonces_trees_cache.as_ref() { + let mut cache = cache.lock().await; + cache.clear(); + } + } + fn load_from_disk(&self, tree: &Tree, key: &[u8]) -> Result { match tree.get(key)? 
{ Some(bytes) => { @@ -1082,6 +1129,7 @@ impl Storage for SledStorage { // search the lowest topo height available based on count + 1 // (last lowest topo height accepted) let mut lowest_topo = topoheight; + let mut lowest_height = height; trace!("search lowest topo height available, height = {}, count = {}", height, count); for i in (height-count..=height).rev() { trace!("checking lowest topoheight for blocks at {}", i); @@ -1094,11 +1142,12 @@ impl Storage for SledStorage { } } } + lowest_height = i; } else { warn!("No blocks found at {}, how ?", i); } } - trace!("Lowest topoheight for rewind: {}", lowest_topo); + trace!("Lowest topoheight for rewind: {}, height: {}", lowest_topo, lowest_height); let pruned_topoheight = self.get_pruned_topoheight()?.unwrap_or(0); if lowest_topo < pruned_topoheight { @@ -1304,6 +1353,9 @@ impl Storage for SledStorage { self.delete_versioned_nonces_at_topoheight(topoheight).await?; } + // Clear all caches to not have old data after rewind + self.clear_caches().await; + // store the new tips and topo topoheight self.store_tips(&tips)?; self.set_top_topoheight(topoheight)?; @@ -1311,6 +1363,7 @@ impl Storage for SledStorage { // reverse order of txs so its ascending order txs.reverse(); + Ok((height, topoheight, txs)) } From f7fbf3e63207ede49bcf3eab2a6ae08b28f594a1 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 22 Oct 2023 19:44:15 +0200 Subject: [PATCH 109/160] daemon: use TransactionResponse for mempool event --- xelis_common/src/api/daemon.rs | 4 ++-- xelis_daemon/src/core/blockchain.rs | 27 ++++++++++++++++++++++++--- 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/xelis_common/src/api/daemon.rs b/xelis_common/src/api/daemon.rs index b7b1794c..41ef0efa 100644 --- a/xelis_common/src/api/daemon.rs +++ b/xelis_common/src/api/daemon.rs @@ -2,7 +2,7 @@ use std::{borrow::Cow, collections::HashSet, net::SocketAddr}; use serde::{Deserialize, Serialize}; -use crate::{crypto::{hash::Hash, address::Address}, 
account::{VersionedBalance, VersionedNonce}, network::Network, block::Difficulty}; +use crate::{crypto::{hash::Hash, address::Address}, account::{VersionedBalance, VersionedNonce}, network::Network, block::Difficulty, transaction::Transaction}; use super::DataHash; @@ -188,7 +188,7 @@ pub struct GetTransactionsParams { } #[derive(Serialize, Deserialize)] -pub struct TransactionResponse<'a, T: Clone> { +pub struct TransactionResponse<'a, T: Clone + AsRef> { // in which blocks it was included pub blocks: Option>, // in which blocks it was executed diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 3f32ba79..bef9026f 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -6,10 +6,24 @@ use xelis_common::{ crypto::{key::PublicKey, hash::{Hashable, Hash, HASH_SIZE}}, difficulty::check_difficulty, transaction::{Transaction, TransactionType, EXTRA_DATA_LIMIT_SIZE}, - utils::{get_current_timestamp, format_xelis}, + utils::{get_current_timestamp, format_xelis, get_current_time}, block::{Block, BlockHeader, EXTRA_NONCE_SIZE, Difficulty}, immutable::Immutable, - serializer::Serializer, account::VersionedBalance, api::{daemon::{NotifyEvent, BlockOrderedEvent, TransactionExecutedEvent, BlockType, StableHeightChangedEvent}, DataHash}, network::Network, asset::AssetData + serializer::Serializer, + account::VersionedBalance, + api::{ + daemon::{ + NotifyEvent, + BlockOrderedEvent, + TransactionExecutedEvent, + BlockType, + StableHeightChangedEvent, + TransactionResponse + }, + DataHash + }, + network::Network, + asset::AssetData }; use crate::{ config::{ @@ -992,7 +1006,14 @@ impl Blockchain { if rpc.is_event_tracked(&NotifyEvent::TransactionAddedInMempool).await { let rpc = rpc.clone(); tokio::spawn(async move { - let data: DataHash<'_, Arc> = DataHash { hash: Cow::Owned(hash), data: Cow::Owned(tx) }; + let data: TransactionResponse<'_, Arc> = TransactionResponse { + blocks: None, + 
executed_in_block: None, + in_mempool: true, + first_seen: Some(get_current_time()), + data: DataHash { hash: Cow::Owned(hash), data: Cow::Owned(tx) } + }; + if let Err(e) = rpc.notify_clients(&NotifyEvent::TransactionAddedInMempool, json!(data)).await { debug!("Error while broadcasting event TransactionAddedInMempool to websocket: {}", e); } From 33113c2ddb6276be901e9e3162c4bfc660be43ae Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 22 Oct 2023 21:36:04 +0200 Subject: [PATCH 110/160] daemon: don't build new job if no miners --- xelis_daemon/src/rpc/getwork_server.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/xelis_daemon/src/rpc/getwork_server.rs b/xelis_daemon/src/rpc/getwork_server.rs index e8663458..4ec98653 100644 --- a/xelis_daemon/src/rpc/getwork_server.rs +++ b/xelis_daemon/src/rpc/getwork_server.rs @@ -316,7 +316,17 @@ impl GetWorkServer { // notify every miners connected to the getwork server // each miner have his own task so nobody wait on other - pub async fn notify_new_job(&self) -> Result<(), InternalRpcError> { + pub async fn notify_new_job(&self) -> Result<(), InternalRpcError> { + // Check that there is at least one miner connected + // otherwise, no need to build a new job + { + let miners = self.miners.lock().await; + if miners.is_empty() { + debug!("No miners connected, no need to notify them"); + return Ok(()); + } + } + debug!("Notify all miners for a new job"); let (header, difficulty) = { let storage = self.blockchain.get_storage().read().await; From ffaf9b2d297de8132eb5b3ced34725ea83e86bf4 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 22 Oct 2023 21:38:27 +0200 Subject: [PATCH 111/160] daemon: predict TX broadcast of common peers --- xelis_daemon/src/p2p/mod.rs | 71 +++++++++++++++++++++++-------------- 1 file changed, 44 insertions(+), 27 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 41914dbd..571af66e 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ 
b/xelis_daemon/src/p2p/mod.rs @@ -744,6 +744,32 @@ impl P2pServer { Ok(()) } + // Returns the list of all common peers we have between Peer and us + async fn get_common_peers_for(&self, peer: &Arc) -> Vec> { + let peer_list = self.peer_list.read().await; + let peer_peers = peer.get_peers(false).lock().await; + let mut common_peers = Vec::new(); + for common_peer_addr in peer_peers.iter() { + // if we have a common peer with him + if let Some(common_peer) = peer_list.get_peer_by_addr(common_peer_addr) { + if peer.get_id() != common_peer.get_id() { + let peers_received = common_peer.get_peers(false).lock().await; + let peers_sent = common_peer.get_peers(true).lock().await; + // verify that we already know that he his connected to it and that we informed him we are connected too to prevent any desync + if peers_received.iter().find( + |addr: &&SocketAddr| *addr == peer.get_outgoing_address() + ).is_some() && peers_sent.iter().find( + |addr: &&SocketAddr| *addr == common_peer.get_outgoing_address() + ).is_some() { + common_peers.push(common_peer.clone()); + } + } + } + } + + common_peers + } + async fn handle_incoming_packet(self: &Arc, peer: &Arc, packet: Packet<'_>) -> Result<(), P2pError> { match packet { Packet::Handshake(_) => { @@ -751,13 +777,15 @@ impl P2pServer { peer.get_connection().close().await?; return Err(P2pError::InvalidPacket) }, - Packet::TransactionPropagation(packet_wrapper) => { // TODO prevent spam + Packet::TransactionPropagation(packet_wrapper) => { trace!("{}: Transaction Propagation packet", peer); let (hash, ping) = packet_wrapper.consume(); let hash = hash.into_owned(); + ping.into_owned().update_peer(peer, &self.blockchain).await?; + // peer should not send us twice the same transaction - debug!("Received tx hash {} from {}", hash, peer); + debug!("Received tx hash {} from {}", hash, peer.get_outgoing_address()); { let mut txs_cache = peer.get_txs_cache().lock().await; if txs_cache.contains(&hash) { @@ -767,7 +795,15 @@ impl P2pServer { 
txs_cache.put(hash.clone(), ()); } - ping.into_owned().update_peer(peer, &self.blockchain).await?; + // Avoid sending the TX propagated to a common peer + // because we track peerlist of each peers, we can try to determinate it + // iterate over all common peers of this peer broadcaster + for common_peer in self.get_common_peers_for(&peer).await { + debug!("{} is a common peer with {}, adding TX {} to its cache", common_peer, peer, hash); + let mut txs_cache = common_peer.get_txs_cache().lock().await; + txs_cache.put(hash.clone(), ()); + } + if !self.blockchain.has_tx(&hash).await? && !self.object_tracker.has_requested_object(&hash).await { self.queued_fetcher.fetch(Arc::clone(peer), ObjectRequest::Transaction(hash)); } @@ -797,31 +833,12 @@ impl P2pServer { blocks_propagation.put(block_hash.clone(), ()); } - // Avoid sending the same block to a common peer + // Avoid sending the same block to a common peer that may have already got it // because we track peerlist of each peers, we can try to determinate it - { - let peer_list = self.peer_list.read().await; - let peer_peers = peer.get_peers(false).lock().await; - // iterate over all peers of this peer broadcaster - for common_peer_addr in peer_peers.iter() { - // if we have a common peer with him - if let Some(common_peer) = peer_list.get_peer_by_addr(common_peer_addr) { - if peer.get_id() != common_peer.get_id() { - let peers_received = common_peer.get_peers(false).lock().await; - let peers_sent = common_peer.get_peers(true).lock().await; - // verify that we already know that he his connected to it and that we informed him we are connected too to prevent any desync - if peers_received.iter().find( - |addr: &&SocketAddr| *addr == peer.get_outgoing_address() - ).is_some() && peers_sent.iter().find( - |addr: &&SocketAddr| *addr == common_peer.get_outgoing_address() - ).is_some() { - debug!("{} is a common peer with {}, adding block {} to its propagation cache", common_peer, peer, block_hash); - let mut 
blocks_propagation = peer.get_blocks_propagation().lock().await; - blocks_propagation.put(block_hash.clone(), ()); - } - } - } - } + for common_peer in self.get_common_peers_for(&peer).await { + debug!("{} is a common peer with {}, adding block {} to its propagation cache", common_peer, peer, block_hash); + let mut blocks_propagation = common_peer.get_blocks_propagation().lock().await; + blocks_propagation.put(block_hash.clone(), ()); } // check that we don't have this block in our chain From 357f138b9d01b8754f37028bed7c78e73b17ee20 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 22 Oct 2023 21:52:50 +0200 Subject: [PATCH 112/160] daemon: better pagination inventory --- xelis_daemon/src/p2p/mod.rs | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 571af66e..74a9fe47 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1164,14 +1164,9 @@ impl P2pServer { .collect::>(); let next_page = { - let all_txs_size = mempool.size(); - if skip < all_txs_size { - let left = all_txs_size - (all_txs.len() + skip); - if left > 0 { - Some(page_id + 1) - } else { - None - } + let mempool_size = mempool.size(); + if all_txs.len() == NOTIFY_MAX_LEN && mempool_size > skip && mempool_size - skip > NOTIFY_MAX_LEN { + Some(page_id + 1) } else { None } From 11f585c48e897942bb077289597f77418fdddee5 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 22 Oct 2023 22:30:59 +0200 Subject: [PATCH 113/160] daemon: prevent DDoS by large TXs count --- xelis_daemon/src/p2p/error.rs | 2 ++ xelis_daemon/src/p2p/mod.rs | 4 +-- xelis_daemon/src/p2p/queue.rs | 31 ++++++++++++----------- xelis_daemon/src/p2p/tracker.rs | 44 +++++++++++++++++++-------------- 4 files changed, 46 insertions(+), 35 deletions(-) diff --git a/xelis_daemon/src/p2p/error.rs b/xelis_daemon/src/p2p/error.rs index 0179502e..57b8975f 100644 --- a/xelis_daemon/src/p2p/error.rs +++ b/xelis_daemon/src/p2p/error.rs @@ -85,6 
+85,8 @@ pub enum P2pError { ObjectNotFound(ObjectRequest), #[error("Object not requested {}", _0)] ObjectNotRequested(ObjectRequest), + #[error("Object requested {} is not present any more in queue", _0)] + ObjectHashNotPresentInQueue(Hash), #[error("Object requested {} already requested", _0)] ObjectAlreadyRequested(ObjectRequest), #[error("Invalid object response for request, received hash: {}", _0)] diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 74a9fe47..4cf38204 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -805,7 +805,7 @@ impl P2pServer { } if !self.blockchain.has_tx(&hash).await? && !self.object_tracker.has_requested_object(&hash).await { - self.queued_fetcher.fetch(Arc::clone(peer), ObjectRequest::Transaction(hash)); + self.queued_fetcher.fetch(Arc::clone(peer), ObjectRequest::Transaction(hash)).await?; } }, Packet::BlockPropagation(packet_wrapper) => { @@ -900,7 +900,7 @@ impl P2pServer { if !contains { // retrieve one by one to prevent acquiring the lock for nothing debug!("Requesting TX {} to {} for block {}", hash, peer, block_hash); - let (response, listener) = match zelf.object_tracker.request_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash.clone())) { + let (response, listener) = match zelf.object_tracker.request_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash.clone())).await { Ok(response) => match response.await { Ok(Ok(response)) => response, _ => { diff --git a/xelis_daemon/src/p2p/queue.rs b/xelis_daemon/src/p2p/queue.rs index 16d84e94..f921dcf1 100644 --- a/xelis_daemon/src/p2p/queue.rs +++ b/xelis_daemon/src/p2p/queue.rs @@ -2,40 +2,41 @@ use std::sync::Arc; use log::{error, debug}; use tokio::sync::mpsc::{UnboundedSender, unbounded_channel}; use crate::core::{blockchain::Blockchain, storage::Storage}; -use super::{peer::Peer, packet::object::{ObjectRequest, OwnedObjectResponse}, tracker::SharedObjectTracker}; +use super::{peer::Peer, 
packet::object::{ObjectRequest, OwnedObjectResponse}, tracker::{SharedObjectTracker, WaiterResponse}, error::P2pError}; // TODO optimize to request the data but only handle in good order -// This allow to have a special queue for this and to not block/flood the other queue +// This allow to not wait for the data to be fetched to request the next one pub struct QueuedFetcher { - sender: UnboundedSender<(Arc, ObjectRequest)> + sender: UnboundedSender, + tracker: SharedObjectTracker } impl QueuedFetcher { pub fn new(blockchain: Arc>, tracker: SharedObjectTracker) -> Self { let (sender, mut receiver) = unbounded_channel(); let fetcher = Self { - sender + sender, + tracker }; tokio::spawn(async move { - while let Some((peer, request)) = receiver.recv().await { - match tracker.fetch_object_from_peer(peer.clone(), request).await { - Ok((response, listener)) => { + while let Some(waiter) = receiver.recv().await { + match waiter.await { + Ok(Ok((response, listener))) => { if let OwnedObjectResponse::Transaction(tx, hash) = response { - debug!("Adding {} to mempool from {}", hash, peer); + debug!("Adding {} to mempool from queued fetcher", hash); if let Err(e) = blockchain.add_tx_to_mempool(tx, true).await { error!("Error while adding tx {} to mempool: {}", hash, e); - peer.increment_fail_count(); } } else { error!("Received non tx object from peer"); - peer.increment_fail_count(); } listener.notify(); }, Err(e) => { error!("Error while fetching object from peer: {}", e); - } + }, + Ok(Err(e)) => error!("Error while fetching object from peer: {}", e) }; } }); @@ -43,9 +44,11 @@ impl QueuedFetcher { fetcher } - pub fn fetch(&self, peer: Arc, request: ObjectRequest) { - if let Err(e) = self.sender.send((peer, request)) { - error!("Error while sending get_data to fetcher: {}", e); + pub async fn fetch(&self, peer: Arc, request: ObjectRequest) -> Result<(), P2pError> { + let receiver = self.tracker.request_object_from_peer(peer, request).await?; + if let Err(e) = 
self.sender.send(receiver) { + error!("Error while sending object fetcher response: {}", e); } + Ok(()) } } \ No newline at end of file diff --git a/xelis_daemon/src/p2p/tracker.rs b/xelis_daemon/src/p2p/tracker.rs index 1edb04e5..d010321d 100644 --- a/xelis_daemon/src/p2p/tracker.rs +++ b/xelis_daemon/src/p2p/tracker.rs @@ -47,6 +47,10 @@ impl Request { } } + pub fn get_object(&self) -> &ObjectRequest { + &self.request + } + pub fn get_hash(&self) -> &Hash { self.request.get_hash() } @@ -75,7 +79,7 @@ pub struct ObjectTracker { } enum Message { - Request(Arc, ObjectRequest, oneshot::Sender>), + Request(Arc, Hash, oneshot::Sender>), Exit } @@ -167,34 +171,36 @@ impl ObjectTracker { Ok(()) } - pub fn request_object_from_peer(&self, peer: Arc, request: ObjectRequest) -> Result { - let (sender, receiver) = oneshot::channel(); - self.request_sender.send(Message::Request(peer, request, sender))?; - Ok(receiver) - } - - pub async fn fetch_object_from_peer(&self, peer: Arc, request: ObjectRequest) -> Result<(OwnedObjectResponse, Listener), P2pError> { - Ok(self.request_object_from_peer(peer, request)?.await??) 
- } - - async fn request_object_from_peer_internal(&self, peer: &Peer, request: ObjectRequest) -> Result<(), P2pError> { - debug!("Requesting {}", request); - let packet = Bytes::from(Packet::ObjectRequest(Cow::Borrowed(&request)).to_bytes()); - let hash = request.get_hash().clone(); - { + pub async fn request_object_from_peer(&self, peer: Arc, request: ObjectRequest) -> Result { + let hash = { let mut queue = self.queue.write().await; if queue.contains_key(request.get_hash()) { return Err(P2pError::ObjectAlreadyRequested(request)) } - queue.insert(request.get_hash().clone(), Request::new(request)); - } + let hash = request.get_hash().clone(); + queue.insert(hash.clone(), Request::new(request)); + hash + }; + + let (sender, receiver) = oneshot::channel(); + self.request_sender.send(Message::Request(peer, hash, sender))?; + Ok(receiver) + } + + async fn request_object_from_peer_internal(&self, peer: &Peer, request_hash: Hash) -> Result<(), P2pError> { + debug!("Requesting object with hash {}", request_hash); + let packet = { + let queue = self.queue.write().await; + let request = queue.get(&request_hash).ok_or_else(|| P2pError::ObjectHashNotPresentInQueue(request_hash.clone()))?; + Bytes::from(Packet::ObjectRequest(Cow::Borrowed(request.get_object())).to_bytes()) + }; // send the packet to the Peer if let Err(e) = peer.send_bytes(packet).await { error!("Error while sending object request to peer: {}", e); let mut queue = self.queue.write().await; - queue.remove(&hash); + queue.remove(&request_hash); return Err(e); } From 9ef12c5d4507cc57bcc32221b116274f3c77efa3 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 22 Oct 2023 22:32:23 +0200 Subject: [PATCH 114/160] daemon: missing .await --- xelis_daemon/src/p2p/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 4cf38204..40d3481c 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1203,7 +1203,7 @@ impl 
P2pServer { let storage = self.blockchain.get_storage().read().await; for hash in txs.into_owned() { if !mempool.contains_tx(&hash) && !storage.has_transaction(&hash).await? && !self.object_tracker.has_requested_object(&hash).await { - self.queued_fetcher.fetch(Arc::clone(peer), ObjectRequest::Transaction(hash.into_owned())); + self.queued_fetcher.fetch(Arc::clone(peer), ObjectRequest::Transaction(hash.into_owned())).await?; } } } From 16d7ba0d6c1756e6fac6b44084cf0d836e0d9562 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 22 Oct 2023 23:06:09 +0200 Subject: [PATCH 115/160] daemon: sync operation for tx request --- xelis_daemon/src/p2p/mod.rs | 16 +++++++++------- xelis_daemon/src/p2p/queue.rs | 8 ++++++-- xelis_daemon/src/p2p/tracker.rs | 8 +++----- 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 40d3481c..0e142d60 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -789,12 +789,18 @@ impl P2pServer { { let mut txs_cache = peer.get_txs_cache().lock().await; if txs_cache.contains(&hash) { - warn!("{} send us a transaction ({}) already tracked by him", peer, hash); - return Err(P2pError::AlreadyTrackedTx(hash)) + debug!("{} send us a transaction ({}) already tracked by him", peer, hash); + // TODO fix common peers detection + return Ok(()) // Err(P2pError::AlreadyTrackedTx(hash)) } txs_cache.put(hash.clone(), ()); } + // Check that the tx is not in mempool or on disk already + if !self.blockchain.has_tx(&hash).await? 
{ + self.queued_fetcher.fetch_if_not_requested(Arc::clone(peer), ObjectRequest::Transaction(hash.clone())).await?; + } + // Avoid sending the TX propagated to a common peer // because we track peerlist of each peers, we can try to determinate it // iterate over all common peers of this peer broadcaster @@ -803,10 +809,6 @@ impl P2pServer { let mut txs_cache = common_peer.get_txs_cache().lock().await; txs_cache.put(hash.clone(), ()); } - - if !self.blockchain.has_tx(&hash).await? && !self.object_tracker.has_requested_object(&hash).await { - self.queued_fetcher.fetch(Arc::clone(peer), ObjectRequest::Transaction(hash)).await?; - } }, Packet::BlockPropagation(packet_wrapper) => { trace!("Received a block propagation packet from {}", peer); @@ -1203,7 +1205,7 @@ impl P2pServer { let storage = self.blockchain.get_storage().read().await; for hash in txs.into_owned() { if !mempool.contains_tx(&hash) && !storage.has_transaction(&hash).await? && !self.object_tracker.has_requested_object(&hash).await { - self.queued_fetcher.fetch(Arc::clone(peer), ObjectRequest::Transaction(hash.into_owned())).await?; + self.queued_fetcher.fetch_if_not_requested(Arc::clone(peer), ObjectRequest::Transaction(hash.into_owned())).await?; } } } diff --git a/xelis_daemon/src/p2p/queue.rs b/xelis_daemon/src/p2p/queue.rs index f921dcf1..c6cdbc09 100644 --- a/xelis_daemon/src/p2p/queue.rs +++ b/xelis_daemon/src/p2p/queue.rs @@ -44,8 +44,12 @@ impl QueuedFetcher { fetcher } - pub async fn fetch(&self, peer: Arc, request: ObjectRequest) -> Result<(), P2pError> { - let receiver = self.tracker.request_object_from_peer(peer, request).await?; + pub async fn fetch_if_not_requested(&self, peer: Arc, request: ObjectRequest) -> Result<(), P2pError> { + let receiver = match self.tracker.request_object_from_peer(peer, request).await { + Err(P2pError::ObjectAlreadyRequested(_)) => return Ok(()), + Err(e) => return Err(e), + Ok(r) => r + }; if let Err(e) = self.sender.send(receiver) { error!("Error while sending 
object fetcher response: {}", e); } diff --git a/xelis_daemon/src/p2p/tracker.rs b/xelis_daemon/src/p2p/tracker.rs index d010321d..c051a1c5 100644 --- a/xelis_daemon/src/p2p/tracker.rs +++ b/xelis_daemon/src/p2p/tracker.rs @@ -174,12 +174,10 @@ impl ObjectTracker { pub async fn request_object_from_peer(&self, peer: Arc, request: ObjectRequest) -> Result { let hash = { let mut queue = self.queue.write().await; - if queue.contains_key(request.get_hash()) { - return Err(P2pError::ObjectAlreadyRequested(request)) - } - let hash = request.get_hash().clone(); - queue.insert(hash.clone(), Request::new(request)); + if let Some(old) = queue.insert(hash.clone(), Request::new(request)) { + return Err(P2pError::ObjectAlreadyRequested(old.request)) + } hash }; From 39fcc3142869bcd8f20d3512cd250c52c1cf1b54 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 22 Oct 2023 23:10:05 +0200 Subject: [PATCH 116/160] daemon: notify miners about new txs --- xelis_daemon/src/core/blockchain.rs | 10 ++++++++++ xelis_daemon/src/rpc/getwork_server.rs | 25 +++++++++++++++++++++++-- 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index bef9026f..343043b3 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1003,6 +1003,16 @@ impl Blockchain { // broadcast to websocket this tx if let Some(rpc) = self.rpc.lock().await.as_ref() { + // Notify miners if getwork is enabled + if let Some(getwork) = rpc.getwork_server() { + let getwork = getwork.clone(); + tokio::spawn(async move { + if let Err(e) = getwork.notify_new_job_rate_limited().await { + debug!("Error while notifying miners for new tx: {}", e); + }; + }); + } + if rpc.is_event_tracked(&NotifyEvent::TransactionAddedInMempool).await { let rpc = rpc.clone(); tokio::spawn(async move { diff --git a/xelis_daemon/src/rpc/getwork_server.rs b/xelis_daemon/src/rpc/getwork_server.rs index 4ec98653..ecada291 100644 --- 
a/xelis_daemon/src/rpc/getwork_server.rs +++ b/xelis_daemon/src/rpc/getwork_server.rs @@ -152,7 +152,10 @@ pub struct GetWorkServer { // we can keep them in cache up to STABLE_LIMIT blocks // so even a late miner have a chance to not be orphaned and be included in chain mining_jobs: Mutex>, - last_header_hash: Mutex> + last_header_hash: Mutex>, + // used only when a new TX is received in mempool + last_notify: Mutex, + notify_rate_limit_ms: u128 } impl GetWorkServer { @@ -161,7 +164,9 @@ impl GetWorkServer { miners: Mutex::new(HashMap::new()), blockchain, mining_jobs: Mutex::new(LruCache::new(STABLE_LIMIT as usize)), - last_header_hash: Mutex::new(None) + last_header_hash: Mutex::new(None), + last_notify: Mutex::new(0), + notify_rate_limit_ms: 500 // maximum one time every 500ms } } @@ -314,6 +319,22 @@ impl GetWorkServer { Ok(()) } + // notify every miners connected to the getwork server + // each miner have his own task so nobody wait on other + pub async fn notify_new_job_rate_limited(&self) -> Result<(), InternalRpcError> { + { + let now = get_current_timestamp(); + let mut last_notify = self.last_notify.lock().await; + if now - *last_notify < self.notify_rate_limit_ms { + debug!("Rate limit reached, not notifying miners"); + return Ok(()); + } + *last_notify = now; + } + + self.notify_new_job().await + } + // notify every miners connected to the getwork server // each miner have his own task so nobody wait on other pub async fn notify_new_job(&self) -> Result<(), InternalRpcError> { From afc9d5a97d0bba691d5d5b9f7239eec6d5488ce7 Mon Sep 17 00:00:00 2001 From: Slixe Date: Sun, 22 Oct 2023 23:20:01 +0200 Subject: [PATCH 117/160] daemon: reduce message level --- xelis_daemon/src/p2p/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 0e142d60..4c2fb732 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -828,7 +828,7 @@ impl P2pServer { { let mut 
blocks_propagation = peer.get_blocks_propagation().lock().await; if blocks_propagation.contains(&block_hash) { - warn!("{} send us a block ({}) already tracked by him", peer, block_hash); + debug!("{} send us a block ({}) already tracked by him", peer, block_hash); return Err(P2pError::AlreadyTrackedBlock(block_hash)) } debug!("Saving {} in blocks propagation cache for {}", block_hash, peer); From a2bbacfdea229135c51065c8eef01a5374495250 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 23 Oct 2023 00:07:34 +0200 Subject: [PATCH 118/160] daemon: queue fetcher bounded channel --- xelis_daemon/src/p2p/queue.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/xelis_daemon/src/p2p/queue.rs b/xelis_daemon/src/p2p/queue.rs index c6cdbc09..8e6be56e 100644 --- a/xelis_daemon/src/p2p/queue.rs +++ b/xelis_daemon/src/p2p/queue.rs @@ -1,19 +1,19 @@ use std::sync::Arc; use log::{error, debug}; -use tokio::sync::mpsc::{UnboundedSender, unbounded_channel}; +use tokio::sync::mpsc::{Sender, channel}; use crate::core::{blockchain::Blockchain, storage::Storage}; use super::{peer::Peer, packet::object::{ObjectRequest, OwnedObjectResponse}, tracker::{SharedObjectTracker, WaiterResponse}, error::P2pError}; // TODO optimize to request the data but only handle in good order // This allow to not wait for the data to be fetched to request the next one pub struct QueuedFetcher { - sender: UnboundedSender, + sender: Sender, tracker: SharedObjectTracker } impl QueuedFetcher { pub fn new(blockchain: Arc>, tracker: SharedObjectTracker) -> Self { - let (sender, mut receiver) = unbounded_channel(); + let (sender, mut receiver) = channel(128); let fetcher = Self { sender, tracker @@ -50,7 +50,7 @@ impl QueuedFetcher { Err(e) => return Err(e), Ok(r) => r }; - if let Err(e) = self.sender.send(receiver) { + if let Err(e) = self.sender.send(receiver).await { error!("Error while sending object fetcher response: {}", e); } Ok(()) From fc25a39dc1c44da8a6065b117f0f984ee4762fc7 Mon 
Sep 17 00:00:00 2001 From: Slixe Date: Mon, 23 Oct 2023 01:23:57 +0200 Subject: [PATCH 119/160] daemon: use has_tx function --- xelis_daemon/src/p2p/mod.rs | 30 +++++------------------------- 1 file changed, 5 insertions(+), 25 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 4c2fb732..2328a424 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -871,33 +871,13 @@ impl P2pServer { let mut response_blockers: Vec = Vec::new(); for hash in header.get_txs_hashes() { let contains = { // we don't lock one time because we may wait on p2p response - // Check in mempool first - let mut found = { - let mempool = zelf.blockchain.get_mempool().read().await; - mempool.contains_tx(hash) - }; - // Check in ObjectTracker - if !found { - if let Some(response_blocker) = zelf.object_tracker.get_response_blocker_for_requested_object(hash).await { - response_blockers.push(response_blocker); - found = true; - } - } - - // Check on chain directly - if !found { - let storage = zelf.blockchain.get_storage().read().await; - found = match storage.has_transaction(hash).await { - Ok(contains) => contains, - Err(e) => { - warn!("Error while checking if we have tx {} in storage: {}", hash, e); - false - } - }; + if let Some(response_blocker) = zelf.object_tracker.get_response_blocker_for_requested_object(hash).await { + response_blockers.push(response_blocker); + true + } else { + zelf.blockchain.has_tx(hash).await.unwrap_or(false) } - - found }; if !contains { // retrieve one by one to prevent acquiring the lock for nothing From 662b8677c9db95aab3c6eead2a4ec17aead4f790 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 23 Oct 2023 12:14:18 +0200 Subject: [PATCH 120/160] daemon: fix rewind because of client protocol --- xelis_daemon/src/core/storage/sled.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index 951bdc81..f84d11aa 
100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -1163,7 +1163,9 @@ impl Storage for SledStorage { 'main: loop { // check if the next block is alone at its height, if yes stop rewinding if done >= count || height == 0 { // prevent removing genesis block + trace!("Done: {done}, count: {count}, height: {height}"); let tmp_blocks_at_height = self.get_blocks_at_height(height).await?; + trace!("tmp_blocks_at_height: {}", tmp_blocks_at_height.len()); if tmp_blocks_at_height.len() == 1 { for unique in tmp_blocks_at_height { if self.is_block_topological_ordered(&unique).await { @@ -1223,7 +1225,11 @@ impl Storage for SledStorage { } trace!("Deleting TX {} in block {}", tx_hash, hash); - self.delete_data(&self.transactions, &self.transactions_cache, tx_hash).await?; + // We have to check first as we may have already deleted it because of client protocol + // which allow multiple time the same txs in differents blocks + if self.contains_data(&self.transactions, &self.transactions_cache, tx_hash).await? 
{ + self.delete_data(&self.transactions, &self.transactions_cache, tx_hash).await?; + } txs.push((tx_hash.clone(), tx)); } From b14e695cd067b04451623a63e317db4e90bdfec2 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 23 Oct 2023 14:53:03 +0200 Subject: [PATCH 121/160] daemon: simplify rewind system --- xelis_daemon/src/core/blockchain.rs | 9 +- xelis_daemon/src/core/storage/mod.rs | 5 +- xelis_daemon/src/core/storage/sled.rs | 233 ++++++++++---------------- xelis_daemon/src/p2p/mod.rs | 33 ++-- xelis_daemon/src/p2p/peer.rs | 2 +- 5 files changed, 116 insertions(+), 166 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 343043b3..569ea1f4 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -403,7 +403,7 @@ impl Blockchain { for topoheight in last_pruned_topoheight..located_sync_topoheight { trace!("Pruning block at topoheight {}", topoheight); // delete block - let block_header = storage.delete_block_at_topoheight(topoheight).await?; + let _ = storage.delete_block_at_topoheight(topoheight).await?; // delete balances for all assets for asset in &assets { @@ -412,13 +412,6 @@ impl Blockchain { // delete nonces versions storage.delete_versioned_nonces_at_topoheight(topoheight).await?; - - // delete transactions for this block - for tx_hash in block_header.get_txs_hashes() { - if storage.has_transaction(tx_hash).await? 
{ - storage.delete_tx(tx_hash).await?; - } - } } storage.set_pruned_topoheight(located_sync_topoheight)?; Ok(located_sync_topoheight) diff --git a/xelis_daemon/src/core/storage/mod.rs b/xelis_daemon/src/core/storage/mod.rs index 87daa95b..f8c306da 100644 --- a/xelis_daemon/src/core/storage/mod.rs +++ b/xelis_daemon/src/core/storage/mod.rs @@ -33,7 +33,7 @@ pub trait Storage: DifficultyProvider + Sync + Send + 'static { fn set_pruned_topoheight(&mut self, pruned_topoheight: u64) -> Result<(), BlockchainError>; // delete block at topoheight, and all pointers (hash_at_topo, topo_by_hash, reward, supply, diff, cumulative diff...) - async fn delete_block_at_topoheight(&mut self, topoheight: u64) -> Result, BlockchainError>; + async fn delete_block_at_topoheight(&mut self, topoheight: u64) -> Result<(Hash, Arc, Vec<(Hash, Arc)>), BlockchainError>; async fn delete_tx(&mut self, hash: &Hash) -> Result, BlockchainError>; // delete versioned balances at a specific topoheight async fn delete_versioned_balances_for_asset_at_topoheight(&mut self, asset: &Hash, topoheight: u64) -> Result<(), BlockchainError>; @@ -108,6 +108,7 @@ pub trait Storage: DifficultyProvider + Sync + Send + 'static { async fn has_transaction(&self, hash: &Hash) -> Result; async fn add_new_block(&mut self, block: Arc, txs: &Vec>, difficulty: Difficulty, hash: Hash) -> Result<(), BlockchainError>; + // Count is the number of blocks (topoheight) to rewind async fn pop_blocks(&mut self, mut height: u64, mut topoheight: u64, count: u64) -> Result<(u64, u64, Vec<(Hash, Arc)>), BlockchainError>; fn has_blocks(&self) -> bool; fn count_blocks(&self) -> usize; @@ -120,8 +121,10 @@ pub trait Storage: DifficultyProvider + Sync + Send + 'static { async fn get_top_block(&self) -> Result; async fn get_top_block_header(&self) -> Result<(Arc, Hash), BlockchainError>; + async fn set_blocks_at_height(&self, tips: Tips, height: u64) -> Result<(), BlockchainError>; async fn get_blocks_at_height(&self, height: u64) -> Result; 
async fn add_block_hash_at_height(&mut self, hash: Hash, height: u64) -> Result<(), BlockchainError>; + async fn remove_block_hash_at_height(&self, hash: &Hash, height: u64) -> Result<(), BlockchainError>; async fn get_topo_height_for_hash(&self, hash: &Hash) -> Result; async fn set_topo_height_for_block(&mut self, hash: &Hash, topoheight: u64) -> Result<(), BlockchainError>; diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index f84d11aa..c9ca2122 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -399,40 +399,69 @@ impl Storage for SledStorage { } // Delete the whole block using its topoheight - async fn delete_block_at_topoheight(&mut self, topoheight: u64) -> Result, BlockchainError> { + async fn delete_block_at_topoheight(&mut self, topoheight: u64) -> Result<(Hash, Arc, Vec<(Hash, Arc)>), BlockchainError> { + trace!("Delete block at topoheight {topoheight}"); + // delete topoheight<->hash pointers let hash = self.delete_cacheable_data(&self.hash_at_topo, &self.hash_at_topo_cache, &topoheight).await?; + trace!("Hash is {hash} at topo {topoheight}"); + self.delete_cacheable_data::(&self.topo_by_hash, &self.topo_by_hash_cache, &hash).await?; - let topoheight_bytes = topoheight.to_be_bytes(); - // delete block reward - self.rewards.remove(topoheight_bytes)?; - // delete supply - self.supply.remove(topoheight_bytes)?; - // delete difficulty - self.difficulty.remove(hash.as_bytes())?; + trace!("deleting block header {}", hash); + let block = self.delete_data(&self.blocks, &self.blocks_cache, &hash).await?; + trace!("block header deleted successfully"); + + trace!("Deleting supply and block reward"); + let supply: u64 = self.delete_cacheable_data(&self.supply, &None, &topoheight).await?; + trace!("Supply was {}", supply); + + let reward: u64 = self.delete_cacheable_data(&self.rewards, &None, &topoheight).await?; + trace!("Reward for block {} was: {}", hash, reward); + + 
trace!("Deleting difficulty"); + let _: Difficulty = self.delete_cacheable_data(&self.difficulty, &None, &hash).await?; + + trace!("Deleting cumulative difficulty"); + let cumulative_difficulty: Difficulty = self.delete_cacheable_data(&self.cumulative_difficulty, &self.cumulative_difficulty_cache, &hash).await?; + trace!("Cumulative difficulty deleted: {}", cumulative_difficulty); + + let mut txs = Vec::new(); + for tx_hash in block.get_transactions() { + if self.has_tx_blocks(tx_hash)? { + let mut blocks: Tips = self.delete_cacheable_data(&self.tx_blocks, &None, tx_hash).await?; + let blocks_len = blocks.len(); + blocks.remove(&hash); + self.set_blocks_for_tx(tx_hash, &blocks)?; + trace!("Tx was included in {}, blocks left: {}", blocks_len, blocks.into_iter().map(|b| b.to_string()).collect::>().join(", ")); + } - // delete cummulative difficulty - self.cumulative_difficulty.remove(hash.as_bytes())?; + if self.is_tx_executed_in_a_block(tx_hash)? { + trace!("Tx {} was executed, deleting", tx_hash); + self.remove_tx_executed(&tx_hash)?; + } + + // We have to check first as we may have already deleted it because of client protocol + // which allow multiple time the same txs in differents blocks + if self.contains_data(&self.transactions, &self.transactions_cache, tx_hash).await? 
{ + trace!("Deleting TX {} in block {}", tx_hash, hash); + let tx: Arc = self.delete_data(&self.transactions, &self.transactions_cache, tx_hash).await?; + txs.push((tx_hash.clone(), tx)); + } + } - // delete block header - let block_header = self.delete_data(&self.blocks, &self.blocks_cache, &hash).await?; // remove the block hash from the set, and delete the set if empty - let mut blocks = self.get_blocks_at_height(block_header.get_height()).await?; - blocks.remove(&hash); - let height_bytes = block_header.get_height().to_be_bytes(); - if blocks.is_empty() { - self.blocks_at_height.remove(height_bytes)?; - } else { - self.blocks_at_height.insert(height_bytes, blocks.to_bytes())?; + if self.has_blocks_at_height(block.get_height()).await? { + self.remove_block_hash_at_height(&hash, block.get_height()).await?; } + // Delete cache of past blocks if let Some(cache) = &self.past_blocks_cache { let mut cache = cache.lock().await; cache.pop(&hash); } - Ok(block_header) + Ok((hash, block, txs)) } async fn delete_tx(&mut self, hash: &Hash) -> Result, BlockchainError> { @@ -1128,26 +1157,8 @@ impl Storage for SledStorage { // search the lowest topo height available based on count + 1 // (last lowest topo height accepted) - let mut lowest_topo = topoheight; - let mut lowest_height = height; - trace!("search lowest topo height available, height = {}, count = {}", height, count); - for i in (height-count..=height).rev() { - trace!("checking lowest topoheight for blocks at {}", i); - if self.has_blocks_at_height(i).await? { - for hash in self.get_blocks_at_height(i).await? 
{ - if self.is_block_topological_ordered(&hash).await { - let topo = self.get_topo_height_for_hash(&hash).await?; - if topo < lowest_topo { - lowest_topo = topo; - } - } - } - lowest_height = i; - } else { - warn!("No blocks found at {}, how ?", i); - } - } - trace!("Lowest topoheight for rewind: {}, height: {}", lowest_topo, lowest_height); + let mut lowest_topo = topoheight - count; + trace!("Lowest topoheight for rewind: {}", lowest_topo); let pruned_topoheight = self.get_pruned_topoheight()?.unwrap_or(0); if lowest_topo < pruned_topoheight { @@ -1161,115 +1172,34 @@ impl Storage for SledStorage { let mut txs = Vec::new(); let mut done = 0; 'main: loop { - // check if the next block is alone at its height, if yes stop rewinding - if done >= count || height == 0 { // prevent removing genesis block - trace!("Done: {done}, count: {count}, height: {height}"); - let tmp_blocks_at_height = self.get_blocks_at_height(height).await?; - trace!("tmp_blocks_at_height: {}", tmp_blocks_at_height.len()); - if tmp_blocks_at_height.len() == 1 { - for unique in tmp_blocks_at_height { - if self.is_block_topological_ordered(&unique).await { - topoheight = self.get_topo_height_for_hash(&unique).await?; - if topoheight <= lowest_topo { - trace!("Unique block at height {} and topoheight {} found!", height, topoheight); - break 'main; - } - } - } - } + // stop rewinding if its genesis block or if we reached the lowest topo + if topoheight <= lowest_topo || topoheight == 0 || height == 0 { // prevent removing genesis block + trace!("Done: {done}, count: {count}, height: {height}, topoheight: {topoheight}"); + break 'main; } - // get all blocks at same height, and delete current block hash from the list - trace!("Searching blocks at height {}", height); - let blocks_at_height: Tips = self.delete_cacheable_data(&self.blocks_at_height, &None, &height).await?; - trace!("Blocks at height {}: {}", height, blocks_at_height.len()); - - for hash in blocks_at_height { - trace!("deleting block 
header {}", hash); - let block = self.delete_data(&self.blocks, &self.blocks_cache, &hash).await?; - trace!("block header deleted successfully"); - - let block_topoheight = if self.is_block_topological_ordered(&hash).await { - let topoheight = self.get_topo_height_for_hash(&hash).await?; - trace!("Deleting supply and block reward"); - let supply: u64 = self.delete_cacheable_data(&self.supply, &None, &topoheight).await?; - trace!("Supply was {}", supply); - - let reward: u64 = self.delete_cacheable_data(&self.rewards, &None, &topoheight).await?; - trace!("Reward for block {} was: {}", hash, reward); - Some(topoheight) - } else { - None - }; - - trace!("Deleting difficulty"); - let _: Difficulty = self.delete_cacheable_data(&self.difficulty, &None, &hash).await?; - - trace!("Deleting cumulative difficulty"); - let cumulative_difficulty: Difficulty = self.delete_cacheable_data(&self.cumulative_difficulty, &self.cumulative_difficulty_cache, &hash).await?; - trace!("Cumulative difficulty deleted: {}", cumulative_difficulty); - - for tx_hash in block.get_transactions() { - let tx: Arc = self.delete_data(&self.transactions, &self.transactions_cache, tx_hash).await?; - if self.has_tx_blocks(tx_hash)? { - let mut blocks: Tips = self.delete_cacheable_data(&self.tx_blocks, &None, tx_hash).await?; - let blocks_len = blocks.len(); - blocks.remove(&hash); - self.set_blocks_for_tx(tx_hash, &blocks)?; - trace!("Tx was included in {}, blocks left: {}", blocks_len, blocks.into_iter().map(|b| b.to_string()).collect::>().join(", ")); - } - - if self.is_tx_executed_in_a_block(tx_hash)? { - trace!("Tx {} was executed, deleting", tx_hash); - self.remove_tx_executed(&tx_hash)?; - } - - trace!("Deleting TX {} in block {}", tx_hash, hash); - // We have to check first as we may have already deleted it because of client protocol - // which allow multiple time the same txs in differents blocks - if self.contains_data(&self.transactions, &self.transactions_cache, tx_hash).await? 
{ - self.delete_data(&self.transactions, &self.transactions_cache, tx_hash).await?; - } - - txs.push((tx_hash.clone(), tx)); - } - - // if block is ordered, delete data that are linked to it - if let Some(topo) = block_topoheight { - if topo < topoheight { - topoheight = topo; - } + // Delete the hash at topoheight + let (hash, block, block_txs) = self.delete_block_at_topoheight(topoheight).await?; + txs.extend(block_txs); - trace!("Block was at topoheight {}", topo); - self.delete_cacheable_data(&self.topo_by_hash, &self.topo_by_hash_cache, &hash).await?; - - if let Ok(hash_at_topo) = self.get_hash_at_topo_height(topo).await { - if hash_at_topo == hash { - trace!("Deleting hash '{}' at topo height '{}'", hash_at_topo, topo); - self.delete_cacheable_data(&self.hash_at_topo, &self.hash_at_topo_cache, &topo).await?; - } - } - } - - // generate new tips - trace!("Removing {} from {} tips", hash, tips.len()); - tips.remove(&hash); - trace!("Tips: {}", tips.iter().map(|b| b.to_string()).collect::>().join(", ")); - - for hash in block.get_tips() { - trace!("Adding {} to {} tips", hash, tips.len()); - tips.insert(hash.clone()); - } + // generate new tips + trace!("Removing {} from {} tips", hash, tips.len()); + tips.remove(&hash); + + for hash in block.get_tips() { + trace!("Adding {} to {} tips", hash, tips.len()); + tips.insert(hash.clone()); } + topoheight -= 1; // height of old block become new height - height -= 1; + if block.get_height() < height { + height = block.get_height(); + } done += 1; } - debug!("Blocks processed {}, new topoheight: {}, tips: {}", done, topoheight, tips.len()); - for hash in &tips { - trace!("tip {} at height {}", hash, self.get_height_for_block_hash(&hash).await?); - } + + debug!("Blocks processed {}, new topoheight: {}, new height: {}, tips: {}", done, topoheight, height, tips.len()); // clean all assets let mut deleted_assets = HashSet::new(); @@ -1471,6 +1401,12 @@ impl Storage for SledStorage { 
Ok(self.blocks_at_height.contains_key(&height.to_be_bytes())?) } + async fn set_blocks_at_height(&self, tips: Tips, height: u64) -> Result<(), BlockchainError> { + trace!("set {} blocks at height {}", tips.len(), height); + self.blocks_at_height.insert(height.to_be_bytes(), tips.to_bytes())?; + Ok(()) + } + // returns all blocks hash at specified height async fn get_blocks_at_height(&self, height: u64) -> Result { trace!("get blocks at height {}", height); @@ -1489,8 +1425,21 @@ impl Storage for SledStorage { }; tips.insert(hash); + self.set_blocks_at_height(tips, height).await + } + + async fn remove_block_hash_at_height(&self, hash: &Hash, height: u64) -> Result<(), BlockchainError> { + trace!("remove block {} at height {}", hash, height); + let mut tips = self.get_blocks_at_height(height).await?; + tips.remove(hash); + + // Delete the height if there is no blocks present anymore + if tips.is_empty() { + self.blocks_at_height.remove(&height.to_be_bytes())?; + } else { + self.set_blocks_at_height(tips, height).await?; + } - self.blocks_at_height.insert(height.to_be_bytes(), tips.to_bytes())?; Ok(()) } diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 2328a424..907eb5f5 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -745,6 +745,12 @@ impl P2pServer { } // Returns the list of all common peers we have between Peer and us + // TODO fix common peers detection + // Problem is: + // We are connected to node A and node B, we know that they are connected each other + // But they may not already shared their peerlist about us so they don't know we are + // a common peer between them two, which result in false positive in our case and they send + // us both the same object async fn get_common_peers_for(&self, peer: &Arc) -> Vec> { let peer_list = self.peer_list.read().await; let peer_peers = peer.get_peers(false).lock().await; @@ -790,8 +796,7 @@ impl P2pServer { let mut txs_cache = peer.get_txs_cache().lock().await; 
if txs_cache.contains(&hash) { debug!("{} send us a transaction ({}) already tracked by him", peer, hash); - // TODO fix common peers detection - return Ok(()) // Err(P2pError::AlreadyTrackedTx(hash)) + return Err(P2pError::AlreadyTrackedTx(hash)) } txs_cache.put(hash.clone(), ()); } @@ -995,19 +1000,19 @@ impl P2pServer { debug!("{} found a common point with block {} at {} for sync, received {} blocks", peer, common_point.get_hash(), common_point.get_topoheight(), response_size); let pop_count = { let storage = self.blockchain.get_storage().read().await; - let block_height = match storage.get_height_for_block_hash(common_point.get_hash()).await { - Ok(height) => height, - Err(e) => { - warn!("{} sent us an invalid common point: {}", peer, e); - return Err(P2pError::InvalidCommonPoint(common_point.get_topoheight())) - } - }; let topoheight = storage.get_topo_height_for_hash(common_point.get_hash()).await?; if topoheight != common_point.get_topoheight() { - error!("{} sent us a valid block hash, but at invalid topoheight (expected: {}, got: {})!", peer, block_height, common_point.get_topoheight()); - return Err(P2pError::InvalidPacket) + error!("{} sent us a valid block hash, but at invalid topoheight (expected: {}, got: {})!", peer, topoheight, common_point.get_topoheight()); + return Err(P2pError::InvalidCommonPoint(common_point.get_topoheight())) + } + + let block_height = storage.get_height_for_block_hash(common_point.get_hash()).await?; + // We are under the stable height, rewind is necessary + if block_height <= self.blockchain.get_stable_height() { + self.blockchain.get_topo_height() - topoheight + } else { + 0 } - self.blockchain.get_height() - block_height }; let peer = Arc::clone(peer); @@ -1086,7 +1091,7 @@ impl P2pServer { } }, ObjectRequest::Transaction(hash) => { - let on_disk = { + let search_on_disk = { let mempool = self.blockchain.get_mempool().read().await; if let Ok(tx) = mempool.view_tx(hash) { 
peer.send_packet(Packet::ObjectResponse(ObjectResponse::Transaction(Cow::Borrowed(tx)))).await?; @@ -1097,7 +1102,7 @@ impl P2pServer { } }; - if on_disk { + if search_on_disk { debug!("Looking on disk for transaction {}", hash); let storage = self.blockchain.get_storage().read().await; if storage.has_transaction(hash).await? { diff --git a/xelis_daemon/src/p2p/peer.rs b/xelis_daemon/src/p2p/peer.rs index 89afb38f..76e2ee3b 100644 --- a/xelis_daemon/src/p2p/peer.rs +++ b/xelis_daemon/src/p2p/peer.rs @@ -280,7 +280,7 @@ impl Peer { } pub async fn request_boostrap_chain(&self, step: StepRequest<'_>) -> Result { - debug!("Requesting bootstrap chain step: {:?}", step); + debug!("Requesting bootstrap chain step: {:?}", step.kind()); let step_kind = step.kind(); let (sender, receiver) = tokio::sync::oneshot::channel(); { From 9ca44f7dd20fd95196c990da88ef0d0961183a13 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 23 Oct 2023 15:56:26 +0200 Subject: [PATCH 122/160] daemon: add trace log for sync block --- xelis_daemon/src/core/blockchain.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 569ea1f4..dc4c0b28 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -486,6 +486,7 @@ impl Blockchain { } async fn is_sync_block_at_height(&self, storage: &S, hash: &Hash, height: u64) -> Result { + trace!("is sync block {} at height {}", hash, height); let block_height = storage.get_height_for_block_hash(hash).await?; if block_height == 0 { // genesis block is a sync block return Ok(true) From 54134afff65e8eb25b2466e5b1cf2e632186b467 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 23 Oct 2023 16:26:06 +0200 Subject: [PATCH 123/160] daemon: save nonces before requesting balances --- xelis_daemon/src/p2p/mod.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 
907eb5f5..fea62778 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1726,7 +1726,7 @@ impl P2pServer { // we add 1 for the genesis block added below while i < topoheight && topoheight - i >= pruned_topoheight && blocks.len() + 1 < CHAIN_SYNC_REQUEST_MAX_BLOCKS { - trace!("Requesting hash at topo {} for ChainInfo", topoheight - i); + trace!("Requesting hash at topo {} for building list of blocks id", topoheight - i); let hash = storage.get_hash_at_topo_height(topoheight - i).await?; blocks.push(BlockId::new(hash, topoheight - i)); match blocks.len() { @@ -1836,12 +1836,19 @@ impl P2pServer { }, // fetch all new accounts StepResponse::Keys(keys, next_page) => { + debug!("Requesting nonces for keys"); let StepResponse::Nonces(nonces) = peer.request_boostrap_chain(StepRequest::Nonces(stable_topoheight, Cow::Borrowed(&keys))).await? else { // shouldn't happen error!("Received an invalid StepResponse (how ?) while fetching nonces"); return Err(P2pError::InvalidPacket.into()) }; + // save all nonces + for (key, nonce) in keys.iter().zip(nonces) { + debug!("Saving nonce {} for {}", nonce, key); + storage.set_nonce_at_topoheight(key, nonce, stable_topoheight).await?; + } + // TODO don't retrieve ALL each time but one by one // otherwise in really long time, it may consume lot of memory for asset in storage.get_assets().await? 
{ @@ -1865,12 +1872,6 @@ impl P2pServer { } } - // save all nonces - for (key, nonce) in keys.into_iter().zip(nonces) { - debug!("Saving nonce {} for {}", nonce, key); - storage.set_nonce_at_topoheight(&key, nonce, stable_topoheight).await?; - } - if next_page.is_some() { Some(StepRequest::Keys(our_topoheight, stable_topoheight, next_page)) } else { From 54bd29aa0e93b042a8cd54d5d40f4faa81217123 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 23 Oct 2023 16:32:20 +0200 Subject: [PATCH 124/160] daemon: fix potential deadlock --- xelis_daemon/src/p2p/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index fea62778..51e81c86 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1186,8 +1186,8 @@ impl P2pServer { } } - let mempool = self.blockchain.get_mempool().read().await; let storage = self.blockchain.get_storage().read().await; + let mempool = self.blockchain.get_mempool().read().await; for hash in txs.into_owned() { if !mempool.contains_tx(&hash) && !storage.has_transaction(&hash).await? 
&& !self.object_tracker.has_requested_object(&hash).await { self.queued_fetcher.fetch_if_not_requested(Arc::clone(peer), ObjectRequest::Transaction(hash.into_owned())).await?; From 270642a664c741d7c0a216abbafb6145ad1d1466 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 23 Oct 2023 16:50:02 +0200 Subject: [PATCH 125/160] daemon: mempool fix --- xelis_daemon/src/core/mempool.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/xelis_daemon/src/core/mempool.rs b/xelis_daemon/src/core/mempool.rs index 9faf9594..ad0f8eb0 100644 --- a/xelis_daemon/src/core/mempool.rs +++ b/xelis_daemon/src/core/mempool.rs @@ -248,11 +248,17 @@ impl NonceCache { } pub fn has_tx_with_same_nonce(&self, nonce: u64) -> Option<&Arc> { - if nonce < self.min || nonce > self.max { + if nonce < self.min || nonce > self.max || self.txs.is_empty() { return None; } - let index = ((nonce - self.min) % (self.max - self.min)) as usize; + trace!("has tx with same nonce: {}, max: {}, min: {}, size: {}", nonce, self.max, self.min, self.txs.len()); + let mut r = self.max - self.min; + if r == 0 { + r = self.txs.len() as u64; + } + + let index = ((nonce - self.min) % r) as usize; self.txs.get_index(index) } } \ No newline at end of file From eeb8d1391554d514a48c46d9d24958b903a091e0 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 23 Oct 2023 17:51:52 +0200 Subject: [PATCH 126/160] daemon: attempt to fix broken versioned nonce --- xelis_daemon/src/core/storage/sled.rs | 14 +++++++++++--- xelis_daemon/src/p2p/mod.rs | 5 ++--- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/xelis_daemon/src/core/storage/sled.rs b/xelis_daemon/src/core/storage/sled.rs index c9ca2122..c0e02b1d 100644 --- a/xelis_daemon/src/core/storage/sled.rs +++ b/xelis_daemon/src/core/storage/sled.rs @@ -1048,6 +1048,7 @@ impl Storage for SledStorage { self.load_from_disk(tree, &key.to_bytes()) } + // topoheight is inclusive bounds async fn get_nonce_at_maximum_topoheight(&self, key: &PublicKey, 
topoheight: u64) -> Result, BlockchainError> { trace!("get nonce at maximum topoheight {} for {}", topoheight, key); // check first that this address has nonce, if no returns None @@ -1058,7 +1059,7 @@ impl Storage for SledStorage { let (topo, mut version) = self.get_last_nonce(key).await?; trace!("Last version of nonce for {} is at topoheight {}", key, topo); // if it's the latest and its under the maximum topoheight - if topo < topoheight { + if topo <= topoheight { trace!("Last version nonce (valid) found at {} (maximum topoheight = {})", topo, topoheight); return Ok(Some((topo, version))) } @@ -1067,7 +1068,7 @@ impl Storage for SledStorage { while let Some(previous) = version.get_previous_topoheight() { let previous_version = self.get_nonce_at_exact_topoheight(key, previous).await?; trace!("previous nonce version is at {}", previous); - if previous < topoheight { + if previous <= topoheight { trace!("Highest version nonce found at {} (maximum topoheight = {})", previous, topoheight); return Ok(Some((previous, previous_version))) } @@ -1086,8 +1087,15 @@ impl Storage for SledStorage { async fn set_nonce_at_topoheight(&mut self, key: &PublicKey, nonce: u64, topoheight: u64) -> Result<(), BlockchainError> { trace!("set nonce to {} for {} at topo {}", nonce, key, topoheight); + let previous_topoheight = if topoheight > 0 { + self.get_nonce_at_maximum_topoheight(key, topoheight - 1).await?.map(|(topo, _)| topo) + } else { + None + }; + + let versioned = VersionedNonce::new(nonce, previous_topoheight); let tree = self.get_versioned_nonce_tree(topoheight).await?; - tree.insert(&key.as_bytes(), &nonce.to_be_bytes())?; + tree.insert(&key.as_bytes(), versioned.to_bytes())?; self.set_last_topoheight_for_nonce(key, topoheight)?; Ok(()) } diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 51e81c86..b6d644a9 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -16,7 +16,6 @@ use xelis_common::{ block::{BlockHeader, Block}, 
utils::get_current_time, immutable::Immutable, - account::VersionedNonce, api::daemon::{NotifyEvent, PeerPeerDisconnectedEvent} }; use crate::{ @@ -1673,8 +1672,8 @@ impl P2pServer { StepRequest::Nonces(topoheight, keys) => { let mut nonces = Vec::with_capacity(keys.len()); for key in keys.iter() { - let (_, version) = storage.get_nonce_at_maximum_topoheight(key, topoheight).await?.unwrap_or_else(|| (0, VersionedNonce::new(0, None))); - nonces.push(version.get_nonce()); + let nonce = storage.get_nonce_at_maximum_topoheight(key, topoheight).await?.map(|(_, v)| v.get_nonce()).unwrap_or(0); + nonces.push(nonce); } StepResponse::Nonces(nonces) }, From 2f16c500473be5541e00c7bb1dec37d86e0e7642 Mon Sep 17 00:00:00 2001 From: Slixe Date: Mon, 23 Oct 2023 23:33:25 +0200 Subject: [PATCH 127/160] daemon: rework ObjectTracker, delete QueuedFetcher --- xelis_daemon/src/p2p/mod.rs | 45 ++------ xelis_daemon/src/p2p/queue.rs | 58 ---------- xelis_daemon/src/p2p/tracker.rs | 181 ++++++++++++++++++++++---------- 3 files changed, 133 insertions(+), 151 deletions(-) delete mode 100644 xelis_daemon/src/p2p/queue.rs diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index b6d644a9..eabd3f6c 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -5,7 +5,6 @@ pub mod packet; pub mod peer_list; pub mod chain_validator; mod tracker; -mod queue; use indexmap::IndexSet; use lru::LruCache; @@ -55,7 +54,6 @@ use self::{ }, peer::Peer, tracker::{ObjectTracker, SharedObjectTracker}, - queue::QueuedFetcher, peer_list::{SharedPeerList, PeerList}, connection::{State, Connection}, error::P2pError @@ -103,7 +101,6 @@ pub struct P2pServer { verify_syncing_time_out: AtomicBool, // chain sync timeout check last_sync_request_sent: AtomicU64, // used to check if we are already syncing with one peer or not object_tracker: SharedObjectTracker, // used to requests objects to peers and avoid requesting the same object to multiple peers - queued_fetcher: 
QueuedFetcher, // used to requests all propagated txs in one task only is_running: AtomicBool, // used to check if the server is running or not in tasks blocks_propagation_queue: Mutex> // Synced cache to prevent concurrent tasks adding the block } @@ -121,8 +118,7 @@ impl P2pServer { // create mspc channel let (connections_sender, receiver) = mpsc::unbounded_channel(); - let object_tracker = ObjectTracker::new(); - let queued_fetcher = QueuedFetcher::new(Arc::clone(&blockchain), Arc::clone(&object_tracker)); + let object_tracker = ObjectTracker::new(blockchain.clone()); let server = Self { peer_id, @@ -136,7 +132,6 @@ impl P2pServer { verify_syncing_time_out: AtomicBool::new(false), last_sync_request_sent: AtomicU64::new(0), object_tracker, - queued_fetcher, is_running: AtomicBool::new(true), blocks_propagation_queue: Mutex::new(LruCache::new(STABLE_LIMIT as usize * TIPS_LIMIT)) }; @@ -802,7 +797,7 @@ impl P2pServer { // Check that the tx is not in mempool or on disk already if !self.blockchain.has_tx(&hash).await? 
{ - self.queued_fetcher.fetch_if_not_requested(Arc::clone(peer), ObjectRequest::Transaction(hash.clone())).await?; + self.object_tracker.request_object_from_peer(Arc::clone(peer), ObjectRequest::Transaction(hash.clone())).await?; } // Avoid sending the TX propagated to a common peer @@ -886,37 +881,15 @@ impl P2pServer { if !contains { // retrieve one by one to prevent acquiring the lock for nothing debug!("Requesting TX {} to {} for block {}", hash, peer, block_hash); - let (response, listener) = match zelf.object_tracker.request_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash.clone())).await { - Ok(response) => match response.await { - Ok(Ok(response)) => response, - _ => { - error!("Error while handling response for TX {} from {}", hash, peer); - peer.increment_fail_count(); - return; - } - }, - Err(e) => { - error!("Error while requesting TX {} from {}: {}", hash, peer, e); + if let Err(e) = zelf.object_tracker.request_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash.clone())).await { + error!("Error while requesting TX {} to {} for block {}: {}", hash, peer, block_hash, e); peer.increment_fail_count(); return; - } - }; - if let OwnedObjectResponse::Transaction(tx, _) = response { - if let Err(e) = zelf.blockchain.add_tx_to_mempool(tx, false).await { - if let BlockchainError::TxAlreadyInMempool(_) = e { - debug!("TX {} is already in mempool finally, another peer was faster", hash); - } else { - error!("Error while adding new requested tx to mempool: {}", e); - peer.increment_fail_count(); - } - } - } else { - error!("Invalid object response received from {}, expected {} got {}", peer, hash, response.get_hash()); - peer.increment_fail_count(); - return; } - // if listener is dropped before it is ok, receivers will stop listening - listener.notify(); + + if let Some(response_blocker) = zelf.object_tracker.get_response_blocker_for_requested_object(hash).await { + response_blockers.push(response_blocker); + } } } @@ -1189,7 
+1162,7 @@ impl P2pServer { let mempool = self.blockchain.get_mempool().read().await; for hash in txs.into_owned() { if !mempool.contains_tx(&hash) && !storage.has_transaction(&hash).await? && !self.object_tracker.has_requested_object(&hash).await { - self.queued_fetcher.fetch_if_not_requested(Arc::clone(peer), ObjectRequest::Transaction(hash.into_owned())).await?; + self.object_tracker.request_object_from_peer(Arc::clone(peer), ObjectRequest::Transaction(hash.into_owned())).await?; } } } diff --git a/xelis_daemon/src/p2p/queue.rs b/xelis_daemon/src/p2p/queue.rs deleted file mode 100644 index 8e6be56e..00000000 --- a/xelis_daemon/src/p2p/queue.rs +++ /dev/null @@ -1,58 +0,0 @@ -use std::sync::Arc; -use log::{error, debug}; -use tokio::sync::mpsc::{Sender, channel}; -use crate::core::{blockchain::Blockchain, storage::Storage}; -use super::{peer::Peer, packet::object::{ObjectRequest, OwnedObjectResponse}, tracker::{SharedObjectTracker, WaiterResponse}, error::P2pError}; - -// TODO optimize to request the data but only handle in good order -// This allow to not wait for the data to be fetched to request the next one -pub struct QueuedFetcher { - sender: Sender, - tracker: SharedObjectTracker -} - -impl QueuedFetcher { - pub fn new(blockchain: Arc>, tracker: SharedObjectTracker) -> Self { - let (sender, mut receiver) = channel(128); - let fetcher = Self { - sender, - tracker - }; - - tokio::spawn(async move { - while let Some(waiter) = receiver.recv().await { - match waiter.await { - Ok(Ok((response, listener))) => { - if let OwnedObjectResponse::Transaction(tx, hash) = response { - debug!("Adding {} to mempool from queued fetcher", hash); - if let Err(e) = blockchain.add_tx_to_mempool(tx, true).await { - error!("Error while adding tx {} to mempool: {}", hash, e); - } - } else { - error!("Received non tx object from peer"); - } - listener.notify(); - }, - Err(e) => { - error!("Error while fetching object from peer: {}", e); - }, - Ok(Err(e)) => error!("Error while 
fetching object from peer: {}", e) - }; - } - }); - - fetcher - } - - pub async fn fetch_if_not_requested(&self, peer: Arc, request: ObjectRequest) -> Result<(), P2pError> { - let receiver = match self.tracker.request_object_from_peer(peer, request).await { - Err(P2pError::ObjectAlreadyRequested(_)) => return Ok(()), - Err(e) => return Err(e), - Ok(r) => r - }; - if let Err(e) = self.sender.send(receiver).await { - error!("Error while sending object fetcher response: {}", e); - } - Ok(()) - } -} \ No newline at end of file diff --git a/xelis_daemon/src/p2p/tracker.rs b/xelis_daemon/src/p2p/tracker.rs index c051a1c5..94077b8a 100644 --- a/xelis_daemon/src/p2p/tracker.rs +++ b/xelis_daemon/src/p2p/tracker.rs @@ -1,14 +1,14 @@ -use std::{borrow::Cow, collections::HashMap, time::Duration, sync::Arc}; +use std::{borrow::Cow, time::{Duration, Instant}, sync::Arc}; use bytes::Bytes; -use tokio::{sync::{mpsc::{UnboundedSender, UnboundedReceiver}, RwLock, oneshot}, time::timeout}; +use indexmap::IndexMap; +use tokio::sync::{mpsc::{UnboundedSender, UnboundedReceiver, Sender, Receiver}, RwLock}; use xelis_common::{crypto::hash::Hash, serializer::Serializer}; -use crate::config::PEER_TIMEOUT_REQUEST_OBJECT; +use crate::{core::{blockchain::Blockchain, storage::Storage}, config::PEER_TIMEOUT_REQUEST_OBJECT}; use log::{error, debug}; use super::{packet::{object::{ObjectRequest, OwnedObjectResponse}, Packet}, error::P2pError, peer::Peer}; -pub type WaiterResponse = oneshot::Receiver>; pub type SharedObjectTracker = Arc; pub type ResponseBlocker = tokio::sync::broadcast::Receiver<()>; @@ -35,7 +35,9 @@ impl Listener { struct Request { request: ObjectRequest, - sender: Option> + sender: Option>, + response: Option, + requested_at: Option } @@ -43,7 +45,9 @@ impl Request { pub fn new(request: ObjectRequest) -> Self { Self { request, - sender: None + sender: None, + response: None, + requested_at: None } } @@ -51,6 +55,26 @@ impl Request { &self.request } + pub fn set_response(&mut 
self, response: OwnedObjectResponse) { + self.response = Some(response); + } + + pub fn has_response(&self) -> bool { + self.response.is_some() + } + + pub fn take_response(&mut self) -> Option { + self.response.take() + } + + pub fn set_requested(&mut self) { + self.requested_at = Some(Instant::now()); + } + + pub fn get_requested(&self) -> &Option { + &self.requested_at + } + pub fn get_hash(&self) -> &Hash { self.request.get_hash() } @@ -74,30 +98,37 @@ impl Request { // currently used to fetch in order all txs propagated by the network pub struct ObjectTracker { request_sender: UnboundedSender, - response_sender: UnboundedSender>, - queue: RwLock> + handler_sender: Sender, + queue: RwLock> } enum Message { - Request(Arc, Hash, oneshot::Sender>), + Request(Arc, Hash), Exit } impl ObjectTracker { - pub fn new() -> SharedObjectTracker { + pub fn new(blockchain: Arc>) -> SharedObjectTracker { let (request_sender, request_receiver) = tokio::sync::mpsc::unbounded_channel(); - let (response_sender, response_receiver) = tokio::sync::mpsc::unbounded_channel(); + let (handler_sender, handler_receiver) = tokio::sync::mpsc::channel(128); let zelf: Arc = Arc::new(Self { request_sender, - response_sender, - queue: RwLock::new(HashMap::new()) + handler_sender, + queue: RwLock::new(IndexMap::new()) }); { // start the loop let zelf = zelf.clone(); tokio::spawn(async move { - zelf.requester_loop(request_receiver, response_receiver).await; + zelf.requester_loop(request_receiver).await; + }); + } + + { + let zelf = zelf.clone(); + tokio::spawn(async move { + zelf.handler_loop(blockchain, handler_receiver).await; }); } @@ -111,25 +142,66 @@ impl ObjectTracker { } } - async fn requester_loop(&self, mut request_receiver: UnboundedReceiver, mut response_receiver: UnboundedReceiver>) { + + async fn handle_object_response_internal(&self, blockchain: &Arc>, response: OwnedObjectResponse) -> Result<(), P2pError> { + match response { + OwnedObjectResponse::Transaction(tx, hash) => { + 
blockchain.add_tx_with_hash_to_mempool(tx, hash, true).await?; + }, + _ => { + debug!("ObjectTracker received an invalid object response"); + } + } + Ok(()) + } + + async fn handler_loop(&self, blockchain: Arc>, mut handler_receiver: Receiver) { + debug!("Starting handler loop..."); + while let Some(response) = handler_receiver.recv().await { + let object = response.get_hash(); + let mut queue = self.queue.write().await; + if let Some(request) = queue.get_mut(object) { + request.set_response(response); + } + + 'inner: while !queue.is_empty() { + let handle = if let Some((_, request)) = queue.get_index(0) { + request.has_response() + } else { + false + }; + + if handle { + let (_, mut request) = queue.shift_remove_index(0).unwrap(); + let response = request.take_response().unwrap(); + if let Err(e) = self.handle_object_response_internal(&blockchain, response).await { + error!("Error while handling object response in ObjectTracker: {}", e); + } + request.to_listener().notify(); + } else { + // Maybe it timed out + if let Some((_, request)) = queue.get_index(0) { + if let Some(requested_at) = request.get_requested() { + if requested_at.elapsed() > Duration::from_millis(PEER_TIMEOUT_REQUEST_OBJECT) { + let (_, request) = queue.shift_remove_index(0).unwrap(); + request.to_listener().notify(); + } + } + } + break 'inner; + } + } + } + } + + async fn requester_loop(&self, mut request_receiver: UnboundedReceiver) { debug!("Starting requester loop..."); while let Some(msg) = request_receiver.recv().await { match msg { - Message::Request(peer, request, sender) => { - if let Err(e) = self.request_object_from_peer_internal(&peer, request).await { - if sender.send(Err(e)).is_err() { - error!("Error while sending error response from ObjectTracker"); - } - } else { - let res: Result<(OwnedObjectResponse, Listener), P2pError> = timeout(Duration::from_millis(PEER_TIMEOUT_REQUEST_OBJECT), response_receiver.recv()).await - .map_err(|e| P2pError::AsyncTimeOut(e)) - .and_then(|res| 
res.ok_or(P2pError::NoResponse)) - .and_then(|res| res); - - if sender.send(res).is_err() { - error!("Error while sending response from ObjectTracker"); - } - } + Message::Request(peer, object) => { + if let Err(e) = self.request_object_from_peer_internal(&peer, &object).await { + error!("Error while requesting object {} from {}: {}", object, peer, e); + }; }, Message::Exit => break } @@ -148,30 +220,28 @@ impl ObjectTracker { } pub async fn handle_object_response(&self, response: OwnedObjectResponse) -> Result<(), P2pError> { - let request = { - let mut queue = self.queue.write().await; - if let Some(request) = queue.remove(response.get_hash()) { - request + { + let queue = self.queue.read().await; + if let Some(request) = queue.get(response.get_hash()) { + if request.get_hash() != response.get_hash() { + debug!("Invalid object hash in ObjectTracker: expected {}, got {}", request.get_hash(), response.get_hash()); + return Err(P2pError::InvalidObjectHash(request.get_hash().clone(), response.get_hash().clone())); + } } else { let request = response.get_request(); debug!("Object not requested in ObjectTracker: {}", request); return Err(P2pError::ObjectNotRequested(request)); } - }; - - if request.get_hash() != response.get_hash() { - debug!("Invalid object hash in ObjectTracker: expected {}, got {}", request.get_hash(), response.get_hash()); - return Err(P2pError::InvalidObjectHash(request.get_hash().clone(), response.get_hash().clone())); } - if self.response_sender.send(Ok((response, request.to_listener()))).is_err() { + if self.handler_sender.send(response).await.is_err() { error!("Error while sending object response in ObjectTracker"); } Ok(()) } - pub async fn request_object_from_peer(&self, peer: Arc, request: ObjectRequest) -> Result { + pub async fn request_object_from_peer(&self, peer: Arc, request: ObjectRequest) -> Result<(), P2pError> { let hash = { let mut queue = self.queue.write().await; let hash = request.get_hash().clone(); @@ -181,27 +251,24 @@ impl 
ObjectTracker { hash }; - let (sender, receiver) = oneshot::channel(); - self.request_sender.send(Message::Request(peer, hash, sender))?; - Ok(receiver) + self.request_sender.send(Message::Request(peer, hash))?; + Ok(()) } - async fn request_object_from_peer_internal(&self, peer: &Peer, request_hash: Hash) -> Result<(), P2pError> { + async fn request_object_from_peer_internal(&self, peer: &Peer, request_hash: &Hash) -> Result<(), P2pError> { debug!("Requesting object with hash {}", request_hash); - let packet = { - let queue = self.queue.write().await; - let request = queue.get(&request_hash).ok_or_else(|| P2pError::ObjectHashNotPresentInQueue(request_hash.clone()))?; - Bytes::from(Packet::ObjectRequest(Cow::Borrowed(request.get_object())).to_bytes()) - }; + let mut queue = self.queue.write().await; + let request = queue.get_mut(request_hash).ok_or_else(|| P2pError::ObjectHashNotPresentInQueue(request_hash.clone()))?; + request.set_requested(); + let packet = Bytes::from(Packet::ObjectRequest(Cow::Borrowed(request.get_object())).to_bytes()); // send the packet to the Peer - if let Err(e) = peer.send_bytes(packet).await { - error!("Error while sending object request to peer: {}", e); - let mut queue = self.queue.write().await; - queue.remove(&request_hash); - return Err(e); - } + peer.send_bytes(packet).await.map_err(|e| { + if let Some(request) = queue.remove(request_hash) { + request.to_listener().notify(); + } - Ok(()) + e + }) } } \ No newline at end of file From efafc8ca49c7803c4857f829cb7d97590815cfc0 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 24 Oct 2023 00:26:52 +0200 Subject: [PATCH 128/160] daemon: broadcast only when needed --- xelis_daemon/src/p2p/mod.rs | 6 ++--- xelis_daemon/src/p2p/tracker.rs | 42 ++++++++++++++++++++------------- 2 files changed, 29 insertions(+), 19 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index eabd3f6c..4c069bc1 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ 
b/xelis_daemon/src/p2p/mod.rs @@ -797,7 +797,7 @@ impl P2pServer { // Check that the tx is not in mempool or on disk already if !self.blockchain.has_tx(&hash).await? { - self.object_tracker.request_object_from_peer(Arc::clone(peer), ObjectRequest::Transaction(hash.clone())).await?; + self.object_tracker.request_object_from_peer(Arc::clone(peer), ObjectRequest::Transaction(hash.clone()), true).await?; } // Avoid sending the TX propagated to a common peer @@ -881,7 +881,7 @@ impl P2pServer { if !contains { // retrieve one by one to prevent acquiring the lock for nothing debug!("Requesting TX {} to {} for block {}", hash, peer, block_hash); - if let Err(e) = zelf.object_tracker.request_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash.clone())).await { + if let Err(e) = zelf.object_tracker.request_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash.clone()), false).await { error!("Error while requesting TX {} to {} for block {}: {}", hash, peer, block_hash, e); peer.increment_fail_count(); return; @@ -1162,7 +1162,7 @@ impl P2pServer { let mempool = self.blockchain.get_mempool().read().await; for hash in txs.into_owned() { if !mempool.contains_tx(&hash) && !storage.has_transaction(&hash).await? 
&& !self.object_tracker.has_requested_object(&hash).await { - self.object_tracker.request_object_from_peer(Arc::clone(peer), ObjectRequest::Transaction(hash.into_owned())).await?; + self.object_tracker.request_object_from_peer(Arc::clone(peer), ObjectRequest::Transaction(hash.into_owned()), false).await?; } } } diff --git a/xelis_daemon/src/p2p/tracker.rs b/xelis_daemon/src/p2p/tracker.rs index 94077b8a..d09b018c 100644 --- a/xelis_daemon/src/p2p/tracker.rs +++ b/xelis_daemon/src/p2p/tracker.rs @@ -37,17 +37,19 @@ struct Request { request: ObjectRequest, sender: Option>, response: Option, - requested_at: Option + requested_at: Option, + broadcast: bool } impl Request { - pub fn new(request: ObjectRequest) -> Self { + pub fn new(request: ObjectRequest, broadcast: bool) -> Self { Self { request, sender: None, response: None, - requested_at: None + requested_at: None, + broadcast } } @@ -89,6 +91,10 @@ impl Request { } } + pub fn broadcast(&self) -> bool { + self.broadcast + } + pub fn to_listener(self) -> Listener { Listener::new(self.sender) } @@ -142,11 +148,10 @@ impl ObjectTracker { } } - - async fn handle_object_response_internal(&self, blockchain: &Arc>, response: OwnedObjectResponse) -> Result<(), P2pError> { + async fn handle_object_response_internal(&self, blockchain: &Arc>, response: OwnedObjectResponse, broadcast: bool) -> Result<(), P2pError> { match response { OwnedObjectResponse::Transaction(tx, hash) => { - blockchain.add_tx_with_hash_to_mempool(tx, hash, true).await?; + blockchain.add_tx_with_hash_to_mempool(tx, hash, broadcast).await?; }, _ => { debug!("ObjectTracker received an invalid object response"); @@ -172,24 +177,29 @@ impl ObjectTracker { }; if handle { - let (_, mut request) = queue.shift_remove_index(0).unwrap(); - let response = request.take_response().unwrap(); - if let Err(e) = self.handle_object_response_internal(&blockchain, response).await { - error!("Error while handling object response in ObjectTracker: {}", e); + if let Some((_, 
mut request)) = queue.shift_remove_index(0) { + if let Some(response) = request.take_response() { + if let Err(e) = self.handle_object_response_internal(&blockchain, response, request.broadcast()).await { + error!("Error while handling object response in ObjectTracker: {}", e); + } + request.to_listener().notify(); + continue; + } } - request.to_listener().notify(); } else { // Maybe it timed out if let Some((_, request)) = queue.get_index(0) { if let Some(requested_at) = request.get_requested() { if requested_at.elapsed() > Duration::from_millis(PEER_TIMEOUT_REQUEST_OBJECT) { - let (_, request) = queue.shift_remove_index(0).unwrap(); - request.to_listener().notify(); + if let Some((_, request)) = queue.shift_remove_index(0) { + request.to_listener().notify(); + continue; + } } } } - break 'inner; } + break 'inner; } } } @@ -241,11 +251,11 @@ impl ObjectTracker { Ok(()) } - pub async fn request_object_from_peer(&self, peer: Arc, request: ObjectRequest) -> Result<(), P2pError> { + pub async fn request_object_from_peer(&self, peer: Arc, request: ObjectRequest, broadcast: bool) -> Result<(), P2pError> { let hash = { let mut queue = self.queue.write().await; let hash = request.get_hash().clone(); - if let Some(old) = queue.insert(hash.clone(), Request::new(request)) { + if let Some(old) = queue.insert(hash.clone(), Request::new(request, broadcast)) { return Err(P2pError::ObjectAlreadyRequested(old.request)) } hash From 9886ba619cea1a4b26e114785c0e5e2e764ea5ed Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 24 Oct 2023 00:51:13 +0200 Subject: [PATCH 129/160] daemon: verify TX was not executed (orphaned txs fix) --- xelis_daemon/src/core/blockchain.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index dc4c0b28..6a001973 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -945,7 +945,8 @@ impl Blockchain { return 
Err(BlockchainError::TxAlreadyInMempool(hash)) } - if storage.has_transaction(&hash).await? { + // check that the TX is not already in blockchain + if storage.is_tx_executed_in_a_block(&hash)? { return Err(BlockchainError::TxAlreadyInBlockchain(hash)) } From 54a474eab894954b66f1713c15fb16cefe984fbc Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 24 Oct 2023 11:07:06 +0200 Subject: [PATCH 130/160] wallet: fix get_transactions --- xelis_wallet/src/storage.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/xelis_wallet/src/storage.rs b/xelis_wallet/src/storage.rs index 0a602e8c..7502d280 100644 --- a/xelis_wallet/src/storage.rs +++ b/xelis_wallet/src/storage.rs @@ -248,9 +248,15 @@ impl EncryptedStorage { // Keep only transactions entries that have one transfer at least match transfers { + // Transfers which are not empty Some(transfers) if !transfers.is_empty() => { transactions.push(e); }, + // Something else than outgoing/incoming txs + None => { + transactions.push(e); + }, + // All the left is discarded _ => {} } } From dacc2b8ca831402a367b2adddf9cd1849033e553 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 24 Oct 2023 11:57:45 +0200 Subject: [PATCH 131/160] daemon: show bootstrap response step on not requested --- xelis_daemon/src/p2p/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 4c069bc1..90f00643 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1186,7 +1186,7 @@ impl P2pServer { error!("Error while sending bootstrap response to channel: {:?}", e.kind()); } } else { - debug!("{} send us a bootstrap chain response but we didn't asked it", peer); + debug!("{} send us a bootstrap chain response of step {:?} but we didn't asked it", peer, response.kind()); return Err(P2pError::UnrequestedBootstrapChainResponse) } }, From d6be7f9d078b1bea1f7594f93d23eb761168fd6a Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 24 Oct 2023 11:58:05 +0200 
Subject: [PATCH 132/160] daemon: print overflow fees --- xelis_daemon/src/core/blockchain.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 6a001973..1a9b3737 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1881,7 +1881,7 @@ impl Blockchain { if let Some(value) = balance.checked_sub(tx.get_fee()) { *balance = value; } else { - warn!("Overflow detected using fees in transaction {}", hash); + warn!("Overflow detected using fees ({} XEL) in transaction {}", format_xelis(tx.get_fee()), hash); return Err(BlockchainError::Overflow) } } From caf0a712753fbf0d61e3d96ef0e7a92e4525a01c Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 24 Oct 2023 13:06:41 +0200 Subject: [PATCH 133/160] daemon: rework Request in ObjectTracker, clear mempool after reloading chain from disk --- xelis_daemon/src/core/blockchain.rs | 6 ++++ xelis_daemon/src/p2p/mod.rs | 1 + xelis_daemon/src/p2p/tracker.rs | 55 +++++++++++++++++------------ 3 files changed, 40 insertions(+), 22 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 1a9b3737..364caa3e 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -288,6 +288,12 @@ impl Blockchain { let difficulty = self.get_difficulty_at_tips(storage, &tips.into_iter().collect()).await?; self.difficulty.store(difficulty, Ordering::SeqCst); + + // TXs in mempool may be outdated, clear them as they will be asked later again + debug!("Clearing mempool"); + let mut mempool = self.mempool.write().await; + mempool.clear(); + Ok(()) } diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 90f00643..014a89ee 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -510,6 +510,7 @@ impl P2pServer { if let Err(e) = self.bootstrap_chain(&peer).await { warn!("Error occured while fast syncing with 
{}: {}", peer, e); } else { + debug!("Requesting inventory after successfull fast sync with {}", peer); if let Err(e) = self.request_inventory_of(&peer).await { debug!("Error occured while asking inventory of {} after fast sync: {}", peer, e); } diff --git a/xelis_daemon/src/p2p/tracker.rs b/xelis_daemon/src/p2p/tracker.rs index d09b018c..e8de5924 100644 --- a/xelis_daemon/src/p2p/tracker.rs +++ b/xelis_daemon/src/p2p/tracker.rs @@ -5,7 +5,7 @@ use indexmap::IndexMap; use tokio::sync::{mpsc::{UnboundedSender, UnboundedReceiver, Sender, Receiver}, RwLock}; use xelis_common::{crypto::hash::Hash, serializer::Serializer}; use crate::{core::{blockchain::Blockchain, storage::Storage}, config::PEER_TIMEOUT_REQUEST_OBJECT}; -use log::{error, debug}; +use log::{error, debug, trace}; use super::{packet::{object::{ObjectRequest, OwnedObjectResponse}, Packet}, error::P2pError, peer::Peer}; @@ -35,6 +35,7 @@ impl Listener { struct Request { request: ObjectRequest, + peer: Arc, sender: Option>, response: Option, requested_at: Option, @@ -43,9 +44,10 @@ struct Request { impl Request { - pub fn new(request: ObjectRequest, broadcast: bool) -> Self { + pub fn new(request: ObjectRequest, peer: Arc, broadcast: bool) -> Self { Self { request, + peer, sender: None, response: None, requested_at: None, @@ -57,6 +59,10 @@ impl Request { &self.request } + pub fn get_peer(&self) -> &Arc { + &self.peer + } + pub fn set_response(&mut self, response: OwnedObjectResponse) { self.response = Some(response); } @@ -109,7 +115,7 @@ pub struct ObjectTracker { } enum Message { - Request(Arc, Hash), + Request(Hash), Exit } @@ -180,7 +186,7 @@ impl ObjectTracker { if let Some((_, mut request)) = queue.shift_remove_index(0) { if let Some(response) = request.take_response() { if let Err(e) = self.handle_object_response_internal(&blockchain, response, request.broadcast()).await { - error!("Error while handling object response in ObjectTracker: {}", e); + error!("Error while handling object response for {} 
in ObjectTracker from {}: {}", request.get_hash(), request.get_peer(), e); } request.to_listener().notify(); continue; @@ -208,10 +214,8 @@ impl ObjectTracker { debug!("Starting requester loop..."); while let Some(msg) = request_receiver.recv().await { match msg { - Message::Request(peer, object) => { - if let Err(e) = self.request_object_from_peer_internal(&peer, &object).await { - error!("Error while requesting object {} from {}: {}", object, peer, e); - }; + Message::Request(object) => { + self.request_object_from_peer_internal(&object).await; }, Message::Exit => break } @@ -255,30 +259,37 @@ impl ObjectTracker { let hash = { let mut queue = self.queue.write().await; let hash = request.get_hash().clone(); - if let Some(old) = queue.insert(hash.clone(), Request::new(request, broadcast)) { + if let Some(old) = queue.insert(hash.clone(), Request::new(request, peer, broadcast)) { return Err(P2pError::ObjectAlreadyRequested(old.request)) } hash }; - self.request_sender.send(Message::Request(peer, hash))?; + self.request_sender.send(Message::Request(hash))?; Ok(()) } - async fn request_object_from_peer_internal(&self, peer: &Peer, request_hash: &Hash) -> Result<(), P2pError> { + async fn request_object_from_peer_internal(&self, request_hash: &Hash) { debug!("Requesting object with hash {}", request_hash); - let mut queue = self.queue.write().await; - let request = queue.get_mut(request_hash).ok_or_else(|| P2pError::ObjectHashNotPresentInQueue(request_hash.clone()))?; - request.set_requested(); - let packet = Bytes::from(Packet::ObjectRequest(Cow::Borrowed(request.get_object())).to_bytes()); - - // send the packet to the Peer - peer.send_bytes(packet).await.map_err(|e| { - if let Some(request) = queue.remove(request_hash) { - request.to_listener().notify(); + let mut delete = false; + { + let mut queue = self.queue.write().await; + if let Some(request) = queue.get_mut(request_hash) { + request.set_requested(); + let packet = 
Bytes::from(Packet::ObjectRequest(Cow::Borrowed(request.get_object())).to_bytes()); + // send the packet to the Peer + if let Err(e) = request.get_peer().send_bytes(packet).await { + error!("Error while requesting object {} using Object Tracker: {}", request_hash, e); + request.get_peer().increment_fail_count(); + delete = true; + } } + } - e - }) + if delete { + trace!("Deleting requested object with hash {}", request_hash); + let mut queue = self.queue.write().await; + queue.remove(request_hash); + } } } \ No newline at end of file From f79ec72ca2edd1fa94b67f8ebc6b6fdbeaa1c5bb Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 24 Oct 2023 14:10:21 +0200 Subject: [PATCH 134/160] daemon: don't keep the lock while fast syncing --- xelis_daemon/src/core/blockchain.rs | 7 ++++--- xelis_daemon/src/p2p/mod.rs | 27 +++++++++++++++++++-------- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 364caa3e..0810dccb 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -276,17 +276,18 @@ impl Blockchain { info!("All modules are now stopped!"); } - pub async fn reload_from_disk(&self, storage: &S) -> Result<(), BlockchainError> { + pub async fn reload_from_disk(&self) -> Result<(), BlockchainError> { + let storage = self.storage.read().await; let topoheight = storage.get_top_topoheight()?; let height = storage.get_top_height()?; self.topoheight.store(topoheight, Ordering::SeqCst); self.height.store(height, Ordering::SeqCst); let tips = storage.get_tips().await?; - let (_, stable_height) = self.find_common_base(storage, &tips).await?; + let (_, stable_height) = self.find_common_base(&*storage, &tips).await?; self.stable_height.store(stable_height, Ordering::SeqCst); - let difficulty = self.get_difficulty_at_tips(storage, &tips.into_iter().collect()).await?; + let difficulty = self.get_difficulty_at_tips(&*storage, &tips.into_iter().collect()).await?; 
self.difficulty.store(difficulty, Ordering::SeqCst); // TXs in mempool may be outdated, clear them as they will be asked later again diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 014a89ee..9164b9e2 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1739,8 +1739,10 @@ impl P2pServer { let mut our_topoheight = self.blockchain.get_topo_height(); let mut stable_topoheight = 0; - let mut storage = self.blockchain.get_storage().write().await; - let mut step: Option = Some(StepRequest::ChainInfo(self.build_list_of_blocks_id(&*storage).await?)); + let mut step: Option = { + let storage = self.blockchain.get_storage().read().await; + Some(StepRequest::ChainInfo(self.build_list_of_blocks_id(&*storage).await?)) + }; // keep them in memory, we add them when we're syncing // it's done to prevent any sync failure @@ -1748,6 +1750,7 @@ impl P2pServer { let mut top_height: u64 = 0; let mut top_block_hash: Option = None; + let mut all_assets = HashSet::new(); loop { let response = if let Some(step) = step.take() { info!("Requesting step {:?}", step.kind()); @@ -1760,6 +1763,7 @@ impl P2pServer { StepResponse::ChainInfo(common_point, topoheight, height, hash) => { // first, check the common point in case we deviated from the chain if let Some(common_point) = common_point { + let mut storage = self.blockchain.get_storage().write().await; debug!("Unverified common point found at {} with hash {}", common_point.get_topoheight(), common_point.get_hash()); let hash_at_topo = storage.get_hash_at_topo_height(common_point.get_topoheight()).await?; if hash_at_topo != *common_point.get_hash() { @@ -1794,10 +1798,12 @@ impl P2pServer { }, // fetch all assets from peer StepResponse::Assets(assets, next_page) => { + let mut storage = self.blockchain.get_storage().write().await; for asset in assets { let (asset, data) = asset.consume(); debug!("Saving asset {} at topoheight {}", asset, stable_topoheight); storage.add_asset(&asset, 
data).await?; + all_assets.insert(asset); } if next_page.is_some() { @@ -1816,15 +1822,18 @@ impl P2pServer { return Err(P2pError::InvalidPacket.into()) }; - // save all nonces - for (key, nonce) in keys.iter().zip(nonces) { - debug!("Saving nonce {} for {}", nonce, key); - storage.set_nonce_at_topoheight(key, nonce, stable_topoheight).await?; + { + let mut storage = self.blockchain.get_storage().write().await; + // save all nonces + for (key, nonce) in keys.iter().zip(nonces) { + debug!("Saving nonce {} for {}", nonce, key); + storage.set_nonce_at_topoheight(key, nonce, stable_topoheight).await?; + } } // TODO don't retrieve ALL each time but one by one // otherwise in really long time, it may consume lot of memory - for asset in storage.get_assets().await? { + for asset in &all_assets { debug!("Request balances for asset {}", asset); let StepResponse::Balances(balances) = peer.request_boostrap_chain(StepRequest::Balances(stable_topoheight, Cow::Borrowed(&asset), Cow::Borrowed(&keys))).await? 
else { // shouldn't happen @@ -1833,6 +1842,7 @@ impl P2pServer { }; // save all balances for this asset + let mut storage = self.blockchain.get_storage().write().await; for (key, balance) in keys.iter().zip(balances) { // check that the account have balance for this asset if let Some(balance) = balance { @@ -1853,6 +1863,7 @@ impl P2pServer { } }, StepResponse::BlocksMetadata(blocks) => { + let mut storage = self.blockchain.get_storage().write().await; let mut lowest_topoheight = stable_topoheight; for (i, metadata) in blocks.into_iter().enumerate() { // check that we don't already have this block in storage @@ -1905,7 +1916,7 @@ impl P2pServer { } }; } - self.blockchain.reload_from_disk(&storage).await?; + self.blockchain.reload_from_disk().await?; info!("Fast sync done with {}", peer); Ok(()) From 363ed63d36138deed36900b7e90e23ec2605d845 Mon Sep 17 00:00:00 2001 From: Slixe Date: Tue, 24 Oct 2023 15:15:29 +0200 Subject: [PATCH 135/160] daemon: add logs for reload from disk --- xelis_daemon/src/core/blockchain.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 0810dccb..4b35e683 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -277,6 +277,7 @@ impl Blockchain { } pub async fn reload_from_disk(&self) -> Result<(), BlockchainError> { + trace!("Reloading chain from disk"); let storage = self.storage.read().await; let topoheight = storage.get_top_topoheight()?; let height = storage.get_top_height()?; @@ -291,8 +292,9 @@ impl Blockchain { self.difficulty.store(difficulty, Ordering::SeqCst); // TXs in mempool may be outdated, clear them as they will be asked later again - debug!("Clearing mempool"); + debug!("locking mempool for cleaning"); let mut mempool = self.mempool.write().await; + debug!("Clearing mempool"); mempool.clear(); Ok(()) From cade7caf1064b08186cd177297d0582ef1c8f76b Mon Sep 17 00:00:00 2001 From: Slixe Date: 
Tue, 24 Oct 2023 17:10:47 +0200 Subject: [PATCH 136/160] daemon: p2p error for bootstrap chain request --- xelis_daemon/src/p2p/error.rs | 2 ++ xelis_daemon/src/p2p/mod.rs | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/xelis_daemon/src/p2p/error.rs b/xelis_daemon/src/p2p/error.rs index 57b8975f..a10e3bc6 100644 --- a/xelis_daemon/src/p2p/error.rs +++ b/xelis_daemon/src/p2p/error.rs @@ -69,6 +69,8 @@ pub enum P2pError { ParseAddressError(#[from] AddrParseError), #[error("Invalid packet ID")] InvalidPacket, + #[error("Peer topoheight is higher than our")] + InvalidRequestedTopoheight, #[error("Packet size exceed limit")] InvalidPacketSize, #[error("Received valid packet with not used bytes")] diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 9164b9e2..1bb072d8 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1612,7 +1612,7 @@ impl P2pServer { || topoheight < PRUNE_SAFETY_LIMIT { warn!("Invalid begin topoheight (received {}, our is {}, pruned: {}) received from {}", topoheight, our_topoheight, pruned_topoheight, peer); - return Err(P2pError::InvalidPacket.into()) + return Err(P2pError::InvalidRequestedTopoheight.into()) } } From f9cb85748f4db8144bef066b0f955c97c833fa1f Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 25 Oct 2023 12:02:52 +0200 Subject: [PATCH 137/160] daemon: request peer inventory after final chain sync --- xelis_daemon/src/p2p/mod.rs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 1bb072d8..4afdf636 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -509,11 +509,6 @@ impl P2pServer { if fast_sync { if let Err(e) = self.bootstrap_chain(&peer).await { warn!("Error occured while fast syncing with {}: {}", peer, e); - } else { - debug!("Requesting inventory after successfull fast sync with {}", peer); - if let Err(e) = self.request_inventory_of(&peer).await { - 
debug!("Error occured while asking inventory of {} after fast sync: {}", peer, e); - } } self.set_syncing(false); } else { @@ -1427,10 +1422,10 @@ impl P2pServer { let peer_topoheight = peer.get_topoheight(); // ask inventory of this peer if we sync from too far // if we are not further than one sync, request the inventory - if peer_topoheight > our_previous_topoheight && peer_topoheight - our_previous_topoheight < CHAIN_SYNC_RESPONSE_MAX_BLOCKS as u64 { + if peer_topoheight > our_previous_topoheight && blocks_len < CHAIN_SYNC_RESPONSE_MAX_BLOCKS { let our_topoheight = self.blockchain.get_topo_height(); // verify that we synced it partially well - if peer_topoheight >= our_topoheight && peer_topoheight - our_topoheight < PRUNE_SAFETY_LIMIT { + if peer_topoheight >= our_topoheight && peer_topoheight - our_topoheight < STABLE_LIMIT { if let Err(e) = self.request_inventory_of(&peer).await { error!("Error while asking inventory to {}: {}", peer, e); } From 0b7dfe2b9d21d8208646e6d4b4bcbd293827af5b Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 25 Oct 2023 15:28:56 +0200 Subject: [PATCH 138/160] daemon: add more logs, fix PeerPeerDisconnected --- xelis_daemon/src/p2p/mod.rs | 34 ++++++++++++++--------------- xelis_daemon/src/p2p/packet/ping.rs | 8 +++---- xelis_daemon/src/p2p/peer_list.rs | 1 - 3 files changed, 21 insertions(+), 22 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 4afdf636..a8978018 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1189,29 +1189,29 @@ impl P2pServer { Packet::PeerDisconnected(packet) => { let addr = packet.to_addr(); debug!("{} disconnected from {}", addr, peer); - let mut peer_peers = peer.get_peers(false).lock().await; - let mut peer_peers_sent = peer.get_peers(true).lock().await; + let mut peers_received = peer.get_peers(false).lock().await; + let mut peers_sent = peer.get_peers(true).lock().await; // peer should be a common one (we sent it, and received it from 
him) - let recv_removed = peer_peers.remove(&addr); - let sent_removed = peer_peers_sent.remove(&addr); - if !recv_removed || !sent_removed { - warn!("{} disconnected from {} but we didn't have it in our peer list: {recv_removed} {sent_removed}", addr, peer); + let recv_removed = peers_received.remove(&addr); + let sent_removed = peers_sent.remove(&addr); + // It must be a common peer + if !(recv_removed && sent_removed) { + debug!("{} disconnected from {} but we didn't have it in our peer list", addr, peer); + trace!("Our peerlist is: {:?} and {:?}", peers_received, peers_sent); return Err(P2pError::UnknownPeerReceived(addr)) } - if recv_removed { - trace!("Locking RPC Server to notify PeerDisconnected event"); - if let Some(rpc) = self.blockchain.get_rpc().lock().await.as_ref() { - if rpc.is_event_tracked(&NotifyEvent::PeerDisconnected).await { - let value = PeerPeerDisconnectedEvent { - peer_id: peer.get_id(), - peer_addr: addr - }; - rpc.notify_clients_with(&NotifyEvent::PeerDisconnected, value).await; - } + trace!("Locking RPC Server to notify PeerDisconnected event"); + if let Some(rpc) = self.blockchain.get_rpc().lock().await.as_ref() { + if rpc.is_event_tracked(&NotifyEvent::PeerPeerDisconnected).await { + let value = PeerPeerDisconnectedEvent { + peer_id: peer.get_id(), + peer_addr: addr + }; + rpc.notify_clients_with(&NotifyEvent::PeerPeerDisconnected, value).await; } - trace!("End locking for PeerDisconnected event"); } + trace!("End locking for PeerDisconnected event"); } }; Ok(()) diff --git a/xelis_daemon/src/p2p/packet/ping.rs b/xelis_daemon/src/p2p/packet/ping.rs index 3011cbca..e7f01d19 100644 --- a/xelis_daemon/src/p2p/packet/ping.rs +++ b/xelis_daemon/src/p2p/packet/ping.rs @@ -79,7 +79,7 @@ impl<'a> Ping<'a> { if !self.peer_list.is_empty() { debug!("Received a peer list ({}) for {}", self.peer_list.len(), peer); - let mut peers = peer.get_peers(false).lock().await; + let mut peers_received = peer.get_peers(false).lock().await; let peer_addr = 
peer.get_connection().get_address(); let peer_outgoing_addr = peer.get_outgoing_address(); for addr in &self.peer_list { @@ -88,12 +88,12 @@ impl<'a> Ping<'a> { return Err(P2pError::InvalidProtocolRules) } - if peers.contains(&addr) { + debug!("Adding {} for {} in ping packet", addr, peer); + if !peers_received.insert(*addr) { error!("Invalid protocol rules: received duplicated peer {} from {} in ping packet", peer, addr); + trace!("Received peer list: {:?}, our peerlist is: {:?}", self.peer_list, peers_received); return Err(P2pError::InvalidProtocolRules) } - debug!("Adding {} for {} in ping packet", addr, peer); - peers.insert(*addr); } trace!("Locking RPC Server to notify PeerPeerListUpdated event"); diff --git a/xelis_daemon/src/p2p/peer_list.rs b/xelis_daemon/src/p2p/peer_list.rs index 672631f8..06588da6 100644 --- a/xelis_daemon/src/p2p/peer_list.rs +++ b/xelis_daemon/src/p2p/peer_list.rs @@ -208,7 +208,6 @@ impl PeerList { best_height } - // get a peer by its address fn internal_get_peer_by_addr<'a>(peers: &'a HashMap>, addr: &SocketAddr) -> Option<&'a Arc> { peers.values().find(|peer| { From c73f26568334544abd40fb40c3827f9435116947 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 25 Oct 2023 16:02:45 +0200 Subject: [PATCH 139/160] daemon: logs --- xelis_daemon/src/p2p/mod.rs | 1 + xelis_daemon/src/p2p/packet/ping.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index a8978018..796ea358 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -580,6 +580,7 @@ impl P2pServer { } // update the ping packet with the new peers + debug!("Set peers: {:?}, current: {:?}, going to {}", new_peers, ping.get_peers(), peer.get_outgoing_address()); ping.set_peers(new_peers); // send the ping packet to the peer if let Err(e) = peer.get_connection().send_bytes(&Packet::Ping(Cow::Borrowed(&ping)).to_bytes()).await { diff --git a/xelis_daemon/src/p2p/packet/ping.rs 
b/xelis_daemon/src/p2p/packet/ping.rs index e7f01d19..c4399be8 100644 --- a/xelis_daemon/src/p2p/packet/ping.rs +++ b/xelis_daemon/src/p2p/packet/ping.rs @@ -78,7 +78,7 @@ impl<'a> Ping<'a> { peer.set_cumulative_difficulty(self.cumulative_difficulty); if !self.peer_list.is_empty() { - debug!("Received a peer list ({}) for {}", self.peer_list.len(), peer); + debug!("Received a peer list ({:?}) for {}", self.peer_list, peer); let mut peers_received = peer.get_peers(false).lock().await; let peer_addr = peer.get_connection().get_address(); let peer_outgoing_addr = peer.get_outgoing_address(); From 980601563c1750ecebdd3f0da973e88e17dc0c31 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 25 Oct 2023 16:23:52 +0200 Subject: [PATCH 140/160] daemon: more logs --- xelis_daemon/src/p2p/packet/ping.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/xelis_daemon/src/p2p/packet/ping.rs b/xelis_daemon/src/p2p/packet/ping.rs index c4399be8..38dcaa16 100644 --- a/xelis_daemon/src/p2p/packet/ping.rs +++ b/xelis_daemon/src/p2p/packet/ping.rs @@ -78,8 +78,9 @@ impl<'a> Ping<'a> { peer.set_cumulative_difficulty(self.cumulative_difficulty); if !self.peer_list.is_empty() { - debug!("Received a peer list ({:?}) for {}", self.peer_list, peer); + debug!("Received a peer list ({:?}) for {}", self.peer_list, peer.get_outgoing_address()); let mut peers_received = peer.get_peers(false).lock().await; + debug!("Our peer list is ({:?}) for {}", peers_received, peer.get_outgoing_address()); let peer_addr = peer.get_connection().get_address(); let peer_outgoing_addr = peer.get_outgoing_address(); for addr in &self.peer_list { @@ -88,7 +89,7 @@ impl<'a> Ping<'a> { return Err(P2pError::InvalidProtocolRules) } - debug!("Adding {} for {} in ping packet", addr, peer); + debug!("Adding {} for {} in ping packet", addr, peer.get_outgoing_address()); if !peers_received.insert(*addr) { error!("Invalid protocol rules: received duplicated peer {} from {} in ping packet", peer, addr); 
trace!("Received peer list: {:?}, our peerlist is: {:?}", self.peer_list, peers_received); From 56b801beb706665661c6c093ba5fd8ccf45f5916 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 25 Oct 2023 16:51:52 +0200 Subject: [PATCH 141/160] daemon: clear peers before re-using it --- xelis_daemon/src/p2p/mod.rs | 7 ++++--- xelis_daemon/src/p2p/packet/ping.rs | 8 ++++---- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 796ea358..a836f222 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -554,7 +554,8 @@ impl P2pServer { last_peerlist_update = current_time; let peer_list = self.peer_list.read().await; for peer in peer_list.get_peers().values() { - let mut new_peers = Vec::new(); + let new_peers = ping.get_mut_peers(); + new_peers.clear(); // all the peers we already sent to this current peer let mut peers_sent = peer.get_peers(true).lock().await; @@ -570,6 +571,7 @@ impl P2pServer { let addr = p.get_outgoing_address(); if !peers_sent.contains(addr) { // add it in our side to not re send it again + trace!("{} didn't received {} yet, adding it to peerlist in ping packet", peer.get_outgoing_address(), addr); peers_sent.insert(*addr); // add it to new list to send it new_peers.push(*addr); @@ -580,8 +582,7 @@ impl P2pServer { } // update the ping packet with the new peers - debug!("Set peers: {:?}, current: {:?}, going to {}", new_peers, ping.get_peers(), peer.get_outgoing_address()); - ping.set_peers(new_peers); + debug!("Set peers: {:?}, going to {}", new_peers, peer.get_outgoing_address()); // send the ping packet to the peer if let Err(e) = peer.get_connection().send_bytes(&Packet::Ping(Cow::Borrowed(&ping)).to_bytes()).await { debug!("Error sending specific ping packet to {}: {}", peer, e); diff --git a/xelis_daemon/src/p2p/packet/ping.rs b/xelis_daemon/src/p2p/packet/ping.rs index 38dcaa16..353ba910 100644 --- a/xelis_daemon/src/p2p/packet/ping.rs +++ 
b/xelis_daemon/src/p2p/packet/ping.rs @@ -121,13 +121,13 @@ impl<'a> Ping<'a> { self.topoheight } - pub fn set_peers(&mut self, peers: Vec) { - self.peer_list = peers; - } - pub fn get_peers(&self) -> &Vec { &self.peer_list } + + pub fn get_mut_peers(&mut self) -> &mut Vec { + &mut self.peer_list + } } impl Serializer for Ping<'_> { From c7bae5af6f4179998a6dca17af96de150af0a81d Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 25 Oct 2023 17:30:12 +0200 Subject: [PATCH 142/160] daemon: potential fix for common peers --- xelis_daemon/src/p2p/mod.rs | 19 +++++++++++-------- xelis_daemon/src/p2p/peer_list.rs | 3 ++- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index a836f222..49fa8bd9 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1192,17 +1192,20 @@ impl P2pServer { let addr = packet.to_addr(); debug!("{} disconnected from {}", addr, peer); let mut peers_received = peer.get_peers(false).lock().await; - let mut peers_sent = peer.get_peers(true).lock().await; - // peer should be a common one (we sent it, and received it from him) - let recv_removed = peers_received.remove(&addr); - let sent_removed = peers_sent.remove(&addr); + let peers_sent = peer.get_peers(true).lock().await; + + let received_contains = peers_received.contains(&addr); + let sent_contains = peers_sent.contains(&addr); + // It must be a common peer - if !(recv_removed && sent_removed) { - debug!("{} disconnected from {} but we didn't have it in our peer list", addr, peer); - trace!("Our peerlist is: {:?} and {:?}", peers_received, peers_sent); - return Err(P2pError::UnknownPeerReceived(addr)) + if !(received_contains && sent_contains) { + debug!("{} disconnected from {} but its not a common peer ? 
{} {}", addr, peer.get_outgoing_address(), received_contains, sent_contains); + return Err(P2pError::UnknownPeerReceived(addr)) } + // Delete the peer received + peers_received.remove(&addr); + trace!("Locking RPC Server to notify PeerDisconnected event"); if let Some(rpc) = self.blockchain.get_rpc().lock().await.as_ref() { if rpc.is_event_tracked(&NotifyEvent::PeerPeerDisconnected).await { diff --git a/xelis_daemon/src/p2p/peer_list.rs b/xelis_daemon/src/p2p/peer_list.rs index 06588da6..5f122de9 100644 --- a/xelis_daemon/src/p2p/peer_list.rs +++ b/xelis_daemon/src/p2p/peer_list.rs @@ -111,10 +111,11 @@ impl PeerList { let packet = Bytes::from(Packet::PeerDisconnected(PacketPeerDisconnected::new(*addr)).to_bytes()); for peer in self.peers.values() { let peers_received = peer.get_peers(false).lock().await; - let peers_sent = peer.get_peers(true).lock().await; + let mut peers_sent = peer.get_peers(true).lock().await; // check if it was a common peer (we sent it and we received it) if peers_sent.contains(addr) && peers_received.contains(addr) { debug!("Sending PeerDisconnected packet to peer {} for {}", peer, addr); + peers_sent.remove(addr); // we send the packet to notify the peer that we don't have it in common anymore if let Err(e) = peer.send_bytes(packet.clone()).await { error!("Error while trying to send PeerDisconnected packet to peer {}: {}", peer.get_connection().get_address(), e); From aed6359e6d4d11c9a0a83c34b4dbcaa0f1b1d00a Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 25 Oct 2023 19:14:52 +0200 Subject: [PATCH 143/160] daemon: fix peer disconnected not found --- xelis_daemon/src/p2p/packet/ping.rs | 4 ++-- xelis_daemon/src/p2p/peer_list.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/xelis_daemon/src/p2p/packet/ping.rs b/xelis_daemon/src/p2p/packet/ping.rs index 353ba910..8d37f8d5 100644 --- a/xelis_daemon/src/p2p/packet/ping.rs +++ b/xelis_daemon/src/p2p/packet/ping.rs @@ -85,13 +85,13 @@ impl<'a> Ping<'a> { let 
peer_outgoing_addr = peer.get_outgoing_address(); for addr in &self.peer_list { if peer_addr == addr || peer_outgoing_addr == addr { - error!("Invalid protocol rules: peer {} sent us its own socket address in ping packet", peer); + error!("Invalid protocol rules: peer {} sent us its own socket address in ping packet", peer.get_outgoing_address()); return Err(P2pError::InvalidProtocolRules) } debug!("Adding {} for {} in ping packet", addr, peer.get_outgoing_address()); if !peers_received.insert(*addr) { - error!("Invalid protocol rules: received duplicated peer {} from {} in ping packet", peer, addr); + error!("Invalid protocol rules: received duplicated peer {} from {} in ping packet", addr, peer.get_outgoing_address()); trace!("Received peer list: {:?}, our peerlist is: {:?}", self.peer_list, peers_received); return Err(P2pError::InvalidProtocolRules) } diff --git a/xelis_daemon/src/p2p/peer_list.rs b/xelis_daemon/src/p2p/peer_list.rs index 5f122de9..ef25e22f 100644 --- a/xelis_daemon/src/p2p/peer_list.rs +++ b/xelis_daemon/src/p2p/peer_list.rs @@ -111,11 +111,11 @@ impl PeerList { let packet = Bytes::from(Packet::PeerDisconnected(PacketPeerDisconnected::new(*addr)).to_bytes()); for peer in self.peers.values() { let peers_received = peer.get_peers(false).lock().await; - let mut peers_sent = peer.get_peers(true).lock().await; + let peers_sent = peer.get_peers(true).lock().await; // check if it was a common peer (we sent it and we received it) + // Because its a common peer, we can expect that he will send us the same packet if peers_sent.contains(addr) && peers_received.contains(addr) { - debug!("Sending PeerDisconnected packet to peer {} for {}", peer, addr); - peers_sent.remove(addr); + debug!("Sending PeerDisconnected packet to peer {} for {}", peer.get_outgoing_address(), addr); // we send the packet to notify the peer that we don't have it in common anymore if let Err(e) = peer.send_bytes(packet.clone()).await { error!("Error while trying to send 
PeerDisconnected packet to peer {}: {}", peer.get_connection().get_address(), e); From 36c4c02f43c28a2e7fbedd49aa5790dc7bc08849 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 25 Oct 2023 22:04:37 +0200 Subject: [PATCH 144/160] daemon: fix potential deadlock --- xelis_daemon/src/p2p/mod.rs | 4 +++- xelis_daemon/src/rpc/rpc.rs | 6 +++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 49fa8bd9..8adf42fc 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1178,7 +1178,9 @@ impl P2pServer { self.handle_bootstrap_chain_request(peer, request.step()).await?; }, Packet::BootstrapChainResponse(response) => { + debug!("Received a bootstrap chain response ({:?}) from {}", response.kind(), peer); if let Some(sender) = peer.get_bootstrap_chain_channel().lock().await.take() { + trace!("Sending bootstrap chain response ({:?})", response.kind()); let response = response.response(); if let Err(e) = sender.send(response) { error!("Error while sending bootstrap response to channel: {:?}", e.kind()); @@ -1552,7 +1554,7 @@ impl P2pServer { // broadcast block to all peers that can accept directly this new block pub async fn broadcast_block(&self, block: &BlockHeader, cumulative_difficulty: u64, our_topoheight: u64, our_height: u64, pruned_topoheight: Option, hash: &Hash, lock: bool) { - trace!("Broadcast block: {}", hash); + info!("Broadcasting block {} at height {}", hash, block.get_height()); // we build the ping packet ourself this time (we have enough data for it) // because this function can be call from Blockchain, which would lead to a deadlock let ping = Ping::new(Cow::Borrowed(hash), our_topoheight, our_height, pruned_topoheight, cumulative_difficulty, Vec::new()); diff --git a/xelis_daemon/src/rpc/rpc.rs b/xelis_daemon/src/rpc/rpc.rs index 76cbd7a6..868db94c 100644 --- a/xelis_daemon/src/rpc/rpc.rs +++ b/xelis_daemon/src/rpc/rpc.rs @@ -436,8 +436,8 @@ async fn 
submit_transaction(blockchain: Arc>, body: Va async fn get_transaction(blockchain: Arc>, body: Value) -> Result { let params: GetTransactionParams = parse_params(body)?; - let mempool = blockchain.get_mempool().read().await; let storage = blockchain.get_storage().read().await; + let mempool = blockchain.get_mempool().read().await; get_transaction_response_for_hash(&*storage, &mempool, ¶ms.hash).await } @@ -494,8 +494,8 @@ async fn get_mempool(blockchain: Arc>, body: Value) -> return Err(InternalRpcError::UnexpectedParams) } - let mempool = blockchain.get_mempool().read().await; let storage = blockchain.get_storage().read().await; + let mempool = blockchain.get_mempool().read().await; let mut transactions: Vec = Vec::new(); for (hash, sorted_tx) in mempool.get_txs() { transactions.push(get_transaction_response(&*storage, sorted_tx.get_tx(), hash, true, Some(sorted_tx.get_first_seen())).await?); @@ -621,8 +621,8 @@ async fn get_transactions(blockchain: Arc>, body: Valu return Err(InternalRpcError::InvalidRequest).context(format!("Too many requested txs: {}, maximum is {}", hashes.len(), MAX_TXS))? 
} - let mempool = blockchain.get_mempool().read().await; let storage = blockchain.get_storage().read().await; + let mempool = blockchain.get_mempool().read().await; let mut transactions: Vec> = Vec::with_capacity(hashes.len()); for hash in hashes { let tx = match get_transaction_response_for_hash(&*storage, &mempool, &hash).await { From 12864584a80cbb75f9677cad3e6192f7f6593f83 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 25 Oct 2023 23:04:56 +0200 Subject: [PATCH 145/160] daemon: don't show error on already requested object --- xelis_daemon/src/p2p/mod.rs | 10 +++++++--- xelis_daemon/src/p2p/tracker.rs | 11 +++++++---- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 8adf42fc..51a8f5c7 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -795,7 +795,9 @@ impl P2pServer { // Check that the tx is not in mempool or on disk already if !self.blockchain.has_tx(&hash).await? { - self.object_tracker.request_object_from_peer(Arc::clone(peer), ObjectRequest::Transaction(hash.clone()), true).await?; + if !self.object_tracker.request_object_from_peer(Arc::clone(peer), ObjectRequest::Transaction(hash.clone()), true).await? { + debug!("TX propagated {} was already requested, ignoring", hash); + } } // Avoid sending the TX propagated to a common peer @@ -1159,8 +1161,10 @@ impl P2pServer { let storage = self.blockchain.get_storage().read().await; let mempool = self.blockchain.get_mempool().read().await; for hash in txs.into_owned() { - if !mempool.contains_tx(&hash) && !storage.has_transaction(&hash).await? && !self.object_tracker.has_requested_object(&hash).await { - self.object_tracker.request_object_from_peer(Arc::clone(peer), ObjectRequest::Transaction(hash.into_owned()), false).await?; + if !mempool.contains_tx(&hash) && !storage.has_transaction(&hash).await? 
{ + if !self.object_tracker.request_object_from_peer(Arc::clone(peer), ObjectRequest::Transaction(hash.into_owned()), false).await? { + debug!("TX was already requested, ignoring"); + } } } } diff --git a/xelis_daemon/src/p2p/tracker.rs b/xelis_daemon/src/p2p/tracker.rs index e8de5924..51ca6425 100644 --- a/xelis_daemon/src/p2p/tracker.rs +++ b/xelis_daemon/src/p2p/tracker.rs @@ -255,18 +255,21 @@ impl ObjectTracker { Ok(()) } - pub async fn request_object_from_peer(&self, peer: Arc, request: ObjectRequest, broadcast: bool) -> Result<(), P2pError> { + // Request the object from the peer or return false if it is already requested + pub async fn request_object_from_peer(&self, peer: Arc, request: ObjectRequest, broadcast: bool) -> Result { + trace!("Requesting object {} from {}", request.get_hash(), peer); let hash = { let mut queue = self.queue.write().await; let hash = request.get_hash().clone(); - if let Some(old) = queue.insert(hash.clone(), Request::new(request, peer, broadcast)) { - return Err(P2pError::ObjectAlreadyRequested(old.request)) + if queue.insert(hash.clone(), Request::new(request, peer, broadcast)).is_some() { + return Ok(false) } hash }; + trace!("Transfering object request {} to task", hash); self.request_sender.send(Message::Request(hash))?; - Ok(()) + Ok(true) } async fn request_object_from_peer_internal(&self, request_hash: &Hash) { From 14864b9699db743ed42447cb1d3ea17331508864 Mon Sep 17 00:00:00 2001 From: Slixe Date: Wed, 25 Oct 2023 23:57:01 +0200 Subject: [PATCH 146/160] daemon: notify listeners on drop --- xelis_daemon/src/p2p/mod.rs | 5 ++--- xelis_daemon/src/p2p/tracker.rs | 17 ++++++++++------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 51a8f5c7..d875cfa9 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -896,9 +896,8 @@ impl P2pServer { // Wait on all already requested txs for mut blocker in response_blockers { if let 
Err(e) = blocker.recv().await { - error!("Error while waiting on response blocker: {}", e); - peer.increment_fail_count(); - return; + // It's mostly a closed channel error, so we can ignore it + debug!("Error while waiting on response blocker: {}", e); } } diff --git a/xelis_daemon/src/p2p/tracker.rs b/xelis_daemon/src/p2p/tracker.rs index 51ca6425..a2c4a043 100644 --- a/xelis_daemon/src/p2p/tracker.rs +++ b/xelis_daemon/src/p2p/tracker.rs @@ -13,7 +13,7 @@ pub type SharedObjectTracker = Arc; pub type ResponseBlocker = tokio::sync::broadcast::Receiver<()>; -pub struct Listener { +struct Listener { sender: Option> } @@ -42,7 +42,6 @@ struct Request { broadcast: bool } - impl Request { pub fn new(request: ObjectRequest, peer: Arc, broadcast: bool) -> Self { Self { @@ -101,8 +100,14 @@ impl Request { self.broadcast } - pub fn to_listener(self) -> Listener { - Listener::new(self.sender) + fn to_listener(&mut self) -> Listener { + Listener::new(self.sender.take()) + } +} + +impl Drop for Request { + fn drop(&mut self) { + self.to_listener().notify(); } } @@ -188,7 +193,6 @@ impl ObjectTracker { if let Err(e) = self.handle_object_response_internal(&blockchain, response, request.broadcast()).await { error!("Error while handling object response for {} in ObjectTracker from {}: {}", request.get_hash(), request.get_peer(), e); } - request.to_listener().notify(); continue; } } @@ -197,8 +201,7 @@ impl ObjectTracker { if let Some((_, request)) = queue.get_index(0) { if let Some(requested_at) = request.get_requested() { if requested_at.elapsed() > Duration::from_millis(PEER_TIMEOUT_REQUEST_OBJECT) { - if let Some((_, request)) = queue.shift_remove_index(0) { - request.to_listener().notify(); + if queue.shift_remove_index(0).is_some() { continue; } } From c3c3eca25f91f880a79c9f457cb63469e71604a9 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 26 Oct 2023 00:27:14 +0200 Subject: [PATCH 147/160] daemon: use write task of peer for ping packets --- xelis_daemon/src/p2p/mod.rs | 4 
++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index d875cfa9..43a28d57 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -584,7 +584,7 @@ impl P2pServer { // update the ping packet with the new peers debug!("Set peers: {:?}, going to {}", new_peers, peer.get_outgoing_address()); // send the ping packet to the peer - if let Err(e) = peer.get_connection().send_bytes(&Packet::Ping(Cow::Borrowed(&ping)).to_bytes()).await { + if let Err(e) = peer.send_packet(Packet::Ping(Cow::Borrowed(&ping))).await { debug!("Error sending specific ping packet to {}: {}", peer, e); } } @@ -596,7 +596,7 @@ impl P2pServer { // broadcast directly the ping packet asap to all peers for peer in peerlist.get_peers().values() { trace!("broadcast to {}", peer); - if let Err(e) = peer.get_connection().send_bytes(&bytes).await { + if let Err(e) = peer.send_bytes(bytes.clone()).await { error!("Error while trying to broadcast directly ping packet to {}: {}", peer, e); }; } From e9fcdbeec0396772a474b4b19e2db99ac193eb4f Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 26 Oct 2023 00:38:43 +0200 Subject: [PATCH 148/160] daemon: increase timeout bootstrap step --- xelis_daemon/src/config.rs | 2 +- xelis_daemon/src/p2p/mod.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/xelis_daemon/src/config.rs b/xelis_daemon/src/config.rs index 22a62066..c6805fa7 100644 --- a/xelis_daemon/src/config.rs +++ b/xelis_daemon/src/config.rs @@ -71,7 +71,7 @@ pub const P2P_EXTEND_PEERLIST_DELAY: u64 = 60; // time in seconds between each t pub const PEER_FAIL_TIME_RESET: u64 = 60 * 5; // number of seconds to reset the counter pub const PEER_FAIL_LIMIT: u8 = 20; // number of fail to disconnect the peer pub const PEER_TIMEOUT_REQUEST_OBJECT: u64 = 3000; // millis until we timeout -pub const PEER_TIMEOUT_BOOTSTRAP_STEP: u64 = 20000; // millis until we timeout +pub const PEER_TIMEOUT_BOOTSTRAP_STEP: 
u64 = 60000; // millis until we timeout lazy_static! { pub static ref DEV_PUBLIC_KEY: PublicKey = Address::from_string(&DEV_ADDRESS.to_owned()).unwrap().to_public_key(); diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 43a28d57..c90205d1 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -872,6 +872,7 @@ impl P2pServer { let contains = { // we don't lock one time because we may wait on p2p response // Check in ObjectTracker if let Some(response_blocker) = zelf.object_tracker.get_response_blocker_for_requested_object(hash).await { + trace!("{} is already requested, waiting on response blocker for block {}", hash, block_hash); response_blockers.push(response_blocker); true } else { @@ -913,7 +914,7 @@ impl P2pServer { debug!("Adding received block {} from {} to chain", block_hash, peer); if let Err(e) = zelf.blockchain.add_new_block(block, true, false).await { - error!("Error while adding new block: {}", e); + error!("Error while adding new block from {}: {}", peer, e); peer.increment_fail_count(); } }); From 77de491b750b389d0f076657b6d95c0434c13fed Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 26 Oct 2023 00:41:15 +0200 Subject: [PATCH 149/160] daemon: reduce error to debug level for notify --- xelis_daemon/src/p2p/tracker.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/xelis_daemon/src/p2p/tracker.rs b/xelis_daemon/src/p2p/tracker.rs index a2c4a043..1761b27d 100644 --- a/xelis_daemon/src/p2p/tracker.rs +++ b/xelis_daemon/src/p2p/tracker.rs @@ -26,8 +26,8 @@ impl Listener { pub fn notify(self) { if let Some(sender) = self.sender { - if sender.send(()).is_err() { - error!("Error while sending notification to ObjectTracker"); + if let Err(e) = sender.send(()) { + debug!("Error while sending notification: {}", e); } } } From 81b374009f16d8947aae96270dc8f8846c6eadc0 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 26 Oct 2023 13:51:05 +0200 Subject: [PATCH 150/160] daemon: object response 
handler --- xelis_daemon/src/core/blockchain.rs | 16 +++++++++++++ xelis_daemon/src/p2p/error.rs | 2 ++ xelis_daemon/src/p2p/mod.rs | 37 +++++++++++++++++++---------- xelis_daemon/src/p2p/peer.rs | 5 ++++ 4 files changed, 47 insertions(+), 13 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 4b35e683..1dda6f05 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1065,6 +1065,22 @@ impl Blockchain { storage.has_transaction(hash).await } + // retrieve the TX based on its hash by searching in mempool then on disk + pub async fn get_tx(&self, hash: &Hash) -> Result, BlockchainError> { + // check in mempool first + // if its present, returns it + { + let mempool = self.mempool.read().await; + if let Ok(tx) = mempool.get_tx(hash) { + return Ok(tx) + } + } + + // check in storage now + let storage = self.storage.read().await; + storage.get_transaction(hash).await + } + pub async fn get_block_template_for_storage(&self, storage: &S, address: PublicKey) -> Result { let extra_nonce: [u8; EXTRA_NONCE_SIZE] = rand::thread_rng().gen::<[u8; EXTRA_NONCE_SIZE]>(); // generate random bytes let tips_set = storage.get_tips().await?; diff --git a/xelis_daemon/src/p2p/error.rs b/xelis_daemon/src/p2p/error.rs index a10e3bc6..835b8496 100644 --- a/xelis_daemon/src/p2p/error.rs +++ b/xelis_daemon/src/p2p/error.rs @@ -93,6 +93,8 @@ pub enum P2pError { ObjectAlreadyRequested(ObjectRequest), #[error("Invalid object response for request, received hash: {}", _0)] InvalidObjectResponse(Hash), + #[error("Invalid object response type for request")] + InvalidObjectResponseType, #[error(transparent)] ObjectRequestError(#[from] RecvError), #[error("Expected a block type")] diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index c90205d1..efcd4488 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1091,17 +1091,22 @@ impl P2pServer { 
Packet::ObjectResponse(response) => { trace!("Received a object response from {}", peer); let response = response.to_owned()?; - // check if the Object Tracker has requested this object - if self.object_tracker.has_requested_object(response.get_hash()).await { - self.object_tracker.handle_object_response(response).await?; - } else { // otherwise check if its specific to the peer - // check if we have requested this object & get the sender from it - let request = response.get_request(); + trace!("Object response received is {}", response.get_hash()); + + // check if we requested it from this peer + let request = response.get_request(); + if peer.has_requested_object(&request).await { let sender = peer.remove_object_request(request).await?; // handle the response if sender.send(response).is_err() { error!("Error while sending object response to sender!"); } + // check if the Object Tracker has requested this object + } else if self.object_tracker.has_requested_object(request.get_hash()).await { + trace!("Object Tracker requested it, handling it"); + self.object_tracker.handle_object_response(response).await?; + } else { + return Err(P2pError::ObjectNotRequested(request)) } }, Packet::NotifyInventoryRequest(packet_wrapper) => { @@ -1539,7 +1544,8 @@ impl P2pServer { for peer in peer_list.get_peers().values() { // check that the peer is not too far from us // otherwise we may spam him for nothing - if peer.get_topoheight() + CHAIN_SYNC_RESPONSE_MAX_BLOCKS as u64 > current_topoheight { + let peer_topoheight = peer.get_topoheight(); + if (peer_topoheight >= current_topoheight && peer_topoheight - current_topoheight < STABLE_LIMIT) || current_topoheight - peer_topoheight < STABLE_LIMIT { trace!("Peer {} is not too far from us, checking cache for tx hash {}", peer, tx); let mut txs_cache = peer.get_txs_cache().lock().await; // check that we didn't already send this tx to this peer or that he don't already have it @@ -1869,11 +1875,10 @@ impl P2pServer { } }, 
StepResponse::BlocksMetadata(blocks) => { - let mut storage = self.blockchain.get_storage().write().await; let mut lowest_topoheight = stable_topoheight; for (i, metadata) in blocks.into_iter().enumerate() { // check that we don't already have this block in storage - if storage.has_block(&metadata.hash).await? { + if self.blockchain.has_block(&metadata.hash).await? { continue; } @@ -1886,16 +1891,20 @@ impl P2pServer { let mut txs = Vec::with_capacity(header.get_txs_hashes().len()); for tx_hash in header.get_txs_hashes() { - if !storage.has_transaction(tx_hash).await? { + let tx = if self.blockchain.has_tx(tx_hash).await? { + Immutable::Arc(self.blockchain.get_tx(tx_hash).await?) + } else { let OwnedObjectResponse::Transaction(tx, _) = peer.request_blocking_object(ObjectRequest::Transaction(tx_hash.clone())).await? else { error!("Received an invalid requested object while fetching block transaction {}", tx_hash); - return Err(P2pError::InvalidPacket.into()) + return Err(P2pError::InvalidObjectResponseType.into()) }; - txs.push(Immutable::Owned(tx)); - } + Immutable::Owned(tx) + }; + txs.push(tx); } // link its TX to the block + let mut storage = self.blockchain.get_storage().write().await; for tx_hash in header.get_txs_hashes() { storage.add_block_for_tx(tx_hash, &hash)?; } @@ -1910,6 +1919,8 @@ impl P2pServer { // save the block with its transactions, difficulty storage.add_new_block(Arc::new(header), &txs, metadata.difficulty, hash).await?; } + + let mut storage = self.blockchain.get_storage().write().await; storage.set_pruned_topoheight(lowest_topoheight)?; storage.set_top_topoheight(top_topoheight)?; storage.set_top_height(top_height)?; diff --git a/xelis_daemon/src/p2p/peer.rs b/xelis_daemon/src/p2p/peer.rs index 76e2ee3b..09d7bd8b 100644 --- a/xelis_daemon/src/p2p/peer.rs +++ b/xelis_daemon/src/p2p/peer.rs @@ -244,6 +244,11 @@ impl Peer { &self.objects_requested } + pub async fn has_requested_object(&self, request: &ObjectRequest) -> bool { + let objects = 
self.objects_requested.lock().await; + objects.contains_key(&request) + } + pub async fn remove_object_request(&self, request: ObjectRequest) -> Result, P2pError> { let mut objects = self.objects_requested.lock().await; objects.remove(&request).ok_or(P2pError::ObjectNotFound(request)) From a949729cf95bbdbb2c9de517bb08f4bc3e8096e7 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 26 Oct 2023 16:07:20 +0200 Subject: [PATCH 151/160] daemon: queued blocks propagation handler --- xelis_daemon/src/p2p/mod.rs | 149 ++++++++++++++++++++---------------- 1 file changed, 85 insertions(+), 64 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index efcd4488..1d01cf53 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -102,7 +102,8 @@ pub struct P2pServer { last_sync_request_sent: AtomicU64, // used to check if we are already syncing with one peer or not object_tracker: SharedObjectTracker, // used to requests objects to peers and avoid requesting the same object to multiple peers is_running: AtomicBool, // used to check if the server is running or not in tasks - blocks_propagation_queue: Mutex> // Synced cache to prevent concurrent tasks adding the block + blocks_propagation_queue: Mutex>, // Synced cache to prevent concurrent tasks adding the block + blocks_processor: UnboundedSender<(Arc, BlockHeader, Hash)> // Sender for the blocks processing task to have a ordered queue } impl P2pServer { @@ -115,8 +116,10 @@ impl P2pServer { let mut rng = rand::thread_rng(); let peer_id: u64 = rng.gen(); // generate a random peer id for network let addr: SocketAddr = bind_address.parse()?; // parse the bind address - // create mspc channel - let (connections_sender, receiver) = mpsc::unbounded_channel(); + // create mspc channel for connections to peers + let (connections_sender, connections_receiver) = mpsc::unbounded_channel(); + + let (blocks_processer, blocks_processor_receiver) = mpsc::unbounded_channel(); let object_tracker = 
ObjectTracker::new(blockchain.clone()); @@ -133,16 +136,26 @@ impl P2pServer { last_sync_request_sent: AtomicU64::new(0), object_tracker, is_running: AtomicBool::new(true), - blocks_propagation_queue: Mutex::new(LruCache::new(STABLE_LIMIT as usize * TIPS_LIMIT)) + blocks_propagation_queue: Mutex::new(LruCache::new(STABLE_LIMIT as usize * TIPS_LIMIT)), + blocks_processor: blocks_processer }; let arc = Arc::new(server); - let zelf = Arc::clone(&arc); - tokio::spawn(async move { - if let Err(e) = zelf.start(receiver, use_peerlist, exclusive_nodes).await { - error!("Unexpected error on P2p module: {}", e); - } - }); + { + let zelf = Arc::clone(&arc); + tokio::spawn(async move { + if let Err(e) = zelf.start(connections_receiver, use_peerlist, exclusive_nodes).await { + error!("Unexpected error on P2p module: {}", e); + } + }); + } + + // Start the blocks processing task to have a queued handler + { + let zelf = Arc::clone(&arc); + tokio::spawn(zelf.blocks_processing_task(blocks_processor_receiver)); + } + Ok(arc) } @@ -630,6 +643,65 @@ impl P2pServer { } } + // Task for all blocks propagation + async fn blocks_processing_task(self: Arc, mut receiver: UnboundedReceiver<(Arc, BlockHeader, Hash)>) { + debug!("Starting blocks processing task"); + while let Some((peer, header, block_hash)) = receiver.recv().await { + let mut response_blockers: Vec = Vec::new(); + for hash in header.get_txs_hashes() { + let contains = { // we don't lock one time because we may wait on p2p response + // Check in ObjectTracker + if let Some(response_blocker) = self.object_tracker.get_response_blocker_for_requested_object(hash).await { + trace!("{} is already requested, waiting on response blocker for block {}", hash, block_hash); + response_blockers.push(response_blocker); + true + } else { + self.blockchain.has_tx(hash).await.unwrap_or(false) + } + }; + + if !contains { // retrieve one by one to prevent acquiring the lock for nothing + debug!("Requesting TX {} to {} for block {}", hash, peer, 
block_hash); + if let Err(e) = self.object_tracker.request_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash.clone()), false).await { + error!("Error while requesting TX {} to {} for block {}: {}", hash, peer, block_hash, e); + peer.increment_fail_count(); + return; + } + + if let Some(response_blocker) = self.object_tracker.get_response_blocker_for_requested_object(hash).await { + response_blockers.push(response_blocker); + } + } + } + + // Wait on all already requested txs + for mut blocker in response_blockers { + if let Err(e) = blocker.recv().await { + // It's mostly a closed channel error, so we can ignore it + debug!("Error while waiting on response blocker: {}", e); + } + } + + // add immediately the block to chain as we are synced with + let block = match self.blockchain.build_block_from_header(Immutable::Owned(header)).await { + Ok(block) => block, + Err(e) => { + error!("Error while building block {} from peer {}: {}", block_hash, peer, e); + peer.increment_fail_count(); + return; + } + }; + + debug!("Adding received block {} from {} to chain", block_hash, peer); + if let Err(e) = self.blockchain.add_new_block(block, true, false).await { + error!("Error while adding new block from {}: {}", peer, e); + peer.increment_fail_count(); + } + } + + debug!("Blocks processing task ended"); + } + // this function handle the logic to send all packets to the peer async fn handle_connection_write_side(&self, peer: &Arc, rx: &mut UnboundedReceiver) -> Result<(), P2pError> { loop { @@ -863,61 +935,10 @@ impl P2pServer { let block_height = header.get_height(); debug!("Received block at height {} from {}", block_height, peer); - let zelf = Arc::clone(self); let peer = Arc::clone(peer); - // verify that we have all txs in local or ask peer to get missing txs - tokio::spawn(async move { - let mut response_blockers: Vec = Vec::new(); - for hash in header.get_txs_hashes() { - let contains = { // we don't lock one time because we may wait on p2p response - 
// Check in ObjectTracker - if let Some(response_blocker) = zelf.object_tracker.get_response_blocker_for_requested_object(hash).await { - trace!("{} is already requested, waiting on response blocker for block {}", hash, block_hash); - response_blockers.push(response_blocker); - true - } else { - zelf.blockchain.has_tx(hash).await.unwrap_or(false) - } - }; - - if !contains { // retrieve one by one to prevent acquiring the lock for nothing - debug!("Requesting TX {} to {} for block {}", hash, peer, block_hash); - if let Err(e) = zelf.object_tracker.request_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash.clone()), false).await { - error!("Error while requesting TX {} to {} for block {}: {}", hash, peer, block_hash, e); - peer.increment_fail_count(); - return; - } - - if let Some(response_blocker) = zelf.object_tracker.get_response_blocker_for_requested_object(hash).await { - response_blockers.push(response_blocker); - } - } - } - - // Wait on all already requested txs - for mut blocker in response_blockers { - if let Err(e) = blocker.recv().await { - // It's mostly a closed channel error, so we can ignore it - debug!("Error while waiting on response blocker: {}", e); - } - } - - // add immediately the block to chain as we are synced with - let block = match zelf.blockchain.build_block_from_header(Immutable::Owned(header)).await { - Ok(block) => block, - Err(e) => { - error!("Error while building block {} from peer {}: {}", block_hash, peer, e); - peer.increment_fail_count(); - return; - } - }; - - debug!("Adding received block {} from {} to chain", block_hash, peer); - if let Err(e) = zelf.blockchain.add_new_block(block, true, false).await { - error!("Error while adding new block from {}: {}", peer, e); - peer.increment_fail_count(); - } - }); + if let Err(e) = self.blocks_processor.send((peer, header, block_hash)) { + error!("Error while sending block propagated to blocks processor task: {}", e); + } }, Packet::ChainRequest(packet_wrapper) => { 
trace!("Received a chain request from {}", peer); From 19458f86c5b0432dd37337cf6372c32dc2128e97 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 26 Oct 2023 16:32:20 +0200 Subject: [PATCH 152/160] daemon: fix block processing task --- xelis_daemon/src/p2p/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 1d01cf53..5286be03 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -665,7 +665,7 @@ impl P2pServer { if let Err(e) = self.object_tracker.request_object_from_peer(Arc::clone(&peer), ObjectRequest::Transaction(hash.clone()), false).await { error!("Error while requesting TX {} to {} for block {}: {}", hash, peer, block_hash, e); peer.increment_fail_count(); - return; + continue; } if let Some(response_blocker) = self.object_tracker.get_response_blocker_for_requested_object(hash).await { @@ -688,7 +688,7 @@ impl P2pServer { Err(e) => { error!("Error while building block {} from peer {}: {}", block_hash, peer, e); peer.increment_fail_count(); - return; + continue; } }; @@ -1911,7 +1911,9 @@ impl P2pServer { }; let mut txs = Vec::with_capacity(header.get_txs_hashes().len()); + debug!("Retrieving {} txs for block {}", header.get_txs_count(), hash); for tx_hash in header.get_txs_hashes() { + trace!("Retrieving TX {} for block {}", tx_hash, hash); let tx = if self.blockchain.has_tx(tx_hash).await? { Immutable::Arc(self.blockchain.get_tx(tx_hash).await?) 
} else { From f297ff308625821c2c84b979f4760490b1a4b7c3 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 26 Oct 2023 16:50:17 +0200 Subject: [PATCH 153/160] daemon: clean up code --- xelis_daemon/src/core/blockchain.rs | 2 +- xelis_daemon/src/p2p/mod.rs | 13 ++----------- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 1dda6f05..0b063356 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1189,7 +1189,7 @@ impl Blockchain { let block_hash = block.hash(); debug!("Add new block {}", block_hash); if storage.has_block(&block_hash).await? { - error!("Block is already in chain!"); + error!("Block {} is already in chain!", block_hash); return Err(BlockchainError::AlreadyInChain) } diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 5286be03..bb3c898f 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1403,16 +1403,8 @@ impl P2pServer { for tx_hash in header.get_txs_hashes() { // check first on disk in case it was already fetch by a previous block // it can happens as TXs can be integrated in multiple blocks and executed only one time - let potential_tx_on_disk = { - let storage = self.blockchain.get_storage().read().await; - if storage.has_transaction(tx_hash).await? { - Some(storage.get_transaction(tx_hash).await?) - } else { - None - } - }; // check if we find it - if let Some(tx) = potential_tx_on_disk { + if let Some(tx) = self.blockchain.get_tx(tx_hash).await.ok() { trace!("Found the transaction {} on disk", tx_hash); transactions.push(Immutable::Arc(tx)); } else { // otherwise, ask it from peer @@ -1439,8 +1431,7 @@ impl P2pServer { for hash in blocks { // Request all blocks now if !self.blockchain.has_block(&hash).await? 
{ trace!("Block {} is not found, asking it to peer", hash); - let object_request = ObjectRequest::Block(hash.clone()); - let response = peer.request_blocking_object(object_request).await?; + let response = peer.request_blocking_object(ObjectRequest::Block(hash.clone())).await?; if let OwnedObjectResponse::Block(block, hash) = response { trace!("Received block {} at height {} from {}", hash, block.get_height(), peer); self.blockchain.add_new_block(block, false, false).await?; From 7d8733335e8cc94de9d990e104a7cf938c5cf786 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 26 Oct 2023 17:11:25 +0200 Subject: [PATCH 154/160] daemon: for debug, one task per packet handling --- xelis_daemon/src/p2p/mod.rs | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index bb3c898f..9f6243b3 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1258,12 +1258,23 @@ impl P2pServer { // Packet is read from the same task always, while its handling is delegated to a unique task async fn listen_connection(self: &Arc, buf: &mut [u8], peer: &Arc) -> Result<(), P2pError> { // Read & parse the packet - let packet = peer.get_connection().read_packet(buf, MAX_BLOCK_SIZE as u32).await?; + let bytes = peer.get_connection().read_packet_bytes(buf, MAX_BLOCK_SIZE as u32).await?; // Handle the packet - if let Err(e) = self.handle_incoming_packet(&peer, packet).await { - error!("Error occured while handling incoming packet from {}: {}", peer, e); - peer.increment_fail_count(); - } + let zelf = self.clone(); + let peer = peer.clone(); + tokio::spawn(async move { + let packet = match peer.get_connection().read_packet_from_bytes(&bytes).await { + Ok(packet) => packet, + Err(e) => { + error!("Error while parsing packet from bytes: {}", e); + return; + } + }; + if let Err(e) = zelf.handle_incoming_packet(&peer, packet).await { + error!("Error occured while handling incoming packet from {}: 
{}", peer, e); + peer.increment_fail_count(); + } + }); Ok(()) } From 89ba4e8534f21ab26687faff6da7631838a4e937 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 26 Oct 2023 17:25:23 +0200 Subject: [PATCH 155/160] daemon: reverse last commit --- xelis_daemon/src/p2p/mod.rs | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 9f6243b3..bb3c898f 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1258,23 +1258,12 @@ impl P2pServer { // Packet is read from the same task always, while its handling is delegated to a unique task async fn listen_connection(self: &Arc, buf: &mut [u8], peer: &Arc) -> Result<(), P2pError> { // Read & parse the packet - let bytes = peer.get_connection().read_packet_bytes(buf, MAX_BLOCK_SIZE as u32).await?; + let packet = peer.get_connection().read_packet(buf, MAX_BLOCK_SIZE as u32).await?; // Handle the packet - let zelf = self.clone(); - let peer = peer.clone(); - tokio::spawn(async move { - let packet = match peer.get_connection().read_packet_from_bytes(&bytes).await { - Ok(packet) => packet, - Err(e) => { - error!("Error while parsing packet from bytes: {}", e); - return; - } - }; - if let Err(e) = zelf.handle_incoming_packet(&peer, packet).await { - error!("Error occured while handling incoming packet from {}: {}", peer, e); - peer.increment_fail_count(); - } - }); + if let Err(e) = self.handle_incoming_packet(&peer, packet).await { + error!("Error occured while handling incoming packet from {}: {}", peer, e); + peer.increment_fail_count(); + } Ok(()) } From 1944f2777131922ce12374bbd4b8f5f39950d0ab Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 26 Oct 2023 18:07:48 +0200 Subject: [PATCH 156/160] daemon: increase time out request object to 15s --- xelis_daemon/src/config.rs | 2 +- xelis_daemon/src/p2p/mod.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/xelis_daemon/src/config.rs 
b/xelis_daemon/src/config.rs index c6805fa7..04deaa61 100644 --- a/xelis_daemon/src/config.rs +++ b/xelis_daemon/src/config.rs @@ -70,7 +70,7 @@ pub const P2P_EXTEND_PEERLIST_DELAY: u64 = 60; // time in seconds between each t // Peer rules pub const PEER_FAIL_TIME_RESET: u64 = 60 * 5; // number of seconds to reset the counter pub const PEER_FAIL_LIMIT: u8 = 20; // number of fail to disconnect the peer -pub const PEER_TIMEOUT_REQUEST_OBJECT: u64 = 3000; // millis until we timeout +pub const PEER_TIMEOUT_REQUEST_OBJECT: u64 = 15000; // millis until we timeout pub const PEER_TIMEOUT_BOOTSTRAP_STEP: u64 = 60000; // millis until we timeout lazy_static! { diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index bb3c898f..ad7c20f8 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1914,6 +1914,7 @@ impl P2pServer { }; Immutable::Owned(tx) }; + trace!("TX {} ok", tx_hash); txs.push(tx); } From 079479940c146748e6f2e6ef2cc97444558c982f Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 26 Oct 2023 18:13:31 +0200 Subject: [PATCH 157/160] daemon: don't keep the lock while requesting object --- xelis_daemon/src/p2p/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index ad7c20f8..ec63202c 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1184,10 +1184,9 @@ impl P2pServer { } } - let storage = self.blockchain.get_storage().read().await; - let mempool = self.blockchain.get_mempool().read().await; for hash in txs.into_owned() { - if !mempool.contains_tx(&hash) && !storage.has_transaction(&hash).await? { + // Verify that we don't already have it + if !self.blockchain.has_tx(&hash).await? { if !self.object_tracker.request_object_from_peer(Arc::clone(peer), ObjectRequest::Transaction(hash.into_owned()), false).await? 
{ debug!("TX was already requested, ignoring"); } From 2ea7bc231e0d2c5aab1f49313fe2f5efd219b4fb Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 26 Oct 2023 20:25:18 +0200 Subject: [PATCH 158/160] daemon: don't keep the lock --- xelis_daemon/src/core/blockchain.rs | 2 + xelis_daemon/src/p2p/mod.rs | 73 +++++++++++++++-------------- 2 files changed, 41 insertions(+), 34 deletions(-) diff --git a/xelis_daemon/src/core/blockchain.rs b/xelis_daemon/src/core/blockchain.rs index 0b063356..1d32e99a 100644 --- a/xelis_daemon/src/core/blockchain.rs +++ b/xelis_daemon/src/core/blockchain.rs @@ -1162,10 +1162,12 @@ impl Blockchain { } pub async fn build_block_from_header(&self, header: Immutable) -> Result { + trace!("Searching TXs for block at height {}", header.get_height()); let mut transactions: Vec> = Vec::with_capacity(header.get_txs_count()); let storage = self.storage.read().await; let mempool = self.mempool.read().await; for hash in header.get_txs_hashes() { + trace!("Searching TX {} for building block", hash); // at this point, we don't want to lose/remove any tx, we clone it only let tx = if mempool.contains_tx(hash) { mempool.get_tx(hash)? diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index ec63202c..8e98580c 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -1064,45 +1064,50 @@ impl P2pServer { let request = request.into_owned(); match &request { ObjectRequest::Block(hash) => { - let storage = self.blockchain.get_storage().read().await; - if storage.has_block(hash).await? 
{ - let block = storage.get_block(hash).await?; - peer.send_packet(Packet::ObjectResponse(ObjectResponse::Block(Cow::Owned(block)))).await?; - } else { - debug!("{} asked block '{}' but not present in our chain", peer, hash); - peer.send_packet(Packet::ObjectResponse(ObjectResponse::NotFound(request))).await?; - } + debug!("{} asked full block {}", peer, hash); + let block = { + let storage = self.blockchain.get_storage().read().await; + storage.get_block(hash).await + }; + + match block { + Ok(block) => { + debug!("block {} found, sending it", hash); + peer.send_packet(Packet::ObjectResponse(ObjectResponse::Block(Cow::Borrowed(&block)))).await?; + }, + Err(e) => { + debug!("{} asked block '{}' but not present in our chain: {}", peer, hash, e); + peer.send_packet(Packet::ObjectResponse(ObjectResponse::NotFound(request))).await?; + } + }; }, ObjectRequest::BlockHeader(hash) => { - let storage = self.blockchain.get_storage().read().await; - if storage.has_block(hash).await? { - let header = storage.get_block_header_by_hash(hash).await?; - peer.send_packet(Packet::ObjectResponse(ObjectResponse::BlockHeader(Cow::Borrowed(&header)))).await?; - } else { - debug!("{} asked block header '{}' but not present in our chain", peer, hash); - peer.send_packet(Packet::ObjectResponse(ObjectResponse::NotFound(request))).await?; - } - }, - ObjectRequest::Transaction(hash) => { - let search_on_disk = { - let mempool = self.blockchain.get_mempool().read().await; - if let Ok(tx) = mempool.view_tx(hash) { - peer.send_packet(Packet::ObjectResponse(ObjectResponse::Transaction(Cow::Borrowed(tx)))).await?; - false - } else { - debug!("{} asked transaction '{}' but not present in our mempool", peer, hash); - true - } + debug!("{} asked block header {}", peer, hash); + let block = { + let storage = self.blockchain.get_storage().read().await; + storage.get_block_header_by_hash(hash).await }; - if search_on_disk { - debug!("Looking on disk for transaction {}", hash); - let storage = 
self.blockchain.get_storage().read().await; - if storage.has_transaction(hash).await? { - let tx = storage.get_transaction(hash).await?; + match block { + Ok(block) => { + debug!("block header {} found, sending it", hash); + peer.send_packet(Packet::ObjectResponse(ObjectResponse::BlockHeader(Cow::Borrowed(&block)))).await?; + }, + Err(e) => { + debug!("{} asked block header '{}' but not present in our chain: {}", peer, hash, e); + peer.send_packet(Packet::ObjectResponse(ObjectResponse::NotFound(request))).await?; + } + }; + }, + ObjectRequest::Transaction(hash) => { + debug!("{} asked tx {}", peer, hash); + match self.blockchain.get_tx(hash).await { + Ok(tx) => { + debug!("tx {} found, sending it", hash); peer.send_packet(Packet::ObjectResponse(ObjectResponse::Transaction(Cow::Borrowed(&tx)))).await?; - } else { - debug!("{} asked transaction '{}' but not present in our chain", peer, hash); + }, + Err(e) => { + debug!("{} asked tx '{}' but not present in our chain: {}", peer, hash, e); peer.send_packet(Packet::ObjectResponse(ObjectResponse::NotFound(request))).await?; } } From def0bed0a9646d5d41d8a69f3a51fc52bebc3d71 Mon Sep 17 00:00:00 2001 From: Slixe Date: Thu, 26 Oct 2023 22:59:52 +0200 Subject: [PATCH 159/160] daemon: don't returns error for already tracked tx (temp) --- xelis_daemon/src/p2p/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/xelis_daemon/src/p2p/mod.rs b/xelis_daemon/src/p2p/mod.rs index 8e98580c..b1dccf4f 100644 --- a/xelis_daemon/src/p2p/mod.rs +++ b/xelis_daemon/src/p2p/mod.rs @@ -860,7 +860,8 @@ impl P2pServer { let mut txs_cache = peer.get_txs_cache().lock().await; if txs_cache.contains(&hash) { debug!("{} send us a transaction ({}) already tracked by him", peer, hash); - return Err(P2pError::AlreadyTrackedTx(hash)) + // TODO Fix common peer detection + return Ok(()) // Err(P2pError::AlreadyTrackedTx(hash)) } txs_cache.put(hash.clone(), ()); } From 9ed1268629a0a8c4b7f04af34553cee0dfe1f4ee Mon Sep 17 00:00:00 2001 
From: Slixe Date: Thu, 26 Oct 2023 23:02:19 +0200 Subject: [PATCH 160/160] common: set same level for file logging --- xelis_common/src/prompt/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/xelis_common/src/prompt/mod.rs b/xelis_common/src/prompt/mod.rs index 1da64f6d..e8f30eb9 100644 --- a/xelis_common/src/prompt/mod.rs +++ b/xelis_common/src/prompt/mod.rs @@ -668,6 +668,7 @@ impl Prompt { } let file_log = fern::Dispatch::new() + .level(level.into()) .format(move |out, message, record| { let pad = " ".repeat((30i16 - record.target().len() as i16).max(0) as usize); let level_pad = if record.level() == Level::Error || record.level() == Level::Debug { "" } else { " " };