diff --git a/src/graphql/node.rs b/src/graphql/node.rs index b9f515a6..8f3917a1 100644 --- a/src/graphql/node.rs +++ b/src/graphql/node.rs @@ -13,7 +13,10 @@ use ipnet::Ipv4Net; use review_database::{Indexable, Indexed}; use roxy::Process as RoxyProcess; use serde::{Deserialize, Serialize}; -use std::net::{IpAddr, SocketAddr}; +use std::{ + collections::HashMap, + net::{IpAddr, SocketAddr}, +}; pub type PortNumber = u16; @@ -109,43 +112,59 @@ pub(super) struct Node { customer_id: u32, description: String, pub(super) hostname: String, - nics: Vec, - disk_usage_limit: Option, - #[graphql(skip)] - allow_access_from: Option>, - - #[graphql(skip)] - review_id: Option, - - ssh_port: PortNumber, - #[graphql(skip)] - dns_server_ip: Option, - dns_server_port: Option, - #[graphql(skip)] - syslog_server_ip: Option, - syslog_server_port: Option, review: bool, - review_nics: Option>, review_port: Option, review_web_port: Option, - #[graphql(skip)] - ntp_server_ip: Option, - ntp_server_port: Option, piglet: bool, - + #[graphql(skip)] + piglet_giganto_ip: Option, + piglet_giganto_port: Option, + #[graphql(skip)] + piglet_review_ip: Option, + piglet_review_port: Option, + save_packets: bool, + http: bool, + office: bool, + exe: bool, + pdf: bool, + html: bool, + txt: bool, + smtp_eml: bool, + ftp: bool, giganto: bool, - giganto_ingestion_nics: Option>, + #[graphql(skip)] + giganto_ingestion_ip: Option, giganto_ingestion_port: Option, - giganto_publish_nics: Option>, + #[graphql(skip)] + giganto_publish_ip: Option, giganto_publish_port: Option, - giganto_graphql_nics: Option>, + #[graphql(skip)] + giganto_graphql_ip: Option, giganto_graphql_port: Option, + retention_period: Option, reconverge: bool, + #[graphql(skip)] + reconverge_review_ip: Option, + reconverge_review_port: Option, + #[graphql(skip)] + reconverge_giganto_ip: Option, + reconverge_giganto_port: Option, hog: bool, + #[graphql(skip)] + hog_review_ip: Option, + hog_review_port: Option, + #[graphql(skip)] + hog_giganto_ip: Option, + hog_giganto_port: Option, + protocols: bool, + protocol_list: HashMap, + + sensors: bool, + sensor_list: HashMap, creation_time: DateTime, } @@ -160,29 +179,32 @@ impl Node { ID(self.customer_id.to_string()) } - async fn allow_access_from(&self) -> Option> { - self.allow_access_from.as_ref().map(|allow| { - allow - .iter() - .map(ToString::to_string) - .collect::>() - }) + async fn piglet_giganto_ip(&self) -> Option { + self.piglet_giganto_ip.as_ref().map(ToString::to_string) } - - async fn review_id(&self) -> Option { - self.review_id.map(|id| ID(id.to_string())) + async fn piglet_review_ip(&self) -> Option { + self.piglet_review_ip.as_ref().map(ToString::to_string) } - - async fn ntp_server_ip(&self) -> Option { - self.ntp_server_ip.as_ref().map(ToString::to_string) + async fn giganto_ingestion_ip(&self) -> Option { + self.giganto_ingestion_ip.as_ref().map(ToString::to_string) } - - async fn dns_server_ip(&self) -> Option { - self.dns_server_ip.as_ref().map(ToString::to_string) + async fn giganto_publish_ip(&self) -> Option { + self.giganto_publish_ip.as_ref().map(ToString::to_string) } - - async fn syslog_server_ip(&self) -> Option { - self.syslog_server_ip.as_ref().map(ToString::to_string) + async fn giganto_graphql_ip(&self) -> Option { + self.giganto_graphql_ip.as_ref().map(ToString::to_string) + } + async fn reconverge_review_ip(&self) -> Option { + self.reconverge_review_ip.as_ref().map(ToString::to_string) + } + async fn reconverge_giganto_ip(&self) -> Option { + 
self.reconverge_giganto_ip.as_ref().map(ToString::to_string) + } + async fn hog_review_ip(&self) -> Option { + self.hog_review_ip.as_ref().map(ToString::to_string) + } + async fn hog_giganto_ip(&self) -> Option { + self.hog_giganto_ip.as_ref().map(ToString::to_string) } } @@ -328,20 +350,16 @@ impl Indexable for NodeStatus { #[derive(Serialize)] pub struct Setting { name: String, - nics: Vec, - accesslist: Option>, - disklimit: f32, + // ingest, publish address of Piglet. web_addr is not used + piglet: Option, // graphql, ingest, publish address of Giganto giganto: Option, - hog: bool, - ntp: Option, - piglet: bool, - reconverge: bool, + // ingest, publish address of Hog. web_addr is not used + hog: Option, + // ingest, publish address of REconverge. web_addr is not used + reconverge: Option, // rpc, web address of REview. pub_addr is not used - review: Option, - ssh: Option, - // True for UDP, False for TCP - syslog: Option>, + review: Option, } #[derive(Serialize)] @@ -349,6 +367,13 @@ pub struct ServerAddress { web_addr: SocketAddr, rpc_addr: SocketAddr, pub_addr: SocketAddr, + ing_addr: SocketAddr, +} + +#[derive(Serialize)] +pub struct ServerPort { + rpc_port: PortNumber, + web_port: PortNumber, } #[derive(Clone, Deserialize, Serialize, SimpleObject)] @@ -392,20 +417,45 @@ mod tests { customerId: 0, description: "This is the admin node running review.", hostname: "admin.aice-security.com", - nics: [{ - name: "eth0", - interface: "192.168.0.1/24", - gateway: "192.168.0.254" - }], - sshPort: 22, review: true, - reviewNics: ["eth0"], reviewPort: 38390, - reviewWebPort: 38391, + reviewWebPort: 8443, piglet: false, + pigletGigantoIp: null, + pigletGigantoPort: null, + pigletReviewIp: null, + pigletReviewPort: null, + savePackets: false, + http: false, + office: false, + exe: false, + pdf: false, + html: false, + txt: false, + smtpEml: false, + ftp: false, giganto: false, + gigantoIngestionIp: null, + gigantoIngestionPort: null, + gigantoPublishIp: null, + gigantoPublishPort: null, + gigantoGraphqlIp: null, + gigantoGraphqlPort: null, + retentionPeriod: null, reconverge: false, - hog: false + reconvergeReviewIp: null, + reconvergeReviewPort: null, + reconvergeGigantoIp: null, + reconvergeGigantoPort: null, + hog: false, + hogReviewIp: null, + hogReviewPort: null, + hogGigantoIp: null, + hogGigantoPort: null, + protocols: false, + protocolList: {}, + sensors: false, + sensorList: {}, ) }"#, ) @@ -415,53 +465,103 @@ mod tests { let res = schema .execute( r#"mutation { - replaceNode( + updateNode( id: "0" old: { name: "admin node", customerId: 0, description: "This is the admin node running review.", hostname: "admin.aice-security.com", - nics: [{ - name: "eth0", - interface: "192.168.0.1/24", - gateway: "192.168.0.254" - }], - sshPort: 22, review: true, - reviewNics: ["eth0"], reviewPort: 38390, - reviewWebPort: 38391, + reviewWebPort: 8443, piglet: false, + pigletGigantoIp: null, + pigletGigantoPort: null, + pigletReviewIp: null, + pigletReviewPort: null, + savePackets: false, + http: false, + office: false, + exe: false, + pdf: false, + html: false, + txt: false, + smtpEml: false, + ftp: false, giganto: false, + gigantoIngestionIp: null, + gigantoIngestionPort: null, + gigantoPublishIp: null, + gigantoPublishPort: null, + gigantoGraphqlIp: null, + gigantoGraphqlPort: null, + retentionPeriod: null, reconverge: false, - hog: false + reconvergeReviewIp: null, + reconvergeReviewPort: null, + reconvergeGigantoIp: null, + reconvergeGigantoPort: null, + hog: false, + hogReviewIp: null, + 
hogReviewPort: null, + hogGigantoIp: null, + hogGigantoPort: null, + protocols: false, + protocolList: {}, + sensors: false, + sensorList: {}, }, new: { name: "AdminNode", customerId: 0, description: "This is the admin node running review.", hostname: "admin.aice-security.com", - nics: [{ - name: "eth0", - interface: "192.168.0.1/24", - gateway: "192.168.0.254" - }], - sshPort: 23, review: true, - reviewNics: ["eth0"], reviewPort: 38391, - reviewWebPort: 38392, + reviewWebPort: 8443, piglet: false, + pigletGigantoIp: null, + pigletGigantoPort: null, + pigletReviewIp: null, + pigletReviewPort: null, + savePackets: false, + http: false, + office: false, + exe: false, + pdf: false, + html: false, + txt: false, + smtpEml: false, + ftp: false, giganto: false, + gigantoIngestionIp: null, + gigantoIngestionPort: null, + gigantoPublishIp: null, + gigantoPublishPort: null, + gigantoGraphqlIp: null, + gigantoGraphqlPort: null, + retentionPeriod: null, reconverge: false, - hog: false + reconvergeReviewIp: null, + reconvergeReviewPort: null, + reconvergeGigantoIp: null, + reconvergeGigantoPort: null, + hog: false, + hogReviewIp: null, + hogReviewPort: null, + hogGigantoIp: null, + hogGigantoPort: null, + protocols: false, + protocolList: {}, + sensors: false, + sensorList: {}, } ) }"#, ) .await; - assert_eq!(res.data.to_string(), r#"{replaceNode: "0"}"#); + assert_eq!(res.data.to_string(), r#"{updateNode: "0"}"#); let res = schema .execute( diff --git a/src/graphql/node/crud.rs b/src/graphql/node/crud.rs index 95e5d397..4075cdea 100644 --- a/src/graphql/node/crud.rs +++ b/src/graphql/node/crud.rs @@ -4,8 +4,8 @@ use crate::graphql::{customer::broadcast_customer_networks, get_customer_network use super::{ super::{Role, RoleGuard}, - Nic, NicInput, Node, NodeInput, NodeMutation, NodeQuery, NodeTotalCount, PortNumber, - ServerAddress, Setting, + Node, NodeInput, NodeMutation, NodeQuery, NodeTotalCount, PortNumber, ServerAddress, + ServerPort, Setting, }; use async_graphql::{ connection::{query, Connection, EmptyFields}, @@ -15,7 +15,10 @@ use async_graphql::{ use bincode::Options; use chrono::Utc; use review_database::{Indexed, IterableMap, Store}; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::{ + collections::HashMap, + net::{IpAddr, Ipv4Addr, SocketAddr}, +}; use tracing::error; #[Object] @@ -72,147 +75,195 @@ impl NodeMutation { customer_id: ID, description: String, hostname: String, - nics: Vec, - disk_usage_limit: Option, - allow_access_from: Option>, - - review_id: Option, - - // TODO: change to "ssh_port: Option" - ssh_port: PortNumber, - dns_server_ip: Option, - dns_server_port: Option, - syslog_server_ip: Option, - syslog_server_port: Option, review: bool, - review_nics: Option>, review_port: Option, review_web_port: Option, - ntp_server_ip: Option, - ntp_server_port: Option, piglet: bool, + piglet_giganto_ip: Option, + piglet_giganto_port: Option, + piglet_review_ip: Option, + piglet_review_port: Option, + save_packets: bool, + http: bool, + office: bool, + exe: bool, + pdf: bool, + html: bool, + txt: bool, + smtp_eml: bool, + ftp: bool, giganto: bool, - giganto_ingestion_nics: Option>, + giganto_ingestion_ip: Option, giganto_ingestion_port: Option, - giganto_publish_nics: Option>, + giganto_publish_ip: Option, giganto_publish_port: Option, - giganto_graphql_nics: Option>, + giganto_graphql_ip: Option, giganto_graphql_port: Option, + retention_period: Option, reconverge: bool, + reconverge_review_ip: Option, + reconverge_review_port: Option, + reconverge_giganto_ip: Option, + 
reconverge_giganto_port: Option, hog: bool, + hog_review_ip: Option, + hog_review_port: Option, + hog_giganto_ip: Option, + hog_giganto_port: Option, + protocols: bool, + protocol_list: HashMap, + sensors: bool, + sensor_list: HashMap, ) -> Result { - let (id, customer_id, review) = { + let (id, customer_id) = { let store = crate::graphql::get_store(ctx).await?; let map = store.node_map(); let customer_id = customer_id .as_str() .parse::() .map_err(|_| "invalid customer ID")?; - let mut new_nics = Vec::::with_capacity(nics.len()); - for n in nics { - new_nics.push(n.try_into().map_err(|_| "invalid IP address: nic")?); - } - let original_count = new_nics.len(); - new_nics.sort_by(|a, b| a.name.cmp(&b.name)); - new_nics.dedup_by(|a, b| a.name == b.name); - if new_nics.len() != original_count { - return Err("duplicate network interface name".into()); - } - let allow_access_from = if let Some(allow_access_from) = allow_access_from { - let mut new_allow = Vec::::new(); - for ip in allow_access_from { - new_allow.push( - ip.as_str() - .parse::() - .map_err(|_| "invalid IP address: access")?, - ); - } - new_allow.sort_unstable(); - new_allow.dedup(); - Some(new_allow) + let piglet_giganto_ip = if let Some(ip) = piglet_giganto_ip { + Some( + ip.as_str() + .parse::() + .map_err(|_| "invalid IP address: storage")?, + ) + } else { + None + }; + let piglet_review_ip = if let Some(ip) = piglet_review_ip { + Some( + ip.as_str() + .parse::() + .map_err(|_| "invalid IP address: administration")?, + ) } else { None }; - let review_id = if let Some(id) = review_id { - Some(id.parse::().map_err(|_| "invalid review ID")?) + let giganto_ingestion_ip = if let Some(ip) = giganto_ingestion_ip { + Some( + ip.as_str() + .parse::() + .map_err(|_| "invalid IP address: receiving")?, + ) } else { None }; - let dns_server_ip = if let Some(ip) = dns_server_ip { + let giganto_publish_ip = if let Some(ip) = giganto_publish_ip { Some( ip.as_str() .parse::() - .map_err(|_| "invalid IP address: dns server")?, + .map_err(|_| "invalid IP address: sending")?, ) } else { None }; - let syslog_server_ip = if let Some(ip) = syslog_server_ip { + let giganto_graphql_ip = if let Some(ip) = giganto_graphql_ip { Some( ip.as_str() .parse::() - .map_err(|_| "invalid IP address: syslog server")?, + .map_err(|_| "invalid IP address: web")?, ) } else { None }; - let ntp_server_ip = if let Some(ip) = ntp_server_ip { + let reconverge_review_ip = if let Some(ip) = reconverge_review_ip { Some( ip.as_str() .parse::() - .map_err(|_| "invalid IP address: ntp server")?, + .map_err(|_| "invalid IP address: administration")?, ) } else { None }; + let reconverge_giganto_ip = if let Some(ip) = reconverge_giganto_ip { + Some( + ip.as_str() + .parse::() + .map_err(|_| "invalid IP address: storage")?, + ) + } else { + None + }; + let hog_review_ip = if let Some(ip) = hog_review_ip { + Some( + ip.as_str() + .parse::() + .map_err(|_| "invalid IP address: administration")?, + ) + } else { + None + }; + let hog_giganto_ip = if let Some(ip) = hog_giganto_ip { + Some( + ip.as_str() + .parse::() + .map_err(|_| "invalid IP address: storage")?, + ) + } else { + None + }; + let value = Node { id: u32::MAX, customer_id, name, description, hostname, - nics: new_nics, - disk_usage_limit, - allow_access_from, - - review_id, - - ssh_port, - dns_server_ip, - dns_server_port, - syslog_server_ip, - syslog_server_port, review, - review_nics, review_port, review_web_port, - ntp_server_ip, - ntp_server_port, piglet, + piglet_giganto_ip, + piglet_giganto_port, + 
piglet_review_ip, + piglet_review_port, + save_packets, + http, + office, + exe, + pdf, + html, + txt, + smtp_eml, + ftp, giganto, - giganto_ingestion_nics, + giganto_ingestion_ip, giganto_ingestion_port, - giganto_publish_nics, + giganto_publish_ip, giganto_publish_port, - giganto_graphql_nics, + giganto_graphql_ip, giganto_graphql_port, + retention_period, reconverge, + reconverge_review_ip, + reconverge_review_port, + reconverge_giganto_ip, + reconverge_giganto_port, hog, + hog_review_ip, + hog_review_port, + hog_giganto_ip, + hog_giganto_port, + protocols, + protocol_list, + sensors, + sensor_list, creation_time: Utc::now(), }; let id = map.insert(value)?; - (id, customer_id, review) + (id, customer_id) }; if review { let store = crate::graphql::get_store(ctx).await?; @@ -256,7 +307,7 @@ impl NodeMutation { /// Updates the given node, returning the node ID that was updated. #[graphql(guard = "RoleGuard::new(Role::SystemAdministrator) .or(RoleGuard::new(Role::SecurityAdministrator))")] - async fn replace_node( + async fn update_node( &self, ctx: &Context<'_>, id: ID, @@ -318,106 +369,105 @@ pub fn get_node_settings(db: &Store) -> Result> { .deserialize::(value.as_ref()) .map_err(|_| "invalid value in database")?; - let accesslist = node.allow_access_from.clone(); + let piglet: Option = if node.piglet { + Some(ServerAddress { + web_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), + rpc_addr: SocketAddr::new( + node.piglet_review_ip + .unwrap_or_else(|| IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), + node.piglet_review_port.unwrap_or_default(), + ), + pub_addr: SocketAddr::new( + node.piglet_giganto_ip + .unwrap_or_else(|| IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), + node.piglet_giganto_port.unwrap_or_default(), + ), + ing_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), + }) + } else { + None + }; let giganto = if node.giganto { Some(ServerAddress { - web_addr: get_sockaddr( - &node.nics, - &node.giganto_graphql_nics, + web_addr: SocketAddr::new( + node.giganto_graphql_ip + .unwrap_or_else(|| IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), node.giganto_graphql_port.unwrap_or_default(), ), - rpc_addr: get_sockaddr( - &node.nics, - &node.giganto_ingestion_nics, + rpc_addr: SocketAddr::new( + node.giganto_ingestion_ip + .unwrap_or_else(|| IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), node.giganto_ingestion_port.unwrap_or_default(), ), - pub_addr: get_sockaddr( - &node.nics, - &node.giganto_publish_nics, + pub_addr: SocketAddr::new( + node.giganto_publish_ip + .unwrap_or_else(|| IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), node.giganto_publish_port.unwrap_or_default(), ), + ing_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), }) } else { None }; - let ntp = if let Some(ntp_server_ip) = node.ntp_server_ip { - Some(SocketAddr::new( - ntp_server_ip, - node.ntp_server_port.unwrap_or_default(), - )) + + let review = if node.review { + Some(ServerPort { + rpc_port: node.review_port.unwrap_or_default(), + web_port: node.review_web_port.unwrap_or_default(), + }) } else { None }; - let review = if node.review { + let reconverge = if node.reconverge { Some(ServerAddress { - web_addr: get_sockaddr( - &node.nics, - &node.review_nics, - node.review_web_port.unwrap_or_default(), + web_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), + rpc_addr: SocketAddr::new( + node.reconverge_review_ip + .unwrap_or_else(|| IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), + node.reconverge_review_port.unwrap_or_default(), ), - rpc_addr: get_sockaddr( - &node.nics, - &node.review_nics, - 
node.review_port.unwrap_or_default(), + pub_addr: SocketAddr::new( + node.reconverge_giganto_ip + .unwrap_or_else(|| IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), + node.reconverge_giganto_port.unwrap_or_default(), ), - pub_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), + ing_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), }) } else { None }; - let ssh = if node.ssh_port > 0 { - Some(node.ssh_port) - } else { - None - }; - // TODO: multiple syslog servers should be configurable. - let syslog = if let Some(syslog_server_ip) = node.syslog_server_ip { - Some(vec![( - true, - SocketAddr::new( - syslog_server_ip, - node.syslog_server_port.unwrap_or_default(), + let hog = if node.hog { + Some(ServerAddress { + web_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), + rpc_addr: SocketAddr::new( + node.hog_review_ip + .unwrap_or_else(|| IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), + node.hog_review_port.unwrap_or_default(), + ), + pub_addr: SocketAddr::new( + node.hog_giganto_ip + .unwrap_or_else(|| IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))), + node.hog_giganto_port.unwrap_or_default(), ), - )]) + ing_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), + }) } else { None }; output.push(Setting { name: node.hostname, - nics: node.nics, - accesslist, - disklimit: node.disk_usage_limit.unwrap_or_default(), + piglet, giganto, - hog: node.hog, - ntp, - piglet: node.piglet, - reconverge: node.reconverge, + hog, + reconverge, review, - ssh, - syslog, }); } Ok(output) } -// if target has multiple values, it assumes that the server address was chosen as 0.0.0.0 address -fn get_sockaddr(nics: &[Nic], target: &Option>, port: PortNumber) -> SocketAddr { - let mut ret = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), port); - if let Some(target_nics) = &target { - if target_nics.len() == 1 { - if let Some(first) = target_nics.first() { - ret = nics.iter().find(|nic| &nic.name == first).map_or( - SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), port), - |nic| SocketAddr::new(IpAddr::V4(nic.interface.addr()), port), - ); - } - } - } - ret -} - /// Returns the customer id of review node. 
/// /// # Errors diff --git a/src/graphql/node/input.rs b/src/graphql/node/input.rs index bf33a322..826b20f3 100644 --- a/src/graphql/node/input.rs +++ b/src/graphql/node/input.rs @@ -1,8 +1,8 @@ -use super::{NicInput, Node, PortNumber}; -use anyhow::{bail, Context as AnyhowContext}; +use super::{Node, PortNumber}; //NicInput +use anyhow::Context as AnyhowContext; //bail use async_graphql::{types::ID, InputObject, Result}; use review_database::IndexedMapUpdate; -use std::net::IpAddr; +use std::{collections::HashMap, net::IpAddr}; #[allow(clippy::module_name_repetitions)] #[derive(Clone, InputObject)] @@ -12,38 +12,50 @@ pub(super) struct NodeInput { pub customer_id: ID, pub description: String, pub hostname: String, - pub nics: Vec, - pub disk_usage_limit: Option, - pub allow_access_from: Option>, - - pub review_id: Option, - - pub ssh_port: PortNumber, - pub dns_server_ip: Option, - pub dns_server_port: Option, - pub syslog_server_ip: Option, - pub syslog_server_port: Option, pub review: bool, - pub review_nics: Option>, pub review_port: Option, pub review_web_port: Option, - pub ntp_server_ip: Option, - pub ntp_server_port: Option, pub piglet: bool, + pub piglet_giganto_ip: Option, + pub piglet_giganto_port: Option, + pub piglet_review_ip: Option, + pub piglet_review_port: Option, + pub save_packets: bool, + pub http: bool, + pub office: bool, + pub exe: bool, + pub pdf: bool, + pub html: bool, + pub txt: bool, + pub smtp_eml: bool, + pub ftp: bool, pub giganto: bool, - pub giganto_ingestion_nics: Option>, + pub giganto_ingestion_ip: Option, pub giganto_ingestion_port: Option, - pub giganto_publish_nics: Option>, + pub giganto_publish_ip: Option, pub giganto_publish_port: Option, - pub giganto_graphql_nics: Option>, + pub giganto_graphql_ip: Option, pub giganto_graphql_port: Option, + pub retention_period: Option, pub reconverge: bool, + pub reconverge_review_ip: Option, + pub reconverge_review_port: Option, + pub reconverge_giganto_ip: Option, + pub reconverge_giganto_port: Option, pub hog: bool, + pub hog_review_ip: Option, + pub hog_review_port: Option, + pub hog_giganto_ip: Option, + pub hog_giganto_port: Option, + pub protocols: bool, + pub protocol_list: HashMap, + pub sensors: bool, + pub sensor_list: HashMap, } impl IndexedMapUpdate for NodeInput { @@ -66,91 +78,91 @@ impl IndexedMapUpdate for NodeInput { value.description.push_str(&self.description); value.hostname.clear(); value.hostname.push_str(&self.hostname); - value.nics.clear(); - for n in &self.nics { - value - .nics - .push(n.try_into().context("invalid IP address: nic")?); - } - let original_count = value.nics.len(); - value.nics.sort_by(|a, b| a.name.cmp(&b.name)); - value.nics.dedup_by(|a, b| a.name == b.name); - if value.nics.len() != original_count { - bail!("duplicate network interface name"); - } - value.disk_usage_limit = self.disk_usage_limit; - if let Some(allow_access_from) = self.allow_access_from.as_ref() { - let mut allow = Vec::::with_capacity(allow_access_from.len()); - for ip in allow_access_from { - allow.push(ip.parse::().context("invalid IP address: access")?); - } - allow.sort_unstable(); - allow.dedup(); - value.allow_access_from = Some(allow); - } else { - value.allow_access_from = None; - } - // review server - value.review_id = if let Some(id) = self.review_id.as_ref() { - Some(id.parse::().context("invalid review ID")?) 
- } else { - None - }; + // review + value.review = self.review; + value.review_port = self.review_port; + value.review_web_port = self.review_web_port; - // communication - value.ssh_port = self.ssh_port; - value.dns_server_ip = if let Some(ip) = self.dns_server_ip.as_deref() { - Some( - ip.parse::() - .context("invalid IP address: dns server")?, - ) + // piglet + value.piglet = self.piglet; + value.piglet_giganto_ip = if let Some(ip) = self.piglet_giganto_ip.as_deref() { + Some(ip.parse::().context("invalid IP address")?) } else { None }; - value.dns_server_port = self.dns_server_port; - value.syslog_server_ip = if let Some(ip) = self.syslog_server_ip.as_deref() { - Some( - ip.parse::() - .context("invalid IP address: syslog server")?, - ) + value.piglet_giganto_port = self.piglet_giganto_port; + value.piglet_review_ip = if let Some(ip) = self.piglet_review_ip.as_deref() { + Some(ip.parse::().context("invalid IP address")?) } else { None }; - value.syslog_server_port = self.syslog_server_port; + value.piglet_review_port = self.piglet_review_port; + value.save_packets = self.save_packets; + value.http = self.http; + value.office = self.office; + value.exe = self.exe; + value.pdf = self.pdf; + value.html = self.html; + value.txt = self.txt; + value.smtp_eml = self.smtp_eml; + value.ftp = self.ftp; - // review - value.review = self.review; - value.review_nics = self.review_nics.clone(); - value.review_port = self.review_port; - value.review_web_port = self.review_web_port; - value.ntp_server_ip = if let Some(ip) = self.ntp_server_ip.as_deref() { - Some( - ip.parse::() - .context("invalid IP address: ntp server")?, - ) + // giganto + value.giganto = self.giganto; + value.giganto_ingestion_ip = if let Some(ip) = self.giganto_ingestion_ip.as_deref() { + Some(ip.parse::().context("invalid IP address")?) } else { None }; - value.ntp_server_port = self.ntp_server_port; - - // piglet - value.piglet = self.piglet; - - // giganto - value.giganto = self.giganto; - value.giganto_ingestion_nics = self.giganto_ingestion_nics.clone(); value.giganto_ingestion_port = self.giganto_ingestion_port; - value.giganto_publish_nics = self.giganto_publish_nics.clone(); + value.giganto_publish_ip = if let Some(ip) = self.giganto_publish_ip.as_deref() { + Some(ip.parse::().context("invalid IP address")?) + } else { + None + }; value.giganto_publish_port = self.giganto_publish_port; - value.giganto_graphql_nics = self.giganto_graphql_nics.clone(); + value.giganto_graphql_ip = if let Some(ip) = self.giganto_graphql_ip.as_deref() { + Some(ip.parse::().context("invalid IP address")?) + } else { + None + }; value.giganto_graphql_port = self.giganto_graphql_port; + value.retention_period = self.retention_period; // reconverge value.reconverge = self.reconverge; + value.reconverge_review_ip = if let Some(ip) = self.reconverge_review_ip.as_deref() { + Some(ip.parse::().context("invalid IP address")?) + } else { + None + }; + value.reconverge_review_port = self.reconverge_review_port; + value.reconverge_giganto_ip = if let Some(ip) = self.reconverge_giganto_ip.as_deref() { + Some(ip.parse::().context("invalid IP address")?) + } else { + None + }; + value.reconverge_giganto_port = self.reconverge_giganto_port; // hog value.hog = self.hog; + value.hog_review_ip = if let Some(ip) = self.hog_review_ip.as_deref() { + Some(ip.parse::().context("invalid IP address")?) 
+ } else { + None + }; + value.hog_review_port = self.hog_review_port; + value.hog_giganto_ip = if let Some(ip) = self.hog_giganto_ip.as_deref() { + Some(ip.parse::().context("invalid IP address")?) + } else { + None + }; + value.hog_giganto_port = self.hog_giganto_port; + value.protocols = self.protocols; + value.protocol_list = self.protocol_list.clone(); + value.sensors = self.sensors; + value.sensor_list = self.sensor_list.clone(); Ok(value) } @@ -174,70 +186,102 @@ impl IndexedMapUpdate for NodeInput { if self.hostname != value.hostname { return false; } - if self.nics.len() != value.nics.len() { + + // review + if self.review != value.review { return false; } - if !self - .nics - .iter() - .zip(value.nics.iter()) - .all(|(lhs, rhs)| lhs == rhs) - { + if self.review_port != value.review_port { return false; } - if self.disk_usage_limit != value.disk_usage_limit { + if self.review_web_port != value.review_web_port { return false; } - if let (Some(v), Some(value_allow_access_from)) = - (self.allow_access_from.as_ref(), &value.allow_access_from) + + // piglet + if self.piglet != value.piglet { + return false; + } + if let (Some(ip_self), Some(ip_value)) = + (self.piglet_giganto_ip.as_deref(), value.piglet_giganto_ip) { - if v.len() != value_allow_access_from.len() { + if ip_self + .parse::() + .map_or(true, |ip_self| ip_self != ip_value) + { return false; } - if !v - .iter() - .zip(value_allow_access_from.iter()) - .all(|(lhs, rhs)| lhs.parse::().map_or(true, |lhs| lhs == *rhs)) + } else if self.piglet_giganto_ip.is_some() || value.piglet_giganto_ip.is_some() { + return false; + } + if self.piglet_giganto_port != value.piglet_giganto_port { + return false; + } + if let (Some(ip_self), Some(ip_value)) = + (self.piglet_review_ip.as_deref(), value.piglet_review_ip) + { + if ip_self + .parse::() + .map_or(true, |ip_self| ip_self != ip_value) { return false; } - } else if self.allow_access_from.is_some() || value.allow_access_from.is_some() { + } else if self.piglet_review_ip.is_some() || value.piglet_review_ip.is_some() { return false; } - - // review server - let same_review_id = match (self.review_id.as_ref(), value.review_id.as_ref()) { - (Some(self_id), Some(value_id)) => self_id - .parse::() - .map_or(false, |self_id| self_id == *value_id), - (None, None) => true, - _ => false, - }; - if !same_review_id { + if self.piglet_review_port != value.piglet_review_port { + return false; + } + if self.save_packets != value.save_packets { + return false; + } + if self.http != value.http { + return false; + } + if self.office != value.office { + return false; + } + if self.exe != value.exe { + return false; + } + if self.pdf != value.pdf { + return false; + } + if self.html != value.html { + return false; + } + if self.txt != value.txt { + return false; + } + if self.smtp_eml != value.smtp_eml { + return false; + } + if self.ftp != value.ftp { return false; } - // communication - if self.ssh_port != value.ssh_port { + // giganto + if self.giganto != value.giganto { return false; } - if let (Some(ip_self), Some(ip_value)) = - (self.dns_server_ip.as_deref(), value.dns_server_ip) - { + if let (Some(ip_self), Some(ip_value)) = ( + self.giganto_ingestion_ip.as_deref(), + value.giganto_ingestion_ip, + ) { if ip_self .parse::() .map_or(true, |ip_self| ip_self != ip_value) { return false; } - } else if self.dns_server_ip.is_some() || value.dns_server_ip.is_some() { + } else if self.giganto_ingestion_ip.is_some() || value.giganto_ingestion_ip.is_some() { return false; } - if self.dns_server_port != 
value.dns_server_port { + if self.giganto_ingestion_port != value.giganto_ingestion_port { return false; } if let (Some(ip_self), Some(ip_value)) = - (self.syslog_server_ip.as_deref(), value.syslog_server_ip) + (self.giganto_publish_ip.as_deref(), value.giganto_publish_ip) { if ip_self .parse::() @@ -245,102 +289,115 @@ impl IndexedMapUpdate for NodeInput { { return false; } - } else if self.syslog_server_ip.is_some() || value.syslog_server_ip.is_some() { + } else if self.giganto_publish_ip.is_some() || value.giganto_publish_ip.is_some() { return false; } - if self.syslog_server_port != value.syslog_server_port { + if self.giganto_publish_port != value.giganto_publish_port { return false; } - - // review - if self.review != value.review { + if let (Some(ip_self), Some(ip_value)) = + (self.giganto_graphql_ip.as_deref(), value.giganto_graphql_ip) + { + if ip_self + .parse::() + .map_or(true, |ip_self| ip_self != ip_value) + { + return false; + } + } else if self.giganto_graphql_ip.is_some() || value.giganto_graphql_ip.is_some() { return false; } - if !nics_eq(self.review_nics.as_ref(), value.review_nics.as_ref()) { + if self.giganto_graphql_port != value.giganto_graphql_port { return false; } - if self.review_port != value.review_port { + if self.retention_period != value.retention_period { return false; } - if self.review_web_port != value.review_web_port { + + // reconverge + if self.reconverge != value.reconverge { return false; } - if let (Some(ip_self), Some(ip_value)) = - (self.ntp_server_ip.as_deref(), value.ntp_server_ip) - { + if let (Some(ip_self), Some(ip_value)) = ( + self.reconverge_review_ip.as_deref(), + value.reconverge_review_ip, + ) { if ip_self .parse::() .map_or(true, |ip_self| ip_self != ip_value) { return false; } - } else if self.ntp_server_ip.is_some() || value.ntp_server_ip.is_some() { + } else if self.reconverge_review_ip.is_some() || value.reconverge_review_ip.is_some() { return false; } - if self.ntp_server_port != value.ntp_server_port { + if self.reconverge_review_port != value.reconverge_review_port { return false; } - - // piglet - if self.piglet != value.piglet { + if let (Some(ip_self), Some(ip_value)) = ( + self.reconverge_giganto_ip.as_deref(), + value.reconverge_giganto_ip, + ) { + if ip_self + .parse::() + .map_or(true, |ip_self| ip_self != ip_value) + { + return false; + } + } else if self.reconverge_giganto_ip.is_some() || value.reconverge_giganto_ip.is_some() { + return false; + } + if self.reconverge_giganto_port != value.reconverge_giganto_port { return false; } - // giganto - if self.giganto != value.giganto { + // hog + if self.hog != value.hog { return false; } - if !nics_eq( - self.giganto_ingestion_nics.as_ref(), - value.giganto_ingestion_nics.as_ref(), - ) { + if let (Some(ip_self), Some(ip_value)) = + (self.hog_review_ip.as_deref(), value.hog_review_ip) + { + if ip_self + .parse::() + .map_or(true, |ip_self| ip_self != ip_value) + { + return false; + } + } else if self.hog_review_ip.is_some() || value.hog_review_ip.is_some() { return false; } - if self.giganto_ingestion_port != value.giganto_ingestion_port { + if self.hog_review_port != value.hog_review_port { return false; } - if !nics_eq( - self.giganto_publish_nics.as_ref(), - value.giganto_publish_nics.as_ref(), - ) { + if let (Some(ip_self), Some(ip_value)) = + (self.hog_giganto_ip.as_deref(), value.hog_giganto_ip) + { + if ip_self + .parse::() + .map_or(true, |ip_self| ip_self != ip_value) + { + return false; + } + } else if self.hog_giganto_ip.is_some() || 
value.hog_giganto_ip.is_some() { return false; } - if self.giganto_publish_port != value.giganto_publish_port { + if self.hog_giganto_port != value.hog_giganto_port { return false; } - if !nics_eq( - self.giganto_graphql_nics.as_ref(), - value.giganto_graphql_nics.as_ref(), - ) { + if self.protocols != value.protocols { return false; } - if self.giganto_graphql_port != value.giganto_graphql_port { + if self.protocol_list != value.protocol_list { return false; } - - // reconverge - if self.reconverge != value.reconverge { + if self.sensors != value.sensors { return false; } - - // hog - if self.hog != value.hog { + if self.sensor_list != value.sensor_list { return false; } true } } - -fn nics_eq(lhs: Option<&Vec>, rhs: Option<&Vec>) -> bool { - match (lhs, rhs) { - (None, None) => true, - (Some(l), Some(r)) => { - let (mut l, mut r) = (l.clone(), r.clone()); - l.sort_unstable(); - r.sort_unstable(); - l == r - } - _ => false, - } -}
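
Both `create_node` in `crud.rs` and `NodeInput::apply` in `input.rs` repeat the same block for every per-application address: take an optional string, parse it into an `IpAddr`, and map a parse failure to an "invalid IP address" error (with a role label such as "storage" or "administration" in `create_node`). A minimal sketch of that pattern as a free function, assuming a hypothetical helper name `parse_optional_ip` that is not part of this patch:

```rust
use std::net::IpAddr;

/// Hypothetical helper (not in this patch): parses an optional IP string,
/// attaching a role label ("storage", "administration", ...) to the error,
/// mirroring the per-field blocks in `create_node`.
fn parse_optional_ip(ip: Option<&str>, role: &str) -> Result<Option<IpAddr>, String> {
    ip.map(|s| {
        s.parse::<IpAddr>()
            .map_err(|_| format!("invalid IP address: {role}"))
    })
    .transpose()
}

fn main() {
    let parsed = parse_optional_ip(Some("192.168.0.1"), "storage").unwrap();
    assert_eq!(parsed, Some("192.168.0.1".parse::<IpAddr>().unwrap()));
    assert!(parse_optional_ip(Some("not-an-ip"), "web").is_err());
    assert_eq!(parse_optional_ip(None, "web").unwrap(), None);
}
```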
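
In `get_node_settings`, each enabled application's `ServerAddress` is assembled the same way: a configured IP/port pair becomes a `SocketAddr`, and an unused or unset side falls back to `0.0.0.0:0` through `Ipv4Addr::UNSPECIFIED` and `unwrap_or_default()`. A small sketch of that fallback behaviour, using `to_sockaddr` as a hypothetical helper rather than code from this patch:

```rust
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

/// Hypothetical condensation of the address construction repeated in
/// `get_node_settings`: a missing IP or port falls back to 0.0.0.0:0.
fn to_sockaddr(ip: Option<IpAddr>, port: Option<u16>) -> SocketAddr {
    SocketAddr::new(
        ip.unwrap_or(IpAddr::V4(Ipv4Addr::UNSPECIFIED)),
        port.unwrap_or_default(),
    )
}

fn main() {
    let rpc = to_sockaddr("10.1.1.5".parse::<IpAddr>().ok(), Some(38390));
    let unused = to_sockaddr(None, None);
    assert_eq!(rpc.to_string(), "10.1.1.5:38390");
    assert_eq!(unused.to_string(), "0.0.0.0:0");
}
```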
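
`NodeInput::verify` compares every optional input IP string against the stored `IpAddr` with the same three-way rule: equal when both are absent, or when both are present and the string parses to the same address; anything else is a mismatch. A condensed sketch of that rule; `optional_ip_eq` is a hypothetical name used only for illustration:

```rust
use std::net::IpAddr;

/// Hypothetical helper mirroring the comparison pattern in `NodeInput::verify`:
/// match only when both sides are absent, or both are present and the input
/// string parses to the stored address.
fn optional_ip_eq(input: Option<&str>, stored: Option<IpAddr>) -> bool {
    match (input, stored) {
        (Some(s), Some(ip)) => s.parse::<IpAddr>().map_or(false, |parsed| parsed == ip),
        (None, None) => true,
        _ => false,
    }
}

fn main() {
    let stored: Option<IpAddr> = Some("10.0.0.1".parse().unwrap());
    assert!(optional_ip_eq(Some("10.0.0.1"), stored));
    assert!(!optional_ip_eq(Some("10.0.0.2"), stored));
    assert!(!optional_ip_eq(None, stored));
    assert!(optional_ip_eq(None, None));
}
```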