use anyhow::{Context, Result};
use regex::Regex;
use serde::{Deserialize, Serialize};
use std::path::{Path, PathBuf};
use std::time::Duration;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    /// OpenTelemetry export configuration
    #[serde(default)]
    pub otlp: OtlpConfig,
    /// Metrics collection configuration
    #[serde(default)]
    pub metrics: MetricsConfig,
    /// Collection interval
    #[serde(default = "default_collection_interval")]
    pub collection_interval_secs: u64,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            otlp: OtlpConfig::default(),
            metrics: MetricsConfig::default(),
            collection_interval_secs: default_collection_interval(),
        }
    }
}

impl Config {
    /// Load configuration from file
    pub fn from_file(path: &Path) -> Result<Self> {
        let content = std::fs::read_to_string(path)
            .with_context(|| format!("Failed to read config file: {}", path.display()))?;
        let mut config: Config = toml::from_str(&content)
            .with_context(|| format!("Failed to parse config file: {}", path.display()))?;

        // Load process filter includes if configured
        if let Some(process_filter) = &config.metrics.process_filter {
            let config_dir = path.parent();
            match process_filter.load_with_includes(config_dir) {
                Ok(loaded_filter) => {
                    config.metrics.process_filter = Some(loaded_filter);
                }
                Err(e) => {
                    tracing::warn!("Failed to load process filter include: {}", e);
                }
            }
        }

        config.validate()?;
        Ok(config)
    }

    /// Validate configuration
    pub fn validate(&self) -> Result<()> {
        if self.collection_interval_secs == 0 {
            anyhow::bail!("Collection interval must be greater than 0");
        }
        self.otlp.validate()?;
        Ok(())
    }

    pub fn collection_interval(&self) -> Duration {
        Duration::from_secs(self.collection_interval_secs)
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OtlpConfig {
    /// OTLP endpoint (e.g., "http://localhost:4317")
    #[serde(default = "default_endpoint")]
    pub endpoint: String,
    /// Export interval in seconds
    #[serde(default = "default_export_interval")]
    pub export_interval_secs: u64,
    /// Service name for the metrics
    #[serde(default = "default_service_name")]
    pub service_name: String,
    /// Service version
    #[serde(default = "default_service_version")]
    pub service_version: String,
    /// Additional resource attributes
    #[serde(default)]
    pub resource_attributes: std::collections::HashMap<String, String>,
    /// Timeout for export operations in seconds
    #[serde(default = "default_timeout")]
    pub export_timeout_secs: u64,
}

impl Default for OtlpConfig {
    fn default() -> Self {
        Self {
            endpoint: default_endpoint(),
            export_interval_secs: default_export_interval(),
            service_name: default_service_name(),
            service_version: default_service_version(),
            resource_attributes: std::collections::HashMap::new(),
            export_timeout_secs: default_timeout(),
        }
    }
}

impl OtlpConfig {
    pub fn export_interval(&self) -> Duration {
        Duration::from_secs(self.export_interval_secs)
    }

    pub fn export_timeout(&self) -> Duration {
        Duration::from_secs(self.export_timeout_secs)
    }

    pub fn validate(&self) -> Result<()> {
        if self.endpoint.is_empty() {
            anyhow::bail!("OTLP endpoint cannot be empty");
        }
        if !self.endpoint.starts_with("http://") && !self.endpoint.starts_with("https://") {
            anyhow::bail!("OTLP endpoint must be a valid HTTP/HTTPS URL");
        }
        if self.export_interval_secs == 0 {
            anyhow::bail!("Export interval must be greater than 0");
        }
        if self.service_name.is_empty() {
            anyhow::bail!("Service name cannot be empty");
        }
        Ok(())
    }
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MetricsConfig {
    /// Export CPU metrics
    #[serde(default = "default_true")]
    pub cpu: bool,
    /// Export memory metrics
    #[serde(default = "default_true")]
    pub memory: bool,
    /// Export network metrics
    #[serde(default = "default_true")]
    pub network: bool,
    /// Export disk metrics
    #[serde(default = "default_true")]
    pub disk: bool,
    /// Export process metrics
    #[serde(default)]
    pub processes: bool,
    /// Export temperature metrics
    #[serde(default = "default_true")]
    pub temperature: bool,
    /// Process filter configuration
    #[serde(default)]
    pub process_filter: Option<ProcessFilterConfig>,
}

impl Default for MetricsConfig {
    fn default() -> Self {
        Self {
            cpu: true,
            memory: true,
            network: true,
            disk: true,
            processes: false,
            temperature: true,
            process_filter: None,
        }
    }
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProcessFilterConfig {
    /// Path to external file containing process filter (optional)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub include: Option<PathBuf>,
    /// Filter mode: "whitelist" or "blacklist"
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filter_mode: Option<ProcessFilterMode>,
    /// Maximum number of processes to report (top N by CPU usage)
    #[serde(default = "default_max_processes")]
    pub max_processes: usize,
    /// List of process names to filter (case-insensitive substring match)
    #[serde(default)]
    pub names: Vec<String>,
    /// List of regex patterns to match process names
    #[serde(default)]
    pub patterns: Vec<String>,
    /// List of process PIDs to filter
    #[serde(default)]
    pub pids: Vec<u32>,
    /// Compiled regex patterns (not serialized, built at runtime)
    #[serde(skip)]
    compiled_patterns: Option<Vec<Regex>>,
}

impl ProcessFilterConfig {
    /// Load and merge process filter from include file if specified
    pub fn load_with_includes(&self, config_dir: Option<&Path>) -> Result<Self> {
        if let Some(include_path) = &self.include {
            // Resolve path relative to config directory if provided
            let full_path = if include_path.is_absolute() {
                include_path.clone()
            } else if let Some(dir) = config_dir {
                dir.join(include_path)
            } else {
                include_path.clone()
            };

            // Read and parse the included file
            let content = std::fs::read_to_string(&full_path).with_context(|| {
                format!("Failed to read process filter file: {}", full_path.display())
            })?;
            let included: ProcessFilterConfig = toml::from_str(&content).with_context(|| {
                format!("Failed to parse process filter file: {}", full_path.display())
            })?;

            // Merge: included file takes precedence
            let mut merged = Self {
                include: None,
                filter_mode: included.filter_mode.or(self.filter_mode),
                max_processes: included.max_processes,
                names: if included.names.is_empty() {
                    self.names.clone()
                } else {
                    included.names
                },
                patterns: if included.patterns.is_empty() {
                    self.patterns.clone()
                } else {
                    included.patterns
                },
                pids: if included.pids.is_empty() {
                    self.pids.clone()
                } else {
                    included.pids
                },
                compiled_patterns: None,
            };
            merged.compile_patterns()?;
            Ok(merged)
        } else {
            let mut result = self.clone();
            result.compile_patterns()?;
            Ok(result)
        }
    }

    /// Compile regex patterns from strings
    fn compile_patterns(&mut self) -> Result<()> {
        if self.patterns.is_empty() {
            self.compiled_patterns = None;
            return Ok(());
        }
        let mut compiled = Vec::new();
        for pattern in &self.patterns {
            let regex = Regex::new(pattern)
                .with_context(|| format!("Invalid regex pattern: {}", pattern))?;
            compiled.push(regex);
        }
        self.compiled_patterns = Some(compiled);
        Ok(())
    }

    /// Check if a process should be included based on filter configuration
    pub fn should_include_process(&self, process_name: &str, process_pid: u32) -> bool {
        let filter_mode = match &self.filter_mode {
            Some(mode) => mode,
            None => return true,
        };

        // Check if process matches the filter lists
        let matches_name = self
            .names
            .iter()
            .any(|name| process_name.to_lowercase().contains(&name.to_lowercase()));
        let matches_pattern = if let Some(patterns) = &self.compiled_patterns {
            patterns.iter().any(|regex| regex.is_match(process_name))
        } else {
            false
        };
        let matches_pid = self.pids.contains(&process_pid);
        let matches = matches_name || matches_pattern || matches_pid;

        match filter_mode {
            ProcessFilterMode::Whitelist => matches,
            ProcessFilterMode::Blacklist => !matches,
        }
    }
}

#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ProcessFilterMode {
    Whitelist,
    Blacklist,
}

// Default functions
fn default_endpoint() -> String {
    "http://localhost:4317".to_string()
}

fn default_export_interval() -> u64 {
    10
}

fn default_collection_interval() -> u64 {
    5
}

fn default_service_name() -> String {
    "symon".to_string()
}

fn default_service_version() -> String {
    env!("CARGO_PKG_VERSION").to_string()
}

fn default_timeout() -> u64 {
    30
}

fn default_true() -> bool {
    true
}

fn default_max_processes() -> usize {
    10
}
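
// A minimal sketch of unit tests for the parsing and validation logic above.
// The expected values mirror the default_* helpers in this file; nothing here
// is taken from outside this module.
#[cfg(test)]
mod config_tests {
    use super::*;

    #[test]
    fn empty_toml_yields_defaults() {
        // Every field carries a serde default, so an empty document parses.
        let config: Config = toml::from_str("").expect("empty config should parse");
        assert_eq!(config.collection_interval_secs, 5);
        assert_eq!(config.otlp.endpoint, "http://localhost:4317");
        assert!(config.validate().is_ok());
    }

    #[test]
    fn invalid_values_are_rejected() {
        // A zero collection interval fails validation.
        let mut config = Config::default();
        config.collection_interval_secs = 0;
        assert!(config.validate().is_err());

        // An endpoint without an http(s) scheme fails validation.
        let mut config = Config::default();
        config.otlp.endpoint = "localhost:4317".to_string();
        assert!(config.validate().is_err());
    }
}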
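
// A sketch of tests for the process filter, parsing a TOML snippet that uses
// only the field names declared on ProcessFilterConfig. The process names and
// PIDs are illustrative, not taken from any real system.
#[cfg(test)]
mod process_filter_tests {
    use super::*;

    #[test]
    fn whitelist_matches_names_patterns_and_pids() {
        let toml_src = r#"
            filter_mode = "whitelist"
            names = ["firefox"]
            patterns = ["^postgres.*"]
            pids = [1234]
        "#;
        let filter: ProcessFilterConfig =
            toml::from_str(toml_src).expect("filter should parse");
        // With no `include`, load_with_includes just compiles the regexes.
        let filter = filter.load_with_includes(None).expect("patterns compile");

        assert!(filter.should_include_process("Firefox", 1)); // case-insensitive name match
        assert!(filter.should_include_process("postgres_worker", 2)); // regex match
        assert!(filter.should_include_process("bash", 1234)); // PID match
        assert!(!filter.should_include_process("bash", 1)); // no match, excluded
    }

    #[test]
    fn missing_filter_mode_includes_everything() {
        let filter = ProcessFilterConfig {
            include: None,
            filter_mode: None,
            max_processes: default_max_processes(),
            names: vec![],
            patterns: vec![],
            pids: vec![],
            compiled_patterns: None,
        };
        assert!(filter.should_include_process("anything", 42));
    }
}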