diff --git a/src-tauri/src/main.rs b/src-tauri/src/main.rs index 220af3c..e2d308b 100644 --- a/src-tauri/src/main.rs +++ b/src-tauri/src/main.rs @@ -3,7 +3,7 @@ windows_subsystem = "windows" )] -use sysinfo::System; +use sysinfo::{System, Pid}; use std::sync::Mutex; use std::process::Command; use tauri::State; @@ -195,7 +195,6 @@ fn collect_snapshot( let is_syspulse = syspulse_pids.contains(&pid_u32); let mut memory = rss; - // PSS collection is expensive. Throttle it heavily. if collect_pss && (rss > 20 * 1024 * 1024 || is_syspulse) { let needs_refresh = match pss_cache.get(&pid_u32) { Some((_, last)) => (now - *last).num_seconds() > 5, @@ -239,11 +238,7 @@ fn generate_report(start_time: DateTime<Utc>, snapshots: Vec<Snapshot>, mode: Pr let timeline: Vec<TimelinePoint> = snapshots.iter().map(|s| { let cpu_total = s.cpu_usage.iter().sum::<f32>() / s.cpu_usage.len() as f32; let mem_total_gb = s.used_memory as f32 / 1024.0 / 1024.0 / 1024.0; - - let profiler_stats = s.processes.iter() - .filter(|p| p.is_syspulse) - .fold((0.0, 0), |acc, p| (acc.0 + p.cpu_usage, acc.1 + p.memory)); - + let profiler_stats = s.processes.iter().filter(|p| p.is_syspulse).fold((0.0, 0), |acc, p| (acc.0 + p.cpu_usage, acc.1 + p.memory)); TimelinePoint { time: s.timestamp.format("%H:%M:%S").to_string(), cpu_total, @@ -256,9 +251,7 @@ fn generate_report(start_time: DateTime<Utc>, snapshots: Vec<Snapshot>, mode: Pr let mut target_name = None; if let Some(tpid) = target_pid { if let Some(snapshot) = snapshots.first() { - if let Some(p) = snapshot.processes.iter().find(|p| p.pid == tpid) { - target_name = Some(p.name.clone()); - } + if let Some(p) = snapshot.processes.iter().find(|p| p.pid == tpid) { target_name = Some(p.name.clone()); } } } @@ -267,21 +260,9 @@ fn generate_report(start_time: DateTime<Utc>, snapshots: Vec<Snapshot>, mode: Pr for snapshot in &snapshots { for proc in &snapshot.processes { - let entry = pid_map.entry(proc.pid).or_insert_with(|| ( - proc.name.clone(), - proc.parent_pid, - Vec::new(), - 0.0, - 0.0, - proc.is_syspulse, - false - 
)); + let entry = pid_map.entry(proc.pid).or_insert_with(|| (proc.name.clone(), proc.parent_pid, Vec::new(), 0.0, 0.0, proc.is_syspulse, false)); let mem_mb = proc.memory as f32 / 1024.0 / 1024.0; - entry.2.push(ProcessHistoryPoint { - time: snapshot.timestamp.format("%H:%M:%S").to_string(), - cpu_usage: proc.cpu_usage, - memory_mb: mem_mb, - }); + entry.2.push(ProcessHistoryPoint { time: snapshot.timestamp.format("%H:%M:%S").to_string(), cpu_usage: proc.cpu_usage, memory_mb: mem_mb }); if proc.cpu_usage > entry.3 { entry.3 = proc.cpu_usage; } if mem_mb > entry.4 { entry.4 = mem_mb; } if proc.status.contains("Zombie") { entry.6 = true; } @@ -294,43 +275,20 @@ fn generate_report(start_time: DateTime<Utc>, snapshots: Vec<Snapshot>, mode: Pr let mut warnings = Vec::new(); if is_zombie { warnings.push("Zombie".to_string()); } if peak_cpu > 80.0 { warnings.push("High Peak".to_string()); } - (pid, AggregatedProcess { - pid, - name, - avg_cpu: total_cpu / num_snapshots, - peak_cpu, - avg_memory_mb: total_mem / num_snapshots, - peak_memory_mb: peak_mem, - inclusive_avg_cpu: 0.0, - inclusive_avg_memory_mb: 0.0, - instance_count: 1, - warnings, - history, - is_syspulse, - children: Vec::new(), - }) + (pid, AggregatedProcess { pid, name, avg_cpu: total_cpu / num_snapshots, peak_cpu, avg_memory_mb: total_mem / num_snapshots, peak_memory_mb: peak_mem, inclusive_avg_cpu: 0.0, inclusive_avg_memory_mb: 0.0, instance_count: 1, warnings, history, is_syspulse, children: Vec::new() }) }).collect(); let mut child_to_parent = HashMap::new(); for snapshot in &snapshots { for proc in &snapshot.processes { - if let Some(ppid) = proc.parent_pid { - if nodes.contains_key(&ppid) { - child_to_parent.insert(proc.pid, ppid); - } - } + if let Some(ppid) = proc.parent_pid { if nodes.contains_key(&ppid) { child_to_parent.insert(proc.pid, ppid); } } } } let mut child_map: HashMap<u32, Vec<u32>> = HashMap::new(); - for (&child, &parent) in &child_to_parent { - child_map.entry(parent).or_default().push(child); - } + for (&child, 
&parent) in &child_to_parent { child_map.entry(parent).or_default().push(child); } - let root_pids: Vec<u32> = nodes.keys() - .filter(|pid| !child_to_parent.contains_key(pid)) - .cloned() - .collect(); + let root_pids: Vec<u32> = nodes.keys().filter(|pid| !child_to_parent.contains_key(pid)).cloned().collect(); fn build_node(pid: u32, nodes: &mut HashMap<u32, AggregatedProcess>, child_map: &HashMap<u32, Vec<u32>>) -> Option<AggregatedProcess> { let mut node = nodes.remove(&pid)?; @@ -350,97 +308,47 @@ fn generate_report(start_time: DateTime<Utc>, snapshots: Vec<Snapshot>, mode: Pr } let mut final_roots = Vec::new(); - for pid in root_pids { - if let Some(root_node) = build_node(pid, &mut nodes, &child_map) { - final_roots.push(root_node); - } - } + for pid in root_pids { if let Some(root_node) = build_node(pid, &mut nodes, &child_map) { final_roots.push(root_node); } } let remaining_pids: Vec<u32> = nodes.keys().cloned().collect(); - for pid in remaining_pids { - if let Some(node) = build_node(pid, &mut nodes, &child_map) { - final_roots.push(node); - } - } + for pid in remaining_pids { if let Some(node) = build_node(pid, &mut nodes, &child_map) { final_roots.push(node); } } if mode == ProfilingMode::Global { - // Global mode aggregation: Group by NAME, and flattened let mut name_groups: HashMap<String, AggregatedProcess> = HashMap::new(); - fn flatten_to_groups(node: AggregatedProcess, groups: &mut HashMap<String, AggregatedProcess>) { let entry = groups.entry(node.name.clone()).or_insert_with(|| { let mut base = node.clone(); - base.children = Vec::new(); - base.instance_count = 0; - base.inclusive_avg_cpu = 0.0; - base.inclusive_avg_memory_mb = 0.0; - base.avg_cpu = 0.0; - base.avg_memory_mb = 0.0; + base.children = Vec::new(); base.instance_count = 0; base.inclusive_avg_cpu = 0.0; base.inclusive_avg_memory_mb = 0.0; base.avg_cpu = 0.0; base.avg_memory_mb = 0.0; base }); - - entry.avg_cpu += node.avg_cpu; - entry.avg_memory_mb += node.avg_memory_mb; - entry.instance_count += 1; + entry.avg_cpu += node.avg_cpu; entry.avg_memory_mb += node.avg_memory_mb; entry.instance_count += 1; if node.peak_cpu > 
entry.peak_cpu { entry.peak_cpu = node.peak_cpu; } if node.peak_memory_mb > entry.peak_memory_mb { entry.peak_memory_mb = node.peak_memory_mb; } - - for child in node.children { - flatten_to_groups(child, groups); - } + for child in node.children { flatten_to_groups(child, groups); } } - - for root in final_roots { - flatten_to_groups(root, &mut name_groups); - } - + for root in final_roots { flatten_to_groups(root, &mut name_groups); } let mut flattened: Vec<AggregatedProcess> = name_groups.into_values().collect(); flattened.sort_by(|a, b| b.avg_cpu.partial_cmp(&a.avg_cpu).unwrap_or(std::cmp::Ordering::Equal)); - - Report { - start_time: start_time.to_rfc3339(), - end_time: end_time.to_rfc3339(), - duration_seconds: duration, - mode, - target_name, - timeline, - aggregated_processes: flattened, - } + Report { start_time: start_time.to_rfc3339(), end_time: end_time.to_rfc3339(), duration_seconds: duration, mode, target_name, timeline, aggregated_processes: flattened } } else { - // Targeted mode: Return only the target root(s) and their hierarchy let mut targeted_roots = Vec::new(); if let Some(tpid) = target_pid { fn extract_target(nodes: Vec<AggregatedProcess>, tpid: u32) -> (Vec<AggregatedProcess>, Option<AggregatedProcess>) { - let mut remaining = Vec::new(); - let mut found = None; + let mut remaining = Vec::new(); let mut found = None; for mut node in nodes { - if node.pid == tpid { - found = Some(node); - } else { + if node.pid == tpid { found = Some(node); } + else { let (new_children, sub_found) = extract_target(node.children, tpid); node.children = new_children; - if let Some(f) = sub_found { - found = Some(f); - } + if let Some(f) = sub_found { found = Some(f); } remaining.push(node); } } (remaining, found) } let (_, target_node) = extract_target(final_roots, tpid); - if let Some(t) = target_node { - targeted_roots.push(t); - } - } - - Report { - start_time: start_time.to_rfc3339(), - end_time: end_time.to_rfc3339(), - duration_seconds: duration, - mode, - target_name, - timeline, - aggregated_processes: targeted_roots, + if let 
Some(t) = target_node { targeted_roots.push(t); } } + Report { start_time: start_time.to_rfc3339(), end_time: end_time.to_rfc3339(), duration_seconds: duration, mode, target_name, timeline, aggregated_processes: targeted_roots } } } @@ -451,41 +359,22 @@ fn get_system_stats(state: State<AppState>, minimal: bool) -> SystemStats { let mut sys = state.sys.lock().unwrap(); let mut profiling = state.profiling.lock().unwrap(); let mut pss_cache = state.pss_cache.lock().unwrap(); - let self_pid = std::process::id(); let syspulse_pids = get_syspulse_pids(self_pid, &sys); - let snapshot = collect_snapshot(&mut sys, &syspulse_pids, &mut pss_cache, profiling.is_active); - - if profiling.is_active { - profiling.snapshots.push(snapshot.clone()); - } - + if profiling.is_active { profiling.snapshots.push(snapshot.clone()); } let recording_duration = profiling.start_time.map(|s| (Utc::now() - s).num_seconds() as u64).unwrap_or(0); - - let display_processes = if minimal && !profiling.is_active { - Vec::new() - } else { + let display_processes = if minimal && !profiling.is_active { Vec::new() } else { let mut p = snapshot.processes.clone(); p.sort_by(|a, b| b.cpu_usage.partial_cmp(&a.cpu_usage).unwrap_or(std::cmp::Ordering::Equal)); p.truncate(50); p }; - - SystemStats { - cpu_usage: snapshot.cpu_usage, - total_memory: sys.total_memory(), - used_memory: sys.used_memory(), - processes: display_processes, - is_recording: profiling.is_active, - recording_duration, - } + SystemStats { cpu_usage: snapshot.cpu_usage, total_memory: sys.total_memory(), used_memory: sys.used_memory(), processes: display_processes, is_recording: profiling.is_active, recording_duration } } #[tauri::command] -fn get_initial_report(state: State<AppState>) -> Option<Report> { - state.initial_report.lock().unwrap().clone() -} +fn get_initial_report(state: State<AppState>) -> Option<Report> { state.initial_report.lock().unwrap().clone() } #[tauri::command] fn save_report(report: Report) -> Result<String, String> { @@ -498,21 +387,13 @@ fn save_report(report: Report) -> Result<String, String> 
{ #[tauri::command] fn start_profiling(state: State<AppState>) { let mut profiling = state.profiling.lock().unwrap(); - profiling.is_active = true; - profiling.mode = ProfilingMode::Global; - profiling.target_pid = None; - profiling.start_time = Some(Utc::now()); - profiling.snapshots.clear(); + profiling.is_active = true; profiling.mode = ProfilingMode::Global; profiling.target_pid = None; profiling.start_time = Some(Utc::now()); profiling.snapshots.clear(); } #[tauri::command] fn start_targeted_profiling(state: State<AppState>, pid: u32) { let mut profiling = state.profiling.lock().unwrap(); - profiling.is_active = true; - profiling.mode = ProfilingMode::Targeted; - profiling.target_pid = Some(pid); - profiling.start_time = Some(Utc::now()); - profiling.snapshots.clear(); + profiling.is_active = true; profiling.mode = ProfilingMode::Targeted; profiling.target_pid = Some(pid); profiling.start_time = Some(Utc::now()); profiling.snapshots.clear(); } #[tauri::command] @@ -525,83 +406,33 @@ fn stop_profiling(state: State<AppState>) -> Report { #[tauri::command] fn run_as_admin(command: String) -> Result<String, String> { - let output = Command::new("pkexec") - .arg("sh") - .arg("-c") - .arg(&command) - .output() - .map_err(|e| e.to_string())?; - if output.status.success() { - Ok(String::from_utf8_lossy(&output.stdout).to_string()) - } else { - Err(String::from_utf8_lossy(&output.stderr).to_string()) - } + let output = Command::new("pkexec").arg("sh").arg("-c").arg(&command).output().map_err(|e| e.to_string())?; + if output.status.success() { Ok(String::from_utf8_lossy(&output.stdout).to_string()) } else { Err(String::from_utf8_lossy(&output.stderr).to_string()) } } fn main() { let cli = Cli::parse(); let mut initial_report: Option<Report> = None; - - if let Some(file_path) = cli.file { - if let Ok(content) = fs::read_to_string(file_path) { - if let Ok(report) = serde_json::from_str(&content) { - initial_report = Some(report); - } - } - } - + if let Some(file_path) = cli.file { if let Ok(content) = 
fs::read_to_string(file_path) { if let Ok(report) = serde_json::from_str(&content) { initial_report = Some(report); } } } if cli.headless { println!("⚡ SysPulse: Starting headless profiling for {}s (interval: {}ms)...", cli.duration, cli.interval); - let mut sys = System::new_all(); - let mut pss_cache = HashMap::new(); - let start_time = Utc::now(); - let mut snapshots = Vec::new(); - let self_pid = std::process::id(); - + let mut sys = System::new_all(); let mut pss_cache = HashMap::new(); let start_time = Utc::now(); let mut snapshots = Vec::new(); let self_pid = std::process::id(); for i in 0..(cli.duration * 1000 / cli.interval) { let syspulse_pids = get_syspulse_pids(self_pid, &sys); snapshots.push(collect_snapshot(&mut sys, &syspulse_pids, &mut pss_cache, true)); std::thread::sleep(Duration::from_millis(cli.interval)); - if (i + 1) % (1000 / cli.interval) == 0 { - println!(" Progress: {}/{}s", (i + 1) * cli.interval / 1000, cli.duration); - } + if (i + 1) % (1000 / cli.interval) == 0 { println!(" Progress: {}/{}s", (i + 1) * cli.interval / 1000, cli.duration); } } - let report = generate_report(start_time, snapshots, ProfilingMode::Global, None); let json = serde_json::to_string_pretty(&report).unwrap(); let out_path = cli.output.unwrap_or_else(|| PathBuf::from(format!("syspulse_report_{}.json", Utc::now().format("%Y%m%d_%H%M%S")))); fs::write(&out_path, json).expect("Failed to write report"); println!("✅ Report saved to: {:?}", out_path); - - if cli.gui { - initial_report = Some(report); - } else { - return; - } + if cli.gui { initial_report = Some(report); } else { return; } } - tauri::Builder::default() - .manage(AppState { - sys: Mutex::new(System::new_all()), - profiling: Mutex::new(ProfilingSession { - is_active: false, - mode: ProfilingMode::Global, - target_pid: None, - start_time: None, - snapshots: Vec::new(), - }), - initial_report: Mutex::new(initial_report), - pss_cache: Mutex::new(HashMap::new()), - }) - 
.invoke_handler(tauri::generate_handler![ - get_system_stats, - get_initial_report, - start_profiling, - start_targeted_profiling, - stop_profiling, - run_as_admin, - save_report - ]) + .manage(AppState { sys: Mutex::new(System::new_all()), profiling: Mutex::new(ProfilingSession { is_active: false, mode: ProfilingMode::Global, target_pid: None, start_time: None, snapshots: Vec::new() }), initial_report: Mutex::new(initial_report), pss_cache: Mutex::new(HashMap::new()) }) + .invoke_handler(tauri::generate_handler![get_system_stats, get_initial_report, start_profiling, start_targeted_profiling, stop_profiling, run_as_admin, save_report]) .run(tauri::generate_context!()) .expect("error while running tauri application"); }