fix: resolve critical performance issues and UI hangs by optimizing polling and PSS collection

2026-02-23 01:47:13 +01:00
parent e6c5adca5e
commit 9ff0855102
3 changed files with 112 additions and 74 deletions
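
The heart of the change, as the diff below shows: per-process PSS reads go through procfs and are expensive, so they are now gated behind a collect_pss flag, restricted to large (or SysPulse-owned) processes, and served from a time-stamped cache instead of being taken on every poll. A minimal standalone sketch of the throttling pattern, assuming a read_pss stand-in for the commit's get_pss and std::time::Instant in place of the chrono timestamps the real cache uses:

    use std::collections::HashMap;
    use std::time::{Duration, Instant};

    // Stand-in for the commit's get_pss(); the real one reads per-process data from procfs.
    fn read_pss(_pid: u32) -> Option<u64> {
        Some(42 * 1024 * 1024)
    }

    // Return a memory figure for `pid`, refreshing the cached PSS value at most once
    // per `ttl` and falling back to `rss` when PSS is unavailable.
    fn throttled_pss(pid: u32, rss: u64, cache: &mut HashMap<u32, (u64, Instant)>, ttl: Duration) -> u64 {
        // Fresh cache hit: answer without touching procfs at all.
        if let Some((pss, at)) = cache.get(&pid) {
            if at.elapsed() < ttl {
                return *pss;
            }
        }
        // Missing or stale entry: do the one expensive read and remember it.
        match read_pss(pid) {
            Some(pss) => {
                cache.insert(pid, (pss, Instant::now()));
                pss
            }
            None => rss,
        }
    }

    fn main() {
        let mut cache = HashMap::new();
        for _ in 0..2 {
            // The second call lands inside the window and is served from the cache.
            let mem = throttled_pss(1234, 8 * 1024 * 1024, &mut cache, Duration::from_secs(5));
            println!("memory: {} bytes", mem);
        }
    }

With the diff's 5-second window and the default 1-second poll, each large process costs at most one procfs read every five polls, and small processes cost none.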


@@ -8,10 +8,9 @@ use std::sync::Mutex;
 use std::process::Command;
 use tauri::State;
 use serde::{Serialize, Deserialize};
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 use chrono::{DateTime, Utc};
 use std::fs;
-use rayon::prelude::*;
 use clap::Parser;
 use std::path::PathBuf;
 use std::time::Duration;
@@ -21,27 +20,16 @@ use std::time::Duration;
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Cli {
    /// Start profiling immediately without GUI
    #[arg(short, long)]
    headless: bool,
    /// Duration of profiling in seconds (for headless mode)
    #[arg(short, long, default_value_t = 30)]
    duration: u64,
    /// Interval between snapshots in milliseconds
    #[arg(short, long, default_value_t = 1000)]
    interval: u64,
    /// Output path for the JSON report
    #[arg(short, long)]
    output: Option<PathBuf>,
    /// Open the GUI with the collected data after headless profiling
    #[arg(short, long)]
    gui: bool,
    /// Open an existing JSON report file in the GUI
    #[arg(short, long)]
    file: Option<PathBuf>,
}
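
With these clap definitions, a headless capture is invoked along these lines (binary name assumed; the long flags follow from the field names):

    syspulse --headless --duration 60 --interval 500 --output report.json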
@@ -88,6 +76,7 @@ struct AppState {
     sys: Mutex<System>,
     profiling: Mutex<ProfilingSession>,
     initial_report: Mutex<Option<Report>>,
+    pss_cache: Mutex<HashMap<u32, (u64, DateTime<Utc>)>>,
 }

 // --- Report Structures ---
@@ -151,36 +140,68 @@ fn get_pss(pid: u32) -> Option<u64> {
     None
 }

-fn is_syspulse_recursive(pid: u32, self_pid: u32, sys: &System) -> bool {
-    if pid == self_pid { return true; }
-    let mut current = sys.process(Pid::from_u32(pid));
-    while let Some(proc) = current {
-        if let Some(ppid) = proc.parent() {
-            if ppid.as_u32() == self_pid { return true; }
-            current = sys.process(ppid);
-        } else {
-            break;
-        }
-    }
-    false
-}
+fn get_syspulse_pids(self_pid: u32, sys: &System) -> HashSet<u32> {
+    let mut res = HashSet::new();
+    res.insert(self_pid);
+    let mut children_map: HashMap<u32, Vec<u32>> = HashMap::new();
+    for (pid, p) in sys.processes() {
+        if let Some(ppid) = p.parent() {
+            children_map.entry(ppid.as_u32()).or_default().push(pid.as_u32());
+        }
+    }
+    let mut stack = vec![self_pid];
+    while let Some(pid) = stack.pop() {
+        if let Some(children) = children_map.get(&pid) {
+            for &c in children {
+                if res.insert(c) {
+                    stack.push(c);
+                }
+            }
+        }
+    }
+    res
+}

-fn collect_snapshot(sys: &mut System, self_pid: u32) -> Snapshot {
+fn collect_snapshot(
+    sys: &mut System,
+    self_pid: u32,
+    syspulse_pids: &HashSet<u32>,
+    pss_cache: &mut HashMap<u32, (u64, DateTime<Utc>)>,
+    collect_pss: bool
+) -> Snapshot {
     sys.refresh_cpu_all();
     sys.refresh_memory();
     sys.refresh_processes(sysinfo::ProcessesToUpdate::All, true);

     let cpu_usage: Vec<f32> = sys.cpus().iter().map(|cpu| cpu.cpu_usage()).collect();
     let used_memory = sys.used_memory();
+    let now = Utc::now();

     let processes: Vec<ProcessStats> = sys.processes().iter()
-        .par_bridge()
-        .filter_map(|(pid, p)| {
-            let rss = p.memory();
-            if rss == 0 { return None; }
+        .map(|(pid, p)| {
             let pid_u32 = pid.as_u32();
-            let memory = if rss > 10 * 1024 * 1024 { get_pss(pid_u32).unwrap_or(rss) } else { rss };
-            Some(ProcessStats {
+            let rss = p.memory();
+            let is_syspulse = syspulse_pids.contains(&pid_u32);
+            let mut memory = rss;
+            // PSS collection is expensive. Throttle it heavily.
+            if collect_pss && (rss > 20 * 1024 * 1024 || is_syspulse) {
+                let needs_refresh = match pss_cache.get(&pid_u32) {
+                    Some((_, last)) => (now - *last).num_seconds() > 5,
+                    None => true,
+                };
+                if needs_refresh {
+                    if let Some(pss) = get_pss(pid_u32) {
+                        pss_cache.insert(pid_u32, (pss, now));
+                        memory = pss;
+                    }
+                } else if let Some((cached_pss, _)) = pss_cache.get(&pid_u32) {
+                    memory = *cached_pss;
+                }
+            }
+            ProcessStats {
                 pid: pid_u32,
                 parent_pid: p.parent().map(|pp| pp.as_u32()),
                 name: p.name().to_string_lossy().to_string(),
@@ -188,12 +209,12 @@ fn collect_snapshot(sys: &mut System, self_pid: u32) -> Snapshot {
                 memory,
                 status: format!("{:?}", p.status()),
                 user_id: p.user_id().map(|u| u.to_string()),
-                is_syspulse: is_syspulse_recursive(pid_u32, self_pid, &sys),
-            })
+                is_syspulse,
+            }
         }).collect();

     Snapshot {
-        timestamp: Utc::now(),
+        timestamp: now,
         cpu_usage,
         used_memory,
         processes,
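
get_pss itself is outside this hunk. On Linux the usual source for PSS is /proc/<pid>/smaps_rollup, and reading it is what makes the call expensive enough to warrant the cache; a plausible sketch under that assumption (not the commit's actual implementation):

    use std::fs;

    // Plausible shape for get_pss (its body is not shown in this diff): on Linux,
    // the kernel pre-sums PSS in /proc/<pid>/smaps_rollup, and reading that file
    // still forces a walk of the process's mappings; hence the caching above.
    fn get_pss(pid: u32) -> Option<u64> {
        let content = fs::read_to_string(format!("/proc/{}/smaps_rollup", pid)).ok()?;
        for line in content.lines() {
            // The relevant line looks like: "Pss:                1234 kB"
            if let Some(rest) = line.strip_prefix("Pss:") {
                let kb: u64 = rest.trim().trim_end_matches("kB").trim().parse().ok()?;
                return Some(kb * 1024); // bytes, matching sysinfo's memory units
            }
        }
        None
    }

    fn main() {
        println!("self PSS: {:?}", get_pss(std::process::id()));
    }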
@@ -213,59 +234,51 @@ fn generate_report(start_time: DateTime<Utc>, snapshots: Vec<Snapshot>) -> Repor
         }
     }).collect();

-    struct PidStats {
-        name: String,
-        history: Vec<ProcessHistoryPoint>,
-        peak_cpu: f32,
-        peak_mem: f32,
-        is_syspulse: bool,
-        is_zombie: bool,
-    }
-
-    let mut pid_map: HashMap<u32, PidStats> = HashMap::new();
+    let mut pid_map: HashMap<u32, (String, Option<u32>, Vec<ProcessHistoryPoint>, f32, f32, bool, bool)> = HashMap::new();
     let num_snapshots = snapshots.len() as f32;

     for snapshot in &snapshots {
         for proc in &snapshot.processes {
-            let entry = pid_map.entry(proc.pid).or_insert_with(|| PidStats {
-                name: proc.name.clone(),
-                history: Vec::new(),
-                peak_cpu: 0.0,
-                peak_mem: 0.0,
-                is_syspulse: proc.is_syspulse,
-                is_zombie: false,
-            });
+            let entry = pid_map.entry(proc.pid).or_insert_with(|| (
+                proc.name.clone(),
+                proc.parent_pid,
+                Vec::new(),
+                0.0,
+                0.0,
+                proc.is_syspulse,
+                false
+            ));
             let mem_mb = proc.memory as f32 / 1024.0 / 1024.0;
-            entry.history.push(ProcessHistoryPoint {
+            entry.2.push(ProcessHistoryPoint {
                 time: snapshot.timestamp.format("%H:%M:%S").to_string(),
                 cpu_usage: proc.cpu_usage,
                 memory_mb: mem_mb,
             });
-            if proc.cpu_usage > entry.peak_cpu { entry.peak_cpu = proc.cpu_usage; }
-            if mem_mb > entry.peak_mem { entry.peak_mem = mem_mb; }
-            if proc.status.contains("Zombie") { entry.is_zombie = true; }
+            if proc.cpu_usage > entry.3 { entry.3 = proc.cpu_usage; }
+            if mem_mb > entry.4 { entry.4 = mem_mb; }
+            if proc.status.contains("Zombie") { entry.6 = true; }
         }
     }

-    let mut nodes: HashMap<u32, AggregatedProcess> = pid_map.into_iter().map(|(pid, stats)| {
-        let total_cpu: f32 = stats.history.iter().map(|h| h.cpu_usage).sum();
-        let total_mem: f32 = stats.history.iter().map(|h| h.memory_mb).sum();
+    let mut nodes: HashMap<u32, AggregatedProcess> = pid_map.into_iter().map(|(pid, (name, _, history, peak_cpu, peak_mem, is_syspulse, is_zombie))| {
+        let total_cpu: f32 = history.iter().map(|h| h.cpu_usage).sum();
+        let total_mem: f32 = history.iter().map(|h| h.memory_mb).sum();
         let mut warnings = Vec::new();
-        if stats.is_zombie { warnings.push("Zombie".to_string()); }
-        if stats.peak_cpu > 80.0 { warnings.push("High Peak".to_string()); }
+        if is_zombie { warnings.push("Zombie".to_string()); }
+        if peak_cpu > 80.0 { warnings.push("High Peak".to_string()); }
         (pid, AggregatedProcess {
             pid,
-            name: stats.name,
+            name,
             avg_cpu: total_cpu / num_snapshots,
-            peak_cpu: stats.peak_cpu,
+            peak_cpu,
             avg_memory_mb: total_mem / num_snapshots,
-            peak_memory_mb: stats.peak_mem,
+            peak_memory_mb: peak_mem,
             inclusive_avg_cpu: 0.0,
             inclusive_avg_memory_mb: 0.0,
             instance_count: 1,
             warnings,
-            history: stats.history,
-            is_syspulse: stats.is_syspulse,
+            history,
+            is_syspulse,
             children: Vec::new(),
         })
     }).collect();
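
For reference, the tuple now stored in pid_map packs (name, parent_pid, history, peak_cpu, peak_mem, is_syspulse, is_zombie), as the destructuring above shows. A hypothetical alias that documents the same layout:

    // Hypothetical, not part of the commit: just documenting the tuple's shape.
    type PidEntry = (String, Option<u32>, Vec<ProcessHistoryPoint>, f32, f32, bool, bool);
    //              (name,  parent_pid,  history,                  peak_cpu, peak_mem, is_syspulse, is_zombie)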
@@ -337,8 +350,14 @@ fn generate_report(start_time: DateTime<Utc>, snapshots: Vec<Snapshot>) -> Repor
 fn get_system_stats(state: State<AppState>, minimal: bool) -> SystemStats {
     let mut sys = state.sys.lock().unwrap();
     let mut profiling = state.profiling.lock().unwrap();
+    let mut pss_cache = state.pss_cache.lock().unwrap();

-    let snapshot = collect_snapshot(&mut sys, std::process::id());
+    let self_pid = std::process::id();
+    let syspulse_pids = get_syspulse_pids(self_pid, &sys);
+
+    // No PSS collection during the live dashboard, only while profiling is active;
+    // unthrottled PSS reads were the primary cause of the high CPU/I/O.
+    let snapshot = collect_snapshot(&mut sys, self_pid, &syspulse_pids, &mut pss_cache, profiling.is_active);

     if profiling.is_active {
         profiling.snapshots.push(snapshot.clone());
@@ -390,7 +409,8 @@ fn start_profiling(state: State<AppState>) {
 fn stop_profiling(state: State<AppState>) -> Report {
     let mut profiling = state.profiling.lock().unwrap();
     profiling.is_active = false;
-    generate_report(profiling.start_time.unwrap_or(Utc::now()), profiling.snapshots.drain(..).collect())
+    let snapshots: Vec<Snapshot> = profiling.snapshots.drain(..).collect();
+    generate_report(profiling.start_time.unwrap_or(Utc::now()), snapshots)
 }

 #[tauri::command]
@@ -423,12 +443,14 @@ fn main() {
     if cli.headless {
         println!("⚡ SysPulse: Starting headless profiling for {}s (interval: {}ms)...", cli.duration, cli.interval);
         let mut sys = System::new_all();
+        let mut pss_cache = HashMap::new();
         let start_time = Utc::now();
         let mut snapshots = Vec::new();
         let self_pid = std::process::id();

         for i in 0..(cli.duration * 1000 / cli.interval) {
-            snapshots.push(collect_snapshot(&mut sys, self_pid));
+            let syspulse_pids = get_syspulse_pids(self_pid, &sys);
+            snapshots.push(collect_snapshot(&mut sys, self_pid, &syspulse_pids, &mut pss_cache, true));
             std::thread::sleep(Duration::from_millis(cli.interval));
             if (i + 1) % (1000 / cli.interval) == 0 {
                 println!(" Progress: {}/{}s", (i + 1) * cli.interval / 1000, cli.duration);
@@ -457,6 +479,7 @@ fn main() {
                 snapshots: Vec::new(),
             }),
             initial_report: Mutex::new(initial_report),
+            pss_cache: Mutex::new(HashMap::new()),
         })
         .invoke_handler(tauri::generate_handler![
             get_system_stats,