feat: implement hierarchical process tree view and inclusive resource calculation

This commit is contained in:
2026-02-22 23:13:04 +01:00
parent ab4b2af800
commit d0398edb29

View File

@@ -26,6 +26,7 @@ struct SystemStats {
#[derive(Serialize, Clone, Debug)] #[derive(Serialize, Clone, Debug)]
struct ProcessStats { struct ProcessStats {
pid: u32, pid: u32,
parent_pid: Option<u32>,
name: String, name: String,
cpu_usage: f32, cpu_usage: f32,
memory: u64, memory: u64,
@@ -80,15 +81,19 @@ struct ProcessHistoryPoint {
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
struct AggregatedProcess { struct AggregatedProcess {
pid: u32,
name: String, name: String,
avg_cpu: f32, avg_cpu: f32,
peak_cpu: f32, peak_cpu: f32,
avg_memory_mb: f32, avg_memory_mb: f32,
peak_memory_mb: f32, peak_memory_mb: f32,
inclusive_avg_cpu: f32,
inclusive_avg_memory_mb: f32,
instance_count: usize, instance_count: usize,
warnings: Vec<String>, warnings: Vec<String>,
history: Vec<ProcessHistoryPoint>, history: Vec<ProcessHistoryPoint>,
is_syspulse: bool, is_syspulse: bool,
children: Vec<AggregatedProcess>,
} }
// --- Commands --- // --- Commands ---
@@ -136,6 +141,7 @@ fn get_system_stats(
let pid_u32 = pid.as_u32(); let pid_u32 = pid.as_u32();
ProcessStats { ProcessStats {
pid: pid_u32, pid: pid_u32,
parent_pid: process.parent().map(|p| p.as_u32()),
name: process.name().to_string_lossy().to_string(), name: process.name().to_string_lossy().to_string(),
cpu_usage: process.cpu_usage(), cpu_usage: process.cpu_usage(),
memory: process.memory(), memory: process.memory(),
@@ -155,6 +161,7 @@ fn get_system_stats(
let pid_u32 = pid.as_u32(); let pid_u32 = pid.as_u32();
ProcessStats { ProcessStats {
pid: pid_u32, pid: pid_u32,
parent_pid: process.parent().map(|p| p.as_u32()),
name: process.name().to_string_lossy().to_string(), name: process.name().to_string_lossy().to_string(),
cpu_usage: process.cpu_usage(), cpu_usage: process.cpu_usage(),
memory: process.memory(), memory: process.memory(),
@@ -231,95 +238,136 @@ fn stop_profiling(state: State<AppState>) -> Report {
} }
}).collect(); }).collect();
// 2. Aggregate Processes over Time // 2. Aggregate RAW stats per PID
let mut process_map: HashMap<String, Vec<ProcessHistoryPoint>> = HashMap::new(); struct PidStats {
let mut peak_stats: HashMap<String, (f32, f32)> = HashMap::new(); // (Peak CPU, Peak Mem) name: String,
let mut unique_pids: HashMap<String, std::collections::HashSet<u32>> = HashMap::new(); parent_pid: Option<u32>,
let mut status_flags: HashMap<String, bool> = HashMap::new(); // Zombie check history: Vec<ProcessHistoryPoint>,
let mut syspulse_flags: HashMap<String, bool> = HashMap::new(); peak_cpu: f32,
peak_mem: f32,
is_syspulse: bool,
is_zombie: bool,
}
let mut pid_map: HashMap<u32, PidStats> = HashMap::new();
let num_snapshots = profiling.snapshots.len() as f32;
for snapshot in &profiling.snapshots { for snapshot in &profiling.snapshots {
let mut snapshot_procs: HashMap<String, (f32, u64)> = HashMap::new();
for proc in &snapshot.processes { for proc in &snapshot.processes {
let entry = snapshot_procs.entry(proc.name.clone()).or_default(); let entry = pid_map.entry(proc.pid).or_insert_with(|| PidStats {
entry.0 += proc.cpu_usage; name: proc.name.clone(),
entry.1 += proc.memory; parent_pid: proc.parent_pid,
history: Vec::new(),
unique_pids.entry(proc.name.clone()).or_default().insert(proc.pid); peak_cpu: 0.0,
if proc.status.contains("Zombie") { peak_mem: 0.0,
status_flags.insert(proc.name.clone(), true); is_syspulse: proc.is_syspulse,
} is_zombie: false,
if proc.is_syspulse { });
syspulse_flags.insert(proc.name.clone(), true);
}
}
// Record history for all processes seen in this snapshot let mem_mb = proc.memory as f32 / 1024.0 / 1024.0;
for (name, (cpu, mem)) in snapshot_procs { entry.history.push(ProcessHistoryPoint {
let hist_entry = process_map.entry(name.clone()).or_default();
let mem_mb = mem as f32 / 1024.0 / 1024.0;
hist_entry.push(ProcessHistoryPoint {
time: snapshot.timestamp.format("%H:%M:%S").to_string(), time: snapshot.timestamp.format("%H:%M:%S").to_string(),
cpu_usage: cpu, cpu_usage: proc.cpu_usage,
memory_mb: mem_mb, memory_mb: mem_mb,
}); });
let peaks = peak_stats.entry(name).or_insert((0.0, 0.0)); if proc.cpu_usage > entry.peak_cpu { entry.peak_cpu = proc.cpu_usage; }
if cpu > peaks.0 { peaks.0 = cpu; } if mem_mb > entry.peak_mem { entry.peak_mem = mem_mb; }
if mem_mb > peaks.1 { peaks.1 = mem_mb; } if proc.status.contains("Zombie") { entry.is_zombie = true; }
} }
} }
let mut aggregated_processes: Vec<AggregatedProcess> = Vec::new(); // 3. Convert to nodes and build tree
let num_snapshots = profiling.snapshots.len() as f32; let mut nodes: HashMap<u32, AggregatedProcess> = pid_map.into_iter().map(|(pid, stats)| {
let total_cpu: f32 = stats.history.iter().map(|h| h.cpu_usage).sum();
for (name, history) in process_map { let total_mem: f32 = stats.history.iter().map(|h| h.memory_mb).sum();
let (peak_cpu, peak_mem) = peak_stats.get(&name).cloned().unwrap_or((0.0, 0.0));
let count = unique_pids.get(&name).map(|s| s.len()).unwrap_or(0);
let is_syspulse = syspulse_flags.get(&name).cloned().unwrap_or(false);
// Average over the whole SESSION (zeros for snapshots where not present)
let total_cpu_sum: f32 = history.iter().map(|h| h.cpu_usage).sum();
let total_mem_sum: f32 = history.iter().map(|h| h.memory_mb).sum();
let avg_cpu = if num_snapshots > 0.0 { total_cpu_sum / num_snapshots } else { 0.0 };
let avg_mem = if num_snapshots > 0.0 { total_mem_sum / num_snapshots } else { 0.0 };
let mut warnings = Vec::new(); let mut warnings = Vec::new();
if status_flags.get(&name).cloned().unwrap_or(false) { if stats.is_zombie { warnings.push("Zombie".to_string()); }
warnings.push("Zombie Process Detected".to_string()); if stats.peak_cpu > 80.0 { warnings.push("High Peak".to_string()); }
}
if peak_cpu > 80.0 {
warnings.push("High Peak Load".to_string());
}
if peak_mem > 2048.0 {
warnings.push("Heavy Memory usage".to_string());
}
aggregated_processes.push(AggregatedProcess { (pid, AggregatedProcess {
name, pid,
avg_cpu, name: stats.name,
peak_cpu, avg_cpu: total_cpu / num_snapshots,
avg_memory_mb: avg_mem, peak_cpu: stats.peak_cpu,
peak_memory_mb: peak_mem, avg_memory_mb: total_mem / num_snapshots,
instance_count: count, peak_memory_mb: stats.peak_mem,
inclusive_avg_cpu: 0.0, // Calculated later
inclusive_avg_memory_mb: 0.0, // Calculated later
instance_count: 1,
warnings, warnings,
history, history: stats.history,
is_syspulse, is_syspulse: stats.is_syspulse,
}); children: Vec::new(),
})
}).collect();
// 4. Link children to parents
let mut root_pids = Vec::new();
let mut child_to_parent = HashMap::new();
// pid_map was consumed by into_iter() above, so re-scan the snapshots to recover parent links
for snapshot in &profiling.snapshots {
for proc in &snapshot.processes {
if let Some(ppid) = proc.parent_pid {
if nodes.contains_key(&ppid) {
child_to_parent.insert(proc.pid, ppid);
}
}
}
} }
// Sort by Average CPU descending let pids: Vec<u32> = nodes.keys().cloned().collect();
aggregated_processes.sort_by(|a, b| b.avg_cpu.partial_cmp(&a.avg_cpu).unwrap_or(std::cmp::Ordering::Equal)); for pid in pids {
if let Some(&ppid) = child_to_parent.get(&pid) {
// Non-root PID: intentionally skipped here — it will be attached beneath its parent by build_node
} else {
root_pids.push(pid);
}
}
// 5. Recursive function to calculate inclusive stats and build tree
fn build_node(pid: u32, nodes: &mut HashMap<u32, AggregatedProcess>, child_map: &HashMap<u32, Vec<u32>>) -> AggregatedProcess {
let mut node = nodes.remove(&pid).unwrap();
let children_pids = child_map.get(&pid).cloned().unwrap_or_default();
let mut inc_cpu = node.avg_cpu;
let mut inc_mem = node.avg_memory_mb;
for c_pid in children_pids {
let child_node = build_node(c_pid, nodes, child_map);
inc_cpu += child_node.inclusive_avg_cpu;
inc_mem += child_node.inclusive_avg_memory_mb;
node.children.push(child_node);
}
node.inclusive_avg_cpu = inc_cpu;
node.inclusive_avg_memory_mb = inc_mem;
node
}
let mut child_map: HashMap<u32, Vec<u32>> = HashMap::new();
for (&child, &parent) in &child_to_parent {
child_map.entry(parent).or_default().push(child);
}
let mut final_roots = Vec::new();
for pid in root_pids {
if nodes.contains_key(&pid) {
final_roots.push(build_node(pid, &mut nodes, &child_map));
}
}
// Sort roots by inclusive CPU
final_roots.sort_by(|a, b| b.inclusive_avg_cpu.partial_cmp(&a.inclusive_avg_cpu).unwrap_or(std::cmp::Ordering::Equal));
Report { Report {
start_time: start.to_rfc3339(), start_time: start.to_rfc3339(),
end_time: end.to_rfc3339(), end_time: end.to_rfc3339(),
duration_seconds: duration, duration_seconds: duration,
timeline, timeline,
aggregated_processes, aggregated_processes: final_roots,
} }
} }