fix: resolve UI freezes by parallelizing PSS collection and optimizing system refreshing

This commit is contained in:
2026-02-23 01:12:26 +01:00
parent a5d226503d
commit 191aa494db
2 changed files with 96 additions and 114 deletions

View File

@@ -3,36 +3,15 @@
windows_subsystem = "windows"
)]
use sysinfo::{System, Pid};
use sysinfo::System;
use std::sync::Mutex;
use std::process::Command;
use tauri::State;
use serde::{Serialize, Deserialize};
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use chrono::{DateTime, Utc};
use std::fs;
// --- Helper for Real Memory (PSS) on Linux ---
/// Reads the PSS (Proportional Set Size) of a process on Linux, in bytes.
///
/// PSS counts private memory plus a proportional share of shared pages,
/// making it the most accurate "real" memory figure for a single process.
/// `/proc/<pid>/smaps_rollup` exposes it cheaply on modern kernels.
///
/// Returns `None` when the file cannot be read (process gone, not Linux,
/// insufficient permissions) or when no parseable `Pss:` line is present.
fn get_pss(pid: u32) -> Option<u64> {
    let rollup = fs::read_to_string(format!("/proc/{}/smaps_rollup", pid)).ok()?;
    rollup
        .lines()
        .filter(|line| line.starts_with("Pss:"))
        // Second whitespace-separated token is the value in kB; skip
        // malformed lines rather than failing outright.
        .filter_map(|line| line.split_whitespace().nth(1))
        .filter_map(|kb| kb.parse::<u64>().ok())
        .map(|kb| kb * 1024) // kB -> bytes
        .next()
}
use rayon::prelude::*;
// --- Data Structures ---
@@ -119,21 +98,48 @@ struct AggregatedProcess {
children: Vec<AggregatedProcess>,
}
// --- Commands ---
// --- Helper for Real Memory (PSS) on Linux ---
fn is_descendant_of(pid: u32, target_pid: u32, sys: &System) -> bool {
let mut current_pid = Some(Pid::from_u32(pid));
while let Some(p) = current_pid {
if p.as_u32() == target_pid {
return true;
}
if let Some(process) = sys.process(p) {
current_pid = process.parent();
} else {
break;
fn get_pss(pid: u32) -> Option<u64> {
let path = format!("/proc/{}/smaps_rollup", pid);
if let Ok(contents) = fs::read_to_string(path) {
for line in contents.lines() {
if line.starts_with("Pss:") {
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() >= 2 {
if let Ok(kb) = parts[1].parse::<u64>() {
return Some(kb * 1024);
}
}
}
}
}
false
None
}
// --- Commands ---
/// Collects the pids of every (transitive) descendant of `target_pid`.
///
/// Builds a parent -> children index from the current process table in a
/// single pass, then walks it iteratively from `target_pid`. The target
/// pid itself is NOT included in the returned set; the caller is expected
/// to check it separately.
fn get_all_descendants(target_pid: u32, sys: &System) -> HashSet<u32> {
    // One pass over the process table: index direct children by parent pid.
    let mut children_of: HashMap<u32, Vec<u32>> = HashMap::new();
    for (pid, process) in sys.processes() {
        if let Some(ppid) = process.parent() {
            children_of
                .entry(ppid.as_u32())
                .or_default()
                .push(pid.as_u32());
        }
    }
    // Iterative depth-first walk; the HashSet doubles as the visited set,
    // so cycles (which shouldn't occur in a pid tree) cannot loop forever.
    let mut found = HashSet::new();
    let mut pending = vec![target_pid];
    while let Some(current) = pending.pop() {
        for &child in children_of.get(&current).into_iter().flatten() {
            if found.insert(child) {
                pending.push(child);
            }
        }
    }
    found
}
#[tauri::command]
@@ -144,59 +150,46 @@ fn get_system_stats(
let mut sys = state.sys.lock().unwrap();
let mut profiling = state.profiling.lock().unwrap();
if minimal {
sys.refresh_cpu_all();
sys.refresh_memory();
} else {
sys.refresh_all();
}
sys.refresh_cpu_all();
sys.refresh_memory();
sys.refresh_processes(sysinfo::ProcessesToUpdate::All, true);
let self_pid = std::process::id();
let cpu_usage: Vec<f32> = sys.cpus().iter().map(|cpu| cpu.cpu_usage()).collect();
let total_memory = sys.total_memory();
let used_memory = sys.used_memory();
let mut processes: Vec<ProcessStats> = if minimal && !profiling.is_active {
Vec::new()
} else {
sys.processes().iter()
.map(|(pid, process)| {
let pid_u32 = pid.as_u32();
let memory = get_pss(pid_u32).unwrap_or_else(|| process.memory());
ProcessStats {
pid: pid_u32,
parent_pid: process.parent().map(|p| p.as_u32()),
name: process.name().to_string_lossy().to_string(),
cpu_usage: process.cpu_usage(),
memory,
status: format!("{:?}", process.status()),
user_id: process.user_id().map(|uid| uid.to_string()),
is_syspulse: is_descendant_of(pid_u32, self_pid, &sys),
}
}).collect()
};
let syspulse_set = get_all_descendants(self_pid, &sys);
let raw_processes: Vec<_> = sys.processes().iter()
.map(|(pid, p)| (
pid.as_u32(),
p.parent().map(|pp| pp.as_u32()),
p.name().to_string_lossy().to_string(),
p.cpu_usage(),
p.memory(),
format!("{:?}", p.status()),
p.user_id().map(|u| u.to_string())
))
.collect();
let processes: Vec<ProcessStats> = raw_processes.into_par_iter()
.map(|(pid, parent_pid, name, cpu, rss, status, uid)| {
let is_syspulse = pid == self_pid || syspulse_set.contains(&pid);
let memory = get_pss(pid).unwrap_or(rss);
ProcessStats {
pid,
parent_pid,
name,
cpu_usage: cpu,
memory,
status,
user_id: uid,
is_syspulse,
}
}).collect();
if profiling.is_active {
// Even in minimal mode, if recording we need the processes for the report
if minimal {
sys.refresh_processes(sysinfo::ProcessesToUpdate::All, true);
processes = sys.processes().iter()
.map(|(pid, process)| {
let pid_u32 = pid.as_u32();
let memory = get_pss(pid_u32).unwrap_or_else(|| process.memory());
ProcessStats {
pid: pid_u32,
parent_pid: process.parent().map(|p| p.as_u32()),
name: process.name().to_string_lossy().to_string(),
cpu_usage: process.cpu_usage(),
memory,
status: format!("{:?}", process.status()),
user_id: process.user_id().map(|uid| uid.to_string()),
is_syspulse: is_descendant_of(pid_u32, self_pid, &sys),
}
}).collect();
}
profiling.snapshots.push(Snapshot {
timestamp: Utc::now(),
cpu_usage: cpu_usage.clone(),
@@ -211,18 +204,22 @@ fn get_system_stats(
0
};
let mut display_processes = if minimal && !profiling.is_active {
Vec::new()
} else {
processes.clone()
};
if !minimal {
processes.sort_by(|a, b| b.cpu_usage.partial_cmp(&a.cpu_usage).unwrap_or(std::cmp::Ordering::Equal));
processes.truncate(50);
} else if !profiling.is_active {
processes.clear();
display_processes.sort_by(|a, b| b.cpu_usage.partial_cmp(&a.cpu_usage).unwrap_or(std::cmp::Ordering::Equal));
display_processes.truncate(50);
}
SystemStats {
cpu_usage,
total_memory,
used_memory,
processes,
processes: display_processes,
is_recording: profiling.is_active,
recording_duration,
}
@@ -300,7 +297,7 @@ fn stop_profiling(state: State<AppState>) -> Report {
}
}
// 3. Convert to nodes and build tree
// 3. Convert to nodes
let mut nodes: HashMap<u32, AggregatedProcess> = pid_map.into_iter().map(|(pid, stats)| {
let total_cpu: f32 = stats.history.iter().map(|h| h.cpu_usage).sum();
let total_mem: f32 = stats.history.iter().map(|h| h.memory_mb).sum();
@@ -316,8 +313,8 @@ fn stop_profiling(state: State<AppState>) -> Report {
peak_cpu: stats.peak_cpu,
avg_memory_mb: total_mem / num_snapshots,
peak_memory_mb: stats.peak_mem,
inclusive_avg_cpu: 0.0, // Calculated later
inclusive_avg_memory_mb: 0.0, // Calculated later
inclusive_avg_cpu: 0.0,
inclusive_avg_memory_mb: 0.0,
instance_count: 1,
warnings,
history: stats.history,
@@ -326,11 +323,8 @@ fn stop_profiling(state: State<AppState>) -> Report {
})
}).collect();
// 4. Link children to parents
let mut root_pids = Vec::new();
// 4. Build Tree
let mut child_to_parent = HashMap::new();
// We need to re-fetch parent info because we moved pid_map
for snapshot in &profiling.snapshots {
for proc in &snapshot.processes {
if let Some(ppid) = proc.parent_pid {
@@ -341,16 +335,16 @@ fn stop_profiling(state: State<AppState>) -> Report {
}
}
let pids: Vec<u32> = nodes.keys().cloned().collect();
for pid in pids {
if let Some(&_ppid) = child_to_parent.get(&pid) {
// Already handled in recursive aggregation or linked below
} else {
root_pids.push(pid);
}
let mut child_map: HashMap<u32, Vec<u32>> = HashMap::new();
for (&child, &parent) in &child_to_parent {
child_map.entry(parent).or_default().push(child);
}
// 5. Recursive function to calculate inclusive stats and build tree
let root_pids: Vec<u32> = nodes.keys()
.filter(|pid| !child_to_parent.contains_key(pid))
.cloned()
.collect();
fn build_node(pid: u32, nodes: &mut HashMap<u32, AggregatedProcess>, child_map: &HashMap<u32, Vec<u32>>) -> Option<AggregatedProcess> {
let mut node = nodes.remove(&pid)?;
let children_pids = child_map.get(&pid).cloned().unwrap_or_default();
@@ -371,11 +365,6 @@ fn stop_profiling(state: State<AppState>) -> Report {
Some(node)
}
let mut child_map: HashMap<u32, Vec<u32>> = HashMap::new();
for (&child, &parent) in &child_to_parent {
child_map.entry(parent).or_default().push(child);
}
let mut final_roots = Vec::new();
for pid in root_pids {
if let Some(root_node) = build_node(pid, &mut nodes, &child_map) {
@@ -383,7 +372,6 @@ fn stop_profiling(state: State<AppState>) -> Report {
}
}
// Include any remaining orphan nodes as roots (e.g. if parent info was missing in snapshots)
let remaining_pids: Vec<u32> = nodes.keys().cloned().collect();
for pid in remaining_pids {
if let Some(node) = build_node(pid, &mut nodes, &child_map) {
@@ -391,7 +379,6 @@ fn stop_profiling(state: State<AppState>) -> Report {
}
}
// Sort roots by inclusive CPU
final_roots.sort_by(|a, b| b.inclusive_avg_cpu.partial_cmp(&a.inclusive_avg_cpu).unwrap_or(std::cmp::Ordering::Equal));
Report {
@@ -405,12 +392,6 @@ fn stop_profiling(state: State<AppState>) -> Report {
#[tauri::command]
fn run_as_admin(command: String) -> Result<String, String> {
// Uses pkexec to run a command as root.
// CAUTION: This is a simple implementation. In production, validate inputs carefully.
// Splitting command for safety is hard, so we assume 'command' is a simple executable name or safe string.
// Example usage from frontend: "kill -9 1234" -> pkexec kill -9 1234
let output = Command::new("pkexec")
.arg("sh")
.arg("-c")