feat: rewrite for accurate PSS memory and smart user-focused hierarchy

This commit is contained in:
2026-02-23 20:02:23 +01:00
parent 3497543c4f
commit 9c39c8b35f
5 changed files with 305 additions and 97 deletions

43
src-tauri/src/commands.rs Normal file
View File

@@ -0,0 +1,43 @@
use tauri::State;
use crate::models::*;
use std::process::Command as StdCommand;
use crate::AppState;
/// Tauri command: returns a clone of the most recent `GlobalStats` sample
/// published by the background monitor thread.
#[tauri::command]
pub fn get_latest_stats(state: State<AppState>) -> GlobalStats {
    state.monitor.get_latest()
}
/// Tauri command: starts a profiling session, optionally scoped to a single
/// target PID; the monitor loop accumulates snapshots until `stop_profiling`.
#[tauri::command]
pub fn start_profiling(state: State<AppState>, target_pid: Option<u32>) {
    state.monitor.start_profiling(target_pid);
}
/// Tauri command: ends the current profiling session and returns the built
/// report, or `None` when no session was active (or it captured no snapshots).
#[tauri::command]
pub fn stop_profiling(state: State<AppState>) -> Option<ProfilingReport> {
    state.monitor.stop_profiling()
}
/// Tauri command: runs a shell command as root through `pkexec sh -c`.
///
/// SECURITY NOTE(review): `command` arrives from the frontend and is handed
/// verbatim to a root shell — any caller of this command can execute
/// arbitrary code with elevated privileges. Prefer an allow-list of the
/// specific privileged operations the UI actually needs over a free-form
/// command string.
///
/// Returns stdout on success; stderr (or the spawn error) on failure.
#[tauri::command]
pub fn run_as_admin(command: String) -> Result<String, String> {
    let output = StdCommand::new("pkexec")
        .arg("sh")
        .arg("-c")
        .arg(&command)
        .output()
        .map_err(|e| e.to_string())?;
    if output.status.success() {
        Ok(String::from_utf8_lossy(&output.stdout).to_string())
    } else {
        Err(String::from_utf8_lossy(&output.stderr).to_string())
    }
}
/// Tauri command: serializes a profiling report to pretty JSON and writes it
/// to a timestamped file in the current working directory.
///
/// Returns the path written on success, or the serialization/IO error text.
#[tauri::command]
pub fn save_report(report: ProfilingReport) -> Result<String, String> {
    let stamp = chrono::Utc::now().format("%Y%m%d_%H%M%S");
    let path = format!("syspulse_report_{}.json", stamp);
    match serde_json::to_string_pretty(&report) {
        Ok(json) => match std::fs::write(&path, json) {
            Ok(()) => Ok(path),
            Err(e) => Err(e.to_string()),
        },
        Err(e) => Err(e.to_string()),
    }
}

View File

@@ -5,19 +5,13 @@
mod models;
mod monitor;
mod commands;
mod cli;
use tauri::State;
use crate::monitor::Monitor;
use crate::models::GlobalStats;
struct AppState {
monitor: Monitor,
}
#[tauri::command]
fn get_latest_stats(state: State<AppState>) -> GlobalStats {
state.monitor.get_latest()
// Shared Tauri managed state: owns the background system monitor.
pub struct AppState {
    pub monitor: Monitor,
}
fn main() {
@@ -29,7 +23,11 @@ fn main() {
monitor
})
.invoke_handler(tauri::generate_handler![
get_latest_stats
commands::get_latest_stats,
commands::start_profiling,
commands::stop_profiling,
commands::run_as_admin,
commands::save_report
])
.run(tauri::generate_context!())
.expect("error while running tauri application");

View File

@@ -1,31 +1,49 @@
use serde::Serialize;
use serde::{Serialize, Deserialize};
// NOTE(review): this span rendered a diff hunk with the old field set
// (cpu_children / mem_rss / mem_children plus the total_cpu / total_mem impl)
// interleaved with the new one and two stacked derive attributes, which does
// not compile. Reconstructed to the post-change revision, which the new
// monitor code (cpu_inclusive / mem_pss_* usage) relies on.
/// One node in the process tree. `*_self` figures cover the process alone;
/// `*_inclusive` figures fold in all descendants.
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct ProcessNode {
    pub pid: u32,
    pub name: String,
    pub cpu_self: f32,
    pub cpu_inclusive: f32,
    pub mem_pss_self: u64,      // Proportional Set Size (Fair memory)
    pub mem_pss_inclusive: u64, // Sum of self + children PSS
    pub children: Vec<ProcessNode>,
}
#[derive(Clone, Serialize)]
#[derive(Clone, Serialize, Deserialize)]
pub struct GlobalStats {
pub cpu_total: f32,
pub mem_used: u64,
pub mem_total: u64,
pub process_tree: Vec<ProcessNode>,
pub smart_tree: Vec<ProcessNode>, // Top-level user-relevant processes
pub process_count: usize,
pub is_profiling: bool,
}
/// Final report produced when a profiling session is stopped.
#[derive(Clone, Serialize, Deserialize)]
pub struct ProfilingReport {
    pub start_time: String,   // RFC 3339 (set via to_rfc3339 in the monitor)
    pub end_time: String,     // RFC 3339
    pub duration_seconds: u64,
    pub snapshots: Vec<ReportSnapshot>,
    pub target_pid: Option<u32>,
    pub aggregated_tree: Vec<ProcessNode>, // Final inclusive stats
}
/// One per-tick sample stored inside a profiling report.
#[derive(Clone, Serialize, Deserialize)]
pub struct ReportSnapshot {
    pub timestamp: String, // RFC 3339
    pub cpu_usage: f32,
    pub mem_used: u64,
    // Only stores top consumers or target info to keep JSON size sane
    pub top_processes: Vec<ProcessSnapshot>,
}
/// Compact per-process sample kept in `ReportSnapshot::top_processes`.
#[derive(Clone, Serialize, Deserialize)]
pub struct ProcessSnapshot {
    pub pid: u32,
    pub name: String,
    pub cpu: f32,
    pub pss: u64, // bytes (PSS when readable, RSS fallback in the monitor)
}

View File

@@ -1,13 +1,38 @@
use sysinfo::System;
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
use std::time::{Duration, Instant};
use std::collections::HashMap;
use chrono::Utc;
use std::fs;
use crate::models::*;
/// Background system monitor: a sampling thread publishes stats into `data`;
/// `profiling_data` holds the optional in-flight profiling session.
pub struct Monitor {
    data: Arc<Mutex<GlobalStats>>,
    running: Arc<Mutex<bool>>, // cleared to stop the sampling thread
    profiling_data: Arc<Mutex<Option<ProfilingSession>>>,
}
// An in-flight profiling session; `None` in `Monitor::profiling_data` = idle.
struct ProfilingSession {
    start_time: chrono::DateTime<Utc>,
    target_pid: Option<u32>, // restrict the report tree to this PID, if set
    snapshots: Vec<Snapshot>,
}
// One full sample of system + per-process stats, taken each monitor tick.
#[derive(Clone)]
struct Snapshot {
    timestamp: chrono::DateTime<Utc>,
    cpu_total: f32,
    mem_used: u64,
    processes: Vec<ProcessRawStats>,
}
// Flat per-process sample, before tree assembly.
#[derive(Clone)]
struct ProcessRawStats {
    pid: u32,
    ppid: Option<u32>, // parent PID when known
    name: String,
    cpu: f32,
    pss: u64, // bytes; PSS from /proc when readable, otherwise RSS fallback
}
impl Monitor {
@@ -16,13 +41,14 @@ impl Monitor {
cpu_total: 0.0,
mem_used: 0,
mem_total: 0,
process_tree: Vec::new(),
smart_tree: Vec::new(),
process_count: 0,
is_profiling: false,
};
Monitor {
data: Arc::new(Mutex::new(stats)),
running: Arc::new(Mutex::new(false)),
profiling_data: Arc::new(Mutex::new(None)),
}
}
@@ -31,112 +57,235 @@ impl Monitor {
lock.clone()
}
/// Begins a fresh profiling session (silently replacing any session that is
/// already running), optionally scoped to `target_pid`.
pub fn start_profiling(&self, target_pid: Option<u32>) {
    let session = ProfilingSession {
        start_time: Utc::now(),
        target_pid,
        snapshots: Vec::new(),
    };
    *self.profiling_data.lock().unwrap() = Some(session);
}
pub fn stop_profiling(&self) -> Option<ProfilingReport> {
let mut lock = self.profiling_data.lock().unwrap();
if let Some(session) = lock.take() {
let end_time = Utc::now();
let duration = (end_time - session.start_time).num_seconds() as u64;
if let Some(last_snap) = session.snapshots.last() {
let snapshots_count = session.snapshots.len() as f32;
let mut avg_stats: HashMap<u32, (f32, f32)> = HashMap::new();
for snap in &session.snapshots {
for p in &snap.processes {
let entry = avg_stats.entry(p.pid).or_insert((0.0, 0.0));
entry.0 += p.cpu;
entry.1 += p.pss as f32;
}
}
let mut tree_nodes: HashMap<u32, ProcessNode> = last_snap.processes.iter().map(|p| {
let (cpu_sum, pss_sum) = avg_stats.get(&p.pid).cloned().unwrap_or((0.0, 0.0));
(p.pid, ProcessNode {
pid: p.pid,
name: p.name.clone(),
cpu_self: cpu_sum / snapshots_count,
cpu_inclusive: 0.0,
mem_pss_self: (pss_sum / snapshots_count) as u64,
mem_pss_inclusive: 0,
children: Vec::new(),
})
}).collect();
let ppid_map: HashMap<u32, u32> = last_snap.processes.iter()
.filter_map(|p| p.ppid.map(|ppid| (p.pid, ppid)))
.collect();
let report_tree = build_tree_recursive(&mut tree_nodes, &ppid_map, session.target_pid);
return Some(ProfilingReport {
start_time: session.start_time.to_rfc3339(),
end_time: end_time.to_rfc3339(),
duration_seconds: duration,
snapshots: session.snapshots.iter().map(|s| ReportSnapshot {
timestamp: s.timestamp.to_rfc3339(),
cpu_usage: s.cpu_total,
mem_used: s.mem_used,
top_processes: s.processes.iter()
.filter(|p| p.cpu > 1.0 || (session.target_pid == Some(p.pid)))
.take(20)
.map(|p| ProcessSnapshot { pid: p.pid, name: p.name.clone(), cpu: p.cpu, pss: p.pss })
.collect(),
}).collect(),
target_pid: session.target_pid,
aggregated_tree: report_tree,
});
}
}
None
}
// NOTE(review): this span is a rendered diff hunk in which removed and added
// revision lines are interleaved with no +/- markers: both the old
// `build_process_tree(&sys)` call and the new `raw_processes` pipeline are
// present, the stats lock is assigned twice, and both the old fixed sleep and
// the new drift-compensating sleep appear. It does not compile as rendered;
// code is kept byte-identical here and only annotated.
pub fn start(&self) {
    let data_ref = self.data.clone();
    let running_ref = self.running.clone();
    *running_ref.lock().unwrap() = true;
    let profiling_ref = self.profiling_data.clone();
    thread::spawn(move || {
        let mut sys = System::new_all();
        thread::sleep(Duration::from_millis(500));
        sys.refresh_all();
        // PSS cache: /proc reads are expensive, refreshed at most every 5s.
        let mut pss_cache: HashMap<u32, (u64, Instant)> = HashMap::new();
        loop {
            if !*running_ref.lock().unwrap() {
                break;
            }
            let tick_start = Instant::now();
            sys.refresh_cpu_all();
            sys.refresh_memory();
            sys.refresh_processes(sysinfo::ProcessesToUpdate::All, true);
            let cpu_total = sys.global_cpu_usage();
            let mem_used = sys.used_memory();
            let mem_total = sys.total_memory();
            let process_count = sys.processes().len();
            let now = Utc::now();
            // NOTE(review): old-revision line — build_process_tree was removed.
            let tree = build_process_tree(&sys);
            let raw_processes: Vec<ProcessRawStats> = sys.processes().iter()
                .map(|(pid, p)| {
                    let pid_u32 = pid.as_u32();
                    let rss = p.memory();
                    // Only pay the /proc PSS cost for processes above 10 MiB RSS.
                    let pss = if rss > 10 * 1024 * 1024 {
                        match pss_cache.get(&pid_u32) {
                            Some((val, last)) if last.elapsed() < Duration::from_secs(5) => *val,
                            _ => {
                                let val = get_pss(pid_u32).unwrap_or(rss);
                                pss_cache.insert(pid_u32, (val, Instant::now()));
                                val
                            }
                        }
                    } else {
                        rss
                    };
                    ProcessRawStats {
                        pid: pid_u32,
                        ppid: p.parent().map(|pp| pp.as_u32()),
                        name: p.name().to_string_lossy().to_string(),
                        cpu: p.cpu_usage(),
                        pss,
                    }
                }).collect();
            let mut tree_nodes: HashMap<u32, ProcessNode> = raw_processes.iter().map(|p| {
                (p.pid, ProcessNode {
                    pid: p.pid,
                    name: p.name.clone(),
                    cpu_self: p.cpu,
                    cpu_inclusive: 0.0,
                    mem_pss_self: p.pss,
                    mem_pss_inclusive: 0,
                    children: Vec::new(),
                })
            }).collect();
            let ppid_map: HashMap<u32, u32> = raw_processes.iter()
                .filter_map(|p| p.ppid.map(|ppid| (p.pid, ppid)))
                .collect();
            let smart_tree = build_tree_recursive(&mut tree_nodes, &ppid_map, None);
            let mut prof_lock = profiling_ref.lock().unwrap();
            let is_profiling = prof_lock.is_some();
            if let Some(session) = prof_lock.as_mut() {
                session.snapshots.push(Snapshot {
                    timestamp: now,
                    cpu_total: sys.global_cpu_usage(),
                    mem_used: sys.used_memory(),
                    processes: raw_processes.clone(),
                });
            }
            {
                let mut lock = data_ref.lock().unwrap();
                // NOTE(review): the next five assignments are the old revision…
                lock.cpu_total = cpu_total;
                lock.mem_used = mem_used;
                lock.mem_total = mem_total;
                lock.process_tree = tree;
                lock.process_count = process_count;
                // NOTE(review): …and these six are the new revision of the same block.
                lock.cpu_total = sys.global_cpu_usage();
                lock.mem_used = sys.used_memory();
                lock.mem_total = sys.total_memory();
                lock.smart_tree = smart_tree;
                lock.process_count = raw_processes.len();
                lock.is_profiling = is_profiling;
            }
            // NOTE(review): old fixed 1s sleep, then the new tick-compensated sleep.
            thread::sleep(Duration::from_secs(1));
            let elapsed = tick_start.elapsed();
            if elapsed < Duration::from_secs(1) {
                thread::sleep(Duration::from_secs(1) - elapsed);
            }
        }
    });
}
}
fn build_process_tree(sys: &System) -> Vec<ProcessNode> {
let mut raw_nodes: HashMap<u32, ProcessNode> = HashMap::new();
let mut ppid_map: HashMap<u32, u32> = HashMap::new();
for (pid, proc) in sys.processes() {
let pid_u32 = pid.as_u32();
raw_nodes.insert(pid_u32, ProcessNode {
pid: pid_u32,
name: proc.name().to_string_lossy().to_string(),
cpu_self: proc.cpu_usage(),
cpu_children: 0.0,
mem_rss: proc.memory(),
mem_children: 0,
children: Vec::new(),
});
if let Some(parent) = proc.parent() {
ppid_map.insert(pid_u32, parent.as_u32());
/// Reads a process's PSS (Proportional Set Size) in bytes from
/// `/proc/<pid>/smaps_rollup`.
///
/// Returns `None` when the file is missing/unreadable (process gone, no
/// permission, non-Linux) or when no parseable `Pss:` line is found.
fn get_pss(pid: u32) -> Option<u64> {
    let contents = fs::read_to_string(format!("/proc/{}/smaps_rollup", pid)).ok()?;
    for line in contents.lines().filter(|l| l.starts_with("Pss:")) {
        let kb = line.split_whitespace().nth(1).and_then(|v| v.parse::<u64>().ok());
        if let Some(kb) = kb {
            return Some(kb * 1024); // the kernel reports the value in kB
        }
    }
    None
}
let mut children_map: HashMap<u32, Vec<u32>> = HashMap::new();
for (child, parent) in &ppid_map {
children_map.entry(*parent).or_default().push(*child);
fn build_tree_recursive(
nodes: &mut HashMap<u32, ProcessNode>,
ppid_map: &HashMap<u32, u32>,
target_pid: Option<u32>
) -> Vec<ProcessNode> {
let mut child_map: HashMap<u32, Vec<u32>> = HashMap::new();
for (child, parent) in ppid_map {
child_map.entry(*parent).or_default().push(*child);
}
let roots: Vec<u32> = raw_nodes.keys()
.filter(|pid| !ppid_map.contains_key(pid))
.cloned()
.collect();
let roots: Vec<u32> = if let Some(tpid) = target_pid {
if nodes.contains_key(&tpid) { vec![tpid] } else { vec![] }
} else {
nodes.keys()
.filter(|pid| {
let ppid = ppid_map.get(pid).cloned().unwrap_or(0);
ppid <= 1 || !nodes.contains_key(&ppid)
})
.cloned()
.collect()
};
fn build_recursive(
pid: u32,
nodes_map: &mut HashMap<u32, ProcessNode>,
child_map: &HashMap<u32, Vec<u32>>
) -> Option<ProcessNode> {
if let Some(mut node) = nodes_map.remove(&pid) {
fn assemble(pid: u32, nodes: &mut HashMap<u32, ProcessNode>, child_map: &HashMap<u32, Vec<u32>>) -> Option<ProcessNode> {
if let Some(mut node) = nodes.remove(&pid) {
let children_pids = child_map.get(&pid).cloned().unwrap_or_default();
let mut inc_cpu = node.cpu_self;
let mut inc_mem = node.mem_pss_self;
for c_pid in children_pids {
if let Some(child_node) = build_recursive(c_pid, nodes_map, child_map) {
node.cpu_children += child_node.total_cpu();
node.mem_children += child_node.total_mem();
if let Some(child_node) = assemble(c_pid, nodes, child_map) {
inc_cpu += child_node.cpu_inclusive;
inc_mem += child_node.mem_pss_inclusive;
node.children.push(child_node);
}
}
node.children.sort_by(|a, b| b.total_mem().cmp(&a.total_mem()));
node.cpu_inclusive = inc_cpu;
node.mem_pss_inclusive = inc_mem;
node.children.sort_by(|a, b| b.mem_pss_inclusive.cmp(&a.mem_pss_inclusive));
Some(node)
} else {
None
}
}
let mut tree = Vec::new();
for root_pid in roots {
if let Some(node) = build_recursive(root_pid, &mut raw_nodes, &children_map) {
tree.push(node);
let mut result = Vec::new();
for root in roots {
if let Some(node) = assemble(root, nodes, &child_map) {
result.push(node);
}
}
tree.sort_by(|a, b| b.total_cpu().partial_cmp(&a.total_cpu()).unwrap_or(std::cmp::Ordering::Equal));
tree
result.sort_by(|a, b| b.cpu_inclusive.partial_cmp(&a.cpu_inclusive).unwrap_or(std::cmp::Ordering::Equal));
result
}