// main.rs — SysPulse Tauri backend (export metadata: 472 lines, 14 KiB, Rust).
#![cfg_attr(
|
|
all(not(debug_assertions), target_os = "windows"),
|
|
windows_subsystem = "windows"
|
|
)]
|
|
|
|
use sysinfo::{System, Pid};
|
|
use std::sync::Mutex;
|
|
use std::process::Command;
|
|
use tauri::State;
|
|
use serde::{Serialize, Deserialize};
|
|
use std::collections::HashMap;
|
|
use chrono::{DateTime, Utc};
|
|
use std::fs;
|
|
use rayon::prelude::*;
|
|
use clap::Parser;
|
|
use std::path::PathBuf;
|
|
use std::time::Duration;
|
|
|
|
// --- CLI ---
|
|
|
|
#[derive(Parser, Debug)]
|
|
#[command(author, version, about, long_about = None)]
|
|
struct Cli {
|
|
/// Start profiling immediately without GUI
|
|
#[arg(short, long)]
|
|
headless: bool,
|
|
|
|
/// Duration of profiling in seconds (for headless mode)
|
|
#[arg(short, long, default_value_t = 30)]
|
|
duration: u64,
|
|
|
|
/// Interval between snapshots in milliseconds
|
|
#[arg(short, long, default_value_t = 1000)]
|
|
interval: u64,
|
|
|
|
/// Output path for the JSON report
|
|
#[arg(short, long)]
|
|
output: Option<PathBuf>,
|
|
|
|
/// Open the GUI with the collected data after headless profiling
|
|
#[arg(short, long)]
|
|
gui: bool,
|
|
|
|
/// Open an existing JSON report file in the GUI
|
|
#[arg(short, long)]
|
|
file: Option<PathBuf>,
|
|
}
|
|
|
|
// --- Data Structures ---

/// Live system snapshot payload returned to the frontend by `get_system_stats`.
#[derive(Serialize, Clone)]
struct SystemStats {
    /// Per-core CPU usage percentages (one entry per logical CPU).
    cpu_usage: Vec<f32>,
    /// Total physical memory in bytes.
    total_memory: u64,
    /// Used physical memory in bytes.
    used_memory: u64,
    /// Processes sorted by CPU usage, truncated to 50; empty when the
    /// frontend asked for minimal stats and no recording is active.
    processes: Vec<ProcessStats>,
    /// Whether a profiling session is currently recording snapshots.
    is_recording: bool,
    /// Seconds elapsed since the last `start_profiling` (0 if never started).
    recording_duration: u64,
}
|
|
|
|
/// Per-process sample taken during one snapshot (see `collect_snapshot`).
#[derive(Serialize, Clone, Debug)]
struct ProcessStats {
    pid: u32,
    /// Parent PID, when the process has one.
    parent_pid: Option<u32>,
    name: String,
    /// CPU usage in percent; sysinfo may report >100 on multi-core
    /// machines — TODO confirm against the pinned sysinfo version.
    cpu_usage: f32,
    /// Memory in bytes: PSS from /proc for processes above 10 MiB RSS
    /// (when readable), plain RSS otherwise.
    memory: u64,
    /// Debug-formatted process status (e.g. contains "Zombie").
    status: String,
    /// Owning user id rendered as a string, if known.
    user_id: Option<String>,
    /// True when this process is SysPulse itself or one of its descendants.
    is_syspulse: bool,
}
|
|
|
|
/// One point-in-time sample of the whole system, produced by `collect_snapshot`.
#[derive(Clone)]
struct Snapshot {
    /// UTC wall-clock time the sample was taken.
    timestamp: DateTime<Utc>,
    /// Per-core CPU usage percentages.
    cpu_usage: Vec<f32>,
    /// Used physical memory in bytes.
    used_memory: u64,
    /// Stats for every process that reported non-zero memory.
    processes: Vec<ProcessStats>,
}
|
|
|
|
/// State of the (at most one) recording session.
struct ProfilingSession {
    /// True while `get_system_stats` should append snapshots.
    is_active: bool,
    /// When the current/last session started; None before the first start.
    start_time: Option<DateTime<Utc>>,
    /// Snapshots accumulated during the active session; drained on stop.
    snapshots: Vec<Snapshot>,
}
|
|
|
|
/// Tauri-managed shared state. Each field has its own `Mutex` so commands can
/// lock only what they touch.
struct AppState {
    /// sysinfo handle, reused across samples (CPU usage is a delta between
    /// successive refreshes, so the instance must persist).
    sys: Mutex<System>,
    /// Current profiling session state.
    profiling: Mutex<ProfilingSession>,
    /// Report preloaded at startup (from `--file` or a headless `--gui` run).
    initial_report: Mutex<Option<Report>>,
}
|
|
|
|
// --- Report Structures ---

/// Final aggregated profiling result; serialized to/from JSON on disk.
#[derive(Serialize, Deserialize, Clone)]
struct Report {
    /// Session start time, RFC 3339.
    start_time: String,
    /// Session end time, RFC 3339.
    end_time: String,
    duration_seconds: i64,
    /// System-wide series, one point per snapshot.
    timeline: Vec<TimelinePoint>,
    /// Process forest: root processes with children nested inside.
    aggregated_processes: Vec<AggregatedProcess>,
}
|
|
|
|
/// One system-wide timeline sample in a `Report`.
#[derive(Serialize, Deserialize, Clone)]
struct TimelinePoint {
    /// Snapshot time formatted as "%H:%M:%S".
    time: String,
    /// CPU usage averaged across all cores, in percent.
    avg_cpu: f32,
    /// Used memory in GiB.
    memory_gb: f32,
}
|
|
|
|
/// One per-process history sample in a `Report`, used for charting.
#[derive(Serialize, Deserialize, Clone)]
struct ProcessHistoryPoint {
    /// Snapshot time formatted as "%H:%M:%S".
    time: String,
    /// CPU usage of the process at that snapshot, in percent.
    cpu_usage: f32,
    /// Memory of the process at that snapshot, in MiB.
    memory_mb: f32,
}
|
|
|
|
/// Session-wide aggregate for one process, including its nested children.
#[derive(Serialize, Deserialize, Clone)]
struct AggregatedProcess {
    pid: u32,
    name: String,
    /// Mean CPU percent over the whole session; snapshots where the process
    /// was absent count as zero.
    avg_cpu: f32,
    peak_cpu: f32,
    /// Mean memory in MiB over the whole session (same averaging rule).
    avg_memory_mb: f32,
    peak_memory_mb: f32,
    /// avg_cpu of this node plus all its descendants.
    inclusive_avg_cpu: f32,
    /// avg_memory_mb of this node plus all its descendants.
    inclusive_avg_memory_mb: f32,
    /// NOTE(review): always 1 — nothing in this file ever sets another
    /// value; presumably reserved for grouping same-name processes.
    instance_count: usize,
    /// Human-readable warning flags ("Zombie", "High Peak").
    warnings: Vec<String>,
    /// Per-snapshot samples for this process.
    history: Vec<ProcessHistoryPoint>,
    /// True when this process belongs to SysPulse's own process tree.
    is_syspulse: bool,
    /// Children observed via parent PIDs during the session.
    children: Vec<AggregatedProcess>,
}
|
|
|
|
// --- Helpers ---

/// Return the proportional set size (PSS) of `pid` in bytes, read from
/// `/proc/<pid>/smaps_rollup` (Linux only).
///
/// Yields `None` when the file cannot be read (non-Linux, process gone, or
/// insufficient permissions) or when no parseable "Pss:" line is present.
fn get_pss(pid: u32) -> Option<u64> {
    let rollup = fs::read_to_string(format!("/proc/{}/smaps_rollup", pid)).ok()?;
    rollup
        .lines()
        .filter(|line| line.starts_with("Pss:"))
        // The value is the second whitespace-separated field, in kB.
        .find_map(|line| line.split_whitespace().nth(1)?.parse::<u64>().ok())
        .map(|kb| kb * 1024)
}
|
|
|
|
fn is_syspulse_recursive(pid: u32, self_pid: u32, sys: &System) -> bool {
|
|
if pid == self_pid { return true; }
|
|
let mut current = sys.process(Pid::from_u32(pid));
|
|
while let Some(proc) = current {
|
|
if let Some(ppid) = proc.parent() {
|
|
if ppid.as_u32() == self_pid { return true; }
|
|
current = sys.process(ppid);
|
|
} else {
|
|
break;
|
|
}
|
|
}
|
|
false
|
|
}
|
|
|
|
/// Capture one snapshot of the whole system: per-core CPU usage, used memory,
/// and per-process stats for every process reporting non-zero memory.
///
/// `self_pid` is SysPulse's own PID; each process is tagged with whether it
/// belongs to the SysPulse process tree (see `is_syspulse_recursive`) so the
/// UI can filter out the profiler's own overhead.
fn collect_snapshot(sys: &mut System, self_pid: u32) -> Snapshot {
    // Refresh CPU, memory, and the process table before sampling. NOTE:
    // sysinfo computes cpu_usage() as a delta since the previous refresh, so
    // the very first snapshot after System creation presumably reads ~0% —
    // TODO confirm against the pinned sysinfo version.
    sys.refresh_cpu_all();
    sys.refresh_memory();
    sys.refresh_processes(sysinfo::ProcessesToUpdate::All, true);

    let cpu_usage: Vec<f32> = sys.cpus().iter().map(|cpu| cpu.cpu_usage()).collect();
    let used_memory = sys.used_memory();

    // Build per-process stats in parallel: par_bridge feeds the (serial)
    // HashMap iterator into rayon's thread pool. Zero-RSS processes skipped.
    let processes: Vec<ProcessStats> = sys.processes().iter()
        .par_bridge()
        .filter_map(|(pid, p)| {
            let rss = p.memory();
            if rss == 0 { return None; }
            let pid_u32 = pid.as_u32();
            // For larger processes (>10 MiB RSS) prefer the more accurate
            // PSS from /proc; fall back to RSS when it is unavailable.
            let memory = if rss > 10 * 1024 * 1024 { get_pss(pid_u32).unwrap_or(rss) } else { rss };
            Some(ProcessStats {
                pid: pid_u32,
                parent_pid: p.parent().map(|pp| pp.as_u32()),
                name: p.name().to_string_lossy().to_string(),
                cpu_usage: p.cpu_usage(),
                memory,
                status: format!("{:?}", p.status()),
                user_id: p.user_id().map(|u| u.to_string()),
                // Walks the parent chain per process; O(processes * depth).
                is_syspulse: is_syspulse_recursive(pid_u32, self_pid, &sys),
            })
        }).collect();

    Snapshot {
        timestamp: Utc::now(),
        cpu_usage,
        used_memory,
        processes,
    }
}
|
|
|
|
fn generate_report(start_time: DateTime<Utc>, snapshots: Vec<Snapshot>) -> Report {
|
|
let end_time = Utc::now();
|
|
let duration = (end_time - start_time).num_seconds();
|
|
|
|
let timeline: Vec<TimelinePoint> = snapshots.iter().map(|s| {
|
|
let avg_cpu = s.cpu_usage.iter().sum::<f32>() / s.cpu_usage.len() as f32;
|
|
TimelinePoint {
|
|
time: s.timestamp.format("%H:%M:%S").to_string(),
|
|
avg_cpu,
|
|
memory_gb: s.used_memory as f32 / 1024.0 / 1024.0 / 1024.0,
|
|
}
|
|
}).collect();
|
|
|
|
struct PidStats {
|
|
name: String,
|
|
history: Vec<ProcessHistoryPoint>,
|
|
peak_cpu: f32,
|
|
peak_mem: f32,
|
|
is_syspulse: bool,
|
|
is_zombie: bool,
|
|
}
|
|
|
|
let mut pid_map: HashMap<u32, PidStats> = HashMap::new();
|
|
let num_snapshots = snapshots.len() as f32;
|
|
|
|
for snapshot in &snapshots {
|
|
for proc in &snapshot.processes {
|
|
let entry = pid_map.entry(proc.pid).or_insert_with(|| PidStats {
|
|
name: proc.name.clone(),
|
|
history: Vec::new(),
|
|
peak_cpu: 0.0,
|
|
peak_mem: 0.0,
|
|
is_syspulse: proc.is_syspulse,
|
|
is_zombie: false,
|
|
});
|
|
let mem_mb = proc.memory as f32 / 1024.0 / 1024.0;
|
|
entry.history.push(ProcessHistoryPoint {
|
|
time: snapshot.timestamp.format("%H:%M:%S").to_string(),
|
|
cpu_usage: proc.cpu_usage,
|
|
memory_mb: mem_mb,
|
|
});
|
|
if proc.cpu_usage > entry.peak_cpu { entry.peak_cpu = proc.cpu_usage; }
|
|
if mem_mb > entry.peak_mem { entry.peak_mem = mem_mb; }
|
|
if proc.status.contains("Zombie") { entry.is_zombie = true; }
|
|
}
|
|
}
|
|
|
|
let mut nodes: HashMap<u32, AggregatedProcess> = pid_map.into_iter().map(|(pid, stats)| {
|
|
let total_cpu: f32 = stats.history.iter().map(|h| h.cpu_usage).sum();
|
|
let total_mem: f32 = stats.history.iter().map(|h| h.memory_mb).sum();
|
|
let mut warnings = Vec::new();
|
|
if stats.is_zombie { warnings.push("Zombie".to_string()); }
|
|
if stats.peak_cpu > 80.0 { warnings.push("High Peak".to_string()); }
|
|
(pid, AggregatedProcess {
|
|
pid,
|
|
name: stats.name,
|
|
avg_cpu: total_cpu / num_snapshots,
|
|
peak_cpu: stats.peak_cpu,
|
|
avg_memory_mb: total_mem / num_snapshots,
|
|
peak_memory_mb: stats.peak_mem,
|
|
inclusive_avg_cpu: 0.0,
|
|
inclusive_avg_memory_mb: 0.0,
|
|
instance_count: 1,
|
|
warnings,
|
|
history: stats.history,
|
|
is_syspulse: stats.is_syspulse,
|
|
children: Vec::new(),
|
|
})
|
|
}).collect();
|
|
|
|
let mut child_to_parent = HashMap::new();
|
|
for snapshot in &snapshots {
|
|
for proc in &snapshot.processes {
|
|
if let Some(ppid) = proc.parent_pid {
|
|
if nodes.contains_key(&ppid) {
|
|
child_to_parent.insert(proc.pid, ppid);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
let mut child_map: HashMap<u32, Vec<u32>> = HashMap::new();
|
|
for (&child, &parent) in &child_to_parent {
|
|
child_map.entry(parent).or_default().push(child);
|
|
}
|
|
|
|
let root_pids: Vec<u32> = nodes.keys()
|
|
.filter(|pid| !child_to_parent.contains_key(pid))
|
|
.cloned()
|
|
.collect();
|
|
|
|
fn build_node(pid: u32, nodes: &mut HashMap<u32, AggregatedProcess>, child_map: &HashMap<u32, Vec<u32>>) -> Option<AggregatedProcess> {
|
|
let mut node = nodes.remove(&pid)?;
|
|
let children_pids = child_map.get(&pid).cloned().unwrap_or_default();
|
|
let mut inc_cpu = node.avg_cpu;
|
|
let mut inc_mem = node.avg_memory_mb;
|
|
for c_pid in children_pids {
|
|
if let Some(child_node) = build_node(c_pid, nodes, child_map) {
|
|
inc_cpu += child_node.inclusive_avg_cpu;
|
|
inc_mem += child_node.inclusive_avg_memory_mb;
|
|
node.children.push(child_node);
|
|
}
|
|
}
|
|
node.inclusive_avg_cpu = inc_cpu;
|
|
node.inclusive_avg_memory_mb = inc_mem;
|
|
Some(node)
|
|
}
|
|
|
|
let mut final_roots = Vec::new();
|
|
for pid in root_pids {
|
|
if let Some(root_node) = build_node(pid, &mut nodes, &child_map) {
|
|
final_roots.push(root_node);
|
|
}
|
|
}
|
|
let remaining_pids: Vec<u32> = nodes.keys().cloned().collect();
|
|
for pid in remaining_pids {
|
|
if let Some(node) = build_node(pid, &mut nodes, &child_map) {
|
|
final_roots.push(node);
|
|
}
|
|
}
|
|
final_roots.sort_by(|a, b| b.inclusive_avg_cpu.partial_cmp(&a.inclusive_avg_cpu).unwrap_or(std::cmp::Ordering::Equal));
|
|
|
|
Report {
|
|
start_time: start_time.to_rfc3339(),
|
|
end_time: end_time.to_rfc3339(),
|
|
duration_seconds: duration,
|
|
timeline,
|
|
aggregated_processes: final_roots,
|
|
}
|
|
}
|
|
|
|
// --- Commands ---
|
|
|
|
#[tauri::command]
|
|
fn get_system_stats(state: State<AppState>, minimal: bool) -> SystemStats {
|
|
let mut sys = state.sys.lock().unwrap();
|
|
let mut profiling = state.profiling.lock().unwrap();
|
|
|
|
let snapshot = collect_snapshot(&mut sys, std::process::id());
|
|
|
|
if profiling.is_active {
|
|
profiling.snapshots.push(snapshot.clone());
|
|
}
|
|
|
|
let recording_duration = profiling.start_time.map(|s| (Utc::now() - s).num_seconds() as u64).unwrap_or(0);
|
|
|
|
let display_processes = if minimal && !profiling.is_active {
|
|
Vec::new()
|
|
} else {
|
|
let mut p = snapshot.processes.clone();
|
|
p.sort_by(|a, b| b.cpu_usage.partial_cmp(&a.cpu_usage).unwrap_or(std::cmp::Ordering::Equal));
|
|
p.truncate(50);
|
|
p
|
|
};
|
|
|
|
SystemStats {
|
|
cpu_usage: snapshot.cpu_usage,
|
|
total_memory: sys.total_memory(),
|
|
used_memory: sys.used_memory(),
|
|
processes: display_processes,
|
|
is_recording: profiling.is_active,
|
|
recording_duration,
|
|
}
|
|
}
|
|
|
|
#[tauri::command]
|
|
fn get_initial_report(state: State<AppState>) -> Option<Report> {
|
|
state.initial_report.lock().unwrap().clone()
|
|
}
|
|
|
|
#[tauri::command]
|
|
fn save_report(report: Report) -> Result<String, String> {
|
|
let json = serde_json::to_string_pretty(&report).map_err(|e| e.to_string())?;
|
|
let path = format!("syspulse_report_{}.json", Utc::now().format("%Y%m%d_%H%M%S"));
|
|
std::fs::write(&path, json).map_err(|e| e.to_string())?;
|
|
Ok(path)
|
|
}
|
|
|
|
#[tauri::command]
|
|
fn start_profiling(state: State<AppState>) {
|
|
let mut profiling = state.profiling.lock().unwrap();
|
|
profiling.is_active = true;
|
|
profiling.start_time = Some(Utc::now());
|
|
profiling.snapshots.clear();
|
|
}
|
|
|
|
#[tauri::command]
|
|
fn stop_profiling(state: State<AppState>) -> Report {
|
|
let mut profiling = state.profiling.lock().unwrap();
|
|
profiling.is_active = false;
|
|
generate_report(profiling.start_time.unwrap_or(Utc::now()), profiling.snapshots.drain(..).collect())
|
|
}
|
|
|
|
#[tauri::command]
|
|
fn run_as_admin(command: String) -> Result<String, String> {
|
|
let output = Command::new("pkexec")
|
|
.arg("sh")
|
|
.arg("-c")
|
|
.arg(&command)
|
|
.output()
|
|
.map_err(|e| e.to_string())?;
|
|
if output.status.success() {
|
|
Ok(String::from_utf8_lossy(&output.stdout).to_string())
|
|
} else {
|
|
Err(String::from_utf8_lossy(&output.stderr).to_string())
|
|
}
|
|
}
|
|
|
|
fn main() {
|
|
let cli = Cli::parse();
|
|
let mut initial_report: Option<Report> = None;
|
|
|
|
if let Some(file_path) = cli.file {
|
|
if let Ok(content) = fs::read_to_string(file_path) {
|
|
if let Ok(report) = serde_json::from_str(&content) {
|
|
initial_report = Some(report);
|
|
}
|
|
}
|
|
}
|
|
|
|
if cli.headless {
|
|
println!("⚡ SysPulse: Starting headless profiling for {}s (interval: {}ms)...", cli.duration, cli.interval);
|
|
let mut sys = System::new_all();
|
|
let start_time = Utc::now();
|
|
let mut snapshots = Vec::new();
|
|
let self_pid = std::process::id();
|
|
|
|
for i in 0..(cli.duration * 1000 / cli.interval) {
|
|
snapshots.push(collect_snapshot(&mut sys, self_pid));
|
|
std::thread::sleep(Duration::from_millis(cli.interval));
|
|
if (i + 1) % (1000 / cli.interval) == 0 {
|
|
println!(" Progress: {}/{}s", (i + 1) * cli.interval / 1000, cli.duration);
|
|
}
|
|
}
|
|
|
|
let report = generate_report(start_time, snapshots);
|
|
let json = serde_json::to_string_pretty(&report).unwrap();
|
|
let out_path = cli.output.unwrap_or_else(|| PathBuf::from(format!("syspulse_report_{}.json", Utc::now().format("%Y%m%d_%H%M%S"))));
|
|
fs::write(&out_path, json).expect("Failed to write report");
|
|
println!("✅ Report saved to: {:?}", out_path);
|
|
|
|
if cli.gui {
|
|
initial_report = Some(report);
|
|
} else {
|
|
return;
|
|
}
|
|
}
|
|
|
|
tauri::Builder::default()
|
|
.manage(AppState {
|
|
sys: Mutex::new(System::new_all()),
|
|
profiling: Mutex::new(ProfilingSession {
|
|
is_active: false,
|
|
start_time: None,
|
|
snapshots: Vec::new(),
|
|
}),
|
|
initial_report: Mutex::new(initial_report),
|
|
})
|
|
.invoke_handler(tauri::generate_handler![
|
|
get_system_stats,
|
|
get_initial_report,
|
|
start_profiling,
|
|
stop_profiling,
|
|
run_as_admin,
|
|
save_report
|
|
])
|
|
.run(tauri::generate_context!())
|
|
.expect("error while running tauri application");
|
|
}
|