refactor: modularize backend and frontend for better maintainability

This commit is contained in:
2026-02-23 18:51:32 +01:00
parent 34aac2da62
commit 80820013d0
5 changed files with 459 additions and 406 deletions

19
src-tauri/src/cli.rs Normal file
View File

@@ -0,0 +1,19 @@
use clap::Parser;
use std::path::PathBuf;

/// Command-line options for SysPulse.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
pub struct Cli {
    /// Run a profiling session without starting the GUI.
    // No short flag: `-h` would collide with clap's auto-generated help flag,
    // which makes clap panic at startup (debug assertion on duplicate shorts).
    #[arg(long)]
    pub headless: bool,
    /// Total profiling duration in seconds (headless mode).
    #[arg(short, long, default_value_t = 30)]
    pub duration: u64,
    /// Sampling interval in milliseconds.
    #[arg(short, long, default_value_t = 1000)]
    pub interval: u64,
    /// Where to write the headless report (defaults to a timestamped file).
    #[arg(short, long)]
    pub output: Option<PathBuf>,
    /// Open the GUI after a headless run completes.
    #[arg(short, long)]
    pub gui: bool,
    /// Load an existing report file into the GUI at startup.
    #[arg(short, long)]
    pub file: Option<PathBuf>,
}

61
src-tauri/src/commands.rs Normal file
View File

@@ -0,0 +1,61 @@
use tauri::State;
use crate::models::*;
use crate::profiler::*;
use chrono::Utc;
use std::process::Command as StdCommand;
#[tauri::command]
pub fn get_system_stats(state: State<AppState>, minimal: bool) -> SystemStats {
    // Lock order: sys, then profiling, then pss_cache. Keep that order anywhere
    // these mutexes are taken together to avoid deadlocks.
    let mut sys = state.sys.lock().unwrap();
    let mut profiling = state.profiling.lock().unwrap();
    let mut pss_cache = state.pss_cache.lock().unwrap();
    let self_pid = std::process::id();
    // Our own process tree, so samples can flag profiler overhead.
    let syspulse_pids = get_syspulse_pids(self_pid, &sys);
    // Expensive PSS reads from /proc are only enabled while recording.
    let snapshot = collect_snapshot(&mut sys, &syspulse_pids, &mut pss_cache, profiling.is_active);
    if profiling.is_active { profiling.snapshots.push(snapshot.clone()); }
    // Falls back to 0 when no session has ever set start_time.
    let recording_duration = profiling.start_time.map(|s| (Utc::now() - s).num_seconds() as u64).unwrap_or(0);
    // Minimal mode (while idle) sends only the gauges and no process list;
    // otherwise send the top 50 processes by CPU usage.
    let display_processes = if minimal && !profiling.is_active { Vec::new() } else {
        let mut p = snapshot.processes.clone();
        p.sort_by(|a, b| b.cpu_usage.partial_cmp(&a.cpu_usage).unwrap_or(std::cmp::Ordering::Equal));
        p.truncate(50);
        p
    };
    SystemStats { cpu_usage: snapshot.cpu_usage, total_memory: sys.total_memory(), used_memory: sys.used_memory(), processes: display_processes, is_recording: profiling.is_active, recording_duration }
}
#[tauri::command]
pub fn get_initial_report(state: State<AppState>) -> Option<Report> {
    // Return a clone of whatever report was preloaded at startup, if any.
    let guard = state.initial_report.lock().unwrap();
    guard.clone()
}
#[tauri::command]
pub fn save_report(report: Report) -> Result<String, String> {
    // Serialize the report and write it to the working directory under a
    // timestamped name; return the path on success, an error string otherwise.
    let filename = format!("syspulse_report_{}.json", Utc::now().format("%Y%m%d_%H%M%S"));
    let pretty = serde_json::to_string_pretty(&report).map_err(|e| e.to_string())?;
    match std::fs::write(&filename, pretty) {
        Ok(()) => Ok(filename),
        Err(e) => Err(e.to_string()),
    }
}
#[tauri::command]
pub fn start_profiling(state: State<AppState>) {
    // Begin a fresh system-wide recording session, discarding old samples.
    let mut session = state.profiling.lock().unwrap();
    session.is_active = true;
    session.mode = ProfilingMode::Global;
    session.target_pid = None;
    session.start_time = Some(Utc::now());
    session.snapshots.clear();
}
#[tauri::command]
pub fn start_targeted_profiling(state: State<AppState>, pid: u32) {
    // Begin a fresh recording session focused on a single process.
    let mut session = state.profiling.lock().unwrap();
    session.is_active = true;
    session.mode = ProfilingMode::Targeted;
    session.target_pid = Some(pid);
    session.start_time = Some(Utc::now());
    session.snapshots.clear();
}
#[tauri::command]
pub fn stop_profiling(state: State<AppState>) -> Report {
    // End the active session and build the final report from the buffered snapshots.
    let mut profiling = state.profiling.lock().unwrap();
    profiling.is_active = false;
    // take() clears start_time so get_system_stats stops reporting a stale,
    // ever-growing recording_duration after the session ends; unwrap_or_else
    // avoids evaluating Utc::now() in the common Some case.
    let started = profiling.start_time.take().unwrap_or_else(Utc::now);
    // drain(..) empties the buffer while keeping its capacity for the next run.
    let snapshots: Vec<Snapshot> = profiling.snapshots.drain(..).collect();
    generate_report(started, snapshots, profiling.mode, profiling.target_pid)
}
#[tauri::command]
pub fn run_as_admin(command: String) -> Result<String, String> {
    // Linux-only: pkexec shows a polkit auth dialog, then runs the command as root.
    // SECURITY NOTE(review): `command` is passed verbatim to `sh -c`, so the
    // caller-supplied string executes as a root shell command by design —
    // ensure only trusted frontend code can invoke this command.
    let output = StdCommand::new("pkexec").arg("sh").arg("-c").arg(&command).output().map_err(|e| e.to_string())?;
    // Exit status decides Ok(stdout) vs Err(stderr), both lossily decoded as UTF-8.
    if output.status.success() { Ok(String::from_utf8_lossy(&output.stdout).to_string()) } else { Err(String::from_utf8_lossy(&output.stderr).to_string()) }
}

View File

@@ -3,436 +3,87 @@
windows_subsystem = "windows" windows_subsystem = "windows"
)] )]
use sysinfo::{System, Pid}; mod models;
mod cli;
mod profiler;
mod commands;
use sysinfo::System;
use std::sync::Mutex; use std::sync::Mutex;
use std::process::Command; use chrono::Utc;
use tauri::State;
use serde::{Serialize, Deserialize};
use std::collections::{HashMap, HashSet};
use chrono::{DateTime, Utc};
use std::fs; use std::fs;
use clap::Parser; use clap::Parser;
use std::path::PathBuf; use std::path::PathBuf;
use std::time::Duration; use std::time::Duration;
use std::collections::HashMap;
// --- CLI --- use crate::models::*;
use crate::cli::Cli;
#[derive(Parser, Debug)] use crate::profiler::*;
#[command(author, version, about, long_about = None)] use crate::commands::*;
struct Cli {
#[arg(short, long)]
headless: bool,
#[arg(short, long, default_value_t = 30)]
duration: u64,
#[arg(short, long, default_value_t = 1000)]
interval: u64,
#[arg(short, long)]
output: Option<PathBuf>,
#[arg(short, long)]
gui: bool,
#[arg(short, long)]
file: Option<PathBuf>,
}
// --- Data Structures ---
#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
enum ProfilingMode {
Global,
Targeted,
}
#[derive(Serialize, Clone)]
struct SystemStats {
cpu_usage: Vec<f32>,
total_memory: u64,
used_memory: u64,
processes: Vec<ProcessStats>,
is_recording: bool,
recording_duration: u64,
}
#[derive(Serialize, Clone, Debug)]
struct ProcessStats {
pid: u32,
parent_pid: Option<u32>,
name: String,
cpu_usage: f32,
memory: u64,
status: String,
user_id: Option<String>,
is_syspulse: bool,
}
#[derive(Clone)]
struct Snapshot {
timestamp: DateTime<Utc>,
cpu_usage: Vec<f32>,
used_memory: u64,
processes: Vec<ProcessStats>,
}
struct ProfilingSession {
is_active: bool,
mode: ProfilingMode,
target_pid: Option<u32>,
start_time: Option<DateTime<Utc>>,
snapshots: Vec<Snapshot>,
}
struct AppState {
sys: Mutex<System>,
profiling: Mutex<ProfilingSession>,
initial_report: Mutex<Option<Report>>,
pss_cache: Mutex<HashMap<u32, (u64, DateTime<Utc>)>>,
}
// --- Report Structures ---
#[derive(Serialize, Deserialize, Clone)]
struct Report {
start_time: String,
end_time: String,
duration_seconds: i64,
mode: ProfilingMode,
target_name: Option<String>,
timeline: Vec<TimelinePoint>,
aggregated_processes: Vec<AggregatedProcess>,
}
#[derive(Serialize, Deserialize, Clone)]
struct TimelinePoint {
time: String,
cpu_total: f32,
mem_total_gb: f32,
cpu_profiler: f32,
mem_profiler_gb: f32,
}
#[derive(Serialize, Deserialize, Clone)]
struct ProcessHistoryPoint {
time: String,
cpu_usage: f32,
memory_mb: f32,
}
#[derive(Serialize, Deserialize, Clone)]
struct AggregatedProcess {
pid: u32,
name: String,
avg_cpu: f32,
peak_cpu: f32,
avg_memory_mb: f32,
peak_memory_mb: f32,
inclusive_avg_cpu: f32,
inclusive_avg_memory_mb: f32,
instance_count: usize,
warnings: Vec<String>,
history: Vec<ProcessHistoryPoint>,
is_syspulse: bool,
children: Vec<AggregatedProcess>,
}
// --- Helpers ---
fn get_pss(pid: u32) -> Option<u64> {
let path = format!("/proc/{}/smaps_rollup", pid);
if let Ok(contents) = fs::read_to_string(path) {
for line in contents.lines() {
if line.starts_with("Pss:") {
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() >= 2 {
if let Ok(kb) = parts[1].parse::<u64>() {
return Some(kb * 1024);
}
}
}
}
}
None
}
fn get_syspulse_pids(self_pid: u32, sys: &System) -> HashSet<u32> {
let mut res = HashSet::new();
res.insert(self_pid);
let mut children_map: HashMap<u32, Vec<u32>> = HashMap::new();
for (pid, p) in sys.processes() {
if let Some(ppid) = p.parent() {
children_map.entry(ppid.as_u32()).or_default().push(pid.as_u32());
}
}
let mut stack = vec![self_pid];
while let Some(pid) = stack.pop() {
if let Some(children) = children_map.get(&pid) {
for &c in children {
if res.insert(c) {
stack.push(c);
}
}
}
}
res
}
fn collect_snapshot(
sys: &mut System,
syspulse_pids: &HashSet<u32>,
pss_cache: &mut HashMap<u32, (u64, DateTime<Utc>)>,
collect_pss: bool
) -> Snapshot {
sys.refresh_cpu_all();
sys.refresh_memory();
sys.refresh_processes(sysinfo::ProcessesToUpdate::All, true);
let cpu_usage: Vec<f32> = sys.cpus().iter().map(|cpu| cpu.cpu_usage()).collect();
let used_memory = sys.used_memory();
let now = Utc::now();
let processes: Vec<ProcessStats> = sys.processes().iter()
.map(|(pid, p)| {
let pid_u32 = pid.as_u32();
let rss = p.memory();
let is_syspulse = syspulse_pids.contains(&pid_u32);
let mut memory = rss;
if collect_pss && (rss > 20 * 1024 * 1024 || is_syspulse) {
let needs_refresh = match pss_cache.get(&pid_u32) {
Some((_, last)) => (now - *last).num_seconds() > 5,
None => true,
};
if needs_refresh {
if let Some(pss) = get_pss(pid_u32) {
pss_cache.insert(pid_u32, (pss, now));
memory = pss;
}
} else if let Some((cached_pss, _)) = pss_cache.get(&pid_u32) {
memory = *cached_pss;
}
}
ProcessStats {
pid: pid_u32,
parent_pid: p.parent().map(|pp| pp.as_u32()),
name: p.name().to_string_lossy().to_string(),
cpu_usage: p.cpu_usage(),
memory,
status: format!("{:?}", p.status()),
user_id: p.user_id().map(|u| u.to_string()),
is_syspulse,
}
}).collect();
Snapshot {
timestamp: now,
cpu_usage,
used_memory,
processes,
}
}
fn generate_report(start_time: DateTime<Utc>, snapshots: Vec<Snapshot>, mode: ProfilingMode, target_pid: Option<u32>) -> Report {
let end_time = Utc::now();
let duration = (end_time - start_time).num_seconds();
let timeline: Vec<TimelinePoint> = snapshots.iter().map(|s| {
let cpu_total = s.cpu_usage.iter().sum::<f32>() / s.cpu_usage.len() as f32;
let mem_total_gb = s.used_memory as f32 / 1024.0 / 1024.0 / 1024.0;
let profiler_stats = s.processes.iter().filter(|p| p.is_syspulse).fold((0.0, 0), |acc, p| (acc.0 + p.cpu_usage, acc.1 + p.memory));
TimelinePoint {
time: s.timestamp.format("%H:%M:%S").to_string(),
cpu_total,
mem_total_gb,
cpu_profiler: profiler_stats.0 / s.cpu_usage.len() as f32,
mem_profiler_gb: profiler_stats.1 as f32 / 1024.0 / 1024.0 / 1024.0,
}
}).collect();
let mut target_name = None;
if let Some(tpid) = target_pid {
if let Some(snapshot) = snapshots.first() {
if let Some(p) = snapshot.processes.iter().find(|p| p.pid == tpid) { target_name = Some(p.name.clone()); }
}
}
let mut pid_map: HashMap<u32, (String, Option<u32>, Vec<ProcessHistoryPoint>, f32, f32, bool, bool)> = HashMap::new();
let num_snapshots = snapshots.len() as f32;
for snapshot in &snapshots {
for proc in &snapshot.processes {
let entry = pid_map.entry(proc.pid).or_insert_with(|| (proc.name.clone(), proc.parent_pid, Vec::new(), 0.0, 0.0, proc.is_syspulse, false));
let mem_mb = proc.memory as f32 / 1024.0 / 1024.0;
entry.2.push(ProcessHistoryPoint { time: snapshot.timestamp.format("%H:%M:%S").to_string(), cpu_usage: proc.cpu_usage, memory_mb: mem_mb });
if proc.cpu_usage > entry.3 { entry.3 = proc.cpu_usage; }
if mem_mb > entry.4 { entry.4 = mem_mb; }
if proc.status.contains("Zombie") { entry.6 = true; }
}
}
let mut nodes: HashMap<u32, AggregatedProcess> = pid_map.into_iter().map(|(pid, (name, _, history, peak_cpu, peak_mem, is_syspulse, is_zombie))| {
let total_cpu: f32 = history.iter().map(|h| h.cpu_usage).sum();
let total_mem: f32 = history.iter().map(|h| h.memory_mb).sum();
let mut warnings = Vec::new();
if is_zombie { warnings.push("Zombie".to_string()); }
if peak_cpu > 80.0 { warnings.push("High Peak".to_string()); }
(pid, AggregatedProcess { pid, name, avg_cpu: total_cpu / num_snapshots, peak_cpu, avg_memory_mb: total_mem / num_snapshots, peak_memory_mb: peak_mem, inclusive_avg_cpu: 0.0, inclusive_avg_memory_mb: 0.0, instance_count: 1, warnings, history, is_syspulse, children: Vec::new() })
}).collect();
let mut child_to_parent = HashMap::new();
for snapshot in &snapshots {
for proc in &snapshot.processes {
if let Some(ppid) = proc.parent_pid { if nodes.contains_key(&ppid) { child_to_parent.insert(proc.pid, ppid); } }
}
}
let mut child_map: HashMap<u32, Vec<u32>> = HashMap::new();
for (&child, &parent) in &child_to_parent { child_map.entry(parent).or_default().push(child); }
let root_pids: Vec<u32> = nodes.keys().filter(|pid| !child_to_parent.contains_key(pid)).cloned().collect();
fn build_node(pid: u32, nodes: &mut HashMap<u32, AggregatedProcess>, child_map: &HashMap<u32, Vec<u32>>) -> Option<AggregatedProcess> {
let mut node = nodes.remove(&pid)?;
let children_pids = child_map.get(&pid).cloned().unwrap_or_default();
let mut inc_cpu = node.avg_cpu;
let mut inc_mem = node.avg_memory_mb;
for c_pid in children_pids {
if let Some(child_node) = build_node(c_pid, nodes, child_map) {
inc_cpu += child_node.inclusive_avg_cpu;
inc_mem += child_node.inclusive_avg_memory_mb;
node.children.push(child_node);
}
}
node.inclusive_avg_cpu = inc_cpu;
node.inclusive_avg_memory_mb = inc_mem;
Some(node)
}
let mut final_roots = Vec::new();
for pid in root_pids { if let Some(root_node) = build_node(pid, &mut nodes, &child_map) { final_roots.push(root_node); } }
let remaining_pids: Vec<u32> = nodes.keys().cloned().collect();
for pid in remaining_pids { if let Some(node) = build_node(pid, &mut nodes, &child_map) { final_roots.push(node); } }
if mode == ProfilingMode::Global {
let mut name_groups: HashMap<String, AggregatedProcess> = HashMap::new();
fn flatten_to_groups(node: AggregatedProcess, groups: &mut HashMap<String, AggregatedProcess>) {
let entry = groups.entry(node.name.clone()).or_insert_with(|| {
let mut base = node.clone();
base.children = Vec::new(); base.instance_count = 0; base.inclusive_avg_cpu = 0.0; base.inclusive_avg_memory_mb = 0.0; base.avg_cpu = 0.0; base.avg_memory_mb = 0.0;
base
});
entry.avg_cpu += node.avg_cpu; entry.avg_memory_mb += node.avg_memory_mb; entry.instance_count += 1;
if node.peak_cpu > entry.peak_cpu { entry.peak_cpu = node.peak_cpu; }
if node.peak_memory_mb > entry.peak_memory_mb { entry.peak_memory_mb = node.peak_memory_mb; }
for child in node.children { flatten_to_groups(child, groups); }
}
for root in final_roots { flatten_to_groups(root, &mut name_groups); }
let mut flattened: Vec<AggregatedProcess> = name_groups.into_values().collect();
flattened.sort_by(|a, b| b.avg_cpu.partial_cmp(&a.avg_cpu).unwrap_or(std::cmp::Ordering::Equal));
Report { start_time: start_time.to_rfc3339(), end_time: end_time.to_rfc3339(), duration_seconds: duration, mode, target_name, timeline, aggregated_processes: flattened }
} else {
let mut targeted_roots = Vec::new();
if let Some(tpid) = target_pid {
fn extract_target(nodes: Vec<AggregatedProcess>, tpid: u32) -> (Vec<AggregatedProcess>, Option<AggregatedProcess>) {
let mut remaining = Vec::new(); let mut found = None;
for mut node in nodes {
if node.pid == tpid { found = Some(node); }
else {
let (new_children, sub_found) = extract_target(node.children, tpid);
node.children = new_children;
if let Some(f) = sub_found { found = Some(f); }
remaining.push(node);
}
}
(remaining, found)
}
let (_, target_node) = extract_target(final_roots, tpid);
if let Some(t) = target_node { targeted_roots.push(t); }
}
Report { start_time: start_time.to_rfc3339(), end_time: end_time.to_rfc3339(), duration_seconds: duration, mode, target_name, timeline, aggregated_processes: targeted_roots }
}
}
// --- Commands ---
#[tauri::command]
fn get_system_stats(state: State<AppState>, minimal: bool) -> SystemStats {
let mut sys = state.sys.lock().unwrap();
let mut profiling = state.profiling.lock().unwrap();
let mut pss_cache = state.pss_cache.lock().unwrap();
let self_pid = std::process::id();
let syspulse_pids = get_syspulse_pids(self_pid, &sys);
let snapshot = collect_snapshot(&mut sys, &syspulse_pids, &mut pss_cache, profiling.is_active);
if profiling.is_active { profiling.snapshots.push(snapshot.clone()); }
let recording_duration = profiling.start_time.map(|s| (Utc::now() - s).num_seconds() as u64).unwrap_or(0);
let display_processes = if minimal && !profiling.is_active { Vec::new() } else {
let mut p = snapshot.processes.clone();
p.sort_by(|a, b| b.cpu_usage.partial_cmp(&a.cpu_usage).unwrap_or(std::cmp::Ordering::Equal));
p.truncate(50);
p
};
SystemStats { cpu_usage: snapshot.cpu_usage, total_memory: sys.total_memory(), used_memory: sys.used_memory(), processes: display_processes, is_recording: profiling.is_active, recording_duration }
}
#[tauri::command]
fn get_initial_report(state: State<AppState>) -> Option<Report> { state.initial_report.lock().unwrap().clone() }
#[tauri::command]
fn save_report(report: Report) -> Result<String, String> {
let json = serde_json::to_string_pretty(&report).map_err(|e| e.to_string())?;
let path = format!("syspulse_report_{}.json", Utc::now().format("%Y%m%d_%H%M%S"));
std::fs::write(&path, json).map_err(|e| e.to_string())?;
Ok(path)
}
#[tauri::command]
fn start_profiling(state: State<AppState>) {
let mut profiling = state.profiling.lock().unwrap();
profiling.is_active = true; profiling.mode = ProfilingMode::Global; profiling.target_pid = None; profiling.start_time = Some(Utc::now()); profiling.snapshots.clear();
}
#[tauri::command]
fn start_targeted_profiling(state: State<AppState>, pid: u32) {
let mut profiling = state.profiling.lock().unwrap();
profiling.is_active = true; profiling.mode = ProfilingMode::Targeted; profiling.target_pid = Some(pid); profiling.start_time = Some(Utc::now()); profiling.snapshots.clear();
}
#[tauri::command]
fn stop_profiling(state: State<AppState>) -> Report {
let mut profiling = state.profiling.lock().unwrap();
profiling.is_active = false;
let snapshots: Vec<Snapshot> = profiling.snapshots.drain(..).collect();
generate_report(profiling.start_time.unwrap_or(Utc::now()), snapshots, profiling.mode, profiling.target_pid)
}
#[tauri::command]
fn run_as_admin(command: String) -> Result<String, String> {
let output = Command::new("pkexec").arg("sh").arg("-c").arg(&command).output().map_err(|e| e.to_string())?;
if output.status.success() { Ok(String::from_utf8_lossy(&output.stdout).to_string()) } else { Err(String::from_utf8_lossy(&output.stderr).to_string()) }
}
fn main() { fn main() {
let cli = Cli::parse(); let cli = Cli::parse();
let mut initial_report: Option<Report> = None; let mut initial_report: Option<Report> = None;
if let Some(file_path) = cli.file { if let Ok(content) = fs::read_to_string(file_path) { if let Ok(report) = serde_json::from_str(&content) { initial_report = Some(report); } } }
if let Some(file_path) = cli.file {
if let Ok(content) = fs::read_to_string(file_path) {
if let Ok(report) = serde_json::from_str(&content) {
initial_report = Some(report);
}
}
}
if cli.headless { if cli.headless {
println!("⚡ SysPulse: Starting headless profiling for {}s (interval: {}ms)...", cli.duration, cli.interval); println!("⚡ SysPulse: Starting headless profiling for {}s (interval: {}ms)...", cli.duration, cli.interval);
let mut sys = System::new_all(); let mut pss_cache = HashMap::new(); let start_time = Utc::now(); let mut snapshots = Vec::new(); let self_pid = std::process::id(); let mut sys = System::new_all();
let mut pss_cache = HashMap::new();
let start_time = Utc::now();
let mut snapshots = Vec::new();
let self_pid = std::process::id();
for i in 0..(cli.duration * 1000 / cli.interval) { for i in 0..(cli.duration * 1000 / cli.interval) {
let syspulse_pids = get_syspulse_pids(self_pid, &sys); let syspulse_pids = get_syspulse_pids(self_pid, &sys);
snapshots.push(collect_snapshot(&mut sys, &syspulse_pids, &mut pss_cache, true)); snapshots.push(collect_snapshot(&mut sys, &syspulse_pids, &mut pss_cache, true));
std::thread::sleep(Duration::from_millis(cli.interval)); std::thread::sleep(Duration::from_millis(cli.interval));
if (i + 1) % (1000 / cli.interval) == 0 { println!(" Progress: {}/{}s", (i + 1) * cli.interval / 1000, cli.duration); } if (i + 1) % (1000 / cli.interval) == 0 {
println!(" Progress: {}/{}s", (i + 1) * cli.interval / 1000, cli.duration);
}
} }
let report = generate_report(start_time, snapshots, ProfilingMode::Global, None); let report = generate_report(start_time, snapshots, ProfilingMode::Global, None);
let json = serde_json::to_string_pretty(&report).unwrap(); let json = serde_json::to_string_pretty(&report).unwrap();
let out_path = cli.output.unwrap_or_else(|| PathBuf::from(format!("syspulse_report_{}.json", Utc::now().format("%Y%m%d_%H%M%S")))); let out_path = cli.output.unwrap_or_else(|| PathBuf::from(format!("syspulse_report_{}.json", Utc::now().format("%Y%m%d_%H%M%S"))));
fs::write(&out_path, json).expect("Failed to write report"); fs::write(&out_path, json).expect("Failed to write report");
println!("✅ Report saved to: {:?}", out_path); println!("✅ Report saved to: {:?}", out_path);
if cli.gui { initial_report = Some(report); } else { return; } if cli.gui {
initial_report = Some(report);
} else {
return;
}
} }
tauri::Builder::default() tauri::Builder::default()
.manage(AppState { sys: Mutex::new(System::new_all()), profiling: Mutex::new(ProfilingSession { is_active: false, mode: ProfilingMode::Global, target_pid: None, start_time: None, snapshots: Vec::new() }), initial_report: Mutex::new(initial_report), pss_cache: Mutex::new(HashMap::new()) }) .manage(AppState {
.invoke_handler(tauri::generate_handler![get_system_stats, get_initial_report, start_profiling, start_targeted_profiling, stop_profiling, run_as_admin, save_report]) sys: Mutex::new(System::new_all()),
profiling: Mutex::new(ProfilingSession {
is_active: false,
mode: ProfilingMode::Global,
target_pid: None,
start_time: None,
snapshots: Vec::new()
}),
initial_report: Mutex::new(initial_report),
pss_cache: Mutex::new(HashMap::new())
})
.invoke_handler(tauri::generate_handler![
get_system_stats,
get_initial_report,
start_profiling,
start_targeted_profiling,
stop_profiling,
run_as_admin,
save_report
])
.run(tauri::generate_context!()) .run(tauri::generate_context!())
.expect("error while running tauri application"); .expect("error while running tauri application");
} }

100
src-tauri/src/models.rs Normal file
View File

@@ -0,0 +1,100 @@
use serde::{Serialize, Deserialize};
use chrono::{DateTime, Utc};
use std::sync::Mutex;
use sysinfo::System;
use std::collections::HashMap;
/// Which processes a profiling session records.
// Eq is free here (fieldless enum) and makes the type usable as a map key etc.
#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Debug)]
pub enum ProfilingMode {
    /// Record every process on the system.
    Global,
    /// Record a single target process.
    Targeted,
}
/// Live stats payload returned to the frontend on every poll.
// Debug added: all fields already implement it, and public types should be Debug.
#[derive(Serialize, Clone, Debug)]
pub struct SystemStats {
    /// Per-core CPU usage percentages.
    pub cpu_usage: Vec<f32>,
    /// Total physical memory in bytes.
    pub total_memory: u64,
    /// Used memory in bytes.
    pub used_memory: u64,
    /// Process list for display (may be empty in minimal mode).
    pub processes: Vec<ProcessStats>,
    /// True while a profiling session is recording.
    pub is_recording: bool,
    /// Seconds since the session started (0 when idle).
    pub recording_duration: u64,
}
/// Per-process sample used both for live display and recorded snapshots.
#[derive(Serialize, Clone, Debug)]
pub struct ProcessStats {
    pub pid: u32,
    /// Parent pid when the OS reports one (used to build process trees).
    pub parent_pid: Option<u32>,
    pub name: String,
    /// CPU usage percent as reported by sysinfo.
    pub cpu_usage: f32,
    /// Memory in bytes: RSS, or PSS when PSS collection is active.
    pub memory: u64,
    /// Debug-formatted sysinfo process status (checked for "Zombie" in reports).
    pub status: String,
    pub user_id: Option<String>,
    /// True when the process belongs to SysPulse's own process tree.
    pub is_syspulse: bool,
}
/// One point-in-time capture of system state, taken once per poll.
// Debug added: all fields already implement it, and it aids diagnostics.
#[derive(Clone, Debug)]
pub struct Snapshot {
    pub timestamp: DateTime<Utc>,
    /// Per-core CPU usage percentages.
    pub cpu_usage: Vec<f32>,
    /// Used memory in bytes.
    pub used_memory: u64,
    pub processes: Vec<ProcessStats>,
}
/// Mutable state of the current (or most recent) recording session.
pub struct ProfilingSession {
    /// True while snapshots are being accumulated.
    pub is_active: bool,
    pub mode: ProfilingMode,
    /// Pid being profiled when `mode` is `Targeted`.
    pub target_pid: Option<u32>,
    /// When the session started; `None` before any session has run.
    pub start_time: Option<DateTime<Utc>>,
    /// Samples collected so far for the session.
    pub snapshots: Vec<Snapshot>,
}
/// Shared Tauri-managed application state. Each field has its own mutex;
/// command handlers may lock several of them at once.
pub struct AppState {
    pub sys: Mutex<System>,
    pub profiling: Mutex<ProfilingSession>,
    /// Report preloaded at startup (e.g. from a CLI-supplied file), if any.
    pub initial_report: Mutex<Option<Report>>,
    /// pid -> (PSS bytes, time of last read); throttles /proc reads.
    pub pss_cache: Mutex<HashMap<u32, (u64, DateTime<Utc>)>>,
}
/// Final profiling result: a system-wide timeline plus per-process aggregates.
#[derive(Serialize, Deserialize, Clone)]
pub struct Report {
    /// RFC 3339 session start time.
    pub start_time: String,
    /// RFC 3339 session end time.
    pub end_time: String,
    pub duration_seconds: i64,
    pub mode: ProfilingMode,
    /// Name of the target process (targeted mode only, when it was observed).
    pub target_name: Option<String>,
    /// One point per collected snapshot.
    pub timeline: Vec<TimelinePoint>,
    /// Flat name-grouped list in global mode; the target's subtree in targeted mode.
    pub aggregated_processes: Vec<AggregatedProcess>,
}
/// One timeline sample: whole-system load plus SysPulse's own overhead.
// Debug added: all fields already implement it, and public types should be Debug.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct TimelinePoint {
    /// Snapshot time formatted as HH:MM:SS.
    pub time: String,
    /// Average CPU percent across all cores.
    pub cpu_total: f32,
    /// Total used memory in GiB.
    pub mem_total_gb: f32,
    /// CPU percent attributable to SysPulse's own processes.
    pub cpu_profiler: f32,
    /// Memory (GiB) attributable to SysPulse's own processes.
    pub mem_profiler_gb: f32,
}
/// One per-process sample in an `AggregatedProcess` history.
// Debug added: all fields already implement it, and it aids diagnostics.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ProcessHistoryPoint {
    /// Snapshot time formatted as HH:MM:SS.
    pub time: String,
    pub cpu_usage: f32,
    pub memory_mb: f32,
}
/// Per-process aggregate over a whole session; forms a tree via `children`.
#[derive(Serialize, Deserialize, Clone)]
pub struct AggregatedProcess {
    pub pid: u32,
    pub name: String,
    /// Mean CPU percent across all snapshots (this process only).
    pub avg_cpu: f32,
    pub peak_cpu: f32,
    /// Mean memory in MiB across all snapshots.
    pub avg_memory_mb: f32,
    pub peak_memory_mb: f32,
    /// Average CPU including all descendant processes.
    pub inclusive_avg_cpu: f32,
    /// Average memory (MiB) including all descendant processes.
    pub inclusive_avg_memory_mb: f32,
    /// Number of same-named processes merged into this entry (global mode).
    pub instance_count: usize,
    /// Flags raised during aggregation, e.g. "Zombie" or "High Peak".
    pub warnings: Vec<String>,
    /// Per-snapshot history for this pid.
    pub history: Vec<ProcessHistoryPoint>,
    pub is_syspulse: bool,
    /// Child processes (emptied in the flattened global view).
    pub children: Vec<AggregatedProcess>,
}

222
src-tauri/src/profiler.rs Normal file
View File

@@ -0,0 +1,222 @@
use sysinfo::System;
use chrono::{DateTime, Utc};
use std::collections::{HashMap, HashSet};
use std::fs;
use crate::models::*;
/// Read a process's PSS (proportional set size) in bytes from
/// /proc/<pid>/smaps_rollup. Returns None on non-Linux systems, when the
/// file is unreadable (missing pid, no permission), or when no valid
/// "Pss:" line is present.
pub fn get_pss(pid: u32) -> Option<u64> {
    let rollup = fs::read_to_string(format!("/proc/{}/smaps_rollup", pid)).ok()?;
    rollup
        .lines()
        .filter(|line| line.starts_with("Pss:"))
        // The second whitespace-separated token is the value in KiB.
        .find_map(|line| line.split_whitespace().nth(1)?.parse::<u64>().ok())
        .map(|kb| kb * 1024)
}
/// Collect our own pid plus every transitive descendant, so profiler
/// overhead can be separated from the rest of the system.
pub fn get_syspulse_pids(self_pid: u32, sys: &System) -> HashSet<u32> {
    // Invert the parent links into a parent -> direct-children index.
    let mut children_of: HashMap<u32, Vec<u32>> = HashMap::new();
    for (pid, process) in sys.processes() {
        if let Some(parent) = process.parent() {
            children_of.entry(parent.as_u32()).or_default().push(pid.as_u32());
        }
    }
    // Depth-first walk from our own pid; `visited` doubles as the result set.
    let mut visited = HashSet::new();
    visited.insert(self_pid);
    let mut pending = vec![self_pid];
    while let Some(current) = pending.pop() {
        for &child in children_of.get(&current).into_iter().flatten() {
            // insert() returns false for already-seen pids, guarding against cycles.
            if visited.insert(child) {
                pending.push(child);
            }
        }
    }
    visited
}
/// Refresh sysinfo state and capture one point-in-time view of CPU, memory
/// and all processes. `collect_pss` enables the more accurate but expensive
/// PSS memory readings from /proc (Linux only).
pub fn collect_snapshot(
    sys: &mut System,
    syspulse_pids: &HashSet<u32>,
    pss_cache: &mut HashMap<u32, (u64, DateTime<Utc>)>,
    collect_pss: bool
) -> Snapshot {
    sys.refresh_cpu_all();
    sys.refresh_memory();
    sys.refresh_processes(sysinfo::ProcessesToUpdate::All, true);
    let cpu_usage: Vec<f32> = sys.cpus().iter().map(|cpu| cpu.cpu_usage()).collect();
    let used_memory = sys.used_memory();
    let now = Utc::now();
    let processes: Vec<ProcessStats> = sys.processes().iter()
        .map(|(pid, p)| {
            let pid_u32 = pid.as_u32();
            let rss = p.memory();
            let is_syspulse = syspulse_pids.contains(&pid_u32);
            // Default to RSS; upgraded to PSS below when enabled and worthwhile.
            let mut memory = rss;
            // Only pay the /proc read for processes above 20 MiB RSS (or our own
            // tree), and refresh each cached PSS value at most every 5 seconds.
            if collect_pss && (rss > 20 * 1024 * 1024 || is_syspulse) {
                let needs_refresh = match pss_cache.get(&pid_u32) {
                    Some((_, last)) => (now - *last).num_seconds() > 5,
                    None => true,
                };
                if needs_refresh {
                    if let Some(pss) = get_pss(pid_u32) {
                        pss_cache.insert(pid_u32, (pss, now));
                        memory = pss;
                    }
                    // If the read fails (no permission / non-Linux), RSS is kept.
                } else if let Some((cached_pss, _)) = pss_cache.get(&pid_u32) {
                    memory = *cached_pss;
                }
            }
            ProcessStats {
                pid: pid_u32,
                parent_pid: p.parent().map(|pp| pp.as_u32()),
                name: p.name().to_string_lossy().to_string(),
                cpu_usage: p.cpu_usage(),
                memory,
                status: format!("{:?}", p.status()),
                user_id: p.user_id().map(|u| u.to_string()),
                is_syspulse,
            }
        }).collect();
    Snapshot {
        timestamp: now,
        cpu_usage,
        used_memory,
        processes,
    }
}
/// Turn raw snapshots into the final report: a system-wide timeline plus
/// per-process aggregates (name-grouped in global mode, the target's
/// subtree in targeted mode).
pub fn generate_report(start_time: DateTime<Utc>, snapshots: Vec<Snapshot>, mode: ProfilingMode, target_pid: Option<u32>) -> Report {
    let end_time = Utc::now();
    let duration = (end_time - start_time).num_seconds();
    // --- System-wide timeline, one point per snapshot ---
    let timeline: Vec<TimelinePoint> = snapshots.iter().map(|s| {
        // NOTE(review): divides by core count; yields NaN if cpu_usage were empty.
        let cpu_total = s.cpu_usage.iter().sum::<f32>() / s.cpu_usage.len() as f32;
        let mem_total_gb = s.used_memory as f32 / 1024.0 / 1024.0 / 1024.0;
        // (cpu %, memory bytes) summed over SysPulse's own processes.
        let profiler_stats = s.processes.iter().filter(|p| p.is_syspulse).fold((0.0, 0), |acc, p| (acc.0 + p.cpu_usage, acc.1 + p.memory));
        TimelinePoint {
            time: s.timestamp.format("%H:%M:%S").to_string(),
            cpu_total,
            mem_total_gb,
            cpu_profiler: profiler_stats.0 / s.cpu_usage.len() as f32,
            mem_profiler_gb: profiler_stats.1 as f32 / 1024.0 / 1024.0 / 1024.0,
        }
    }).collect();
    // Resolve the target's display name from the first snapshot, if it saw the pid.
    let mut target_name = None;
    if let Some(tpid) = target_pid {
        if let Some(snapshot) = snapshots.first() {
            if let Some(p) = snapshot.processes.iter().find(|p| p.pid == tpid) { target_name = Some(p.name.clone()); }
        }
    }
    // --- Per-pid accumulation ---
    // pid -> (name, parent_pid, history, peak_cpu, peak_mem_mb, is_syspulse, saw_zombie)
    let mut pid_map: HashMap<u32, (String, Option<u32>, Vec<ProcessHistoryPoint>, f32, f32, bool, bool)> = HashMap::new();
    let num_snapshots = snapshots.len() as f32;
    for snapshot in &snapshots {
        for proc in &snapshot.processes {
            let entry = pid_map.entry(proc.pid).or_insert_with(|| (proc.name.clone(), proc.parent_pid, Vec::new(), 0.0, 0.0, proc.is_syspulse, false));
            let mem_mb = proc.memory as f32 / 1024.0 / 1024.0;
            entry.2.push(ProcessHistoryPoint { time: snapshot.timestamp.format("%H:%M:%S").to_string(), cpu_usage: proc.cpu_usage, memory_mb: mem_mb });
            if proc.cpu_usage > entry.3 { entry.3 = proc.cpu_usage; }
            if mem_mb > entry.4 { entry.4 = mem_mb; }
            if proc.status.contains("Zombie") { entry.6 = true; }
        }
    }
    // Collapse each pid's history into an AggregatedProcess leaf node.
    // NOTE(review): averages divide by the total snapshot count even for
    // short-lived processes — confirm that is the intended definition.
    let mut nodes: HashMap<u32, AggregatedProcess> = pid_map.into_iter().map(|(pid, (name, _, history, peak_cpu, peak_mem, is_syspulse, is_zombie))| {
        let total_cpu: f32 = history.iter().map(|h| h.cpu_usage).sum();
        let total_mem: f32 = history.iter().map(|h| h.memory_mb).sum();
        let mut warnings = Vec::new();
        if is_zombie { warnings.push("Zombie".to_string()); }
        if peak_cpu > 80.0 { warnings.push("High Peak".to_string()); }
        (pid, AggregatedProcess { pid, name, avg_cpu: total_cpu / num_snapshots, peak_cpu, avg_memory_mb: total_mem / num_snapshots, peak_memory_mb: peak_mem, inclusive_avg_cpu: 0.0, inclusive_avg_memory_mb: 0.0, instance_count: 1, warnings, history, is_syspulse, children: Vec::new() })
    }).collect();
    // --- Parent/child wiring (only links whose parent was actually observed) ---
    let mut child_to_parent = HashMap::new();
    for snapshot in &snapshots {
        for proc in &snapshot.processes {
            if let Some(ppid) = proc.parent_pid { if nodes.contains_key(&ppid) { child_to_parent.insert(proc.pid, ppid); } }
        }
    }
    let mut child_map: HashMap<u32, Vec<u32>> = HashMap::new();
    for (&child, &parent) in &child_to_parent { child_map.entry(parent).or_default().push(child); }
    let root_pids: Vec<u32> = nodes.keys().filter(|pid| !child_to_parent.contains_key(pid)).cloned().collect();
    // Recursively move nodes out of the map into a tree, computing inclusive
    // (self + descendants) averages on the way back up. remove() guarantees
    // each pid is consumed at most once.
    fn build_node(pid: u32, nodes: &mut HashMap<u32, AggregatedProcess>, child_map: &HashMap<u32, Vec<u32>>) -> Option<AggregatedProcess> {
        let mut node = nodes.remove(&pid)?;
        let children_pids = child_map.get(&pid).cloned().unwrap_or_default();
        let mut inc_cpu = node.avg_cpu;
        let mut inc_mem = node.avg_memory_mb;
        for c_pid in children_pids {
            if let Some(child_node) = build_node(c_pid, nodes, child_map) {
                inc_cpu += child_node.inclusive_avg_cpu;
                inc_mem += child_node.inclusive_avg_memory_mb;
                node.children.push(child_node);
            }
        }
        node.inclusive_avg_cpu = inc_cpu;
        node.inclusive_avg_memory_mb = inc_mem;
        Some(node)
    }
    let mut final_roots = Vec::new();
    for pid in root_pids { if let Some(root_node) = build_node(pid, &mut nodes, &child_map) { final_roots.push(root_node); } }
    // Anything still left in `nodes` was unreachable from the roots; promote
    // it to a root so no observed process is dropped from the report.
    let remaining_pids: Vec<u32> = nodes.keys().cloned().collect();
    for pid in remaining_pids { if let Some(node) = build_node(pid, &mut nodes, &child_map) { final_roots.push(node); } }
    if mode == ProfilingMode::Global {
        // Global mode: flatten the forest and merge same-named processes.
        let mut name_groups: HashMap<String, AggregatedProcess> = HashMap::new();
        fn flatten_to_groups(node: AggregatedProcess, groups: &mut HashMap<String, AggregatedProcess>) {
            let entry = groups.entry(node.name.clone()).or_insert_with(|| {
                // Seed the group from the first instance, with counters zeroed.
                let mut base = node.clone();
                base.children = Vec::new(); base.instance_count = 0; base.inclusive_avg_cpu = 0.0; base.inclusive_avg_memory_mb = 0.0; base.avg_cpu = 0.0; base.avg_memory_mb = 0.0;
                base
            });
            // Sum averages across instances; keep the max of the peaks.
            entry.avg_cpu += node.avg_cpu; entry.avg_memory_mb += node.avg_memory_mb; entry.instance_count += 1;
            if node.peak_cpu > entry.peak_cpu { entry.peak_cpu = node.peak_cpu; }
            if node.peak_memory_mb > entry.peak_memory_mb { entry.peak_memory_mb = node.peak_memory_mb; }
            for child in node.children { flatten_to_groups(child, groups); }
        }
        for root in final_roots { flatten_to_groups(root, &mut name_groups); }
        let mut flattened: Vec<AggregatedProcess> = name_groups.into_values().collect();
        // Busiest (highest average CPU) first.
        flattened.sort_by(|a, b| b.avg_cpu.partial_cmp(&a.avg_cpu).unwrap_or(std::cmp::Ordering::Equal));
        Report { start_time: start_time.to_rfc3339(), end_time: end_time.to_rfc3339(), duration_seconds: duration, mode, target_name, timeline, aggregated_processes: flattened }
    } else {
        // Targeted mode: keep only the target's subtree.
        let mut targeted_roots = Vec::new();
        if let Some(tpid) = target_pid {
            // Search the forest for the target pid and detach its node
            // (children intact); everything else is discarded.
            fn extract_target(nodes: Vec<AggregatedProcess>, tpid: u32) -> (Vec<AggregatedProcess>, Option<AggregatedProcess>) {
                let mut remaining = Vec::new(); let mut found = None;
                for mut node in nodes {
                    if node.pid == tpid { found = Some(node); }
                    else {
                        let (new_children, sub_found) = extract_target(node.children, tpid);
                        node.children = new_children;
                        if let Some(f) = sub_found { found = Some(f); }
                        remaining.push(node);
                    }
                }
                (remaining, found)
            }
            let (_, target_node) = extract_target(final_roots, tpid);
            if let Some(t) = target_node { targeted_roots.push(t); }
        }
        Report { start_time: start_time.to_rfc3339(), end_time: end_time.to_rfc3339(), duration_seconds: duration, mode, target_name, timeline, aggregated_processes: targeted_roots }
    }
}