feat: initialize SysPulse-rs profiler project with Tauri v2 and React

This commit is contained in:
2026-02-22 17:39:34 +01:00
commit ec142b09bd
20 changed files with 1352 additions and 0 deletions

343
src-tauri/src/main.rs Normal file
View File

@@ -0,0 +1,343 @@
#![cfg_attr(
all(not(debug_assertions), target_os = "windows"),
windows_subsystem = "windows"
)]
use sysinfo::System;
use std::sync::Mutex;
use std::process::Command;
use tauri::State;
use serde::{Serialize, Deserialize};
use std::collections::HashMap;
use chrono::{DateTime, Utc};
// --- Data Structures ---
/// Live snapshot returned to the frontend on every poll tick.
#[derive(Serialize, Clone)]
struct SystemStats {
    // Per-core CPU usage in percent, one entry per logical core.
    cpu_usage: Vec<f32>,
    // Total / used physical memory as reported by sysinfo (bytes — TODO confirm units for this sysinfo version).
    total_memory: u64,
    used_memory: u64,
    // Per-process stats; may be empty in minimal mode, truncated to top 50 otherwise.
    processes: Vec<ProcessStats>,
    // True while a profiling session is capturing snapshots.
    is_recording: bool,
    recording_duration: u64, // seconds
}
/// Stats for a single OS process, as sampled by sysinfo.
#[derive(Serialize, Clone, Debug)]
struct ProcessStats {
    pid: u32,
    name: String,
    // CPU usage in percent; can exceed 100 on multi-core systems.
    cpu_usage: f32,
    // Memory as reported by sysinfo (bytes — TODO confirm units for this sysinfo version).
    memory: u64,
    // Debug-formatted sysinfo ProcessStatus (e.g. "Run", "Zombie"); string-matched later.
    status: String,
    // Owning user id, when the platform exposes one.
    user_id: Option<String>,
}
/// One point-in-time capture taken during a profiling session.
/// Stored untruncated so the final report sees every process.
#[derive(Clone)]
struct Snapshot {
    timestamp: DateTime<Utc>,
    cpu_usage: Vec<f32>,
    used_memory: u64,
    processes: Vec<ProcessStats>,
}
/// Mutable recording state; guarded by a Mutex inside `AppState`.
struct ProfilingSession {
    // True between start_profiling and stop_profiling.
    is_active: bool,
    // Set on start; NOTE(review): never cleared on stop, so duration math
    // elsewhere must also check `is_active`.
    start_time: Option<DateTime<Utc>>,
    // Appended on every `get_system_stats` call while active.
    snapshots: Vec<Snapshot>,
}
/// Tauri-managed shared state. Both fields are Mutex-guarded because
/// commands may be invoked concurrently from the webview.
struct AppState {
    sys: Mutex<System>,
    profiling: Mutex<ProfilingSession>,
}
// --- Report Structures ---
/// Final session report produced by `stop_profiling` and persisted by `save_report`.
#[derive(Serialize, Deserialize)]
struct Report {
    // RFC 3339 timestamps of session start / end.
    start_time: String,
    end_time: String,
    duration_seconds: i64,
    // One point per captured snapshot (system-wide view).
    timeline: Vec<TimelinePoint>,
    // Per-process-name aggregates, sorted by average CPU descending.
    aggregated_processes: Vec<AggregatedProcess>,
}
/// System-wide sample on the report timeline.
#[derive(Serialize, Deserialize)]
struct TimelinePoint {
    // Wall-clock label formatted as %H:%M:%S (UTC).
    time: String,
    // Average across all cores, percent.
    avg_cpu: f32,
    memory_gb: f32,
}
/// Per-process sample on the report timeline (all same-named instances summed).
#[derive(Serialize, Deserialize)]
struct ProcessHistoryPoint {
    // Wall-clock label formatted as %H:%M:%S (UTC).
    time: String,
    cpu_usage: f32,
    memory_mb: f32,
}
/// Whole-session aggregate for one process name (all PIDs sharing that name).
#[derive(Serialize, Deserialize)]
struct AggregatedProcess {
    name: String,
    // Averages are taken over ALL snapshots in the session (implicit zeros
    // where the process was absent), not only snapshots where it appeared.
    avg_cpu: f32,
    peak_cpu: f32,
    avg_memory_mb: f32,
    peak_memory_mb: f32,
    // Number of distinct PIDs observed under this name during the session.
    instance_count: usize,
    // Human-readable flags: zombie detected, high peak CPU, heavy memory.
    warnings: Vec<String>,
    history: Vec<ProcessHistoryPoint>,
}
// --- Commands ---
#[tauri::command]
fn get_system_stats(
state: State<AppState>,
exclude_self: bool,
minimal: bool
) -> SystemStats {
let mut sys = state.sys.lock().unwrap();
let mut profiling = state.profiling.lock().unwrap();
if minimal {
sys.refresh_cpu();
sys.refresh_memory();
} else {
sys.refresh_all();
}
let self_pid = std::process::id();
let cpu_usage: Vec<f32> = sys.cpus().iter().map(|cpu| cpu.cpu_usage()).collect();
let total_memory = sys.total_memory();
let used_memory = sys.used_memory();
let mut processes: Vec<ProcessStats> = if minimal && !profiling.is_active {
Vec::new()
} else {
sys.processes().iter()
.filter(|(pid, _)| !exclude_self || pid.as_u32() != self_pid)
.map(|(pid, process)| {
ProcessStats {
pid: pid.as_u32(),
name: process.name().to_string(),
cpu_usage: process.cpu_usage(),
memory: process.memory(),
status: format!("{:?}", process.status()),
user_id: process.user_id().map(|uid| uid.to_string()),
}
}).collect()
};
if profiling.is_active {
// Even in minimal mode, if recording we need the processes for the report
if minimal {
sys.refresh_processes();
processes = sys.processes().iter()
.filter(|(pid, _)| !exclude_self || pid.as_u32() != self_pid)
.map(|(pid, process)| {
ProcessStats {
pid: pid.as_u32(),
name: process.name().to_string(),
cpu_usage: process.cpu_usage(),
memory: process.memory(),
status: format!("{:?}", process.status()),
user_id: process.user_id().map(|uid| uid.to_string()),
}
}).collect();
}
profiling.snapshots.push(Snapshot {
timestamp: Utc::now(),
cpu_usage: cpu_usage.clone(),
used_memory,
processes: processes.clone(),
});
}
let recording_duration = if let Some(start) = profiling.start_time {
(Utc::now() - start).num_seconds() as u64
} else {
0
};
if !minimal {
processes.sort_by(|a, b| b.cpu_usage.partial_cmp(&a.cpu_usage).unwrap_or(std::cmp::Ordering::Equal));
processes.truncate(50);
} else if !profiling.is_active {
processes.clear();
}
SystemStats {
cpu_usage,
total_memory,
used_memory,
processes,
is_recording: profiling.is_active,
recording_duration,
}
}
/// Persist a finished report as pretty-printed JSON in the process's current
/// working directory, returning the generated file name.
#[tauri::command]
fn save_report(report: Report) -> Result<String, String> {
    // Timestamped name so successive saves never clobber each other.
    let file_name = format!("syspulse_report_{}.json", Utc::now().format("%Y%m%d_%H%M%S"));
    let contents = serde_json::to_string_pretty(&report).map_err(|e| e.to_string())?;
    std::fs::write(&file_name, contents).map_err(|e| e.to_string())?;
    Ok(file_name)
}
/// Begin a new profiling session: drop any stale snapshots, stamp the start
/// time, and mark recording active. Subsequent `get_system_stats` calls will
/// append snapshots until `stop_profiling` is invoked.
#[tauri::command]
fn start_profiling(state: State<AppState>) {
    let mut session = state.profiling.lock().unwrap();
    session.snapshots.clear();
    session.start_time = Some(Utc::now());
    session.is_active = true;
}
#[tauri::command]
fn stop_profiling(state: State<AppState>) -> Report {
let mut profiling = state.profiling.lock().unwrap();
profiling.is_active = false;
let start = profiling.start_time.unwrap_or(Utc::now());
let end = Utc::now();
let duration = (end - start).num_seconds();
// 1. Generate Session Timeline
let timeline: Vec<TimelinePoint> = profiling.snapshots.iter().map(|s| {
let avg_cpu = s.cpu_usage.iter().sum::<f32>() / s.cpu_usage.len() as f32;
TimelinePoint {
time: s.timestamp.format("%H:%M:%S").to_string(),
avg_cpu,
memory_gb: s.used_memory as f32 / 1024.0 / 1024.0 / 1024.0,
}
}).collect();
// 2. Aggregate Processes over Time
let mut process_map: HashMap<String, Vec<ProcessHistoryPoint>> = HashMap::new();
let mut peak_stats: HashMap<String, (f32, f32)> = HashMap::new(); // (Peak CPU, Peak Mem)
let mut unique_pids: HashMap<String, std::collections::HashSet<u32>> = HashMap::new();
let mut status_flags: HashMap<String, bool> = HashMap::new(); // Zombie check
for snapshot in &profiling.snapshots {
let mut snapshot_procs: HashMap<String, (f32, u64)> = HashMap::new();
for proc in &snapshot.processes {
let entry = snapshot_procs.entry(proc.name.clone()).or_default();
entry.0 += proc.cpu_usage;
entry.1 += proc.memory;
unique_pids.entry(proc.name.clone()).or_default().insert(proc.pid);
if proc.status.contains("Zombie") {
status_flags.insert(proc.name.clone(), true);
}
}
// Record history for all processes seen in this snapshot
for (name, (cpu, mem)) in snapshot_procs {
let hist_entry = process_map.entry(name.clone()).or_default();
let mem_mb = mem as f32 / 1024.0 / 1024.0;
hist_entry.push(ProcessHistoryPoint {
time: snapshot.timestamp.format("%H:%M:%S").to_string(),
cpu_usage: cpu,
memory_mb: mem_mb,
});
let peaks = peak_stats.entry(name).or_insert((0.0, 0.0));
if cpu > peaks.0 { peaks.0 = cpu; }
if mem_mb > peaks.1 { peaks.1 = mem_mb; }
}
}
let mut aggregated_processes: Vec<AggregatedProcess> = Vec::new();
let num_snapshots = profiling.snapshots.len() as f32;
for (name, history) in process_map {
let (peak_cpu, peak_mem) = peak_stats.get(&name).cloned().unwrap_or((0.0, 0.0));
let count = unique_pids.get(&name).map(|s| s.len()).unwrap_or(0);
// Average over the whole SESSION (zeros for snapshots where not present)
let total_cpu_sum: f32 = history.iter().map(|h| h.cpu_usage).sum();
let total_mem_sum: f32 = history.iter().map(|h| h.memory_mb).sum();
let avg_cpu = if num_snapshots > 0.0 { total_cpu_sum / num_snapshots } else { 0.0 };
let avg_mem = if num_snapshots > 0.0 { total_mem_sum / num_snapshots } else { 0.0 };
let mut warnings = Vec::new();
if status_flags.get(&name).cloned().unwrap_or(false) {
warnings.push("Zombie Process Detected".to_string());
}
if peak_cpu > 80.0 {
warnings.push("High Peak Load".to_string());
}
if peak_mem > 2048.0 {
warnings.push("Heavy Memory usage".to_string());
}
aggregated_processes.push(AggregatedProcess {
name,
avg_cpu,
peak_cpu,
avg_memory_mb: avg_mem,
peak_memory_mb: peak_mem,
instance_count: count,
warnings,
history,
});
}
// Sort by Average CPU descending
aggregated_processes.sort_by(|a, b| b.avg_cpu.partial_cmp(&a.avg_cpu).unwrap_or(std::cmp::Ordering::Equal));
Report {
start_time: start.to_rfc3339(),
end_time: end.to_rfc3339(),
duration_seconds: duration,
timeline,
aggregated_processes,
}
}
/// Run a command as root via pkexec (Linux polkit), returning stdout on
/// success or stderr on failure. The frontend sends plain shell strings,
/// e.g. "kill -9 1234".
///
/// SECURITY(review): the string is handed to `sh -c` verbatim, so this is
/// shell execution as root by construction. Acceptable only while the caller
/// is the trusted frontend; never expose this to untrusted input without
/// strict validation or an allow-list.
#[tauri::command]
fn run_as_admin(command: String) -> Result<String, String> {
    let outcome = Command::new("pkexec")
        .args(["sh", "-c", command.as_str()])
        .output();
    match outcome {
        Err(e) => Err(e.to_string()),
        Ok(out) if out.status.success() => Ok(String::from_utf8_lossy(&out.stdout).to_string()),
        Ok(out) => Err(String::from_utf8_lossy(&out.stderr).to_string()),
    }
}
/// Application entry point: set up the shared state (system probe + idle
/// profiling session), register the command handlers, and start Tauri.
fn main() {
    let initial_state = AppState {
        sys: Mutex::new(System::new_all()),
        profiling: Mutex::new(ProfilingSession {
            is_active: false,
            start_time: None,
            snapshots: Vec::new(),
        }),
    };
    tauri::Builder::default()
        .manage(initial_state)
        .invoke_handler(tauri::generate_handler![
            get_system_stats,
            start_profiling,
            stop_profiling,
            run_as_admin,
            save_report
        ])
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}