feat: implement CLI interface for headless profiling and report viewing

This commit is contained in:
2026-02-23 01:35:16 +01:00
parent 47452d9330
commit e6c5adca5e
4 changed files with 197 additions and 99 deletions

View File

@@ -6,11 +6,31 @@ A professional, high-performance Linux system profiler built with **Rust** and *
## 🚀 Usage
1. **Live Dashboard**: Monitor real-time CPU and Memory load. Use the "Hide SysPulse" toggle to exclude the profiler's own overhead from the results.
2. **Recording**: Click **Record Profile** to start a session. The app automatically switches to a **Minimal Footprint Mode** to ensure the most accurate results by reducing UI overhead.
1. **Live Dashboard**: Monitor real-time CPU and Memory load.
2. **Recording**: Click **Record Profile** to start a session. The app automatically switches to a **Minimal Footprint Mode**.
3. **Analysis**: Stop the recording to view a comprehensive **Profiling Report**.
4. **Inspection**: Click any process in the report matrix to open the **Process Inspector**, showing a dedicated time-series graph of that application's resource consumption throughout the session.
5. **Admin Control**: Hover over any process and click the **Shield** icon to terminate it (uses `pkexec` for secure sudo authentication).
4. **Inspection**: Click any process in the report matrix to open the **Process Inspector**.
5. **Admin Control**: Hover over any process and click the **Shield** icon to terminate it.
---
## 💻 CLI Interface
SysPulse can be controlled via the command line for headless profiling or automated data collection.
```bash
# Start a 60-second headless profiling run and save to a specific file
./syspulse --headless --duration 60 --output my_report.json
# Run a headless profile and immediately open the results in the GUI
./syspulse --headless --duration 10 --gui
# Open an existing JSON report file directly in the GUI
./syspulse --file my_report.json
# Show all CLI options
./syspulse --help
```
---

View File

@@ -18,3 +18,4 @@ sysinfo = "0.38.2"
tauri = "2.10.2"
tokio = { version = "1.49.0", features = ["full"] }
rayon = "1.10"
clap = { version = "4.5", features = ["derive"] }

View File

@@ -12,6 +12,39 @@ use std::collections::HashMap;
use chrono::{DateTime, Utc};
use std::fs;
use rayon::prelude::*;
use clap::Parser;
use std::path::PathBuf;
use std::time::Duration;
// --- CLI ---

/// Command-line options for SysPulse (headless profiling and report viewing).
///
/// Parsed once at startup in `main`; headless mode drives the snapshot loop
/// directly, the GUI-related flags seed the Tauri app state.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Cli {
    /// Start profiling immediately without GUI
    // No `short` here: `-h` is reserved by clap v4 for the auto-generated
    // --help flag, and declaring a second `-h` trips clap's duplicate
    // short-flag debug assertion at startup.
    #[arg(long)]
    headless: bool,

    /// Duration of profiling in seconds (for headless mode)
    #[arg(short, long, default_value_t = 30)]
    duration: u64,

    /// Interval between snapshots in milliseconds
    // NOTE(review): the headless loop computes `1000 / interval` for its
    // progress output, so values above 1000 (or 0) would panic there —
    // TODO confirm and validate at the call site.
    #[arg(short, long, default_value_t = 1000)]
    interval: u64,

    /// Output path for the JSON report
    #[arg(short, long)]
    output: Option<PathBuf>,

    /// Open the GUI with the collected data after headless profiling
    #[arg(short, long)]
    gui: bool,

    /// Open an existing JSON report file in the GUI
    #[arg(short, long)]
    file: Option<PathBuf>,
}
// --- Data Structures ---
@@ -22,7 +55,7 @@ struct SystemStats {
used_memory: u64,
processes: Vec<ProcessStats>,
is_recording: bool,
recording_duration: u64, // seconds
recording_duration: u64,
}
#[derive(Serialize, Clone, Debug)]
@@ -54,11 +87,12 @@ struct ProfilingSession {
struct AppState {
sys: Mutex<System>,
profiling: Mutex<ProfilingSession>,
initial_report: Mutex<Option<Report>>,
}
// --- Report Structures ---
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, Clone)]
struct Report {
start_time: String,
end_time: String,
@@ -67,21 +101,21 @@ struct Report {
aggregated_processes: Vec<AggregatedProcess>,
}
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, Clone)]
struct TimelinePoint {
time: String,
avg_cpu: f32,
memory_gb: f32,
}
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, Clone)]
struct ProcessHistoryPoint {
time: String,
cpu_usage: f32,
memory_mb: f32,
}
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, Clone)]
struct AggregatedProcess {
pid: u32,
name: String,
@@ -98,7 +132,7 @@ struct AggregatedProcess {
children: Vec<AggregatedProcess>,
}
// --- Helper for Real Memory (PSS) on Linux ---
// --- Helpers ---
fn get_pss(pid: u32) -> Option<u64> {
let path = format!("/proc/{}/smaps_rollup", pid);
@@ -117,8 +151,6 @@ fn get_pss(pid: u32) -> Option<u64> {
None
}
// --- Commands ---
fn is_syspulse_recursive(pid: u32, self_pid: u32, sys: &System) -> bool {
if pid == self_pid { return true; }
let mut current = sys.process(Pid::from_u32(pid));
@@ -133,21 +165,12 @@ fn is_syspulse_recursive(pid: u32, self_pid: u32, sys: &System) -> bool {
false
}
#[tauri::command]
fn get_system_stats(
state: State<AppState>,
minimal: bool
) -> SystemStats {
let mut sys = state.sys.lock().unwrap();
let mut profiling = state.profiling.lock().unwrap();
fn collect_snapshot(sys: &mut System, self_pid: u32) -> Snapshot {
sys.refresh_cpu_all();
sys.refresh_memory();
sys.refresh_processes(sysinfo::ProcessesToUpdate::All, true);
let self_pid = std::process::id();
let cpu_usage: Vec<f32> = sys.cpus().iter().map(|cpu| cpu.cpu_usage()).collect();
let total_memory = sys.total_memory();
let used_memory = sys.used_memory();
let processes: Vec<ProcessStats> = sys.processes().iter()
@@ -155,14 +178,8 @@ fn get_system_stats(
.filter_map(|(pid, p)| {
let rss = p.memory();
if rss == 0 { return None; }
let pid_u32 = pid.as_u32();
let memory = if rss > 10 * 1024 * 1024 {
get_pss(pid_u32).unwrap_or(rss)
} else {
rss
};
let memory = if rss > 10 * 1024 * 1024 { get_pss(pid_u32).unwrap_or(rss) } else { rss };
Some(ProcessStats {
pid: pid_u32,
parent_pid: p.parent().map(|pp| pp.as_u32()),
@@ -175,66 +192,19 @@ fn get_system_stats(
})
}).collect();
if profiling.is_active {
profiling.snapshots.push(Snapshot {
timestamp: Utc::now(),
cpu_usage: cpu_usage.clone(),
used_memory,
processes: processes.clone(),
});
}
let recording_duration = if let Some(start) = profiling.start_time {
(Utc::now() - start).num_seconds() as u64
} else {
0
};
let display_processes = if minimal && !profiling.is_active {
Vec::new()
} else {
let mut p = processes.clone();
p.sort_by(|a, b| b.cpu_usage.partial_cmp(&a.cpu_usage).unwrap_or(std::cmp::Ordering::Equal));
p.truncate(50);
p
};
SystemStats {
Snapshot {
timestamp: Utc::now(),
cpu_usage,
total_memory,
used_memory,
processes: display_processes,
is_recording: profiling.is_active,
recording_duration,
processes,
}
}
#[tauri::command]
fn save_report(report: Report) -> Result<String, String> {
let json = serde_json::to_string_pretty(&report).map_err(|e| e.to_string())?;
let path = format!("syspulse_report_{}.json", Utc::now().format("%Y%m%d_%H%M%S"));
std::fs::write(&path, json).map_err(|e| e.to_string())?;
Ok(path)
}
fn generate_report(start_time: DateTime<Utc>, snapshots: Vec<Snapshot>) -> Report {
let end_time = Utc::now();
let duration = (end_time - start_time).num_seconds();
#[tauri::command]
fn start_profiling(state: State<AppState>) {
let mut profiling = state.profiling.lock().unwrap();
profiling.is_active = true;
profiling.start_time = Some(Utc::now());
profiling.snapshots.clear();
}
#[tauri::command]
fn stop_profiling(state: State<AppState>) -> Report {
let mut profiling = state.profiling.lock().unwrap();
profiling.is_active = false;
let start = profiling.start_time.unwrap_or(Utc::now());
let end = Utc::now();
let duration = (end - start).num_seconds();
let timeline: Vec<TimelinePoint> = profiling.snapshots.iter().map(|s| {
let timeline: Vec<TimelinePoint> = snapshots.iter().map(|s| {
let avg_cpu = s.cpu_usage.iter().sum::<f32>() / s.cpu_usage.len() as f32;
TimelinePoint {
time: s.timestamp.format("%H:%M:%S").to_string(),
@@ -253,9 +223,9 @@ fn stop_profiling(state: State<AppState>) -> Report {
}
let mut pid_map: HashMap<u32, PidStats> = HashMap::new();
let num_snapshots = profiling.snapshots.len() as f32;
let num_snapshots = snapshots.len() as f32;
for snapshot in &profiling.snapshots {
for snapshot in &snapshots {
for proc in &snapshot.processes {
let entry = pid_map.entry(proc.pid).or_insert_with(|| PidStats {
name: proc.name.clone(),
@@ -265,14 +235,12 @@ fn stop_profiling(state: State<AppState>) -> Report {
is_syspulse: proc.is_syspulse,
is_zombie: false,
});
let mem_mb = proc.memory as f32 / 1024.0 / 1024.0;
entry.history.push(ProcessHistoryPoint {
time: snapshot.timestamp.format("%H:%M:%S").to_string(),
cpu_usage: proc.cpu_usage,
memory_mb: mem_mb,
});
if proc.cpu_usage > entry.peak_cpu { entry.peak_cpu = proc.cpu_usage; }
if mem_mb > entry.peak_mem { entry.peak_mem = mem_mb; }
if proc.status.contains("Zombie") { entry.is_zombie = true; }
@@ -282,11 +250,9 @@ fn stop_profiling(state: State<AppState>) -> Report {
let mut nodes: HashMap<u32, AggregatedProcess> = pid_map.into_iter().map(|(pid, stats)| {
let total_cpu: f32 = stats.history.iter().map(|h| h.cpu_usage).sum();
let total_mem: f32 = stats.history.iter().map(|h| h.memory_mb).sum();
let mut warnings = Vec::new();
if stats.is_zombie { warnings.push("Zombie".to_string()); }
if stats.peak_cpu > 80.0 { warnings.push("High Peak".to_string()); }
(pid, AggregatedProcess {
pid,
name: stats.name,
@@ -305,7 +271,7 @@ fn stop_profiling(state: State<AppState>) -> Report {
}).collect();
let mut child_to_parent = HashMap::new();
for snapshot in &profiling.snapshots {
for snapshot in &snapshots {
for proc in &snapshot.processes {
if let Some(ppid) = proc.parent_pid {
if nodes.contains_key(&ppid) {
@@ -328,10 +294,8 @@ fn stop_profiling(state: State<AppState>) -> Report {
fn build_node(pid: u32, nodes: &mut HashMap<u32, AggregatedProcess>, child_map: &HashMap<u32, Vec<u32>>) -> Option<AggregatedProcess> {
let mut node = nodes.remove(&pid)?;
let children_pids = child_map.get(&pid).cloned().unwrap_or_default();
let mut inc_cpu = node.avg_cpu;
let mut inc_mem = node.avg_memory_mb;
for c_pid in children_pids {
if let Some(child_node) = build_node(c_pid, nodes, child_map) {
inc_cpu += child_node.inclusive_avg_cpu;
@@ -339,7 +303,6 @@ fn stop_profiling(state: State<AppState>) -> Report {
node.children.push(child_node);
}
}
node.inclusive_avg_cpu = inc_cpu;
node.inclusive_avg_memory_mb = inc_mem;
Some(node)
@@ -351,25 +314,85 @@ fn stop_profiling(state: State<AppState>) -> Report {
final_roots.push(root_node);
}
}
let remaining_pids: Vec<u32> = nodes.keys().cloned().collect();
for pid in remaining_pids {
if let Some(node) = build_node(pid, &mut nodes, &child_map) {
final_roots.push(node);
}
}
final_roots.sort_by(|a, b| b.inclusive_avg_cpu.partial_cmp(&a.inclusive_avg_cpu).unwrap_or(std::cmp::Ordering::Equal));
Report {
start_time: start.to_rfc3339(),
end_time: end.to_rfc3339(),
start_time: start_time.to_rfc3339(),
end_time: end_time.to_rfc3339(),
duration_seconds: duration,
timeline,
aggregated_processes: final_roots,
}
}
// --- Commands ---
/// Tauri command: take one live snapshot and return current system stats.
///
/// When a recording session is active the snapshot is also appended to the
/// session buffer. With `minimal == true` and no active recording, the
/// process list is omitted entirely (minimal-footprint UI mode); otherwise
/// only the top 50 processes by CPU usage are returned for display.
#[tauri::command]
fn get_system_stats(state: State<AppState>, minimal: bool) -> SystemStats {
    let mut sys = state.sys.lock().unwrap();
    let mut profiling = state.profiling.lock().unwrap();

    let snapshot = collect_snapshot(&mut sys, std::process::id());

    // Archive the full snapshot for later report generation while recording.
    if profiling.is_active {
        profiling.snapshots.push(snapshot.clone());
    }

    let recording_duration = profiling
        .start_time
        .map(|s| (Utc::now() - s).num_seconds() as u64)
        .unwrap_or(0);

    let display_processes = if minimal && !profiling.is_active {
        Vec::new()
    } else {
        // Move the process list out of the local snapshot instead of cloning
        // it a second time — the recording branch above already cloned it.
        let mut p = snapshot.processes;
        p.sort_by(|a, b| {
            b.cpu_usage
                .partial_cmp(&a.cpu_usage)
                .unwrap_or(std::cmp::Ordering::Equal)
        });
        p.truncate(50);
        p
    };

    SystemStats {
        cpu_usage: snapshot.cpu_usage,
        total_memory: sys.total_memory(),
        used_memory: sys.used_memory(),
        processes: display_processes,
        is_recording: profiling.is_active,
        recording_duration,
    }
}
/// Tauri command: hand the frontend the report preloaded from the CLI
/// (`--file`, or headless run with `--gui`), if any.
#[tauri::command]
fn get_initial_report(state: State<AppState>) -> Option<Report> {
    let preloaded = state.initial_report.lock().unwrap();
    preloaded.clone()
}
/// Tauri command: serialize the report to pretty-printed JSON and write it
/// to a timestamped file in the working directory. Returns the file name,
/// or an error string if serialization or the write fails.
#[tauri::command]
fn save_report(report: Report) -> Result<String, String> {
    let filename = format!("syspulse_report_{}.json", Utc::now().format("%Y%m%d_%H%M%S"));
    let payload = serde_json::to_string_pretty(&report).map_err(|e| e.to_string())?;
    std::fs::write(&filename, payload).map_err(|e| e.to_string())?;
    Ok(filename)
}
/// Tauri command: begin a fresh recording session — discard any previous
/// snapshots, stamp the start time, and mark the session active.
#[tauri::command]
fn start_profiling(state: State<AppState>) {
    let mut session = state.profiling.lock().unwrap();
    session.snapshots.clear();
    session.start_time = Some(Utc::now());
    session.is_active = true;
}
/// Tauri command: end the recording session and build the final report from
/// the collected snapshots. Draining leaves the session buffer empty (with
/// its capacity intact) for the next run.
#[tauri::command]
fn stop_profiling(state: State<AppState>) -> Report {
    let mut session = state.profiling.lock().unwrap();
    session.is_active = false;
    // Fall back to "now" if start was never stamped (zero-length session).
    let started = session.start_time.unwrap_or_else(Utc::now);
    let collected: Vec<_> = session.snapshots.drain(..).collect();
    generate_report(started, collected)
}
#[tauri::command]
fn run_as_admin(command: String) -> Result<String, String> {
let output = Command::new("pkexec")
@@ -378,7 +401,6 @@ fn run_as_admin(command: String) -> Result<String, String> {
.arg(&command)
.output()
.map_err(|e| e.to_string())?;
if output.status.success() {
Ok(String::from_utf8_lossy(&output.stdout).to_string())
} else {
@@ -387,6 +409,45 @@ fn run_as_admin(command: String) -> Result<String, String> {
}
fn main() {
let cli = Cli::parse();
let mut initial_report: Option<Report> = None;
if let Some(file_path) = cli.file {
if let Ok(content) = fs::read_to_string(file_path) {
if let Ok(report) = serde_json::from_str(&content) {
initial_report = Some(report);
}
}
}
if cli.headless {
println!("⚡ SysPulse: Starting headless profiling for {}s (interval: {}ms)...", cli.duration, cli.interval);
let mut sys = System::new_all();
let start_time = Utc::now();
let mut snapshots = Vec::new();
let self_pid = std::process::id();
for i in 0..(cli.duration * 1000 / cli.interval) {
snapshots.push(collect_snapshot(&mut sys, self_pid));
std::thread::sleep(Duration::from_millis(cli.interval));
if (i + 1) % (1000 / cli.interval) == 0 {
println!(" Progress: {}/{}s", (i + 1) * cli.interval / 1000, cli.duration);
}
}
let report = generate_report(start_time, snapshots);
let json = serde_json::to_string_pretty(&report).unwrap();
let out_path = cli.output.unwrap_or_else(|| PathBuf::from(format!("syspulse_report_{}.json", Utc::now().format("%Y%m%d_%H%M%S"))));
fs::write(&out_path, json).expect("Failed to write report");
println!("✅ Report saved to: {:?}", out_path);
if cli.gui {
initial_report = Some(report);
} else {
return;
}
}
tauri::Builder::default()
.manage(AppState {
sys: Mutex::new(System::new_all()),
@@ -394,10 +455,12 @@ fn main() {
is_active: false,
start_time: None,
snapshots: Vec::new(),
})
}),
initial_report: Mutex::new(initial_report),
})
.invoke_handler(tauri::generate_handler![
get_system_stats,
get_initial_report,
start_profiling,
stop_profiling,
run_as_admin,

View File

@@ -92,6 +92,20 @@ function App() {
const [report, setReport] = useState<ProfilingReport | null>(null);
useEffect(() => {
// Check for initial report (from CLI args)
const checkInitialReport = async () => {
try {
const data = await invoke<ProfilingReport | null>('get_initial_report');
if (data) {
setReport(data);
setView('report');
}
} catch (e) {
console.error('Failed to get initial report:', e);
}
};
checkInitialReport();
const fetchStats = async () => {
try {
const isRecording = stats?.is_recording ?? false;