updated profiling options

This commit is contained in:
2026-02-23 17:56:53 +01:00
parent d46f057867
commit 8a24fa5689
2 changed files with 279 additions and 85 deletions

View File

@@ -3,7 +3,7 @@
windows_subsystem = "windows"
)]
use sysinfo::System;
use sysinfo::{System, Pid};
use std::sync::Mutex;
use std::process::Command;
use tauri::State;
@@ -14,6 +14,7 @@ use std::fs;
use clap::Parser;
use std::path::PathBuf;
use std::time::Duration;
use rayon::prelude::*;
// --- CLI ---
@@ -36,6 +37,12 @@ struct Cli {
// --- Data Structures ---
/// Which kind of profiling session is running.
#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
enum ProfilingMode {
    /// Profile every process on the system; the report flattens and groups them by name.
    Global,
    /// Profile a single process (selected by PID) and its child-process hierarchy.
    Targeted,
}
#[derive(Serialize, Clone)]
struct SystemStats {
cpu_usage: Vec<f32>,
@@ -68,6 +75,8 @@ struct Snapshot {
/// Mutable state of the current profiling run, held behind a Mutex in AppState.
struct ProfilingSession {
    // True while snapshots are being collected each tick.
    is_active: bool,
    // Global (whole system) vs Targeted (single PID) — see ProfilingMode.
    mode: ProfilingMode,
    // PID being profiled in Targeted mode; None in Global mode.
    target_pid: Option<u32>,
    // When the session started; None if no session has ever run.
    start_time: Option<DateTime<Utc>>,
    // Accumulated per-interval snapshots, drained when a report is generated.
    snapshots: Vec<Snapshot>,
}
@@ -86,6 +95,8 @@ struct Report {
start_time: String,
end_time: String,
duration_seconds: i64,
mode: ProfilingMode,
target_name: Option<String>,
timeline: Vec<TimelinePoint>,
aggregated_processes: Vec<AggregatedProcess>,
}
@@ -93,8 +104,10 @@ struct Report {
#[derive(Serialize, Deserialize, Clone)]
struct TimelinePoint {
time: String,
avg_cpu: f32,
memory_gb: f32,
cpu_total: f32,
mem_total_gb: f32,
cpu_profiler: f32,
mem_profiler_gb: f32,
}
#[derive(Serialize, Deserialize, Clone)]
@@ -220,19 +233,36 @@ fn collect_snapshot(
}
}
fn generate_report(start_time: DateTime<Utc>, snapshots: Vec<Snapshot>) -> Report {
fn generate_report(start_time: DateTime<Utc>, snapshots: Vec<Snapshot>, mode: ProfilingMode, target_pid: Option<u32>) -> Report {
let end_time = Utc::now();
let duration = (end_time - start_time).num_seconds();
let timeline: Vec<TimelinePoint> = snapshots.iter().map(|s| {
let avg_cpu = s.cpu_usage.iter().sum::<f32>() / s.cpu_usage.len() as f32;
let cpu_total = s.cpu_usage.iter().sum::<f32>() / s.cpu_usage.len() as f32;
let mem_total_gb = s.used_memory as f32 / 1024.0 / 1024.0 / 1024.0;
let profiler_stats = s.processes.iter()
.filter(|p| p.is_syspulse)
.fold((0.0, 0), |acc, p| (acc.0 + p.cpu_usage, acc.1 + p.memory));
TimelinePoint {
time: s.timestamp.format("%H:%M:%S").to_string(),
avg_cpu,
memory_gb: s.used_memory as f32 / 1024.0 / 1024.0 / 1024.0,
cpu_total,
mem_total_gb,
cpu_profiler: profiler_stats.0 / s.cpu_usage.len() as f32,
mem_profiler_gb: profiler_stats.1 as f32 / 1024.0 / 1024.0 / 1024.0,
}
}).collect();
let mut target_name = None;
if let Some(tpid) = target_pid {
if let Some(snapshot) = snapshots.first() {
if let Some(p) = snapshot.processes.iter().find(|p| p.pid == tpid) {
target_name = Some(p.name.clone());
}
}
}
let mut pid_map: HashMap<u32, (String, Option<u32>, Vec<ProcessHistoryPoint>, f32, f32, bool, bool)> = HashMap::new();
let num_snapshots = snapshots.len() as f32;
@@ -332,14 +362,82 @@ fn generate_report(start_time: DateTime<Utc>, snapshots: Vec<Snapshot>) -> Repor
final_roots.push(node);
}
}
final_roots.sort_by(|a, b| b.inclusive_avg_cpu.partial_cmp(&a.inclusive_avg_cpu).unwrap_or(std::cmp::Ordering::Equal));
Report {
start_time: start_time.to_rfc3339(),
end_time: end_time.to_rfc3339(),
duration_seconds: duration,
timeline,
aggregated_processes: final_roots,
if mode == ProfilingMode::Global {
// Global mode aggregation: Group by NAME, and flattened
let mut name_groups: HashMap<String, AggregatedProcess> = HashMap::new();
/// Folds `node` and all of its descendants into `groups`, keyed by process
/// name. Each group accumulates summed average CPU/memory across instances,
/// counts the instances, and tracks the maximum observed peaks.
fn flatten_to_groups(node: AggregatedProcess, groups: &mut HashMap<String, AggregatedProcess>) {
    // Fetch (or lazily create) the accumulator for this process name.
    // A fresh accumulator is seeded from a copy of the node with its
    // statistics and children zeroed, so the accumulation below is
    // uniform for the first and every subsequent instance.
    let slot = groups.entry(node.name.clone()).or_insert_with(|| {
        let mut seed = node.clone();
        seed.children = Vec::new();
        seed.instance_count = 0;
        seed.inclusive_avg_cpu = 0.0;
        seed.inclusive_avg_memory_mb = 0.0;
        seed.avg_cpu = 0.0;
        seed.avg_memory_mb = 0.0;
        seed
    });
    slot.avg_cpu += node.avg_cpu;
    slot.avg_memory_mb += node.avg_memory_mb;
    slot.instance_count += 1;
    if node.peak_cpu > slot.peak_cpu {
        slot.peak_cpu = node.peak_cpu;
    }
    if node.peak_memory_mb > slot.peak_memory_mb {
        slot.peak_memory_mb = node.peak_memory_mb;
    }
    // Recurse so every descendant is merged into its own name group.
    for child in node.children {
        flatten_to_groups(child, groups);
    }
}
for root in final_roots {
flatten_to_groups(root, &mut name_groups);
}
let mut flattened: Vec<AggregatedProcess> = name_groups.into_values().collect();
flattened.sort_by(|a, b| b.avg_cpu.partial_cmp(&a.avg_cpu).unwrap_or(std::cmp::Ordering::Equal));
Report {
start_time: start_time.to_rfc3339(),
end_time: end_time.to_rfc3339(),
duration_seconds: duration,
mode,
target_name,
timeline,
aggregated_processes: flattened,
}
} else {
// Targeted mode: Return only the target root(s) and their hierarchy
let mut targeted_roots = Vec::new();
if let Some(tpid) = target_pid {
// Find target or its closest surviving ancestor/child in the tree
// Actually final_roots already contains the full tree.
// We want only the root that contains our target PID.
/// Depth-first search for the node whose `pid` equals `tpid`. Each root is
/// checked before its subtree is searched, in order. On a hit the node is
/// detached from its parent vector and returned (with its whole subtree);
/// returns None if the PID is nowhere in the forest.
fn find_target_node(roots: &mut Vec<AggregatedProcess>, tpid: u32) -> Option<AggregatedProcess> {
    let mut idx = 0;
    while idx < roots.len() {
        if roots[idx].pid == tpid {
            return Some(roots.remove(idx));
        }
        if let Some(hit) = find_target_node(&mut roots[idx].children, tpid) {
            return Some(hit);
        }
        idx += 1;
    }
    None
}
if let Some(target_tree) = find_target_node(&mut final_roots, tpid) {
targeted_roots.push(target_tree);
}
}
Report {
start_time: start_time.to_rfc3339(),
end_time: end_time.to_rfc3339(),
duration_seconds: duration,
mode,
target_name,
timeline,
aggregated_processes: targeted_roots,
}
}
}
@@ -354,8 +452,6 @@ fn get_system_stats(state: State<AppState>, minimal: bool) -> SystemStats {
let self_pid = std::process::id();
let syspulse_pids = get_syspulse_pids(self_pid, &sys);
// NO PSS collection during live dashboard. ONLY during profiling or after.
// This is the primary cause of high CPU/I/O.
let snapshot = collect_snapshot(&mut sys, &syspulse_pids, &mut pss_cache, profiling.is_active);
if profiling.is_active {
@@ -400,6 +496,18 @@ fn save_report(report: Report) -> Result<String, String> {
/// Tauri command: begin a new global (whole-system) profiling session,
/// discarding any snapshots left over from a previous run.
fn start_profiling(state: State<AppState>) {
    let mut session = state.profiling.lock().unwrap();
    session.snapshots.clear();
    session.start_time = Some(Utc::now());
    session.target_pid = None;
    session.mode = ProfilingMode::Global;
    // Flip the active flag last; the whole update happens under one lock
    // acquisition, so collectors never observe a half-initialized session.
    session.is_active = true;
}
#[tauri::command]
/// Tauri command: begin a profiling session focused on a single process.
/// `pid` is the OS process id to track; previous snapshots are discarded.
fn start_targeted_profiling(state: State<AppState>, pid: u32) {
    let mut session = state.profiling.lock().unwrap();
    session.snapshots.clear();
    session.start_time = Some(Utc::now());
    session.target_pid = Some(pid);
    session.mode = ProfilingMode::Targeted;
    // Set last, under the same lock acquisition as the fields above.
    session.is_active = true;
}
@@ -409,7 +517,7 @@ fn stop_profiling(state: State<AppState>) -> Report {
let mut profiling = state.profiling.lock().unwrap();
profiling.is_active = false;
let snapshots: Vec<Snapshot> = profiling.snapshots.drain(..).collect();
generate_report(profiling.start_time.unwrap_or(Utc::now()), snapshots)
generate_report(profiling.start_time.unwrap_or(Utc::now()), snapshots, profiling.mode, profiling.target_pid)
}
#[tauri::command]
@@ -456,7 +564,7 @@ fn main() {
}
}
let report = generate_report(start_time, snapshots);
let report = generate_report(start_time, snapshots, ProfilingMode::Global, None);
let json = serde_json::to_string_pretty(&report).unwrap();
let out_path = cli.output.unwrap_or_else(|| PathBuf::from(format!("syspulse_report_{}.json", Utc::now().format("%Y%m%d_%H%M%S"))));
fs::write(&out_path, json).expect("Failed to write report");
@@ -474,6 +582,8 @@ fn main() {
sys: Mutex::new(System::new_all()),
profiling: Mutex::new(ProfilingSession {
is_active: false,
mode: ProfilingMode::Global,
target_pid: None,
start_time: None,
snapshots: Vec::new(),
}),
@@ -484,6 +594,7 @@ fn main() {
get_system_stats,
get_initial_report,
start_profiling,
start_targeted_profiling,
stop_profiling,
run_as_admin,
save_report