Skip to content

Commit

Permalink
bug: Fix some performance regressions (#344)
Browse files Browse the repository at this point in the history
Fixes some performance regressions and forgotten cleanup.

Changes to attempt to improve performance to match 0.4.x:

- Remove `trace!` and `--debug` for now.  These were a significant hog.  Removing this dropped initial memory usage by about half.
- Add an additional cleaning step for `pid_mapping` during process harvesting.  This should hopefully improve memory usage as time goes on.
- Slightly change how we do sorting to hopefully be a bit more optimal?  This was just an easy change to make that I spotted.
- Fix the broken child-thread cleanup task.
  • Loading branch information
ClementTsang authored Dec 11, 2020
1 parent 030f4dd commit fd003f8
Show file tree
Hide file tree
Showing 11 changed files with 143 additions and 195 deletions.
10 changes: 9 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,21 @@
# These are backup files generated by rustfmt
**/*.rs.bk

# Stuff to really ignore
# Logging
*.log

# Flamegraph stuff
rust-unmangle
*.svg
*.data

# IntelliJ
.idea/

# Heaptrack files
*.zst

# For testing
sample_configs/testing.toml

# Wix
Expand Down
2 changes: 2 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## Bug Fixes

- [#344](https://github.com/ClementTsang/bottom/pull/344): Fixes a performance regression causing high memory and CPU usage over time.

- [#345](https://github.com/ClementTsang/bottom/pull/345): Fixes process states not showing.

## [0.5.3] - 2020-11-26
Expand Down
3 changes: 2 additions & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ doc = false

[profile.release]
debug = 1
# debug = true
opt-level = 3
lto = "fat"
codegen-units = 1
Expand Down Expand Up @@ -88,4 +89,4 @@ output = "bottom_x86_64_installer.msi"
[dev-dependencies.cargo-husky]
version = "1"
default-features = false
features = ["user-hooks"]
features = ["user-hooks"]
1 change: 0 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,6 @@ Use `btm --help` for more information.
--color <COLOR SCHEME> Use a color scheme, use --help for supported values.
-C, --config <CONFIG PATH> Sets the location of the config file.
-u, --current_usage Sets process CPU% to be based on current CPU%.
--debug Enables debug logging.
-t, --default_time_value <MS> Default time value for graphs in ms.
--default_widget_count <INT> Sets the n'th selected widget type as the default.
--default_widget_type <WIDGET TYPE> Sets which widget type to use as the default widget.
Expand Down
42 changes: 23 additions & 19 deletions src/app/data_farmer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -102,26 +102,30 @@ impl DataCollection {
}

pub fn clean_data(&mut self, max_time_millis: u64) {
trace!("Cleaning data.");
// trace!("Cleaning data.");
let current_time = Instant::now();

let mut remove_index = 0;
for entry in &self.timed_data_vec {
if current_time.duration_since(entry.0).as_millis() >= max_time_millis as u128 {
remove_index += 1;
} else {
break;
}
}
let remove_index = match self
.timed_data_vec
.binary_search_by(|(instant, _timed_data)| {
current_time
.duration_since(*instant)
.as_millis()
.cmp(&(max_time_millis as u128))
.reverse()
}) {
Ok(index) => index,
Err(index) => index,
};

self.timed_data_vec.drain(0..remove_index);
}

pub fn eat_data(&mut self, harvested_data: &Data) {
trace!("Eating data now...");
// trace!("Eating data now...");
let harvested_time = harvested_data.last_collection_time;
trace!("Harvested time: {:?}", harvested_time);
trace!("New current instant: {:?}", self.current_instant);
// trace!("Harvested time: {:?}", harvested_time);
// trace!("New current instant: {:?}", self.current_instant);
let mut new_entry = TimedData::default();

// Network
Expand Down Expand Up @@ -171,7 +175,7 @@ impl DataCollection {
fn eat_memory_and_swap(
&mut self, memory: &mem::MemHarvest, swap: &mem::MemHarvest, new_entry: &mut TimedData,
) {
trace!("Eating mem and swap.");
// trace!("Eating mem and swap.");
// Memory
let mem_percent = match memory.mem_total_in_mb {
0 => 0f64,
Expand All @@ -194,7 +198,7 @@ impl DataCollection {
}

fn eat_network(&mut self, network: &network::NetworkHarvest, new_entry: &mut TimedData) {
trace!("Eating network.");
// trace!("Eating network.");
// FIXME [NETWORKING; CONFIG]: The ability to config this?
// FIXME [NETWORKING]: Support bits, support switching between decimal and binary units (move the log part to conversion and switch on the fly)
// RX
Expand All @@ -216,7 +220,7 @@ impl DataCollection {
}

fn eat_cpu(&mut self, cpu: &[cpu::CpuData], new_entry: &mut TimedData) {
trace!("Eating CPU.");
// trace!("Eating CPU.");
// Note this only pre-calculates the data points - the names will be
// within the local copy of cpu_harvest. Since it's all sequential
// it probably doesn't matter anyways.
Expand All @@ -227,15 +231,15 @@ impl DataCollection {
}

fn eat_temp(&mut self, temperature_sensors: &[temperature::TempHarvest]) {
trace!("Eating temps.");
// trace!("Eating temps.");
// TODO: [PO] To implement
self.temp_harvest = temperature_sensors.to_vec();
}

fn eat_disks(
&mut self, disks: &[disks::DiskHarvest], io: &disks::IOHarvest, harvested_time: Instant,
) {
trace!("Eating disks.");
// trace!("Eating disks.");
// TODO: [PO] To implement

let time_since_last_harvest = harvested_time
Expand Down Expand Up @@ -308,12 +312,12 @@ impl DataCollection {
}

fn eat_proc(&mut self, list_of_processes: &[processes::ProcessHarvest]) {
trace!("Eating proc.");
// trace!("Eating proc.");
self.process_harvest = list_of_processes.to_vec();
}

fn eat_battery(&mut self, list_of_batteries: &[batteries::BatteryHarvest]) {
trace!("Eating batteries.");
// trace!("Eating batteries.");
self.battery_harvest = list_of_batteries.to_vec();
}
}
86 changes: 14 additions & 72 deletions src/app/data_harvester.rs
Original file line number Diff line number Diff line change
Expand Up @@ -94,10 +94,10 @@ pub struct DataCollector {

impl Default for DataCollector {
fn default() -> Self {
trace!("Creating default data collector...");
// trace!("Creating default data collector...");
DataCollector {
data: Data::default(),
sys: System::new_all(),
sys: System::new_with_specifics(sysinfo::RefreshKind::new()),
#[cfg(target_os = "linux")]
pid_mapping: HashMap::new(),
#[cfg(target_os = "linux")]
Expand All @@ -116,22 +116,24 @@ impl Default for DataCollector {
battery_list: None,
#[cfg(target_os = "linux")]
page_file_size_kb: unsafe {
let page_file_size_kb = libc::sysconf(libc::_SC_PAGESIZE) as u64 / 1024;
trace!("Page file size in KB: {}", page_file_size_kb);
page_file_size_kb
// let page_file_size_kb = libc::sysconf(libc::_SC_PAGESIZE) as u64 / 1024;
// trace!("Page file size in KB: {}", page_file_size_kb);
// page_file_size_kb
libc::sysconf(libc::_SC_PAGESIZE) as u64 / 1024
},
}
}
}

impl DataCollector {
pub fn init(&mut self) {
trace!("Initializing data collector.");
// trace!("Initializing data collector.");
self.sys.refresh_memory();
self.mem_total_kb = self.sys.get_total_memory();
trace!("Total memory in KB: {}", self.mem_total_kb);
// trace!("Total memory in KB: {}", self.mem_total_kb);

if self.widgets_to_harvest.use_battery {
trace!("First run battery vec creation.");
// trace!("First run battery vec creation.");
if let Ok(battery_manager) = Manager::new() {
if let Ok(batteries) = battery_manager.batteries() {
let battery_list: Vec<Battery> = batteries.filter_map(Result::ok).collect();
Expand All @@ -143,15 +145,15 @@ impl DataCollector {
}
}

trace!("Running first run.");
// trace!("Running first run.");
futures::executor::block_on(self.update_data());
trace!("First run done. Sleeping for 250ms...");
// trace!("First run done. Sleeping for 250ms...");
std::thread::sleep(std::time::Duration::from_millis(250));

trace!("First run done. Running first run cleanup now.");
// trace!("First run done. Running first run cleanup now.");
self.data.cleanup();

trace!("Enabled widgets to harvest: {:#?}", self.widgets_to_harvest);
// trace!("Enabled widgets to harvest: {:#?}", self.widgets_to_harvest);
}

pub fn set_collected_data(&mut self, used_widgets: UsedWidgets) {
Expand Down Expand Up @@ -208,13 +210,6 @@ impl DataCollector {
// CPU
if self.widgets_to_harvest.use_cpu {
self.data.cpu = Some(cpu::get_cpu_data_list(&self.sys, self.show_average_cpu));
if log_enabled!(log::Level::Trace) {
if let Some(cpus) = &self.data.cpu {
trace!("cpus: {:#?} results", cpus.len());
} else {
trace!("Found no cpus.");
}
}
}

// Batteries
Expand All @@ -223,14 +218,6 @@ impl DataCollector {
self.data.list_of_batteries =
Some(batteries::refresh_batteries(&battery_manager, battery_list));
}

if log_enabled!(log::Level::Trace) {
if let Some(batteries) = &self.data.list_of_batteries {
trace!("batteries: {:#?} results", batteries.len());
} else {
trace!("Found no batteries.");
}
}
}

if self.widgets_to_harvest.use_proc {
Expand Down Expand Up @@ -260,14 +247,6 @@ impl DataCollector {
} {
self.data.list_of_processes = Some(process_list);
}

if log_enabled!(log::Level::Trace) {
if let Some(processes) = &self.data.list_of_processes {
trace!("processes: {:#?} results", processes.len());
} else {
trace!("Found no processes.");
}
}
}

// I am *well* aware that the sysinfo part w/ blocking code is... not great.
Expand Down Expand Up @@ -362,63 +341,26 @@ impl DataCollector {
self.total_rx = net_data.total_rx;
self.total_tx = net_data.total_tx;
self.data.network = Some(net_data);
if log_enabled!(log::Level::Trace) {
trace!("Total rx: {:#?}", self.total_rx);
trace!("Total tx: {:#?}", self.total_tx);
if let Some(network) = &self.data.network {
trace!("network rx: {:#?}", network.rx);
trace!("network tx: {:#?}", network.tx);
} else {
trace!("Could not find any networks.");
}
}
}

if let Ok(memory) = mem_res.0 {
self.data.memory = memory;
if log_enabled!(log::Level::Trace) {
trace!("mem: {:?} results", self.data.memory);
}
}

if let Ok(swap) = mem_res.1 {
self.data.swap = swap;
if log_enabled!(log::Level::Trace) {
trace!("swap: {:?} results", self.data.swap);
}
}

if let Ok(disks) = disk_res {
self.data.disks = disks;
if log_enabled!(log::Level::Trace) {
if let Some(disks) = &self.data.disks {
trace!("disks: {:#?} results", disks.len());
} else {
trace!("Could not find any disks.");
}
}
}

if let Ok(io) = io_res {
self.data.io = io;
if log_enabled!(log::Level::Trace) {
if let Some(io) = &self.data.io {
trace!("io: {:#?} results", io.len());
} else {
trace!("Could not find any io results.");
}
}
}

if let Ok(temp) = temp_res {
self.data.temperature_sensors = temp;
if log_enabled!(log::Level::Trace) {
if let Some(sensors) = &self.data.temperature_sensors {
trace!("temp: {:#?} results", sensors.len());
} else {
trace!("Could not find any temp sensors.");
}
}
}

// Update time
Expand Down
8 changes: 7 additions & 1 deletion src/app/data_harvester/processes.rs
Original file line number Diff line number Diff line change
Expand Up @@ -375,8 +375,10 @@ pub fn get_process_data(
time_difference_in_secs: u64, mem_total_kb: u64, page_file_kb: u64,
) -> crate::utils::error::Result<Vec<ProcessHarvest>> {
// TODO: [PROC THREADS] Add threads
use std::collections::HashSet;

if let Ok((cpu_usage, cpu_fraction)) = cpu_usage_calculation(prev_idle, prev_non_idle) {
let mut pids_to_clear: HashSet<Pid> = pid_mapping.keys().cloned().collect();
let process_vector: Vec<ProcessHarvest> = std::fs::read_dir("/proc")?
.filter_map(|dir| {
if let Ok(dir) = dir {
Expand All @@ -393,6 +395,7 @@ pub fn get_process_data(
mem_total_kb,
page_file_kb,
) {
pids_to_clear.remove(&pid);
return Some(process_object);
}
}
Expand All @@ -402,9 +405,12 @@ pub fn get_process_data(
})
.collect();

pids_to_clear.iter().for_each(|pid| {
pid_mapping.remove(pid);
});

Ok(process_vector)
} else {
trace!("Could not calculate CPU usage.");
Err(BottomError::GenericError(
"Could not calculate CPU usage.".to_string(),
))
Expand Down
Loading

0 comments on commit fd003f8

Please sign in to comment.