├── .all-contributorsrc
├── .cargo-husky
└── hooks
│ └── pre-push
├── .cargo
└── config.toml
├── .cirrus.yml
├── .github
├── ISSUE_TEMPLATE
│ ├── bug_report.yml
│ ├── config.yml
│ ├── feature_request.yml
│ └── packaging.yml
├── pull_request_template.md
└── workflows
│ ├── build_releases.yml
│ ├── ci.yml
│ ├── clear_workflow_cache.yml
│ ├── coverage.yml
│ ├── deployment.yml
│ ├── docs.yml
│ ├── nightly.yml
│ ├── post_release.yml
│ ├── test_docs.yml
│ └── validate_schema.yml
├── .gitignore
├── .markdownlint.json
├── CHANGELOG.md
├── CONTRIBUTING.md
├── Cargo.lock
├── Cargo.toml
├── Cross.toml
├── LICENSE
├── README.md
├── assets
└── demo.gif
├── build.rs
├── clippy.toml
├── codecov.yml
├── desktop
└── bottom.desktop
├── docs
├── .gitignore
├── README.md
├── content
│ ├── assets
│ │ └── screenshots
│ │ │ ├── basic.webp
│ │ │ ├── battery.webp
│ │ │ ├── config
│ │ │ ├── disk-filtering
│ │ │ │ ├── disk_name_filter.webp
│ │ │ │ ├── disk_name_mount_filter.webp
│ │ │ │ └── disk_no_filter.webp
│ │ │ └── layout
│ │ │ │ └── sample_layout.webp
│ │ │ ├── cpu.webp
│ │ │ ├── disk.webp
│ │ │ ├── memory.webp
│ │ │ ├── network
│ │ │ ├── network.webp
│ │ │ └── network_old.webp
│ │ │ ├── process
│ │ │ ├── process_default.webp
│ │ │ ├── process_full.webp
│ │ │ ├── process_grouped.webp
│ │ │ ├── process_kill_linux.webp
│ │ │ ├── process_kill_simple.webp
│ │ │ ├── process_sort_menu.webp
│ │ │ ├── process_tree.webp
│ │ │ └── search
│ │ │ │ ├── cpu.webp
│ │ │ │ ├── or.webp
│ │ │ │ ├── quotes.webp
│ │ │ │ ├── regex.webp
│ │ │ │ └── search.webp
│ │ │ ├── temperature.webp
│ │ │ └── troubleshooting
│ │ │ ├── cmd_prompt_font.webp
│ │ │ ├── cmd_prompt_props.webp
│ │ │ ├── dots.webp
│ │ │ ├── no_braille.webp
│ │ │ └── regedit_fonts.webp
│ ├── configuration
│ │ ├── command-line-options.md
│ │ └── config-file
│ │ │ ├── cpu.md
│ │ │ ├── data-filtering.md
│ │ │ ├── flags.md
│ │ │ ├── index.md
│ │ │ ├── layout.md
│ │ │ ├── processes.md
│ │ │ └── styling.md
│ ├── contribution
│ │ ├── development
│ │ │ ├── build_process.md
│ │ │ ├── deploy_process.md
│ │ │ ├── dev_env.md
│ │ │ ├── logging.md
│ │ │ └── testing.md
│ │ ├── documentation.md
│ │ ├── issues-and-pull-requests.md
│ │ └── packaging-and-distribution.md
│ ├── index.md
│ ├── overrides
│ │ └── main.html
│ ├── stylesheets
│ │ └── extra.css
│ ├── support
│ │ ├── official.md
│ │ └── unofficial.md
│ ├── troubleshooting.md
│ └── usage
│ │ ├── autocomplete.md
│ │ ├── basic-mode.md
│ │ ├── general-usage.md
│ │ └── widgets
│ │ ├── battery.md
│ │ ├── cpu.md
│ │ ├── disk.md
│ │ ├── memory.md
│ │ ├── network.md
│ │ ├── process.md
│ │ └── temperature.md
├── mkdocs.yml
├── requirements.txt
└── serve.sh
├── rustfmt.toml
├── sample_configs
├── default_config.toml
└── demo_config.toml
├── schema
├── README.md
├── nightly
│ └── bottom.json
├── v0.10
│ └── bottom.json
└── v0.9
│ └── bottom.json
├── scripts
├── cirrus
│ └── release.py
├── clear_cache.py
├── schema
│ ├── bad_file.toml
│ ├── generate.sh
│ ├── requirements.txt
│ └── validator.py
└── windows
│ └── choco
│ ├── bottom.nuspec.template
│ ├── choco_packager.py
│ └── chocolateyinstall.ps1.template
├── src
├── app.rs
├── app
│ ├── data
│ │ ├── mod.rs
│ │ ├── process.rs
│ │ ├── store.rs
│ │ ├── temperature.rs
│ │ └── time_series.rs
│ ├── filter.rs
│ ├── layout_manager.rs
│ ├── process_killer.rs
│ └── states.rs
├── bin
│ ├── main.rs
│ └── schema.rs
├── canvas.rs
├── canvas
│ ├── components.rs
│ ├── components
│ │ ├── data_table.rs
│ │ ├── data_table
│ │ │ ├── column.rs
│ │ │ ├── data_type.rs
│ │ │ ├── draw.rs
│ │ │ ├── props.rs
│ │ │ ├── sortable.rs
│ │ │ ├── state.rs
│ │ │ └── styling.rs
│ │ ├── pipe_gauge.rs
│ │ ├── time_graph.rs
│ │ ├── time_graph
│ │ │ ├── time_chart.rs
│ │ │ └── time_chart
│ │ │ │ ├── canvas.rs
│ │ │ │ ├── grid.rs
│ │ │ │ └── points.rs
│ │ └── widget_carousel.rs
│ ├── dialogs.rs
│ ├── dialogs
│ │ ├── dd_dialog.rs
│ │ └── help_dialog.rs
│ ├── drawing_utils.rs
│ ├── widgets.rs
│ └── widgets
│ │ ├── battery_display.rs
│ │ ├── cpu_basic.rs
│ │ ├── cpu_graph.rs
│ │ ├── disk_table.rs
│ │ ├── mem_basic.rs
│ │ ├── mem_graph.rs
│ │ ├── network_basic.rs
│ │ ├── network_graph.rs
│ │ ├── process_table.rs
│ │ └── temperature_table.rs
├── collection.rs
├── collection
│ ├── amd.rs
│ ├── amd
│ │ └── amd_gpu_marketing.rs
│ ├── batteries.rs
│ ├── cpu.rs
│ ├── cpu
│ │ └── sysinfo.rs
│ ├── disks.rs
│ ├── disks
│ │ ├── freebsd.rs
│ │ ├── io_counters.rs
│ │ ├── other.rs
│ │ ├── unix.rs
│ │ ├── unix
│ │ │ ├── file_systems.rs
│ │ │ ├── linux
│ │ │ │ ├── counters.rs
│ │ │ │ ├── mod.rs
│ │ │ │ └── partition.rs
│ │ │ ├── macos
│ │ │ │ ├── counters.rs
│ │ │ │ ├── io_kit.rs
│ │ │ │ ├── io_kit
│ │ │ │ │ ├── bindings.rs
│ │ │ │ │ ├── io_disks.rs
│ │ │ │ │ ├── io_iterator.rs
│ │ │ │ │ └── io_object.rs
│ │ │ │ └── mod.rs
│ │ │ ├── other
│ │ │ │ ├── bindings.rs
│ │ │ │ ├── mod.rs
│ │ │ │ └── partition.rs
│ │ │ └── usage.rs
│ │ ├── windows.rs
│ │ ├── windows
│ │ │ └── bindings.rs
│ │ └── zfs_io_counters.rs
│ ├── error.rs
│ ├── linux
│ │ └── utils.rs
│ ├── memory.rs
│ ├── memory
│ │ ├── arc.rs
│ │ ├── sysinfo.rs
│ │ └── windows.rs
│ ├── network.rs
│ ├── network
│ │ └── sysinfo.rs
│ ├── nvidia.rs
│ ├── processes.rs
│ ├── processes
│ │ ├── freebsd.rs
│ │ ├── linux
│ │ │ ├── mod.rs
│ │ │ └── process.rs
│ │ ├── macos.rs
│ │ ├── macos
│ │ │ └── sysctl_bindings.rs
│ │ ├── unix.rs
│ │ ├── unix
│ │ │ ├── process_ext.rs
│ │ │ └── user_table.rs
│ │ └── windows.rs
│ ├── temperature.rs
│ └── temperature
│ │ ├── linux.rs
│ │ └── sysinfo.rs
├── constants.rs
├── event.rs
├── lib.rs
├── options.rs
├── options
│ ├── args.rs
│ ├── config.rs
│ ├── config
│ │ ├── cpu.rs
│ │ ├── disk.rs
│ │ ├── flags.rs
│ │ ├── ignore_list.rs
│ │ ├── layout.rs
│ │ ├── network.rs
│ │ ├── process.rs
│ │ ├── style.rs
│ │ ├── style
│ │ │ ├── battery.rs
│ │ │ ├── borders.rs
│ │ │ ├── cpu.rs
│ │ │ ├── graphs.rs
│ │ │ ├── memory.rs
│ │ │ ├── network.rs
│ │ │ ├── tables.rs
│ │ │ ├── themes.rs
│ │ │ ├── themes
│ │ │ │ ├── default.rs
│ │ │ │ ├── gruvbox.rs
│ │ │ │ └── nord.rs
│ │ │ ├── utils.rs
│ │ │ └── widgets.rs
│ │ └── temperature.rs
│ └── error.rs
├── utils
│ ├── cancellation_token.rs
│ ├── conversion.rs
│ ├── data_units.rs
│ ├── general.rs
│ ├── logging.rs
│ └── strings.rs
└── widgets
│ ├── battery_info.rs
│ ├── cpu_graph.rs
│ ├── disk_table.rs
│ ├── mem_graph.rs
│ ├── mod.rs
│ ├── network_graph.rs
│ ├── process_table.rs
│ ├── process_table
│ ├── process_columns.rs
│ ├── process_data.rs
│ ├── query.rs
│ └── sort_table.rs
│ └── temperature_table.rs
├── tests
├── integration
│ ├── arg_tests.rs
│ ├── invalid_config_tests.rs
│ ├── layout_movement_tests.rs
│ ├── main.rs
│ ├── util.rs
│ └── valid_config_tests.rs
├── invalid_configs
│ ├── duplicate_temp_type.toml
│ ├── empty_layout.toml
│ ├── invalid_colour_hex.toml
│ ├── invalid_colour_hex_2.toml
│ ├── invalid_colour_hex_3.toml
│ ├── invalid_colour_name.toml
│ ├── invalid_colour_rgb.toml
│ ├── invalid_colour_rgb_2.toml
│ ├── invalid_colour_string.toml
│ ├── invalid_default_widget_count.toml
│ ├── invalid_disk_column.toml
│ ├── invalid_layout_widget_type.toml
│ ├── invalid_process_column.toml
│ ├── lone_default_widget_count.toml
│ └── toml_mismatch_type.toml
└── valid_configs
│ ├── all_proc.toml
│ ├── cpu_doughnut.toml
│ ├── empty_config.toml
│ ├── filtering.toml
│ ├── many_proc.toml
│ ├── styling.toml
│ ├── styling_2.toml
│ └── theme.toml
└── wix
├── License.rtf
└── main.wxs
/.cargo-husky/hooks/pre-push:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | echo "Running pre-push hook:"
6 |
7 | echo "Executing: cargo fmt --all -- --check"
8 | cargo fmt --all -- --check
9 |
10 | echo "Executing: cargo clippy --all-targets --workspace -- -D warnings"
11 | cargo clippy --all-targets --workspace -- -D warnings
12 |
--------------------------------------------------------------------------------
/.cargo/config.toml:
--------------------------------------------------------------------------------
1 | [target.x86_64-pc-windows-msvc]
2 | rustflags = ["-C", "target-feature=+crt-static"]
3 |
4 | [target.i686-pc-windows-msvc]
5 | rustflags = ["-C", "target-feature=+crt-static"]
6 |
--------------------------------------------------------------------------------
/.cirrus.yml:
--------------------------------------------------------------------------------
1 | %YAML 1.1
2 | ---
3 | # Configuration for CirrusCI. This is primarily used for testing and building FreeBSD and old versions of Linux,
4 | # since other CI platforms don't support build jobs for these configurations.
5 | #
6 | # Note that we set the YAML directive above to prevent some linting errors around the templates.
7 |
8 | setup_template: &SETUP_TEMPLATE
9 | setup_script:
10 | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs --output rustup.sh
11 | - sh rustup.sh --default-toolchain stable -y
12 |
13 | cache_template: &CACHE_TEMPLATE
14 | registry_cache:
15 | folder: $HOME/.cargo/registry
16 | reupload_on_changes: "true"
17 | fingerprint_script:
18 | - $HOME/.cargo/bin/rustc --version
19 | - cat Cargo.lock
20 | - echo $CIRRUS_OS
21 | - echo $CIRRUS_TASK_NAME
22 | target_cache:
23 | folder: target
24 | reupload_on_changes: "true"
25 | fingerprint_script:
26 | - $HOME/.cargo/bin/rustc --version
27 | - cat Cargo.lock
28 | - echo $CIRRUS_OS
29 | - echo $CIRRUS_TASK_NAME
30 |
31 | cleanup_template: &CLEANUP_TEMPLATE
32 | before_cache_script:
33 | - rm -rf $HOME/.cargo/registry/index
34 | - rm -rf $HOME/.cargo/registry/src
35 | - rm -f ./target/.rustc_info.json
36 |
37 | env:
38 | CARGO_INCREMENTAL: "0"
39 | CARGO_PROFILE_DEV_DEBUG: "0"
40 | CARGO_HUSKY_DONT_INSTALL_HOOKS: "true"
41 |
42 | release_task:
43 | auto_cancellation: "false"
44 | only_if: $CIRRUS_BUILD_SOURCE == "api" && $BTM_BUILD_RELEASE_CALLER == "ci"
45 | timeout_in: "30m"
46 | env:
47 | BTM_GENERATE: "true"
48 | COMPLETION_DIR: "target/tmp/bottom/completion/"
49 | MANPAGE_DIR: "target/tmp/bottom/manpage/"
50 | # -PLACEHOLDER FOR CI-
51 | matrix:
52 | - name: "Legacy Linux (2.17)"
53 | alias: "linux_2_17_build"
54 | container:
55 | image: quay.io/pypa/manylinux2014_x86_64
56 | env:
57 | TARGET: "x86_64-unknown-linux-gnu"
58 | NAME: "x86_64-unknown-linux-gnu-2-17"
59 | <<: *SETUP_TEMPLATE
60 | <<: *CACHE_TEMPLATE
61 | build_script:
62 | - . $HOME/.cargo/env
63 | - cargo build --release --verbose --locked --features deploy
64 | - mv ./target/release/btm ./
65 | - ./btm -V
66 | - mv "$COMPLETION_DIR" completion
67 | - mv "$MANPAGE_DIR" manpage
68 | - tar -czvf bottom_$NAME.tar.gz btm completion
69 | binaries_artifacts:
70 | path: bottom_$NAME.tar.gz
71 | <<: *CLEANUP_TEMPLATE
72 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: true
2 | contact_links:
3 | - name: Open a discussion
4 | about: |
5 | Got a question about using bottom? Need help troubleshooting something? Or maybe you just want to talk about something related to bottom? Feel free to open a discussion!
6 | url: https://github.com/ClementTsang/bottom/discussions/new
7 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.yml:
--------------------------------------------------------------------------------
1 | name: Feature request
2 | description: Got a good idea that hasn't already been suggested? Mention it here!
3 | labels: ["feature"]
4 | body:
5 | - type: checkboxes
6 | id: acknowledgements
7 | attributes:
8 | label: Checklist
9 | options:
10 | - label: >
11 | I've looked through [the documentation](https://clementtsang.github.io/bottom/nightly/) and
12 | [existing open issues](https://github.com/ClementTsang/bottom/issues?q=is%3Aopen+is%3Aissue+label%3Afeature)
13 | for similar feature requests.
14 | required: true
15 |
16 | - type: textarea
17 | id: description
18 | validations:
19 | required: true
20 | attributes:
21 | label: Describe the feature request
22 | description: >
23 | Please describe what behaviour you are looking for, the motivation for it, and use cases where this feature
24 | would be helpful to both you and others. Try to be clear and concise.
25 |
26 | If you have any ideas to implement this feature as well, feel free to write them down here too.
27 | placeholder: |
28 | Example:
29 | It would be nice to support FreeBSD, as I and others often use similar tools on my FreeBSD-based system.
30 | I also noticed that sysinfo has FreeBSD support as a data source.
31 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/packaging.yml:
--------------------------------------------------------------------------------
1 | name: Packaging
2 | description: For issues, questions, or requests regarding packaging or distribution.
3 | labels: ["packaging"]
4 | body:
5 | - type: markdown
6 | attributes:
7 | value: >
8 | If this is an issue about supporting a new package/installation method for a platform you use, please
9 | consider maintaining it yourself/with others and submitting a PR or issue with a link to it - they'll be
10 | very much appreciated and likely added to the README quickly. [The documentation on packaging/distribution](https://clementtsang.github.io/bottom/nightly/contribution/packaging-and-distribution/)
11 | may be helpful in setting things up. If there are some issues with bottom itself causing problems with
12 | packaging, feel free to open an appropriate issue.
13 |
14 |
15 | If this is an issue regarding a specific existing distribution channel, feel free to report issues here if they
16 | are related to the following sources:
17 |
18 | * [crates.io](https://crates.io/crates/bottom)
19 | * [Binary releases/packages released on GitHub](https://github.com/ClementTsang/bottom/releases)
20 |
21 |
22 | For any other distribution channel, please first try to contact the package maintainers where appropriate
23 | to get help regarding distribution-specific issues (e.g. the package has issues installing, the package
24 | is outdated, etc.) before reaching out here. This is because, while I am happy to help where possible, I do not
25 | personally use many of the various ways people distribute bottom. As such, unless specified, I might lack the
26 | platform-specific context, knowledge, or tools to be able to help you at all regarding the distribution method,
27 | and the best I can do is just point you to the package maintainer.
28 |
29 | - type: checkboxes
30 | id: acknowledgements
31 | attributes:
32 | label: Checklist
33 | options:
34 | - label: >
35 | I have read and understood the above text.
36 | required: true
37 |
38 | - type: textarea
39 | id: description
40 | validations:
41 | required: true
42 | attributes:
43 | label: Describe the issue
44 | description: >
45 | What is the packaging-related issue? Please be clear and concise.
46 | placeholder: |
47 | Example: Would it be possible to add shell completion generation as a separate build artifact?
48 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | ## Description
2 |
3 | _A description of the change, what it does, and why it was made. If relevant (such as any change that modifies the UI), **please provide screenshots** of the changes:_
4 |
5 | ## Issue
6 |
7 | _If applicable, what issue does this address?_
8 |
9 | Closes: #
10 |
11 | ## Testing
12 |
13 | _If relevant, please state how this was tested. All changes **must** be tested to work:_
14 |
15 | _If this is a code change, please also indicate which platforms were tested:_
16 |
17 | - [ ] _Windows_
18 | - [ ] _macOS_
19 | - [ ] _Linux_
20 |
21 | ## Checklist
22 |
23 | _If relevant, ensure the following have been met:_
24 |
25 | - [ ] _Areas your change affects have been linted using rustfmt (`cargo fmt`)_
26 | - [ ] _The change has been tested and doesn't appear to cause any unintended breakage_
27 | - [ ] _Documentation has been added/updated if needed (`README.md`, help menu, doc pages, etc.)_
28 | - [ ] _The pull request passes the provided CI pipeline_
29 | - [ ] _There are no merge conflicts_
30 | - [ ] _If relevant, new tests were added (don't worry too much about coverage)_
31 |
--------------------------------------------------------------------------------
/.github/workflows/clear_workflow_cache.yml:
--------------------------------------------------------------------------------
1 | # Simple job to clear the cache used by a workflow. This automatically runs when a PR is closed/merged
2 | # to clean up the corresponding PR's cache.
3 |
4 | name: "clear workflow cache"
5 |
6 | on:
7 | workflow_dispatch:
8 | inputs:
9 | id:
10 | description: "Which id to clear. Type main/master/all to clean all, and keep-main/keep-master to clean all but the main branch."
11 | required: false
12 | pull_request:
13 | types:
14 | - closed
15 | schedule:
16 | - cron: "0 11 * * 0"
17 |
18 | jobs:
19 | clear-cache:
20 | runs-on: ubuntu-latest
21 | env:
22 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
23 | steps:
24 | - name: Checkout repository
25 | uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
26 | with:
27 | fetch-depth: 1
28 |
29 | # We run each script twice with a small delay in between to try and catch everything.
30 | - name: Clear cache
31 | run: |
32 | if [[ -n "${{ github.event.schedule }}" ]]; then
33 | python ./scripts/clear_cache.py keep-main
34 | sleep 5
35 | python ./scripts/clear_cache.py keep-main
36 | elif [[ -z "${{ github.event.inputs.id }}" ]]; then
37 | python ./scripts/clear_cache.py ${{ github.event.pull_request.number }}
38 | sleep 5
39 | python ./scripts/clear_cache.py ${{ github.event.pull_request.number }}
40 | else
41 | python ./scripts/clear_cache.py ${{ github.event.inputs.id }}
42 | sleep 5
43 | python ./scripts/clear_cache.py ${{ github.event.inputs.id }}
44 | fi
45 |
--------------------------------------------------------------------------------
/.github/workflows/coverage.yml:
--------------------------------------------------------------------------------
1 | # Code coverage generation via cargo-llvm-cov, which is then uploaded to Codecov.
2 | # Codecov will report back via a comment if run on a PR.
3 | #
4 | # Note that Codecov will report back the average of all uploaded coverage files.
5 |
6 | name: codecov
7 |
8 | on:
9 | workflow_dispatch:
10 | pull_request:
11 | push:
12 | branches:
13 | - main
14 |
15 | env:
16 | CARGO_INCREMENTAL: 0
17 | CARGO_HUSKY_DONT_INSTALL_HOOKS: true
18 |
19 | concurrency:
20 | group: ${{ github.workflow }}-${{ github.ref }}
21 | cancel-in-progress: ${{ github.event_name == 'pull_request' || github.repository != 'ClementTsang/bottom' }}
22 |
23 | jobs:
24 | pre-job:
25 | runs-on: ubuntu-latest
26 | outputs:
27 | should_skip: ${{ steps.skip_check.outputs.should_skip }}
28 | steps:
29 | - id: skip_check
30 | uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1
31 | with:
32 | skip_after_successful_duplicate: "false"
33 | paths: '["tests/**", "src/**", ".github/workflows/coverage.yml", ".cargo/**", "Cargo.toml", "Cargo.lock", "build.rs"]'
34 | do_not_skip: '["workflow_dispatch", "push"]'
35 |
36 | coverage:
37 | needs: pre-job
38 | if: ${{ needs.pre-job.outputs.should_skip != 'true' }}
39 | runs-on: ${{ matrix.info.os }}
40 | timeout-minutes: 12
41 | strategy:
42 | fail-fast: false
43 | matrix:
44 | info:
45 | - { os: "ubuntu-latest", target: "x86_64-unknown-linux-gnu" }
46 | - { os: "macos-14", target: "aarch64-apple-darwin", cross: false }
47 | - { os: "windows-2019", target: "x86_64-pc-windows-msvc" }
48 | steps:
49 | - name: Checkout repository
50 | uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
51 |
52 | - name: Set up Rust toolchain
53 | uses: dtolnay/rust-toolchain@21dc36fb71dd22e3317045c0c31a3f4249868b17
54 | with:
55 | toolchain: stable
56 |
57 | - name: Enable Rust cache
58 | uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # 2.7.8
59 | if: ${{ github.event_name != 'pull_request' || ! github.event.pull_request.head.repo.fork }} # If it is a PR, only if not a fork
60 | with:
61 | key: ${{ matrix.info.target }}
62 | cache-all-crates: true
63 |
64 | - name: Install cargo-llvm-cov
65 | run: |
66 | rustup component add llvm-tools-preview
67 | cargo install cargo-llvm-cov --version 0.6.11 --locked
68 |
69 | - name: Generate code coverage
70 | run: |
71 | cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info --locked --target=${{ matrix.info.target }}
72 |
73 | # The token is generally not needed, but sometimes the default shared token hits limits.
74 | - name: Upload to codecov.io
75 | uses: Wandalen/wretry.action@6feedb7dedadeb826de0f45ff482b53b379a7844 # v3.5.0
76 | with:
77 | action: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673 # v4.5.0
78 | with: |
79 | files: lcov.info
80 | fail_ci_if_error: true
81 | token: ${{ secrets.CODECOV_TOKEN }}
82 | flags: ${{ matrix.info.os }}
83 | attempt_limit: 5
84 | attempt_delay: 1500
85 |
--------------------------------------------------------------------------------
/.github/workflows/docs.yml:
--------------------------------------------------------------------------------
1 | # Workflow to deploy mkdocs documentation.
2 |
3 | name: docs
4 |
5 | on:
6 | workflow_dispatch:
7 | push:
8 | branches:
9 | - main
10 | paths:
11 | - "docs/**"
12 | - ".github/workflows/docs.yml"
13 |
14 | env:
15 | # Assign commit authorship to official GitHub Actions bot when pushing to the `gh-pages` branch:
16 | GIT_USER: "github-actions[bot]"
17 | GIT_EMAIL: "41898282+github-actions[bot]@users.noreply.github.com"
18 |
19 | jobs:
20 | build-documentation:
21 | name: Build and deploy docs
22 | runs-on: ubuntu-latest
23 | steps:
24 | - name: Checkout repository
25 | uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
26 | with:
27 | fetch-depth: 0
28 |
29 | - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0
30 | with:
31 | python-version: 3.12
32 |
33 | - name: Install Python dependencies
34 | run: pip install -r docs/requirements.txt
35 |
36 | - name: Configure git user and email
37 | run: |
38 | git config --global user.name ${GIT_USER}
39 | git config --global user.email ${GIT_EMAIL}
40 | echo Name: $(git config --get user.name)
41 | echo Email: $(git config --get user.email)
42 |
43 | - name: Build and deploy docs with mike
44 | run: |
45 | cd docs
46 | mike deploy nightly --push
47 |
--------------------------------------------------------------------------------
/.github/workflows/nightly.yml:
--------------------------------------------------------------------------------
1 | # Creates nightly deployment builds for main targets. Note this does not cover package distribution channels,
2 | # such as choco.
3 |
4 | name: nightly
5 |
6 | on:
7 | schedule:
8 | - cron: "0 0 * * *"
9 | workflow_dispatch:
10 | inputs:
11 | isMock:
12 | description: "Mock run"
13 | default: true
14 | required: false
15 | type: boolean
16 |
17 | env:
18 | CARGO_INCREMENTAL: 0
19 | CARGO_PROFILE_DEV_DEBUG: 0
20 | CARGO_HUSKY_DONT_INSTALL_HOOKS: true
21 |
22 | jobs:
23 | # Check if things should be skipped, or if this is a mock job.
24 | initialize-job:
25 | name: initialize-job
26 | runs-on: ubuntu-latest
27 | outputs:
28 | should_skip: ${{ steps.skip_check.outputs.should_skip }}
29 | steps:
30 | - name: Check if this action should be skipped
31 | id: skip_check
32 | uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1
33 | with:
34 | skip_after_successful_duplicate: "true"
35 | do_not_skip: '["workflow_dispatch"]'
36 |
37 | - name: Check if mock
38 | run: |
39 | if [[ -z "${{ github.event.inputs.isMock }}" ]]; then
40 | echo "This is a scheduled nightly run."
41 | elif [[ "${{ github.event.inputs.isMock }}" == "true" ]]; then
42 | echo "This is a mock run."
43 | else
44 | echo "This is NOT a mock run. Watch for the generated files!"
45 | fi
46 |
47 | build-release:
48 | needs: initialize-job
49 | if: ${{ needs.initialize-job.outputs.should_skip != 'true' }}
50 | uses: ./.github/workflows/build_releases.yml
51 | with:
52 | caller: "nightly"
53 | secrets: inherit
54 |
55 | upload-release:
56 | name: upload-release
57 | needs: build-release
58 | runs-on: ubuntu-latest
59 | steps:
60 | - name: Checkout repository
61 | uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
62 | with:
63 | fetch-depth: 1
64 |
65 | - name: Get release artifacts
66 | uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
67 | with:
68 | pattern: release-*
69 | path: release
70 | merge-multiple: true
71 |
72 | - name: Print out all release files
73 | run: |
74 | echo "Generated $(ls ./release | wc -l) files:"
75 | du -h -d 0 ./release/*
76 |
77 | - name: Delete tag and release if not mock
78 | if: github.event.inputs.isMock != 'true'
79 | run: gh release delete nightly --cleanup-tag
80 | env:
81 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
82 |
83 | - name: Sleep for a few seconds to prevent timing issues between the deletion and creation of the release
84 | run: sleep 10
85 | if: github.event.inputs.isMock != 'true'
86 |
87 | - name: Add all release files and create nightly release if not mock
88 | uses: softprops/action-gh-release@c062e08bd532815e2082a85e87e3ef29c3e6d191 # 2.0.8
89 | if: github.event.inputs.isMock != 'true'
90 | with:
91 | token: ${{ secrets.GITHUB_TOKEN }}
92 | prerelease: true
93 | tag_name: "nightly"
94 | draft: false
95 | fail_on_unmatched_files: true
96 | files: |
97 | ./release/*
98 |
--------------------------------------------------------------------------------
/.github/workflows/test_docs.yml:
--------------------------------------------------------------------------------
1 | # Small CI workflow to test if mkdocs documentation can be successfully built.
2 |
3 | name: test docs
4 |
5 | on:
6 | workflow_dispatch:
7 | pull_request:
8 |
9 | concurrency:
10 | group: ${{ github.workflow }}-${{ github.ref }}
11 | cancel-in-progress: ${{ github.event_name == 'pull_request' || github.repository != 'ClementTsang/bottom' }}
12 |
13 | jobs:
14 | pre-job:
15 | runs-on: ubuntu-latest
16 | outputs:
17 | should_skip: ${{ steps.skip_check.outputs.should_skip }}
18 | steps:
19 | - id: skip_check
20 | uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1
21 | with:
22 | skip_after_successful_duplicate: "true"
23 | paths: '["docs/**", ".github/workflows/docs.yml", ".github/workflows/test_docs.yml"]'
24 | do_not_skip: '["workflow_dispatch"]'
25 |
26 | test-build-documentation:
27 | name: Test building docs
28 | needs: pre-job
29 | if: ${{ needs.pre-job.outputs.should_skip != 'true' }}
30 | runs-on: ubuntu-latest
31 | steps:
32 | - name: Checkout repository
33 | uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
34 | with:
35 | fetch-depth: 0
36 |
37 | - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0
38 | with:
39 | python-version: 3.12
40 |
41 | - name: Install Python dependencies
42 | run: pip install -r docs/requirements.txt
43 |
44 | - name: Build docs with mkdocs
45 | run: |
46 | cd docs
47 | mkdocs build
48 |
--------------------------------------------------------------------------------
/.github/workflows/validate_schema.yml:
--------------------------------------------------------------------------------
1 | # Workflow to validate the latest schema.
2 |
3 | name: "validate schema"
4 | on:
5 | workflow_dispatch:
6 | pull_request:
7 | push:
8 | branches:
9 | - main
10 | paths:
11 | - "schema/**"
12 | - "scripts/schema/**"
13 | - ".github/workflows/validate_schema.yml"
14 | - "src/bin/schema.rs"
15 | - "Cargo.toml"
16 |
17 | concurrency:
18 | group: ${{ github.workflow }}-${{ github.ref }}
19 | cancel-in-progress: ${{ github.event_name == 'pull_request' || github.repository != 'ClementTsang/bottom' }}
20 |
21 | jobs:
22 | pre-job:
23 | runs-on: ubuntu-latest
24 | outputs:
25 | should_skip: ${{ steps.skip_check.outputs.should_skip }}
26 | steps:
27 | - id: skip_check
28 | uses: fkirc/skip-duplicate-actions@f75f66ce1886f00957d99748a42c724f4330bdcf # v5.3.1
29 | with:
30 | skip_after_successful_duplicate: "true"
31 | paths: '["schema/**", "scripts/schema/**", ".github/workflows/validate_schema.yml", "src/bin/schema.rs", "Cargo.toml"]'
32 | do_not_skip: '["workflow_dispatch"]'
33 |
34 | validate-schema:
35 | name: Test validating schema
36 | needs: pre-job
37 | if: ${{ needs.pre-job.outputs.should_skip != 'true' }}
38 | runs-on: ubuntu-latest
39 | steps:
40 | - name: Checkout repository
41 | uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
42 | with:
43 | fetch-depth: 0
44 |
45 | - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0
46 | with:
47 | python-version: 3.12
48 |
49 | - name: Install Python dependencies
50 | run: pip install -r scripts/schema/requirements.txt
51 |
52 | - name: Test nightly validates on valid sample configs
53 | run: |
54 | python3 scripts/schema/validator.py -s ./schema/nightly/bottom.json -f ./sample_configs/default_config.toml
55 | python3 scripts/schema/validator.py --uncomment -s ./schema/nightly/bottom.json -f ./sample_configs/default_config.toml
56 | python3 scripts/schema/validator.py -s ./schema/nightly/bottom.json -f ./sample_configs/demo_config.toml
57 |
58 | - name: Test nightly catches on a bad sample config
59 | run: |
60 | python3 scripts/schema/validator.py -s ./schema/nightly/bottom.json -f scripts/schema/bad_file.toml --should_fail
61 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Generated by Cargo
2 | # will have compiled files and executables
3 | /target/
4 |
5 | # These are backup files generated by rustfmt
6 | **/*.rs.bk
7 |
8 | # Logging
9 | *.log
10 |
11 | # Flamegraph stuff
12 | flamegraphs/
13 | rust-unmangle
14 | *.svg
15 | *.data
16 | *.data.old
17 |
18 | # IntelliJ
19 | .idea/
20 |
21 | # Heaptrack files
22 | *.zst
23 |
24 | # For testing
25 | sample_configs/testing*.toml
26 |
27 | # Cargo-deny
28 | deny.toml
29 |
30 | # vscode
31 | .vscode
32 |
33 | # mkdocs
34 | site/
35 |
36 | # dhat heap profiling
37 | dhat-heap.json
38 | dhat/
39 |
40 | # cargo vet
41 | supply-chain/
42 |
43 | # samply profiling
44 | profile.json
45 | profile.json.gz
46 |
47 | **/venv/
48 |
49 | # Sometimes used for scripts
50 | .ruff_cache
51 |
--------------------------------------------------------------------------------
/.markdownlint.json:
--------------------------------------------------------------------------------
1 | {
2 | "MD013": false,
3 | "MD041": false,
4 | "MD033": false,
5 | "MD040": false,
6 | "MD024": false,
7 | "MD025": false,
8 | "MD046": false
9 | }
10 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contribution
2 |
3 | Contribution in any way is appreciated, whether it is reporting problems, fixing bugs, implementing features, improving the documentation, etc.
4 |
5 | ## Opening an issue
6 |
7 | ### Bug reports
8 |
9 | When filing a bug report, fill out the [bug report template](https://github.com/ClementTsang/bottom/issues/new?assignees=&labels=bug&template=bug_report.yml). Be sure to give all the necessary details! It is _incredibly_ difficult for a maintainer to fix a bug when it cannot be reproduced,
10 | so providing those details makes it much easier to reproduce the problem!
11 |
12 | ### Feature requests
13 |
14 | Please fill out the [feature request template](https://github.com/ClementTsang/bottom/issues/new?assignees=&labels=feature&template=feature_request.yml). Remember to give details about what the feature is along with why you think this suggestion will be useful.
15 |
16 | ## Pull requests
17 |
18 | If you want to directly contribute documentation changes or code, follow this! The expected workflow for a pull request is:
19 |
20 | 1. Fork the project.
21 | 2. Make your changes.
22 | 3. Make any documentation changes if necessary - if you add a new feature, it'll probably need documentation changes. See [here](https://clementtsang.github.io/bottom/nightly/contribution/documentation/) for tips on documentation.
23 | 4. Commit and create a pull request to merge into the `main` branch. **Please fill out the pull request template**.
24 | 5. Ask a maintainer to review your pull request.
25 | - Check if the CI workflow passes. These consist of clippy lints, rustfmt checks, and basic tests. If you are a
26 | first-time contributor, you may need to wait for a maintainer to let CI run.
27 | - If changes are suggested or any comments are made, they should probably be addressed.
28 | 6. Once it looks good, it'll be merged! Note that _generally_, PRs are squashed to maintain repo cleanliness, though
29 | feel free to ask otherwise if that isn't preferable.
30 |
31 | For more details, see [here](https://clementtsang.github.io/bottom/nightly/contribution/issues-and-pull-requests/).
32 |
33 | ### Documentation
34 |
35 | For contributing to documentation, see [here](https://clementtsang.github.io/bottom/nightly/contribution/documentation/).
36 |
37 | ### Packaging
38 |
39 | If you want to become a package maintainer, see [here](https://clementtsang.github.io/bottom/nightly/contribution/packaging-and-distribution/)
40 | for details on how to build bottom, how to generate/obtain completion files and manpages, and how to add installation instructions for the package to the README.
41 |
--------------------------------------------------------------------------------
/Cross.toml:
--------------------------------------------------------------------------------
1 | [build.env]
2 | passthrough = ["RUST_BACKTRACE", "BTM_GENERATE"]
3 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 Clement Tsang
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/assets/demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/assets/demo.gif
--------------------------------------------------------------------------------
/clippy.toml:
--------------------------------------------------------------------------------
1 | cognitive-complexity-threshold = 100
2 | type-complexity-threshold = 500
3 | too-many-arguments-threshold = 8
4 |
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
1 | coverage:
2 | status:
3 | project:
4 | default:
5 | target: auto
6 | threshold: 30%
7 | patch: off
8 |
--------------------------------------------------------------------------------
/desktop/bottom.desktop:
--------------------------------------------------------------------------------
1 | [Desktop Entry]
2 | Name=bottom
3 | Version=0.10.2
4 | GenericName=System Monitor
5 | Comment=A customizable cross-platform graphical process/system monitor for the terminal.
6 | Exec=btm
7 | Terminal=true
8 | Type=Application
9 | Categories=Utility;System;ConsoleOnly;Monitor;
10 | StartupNotify=false
11 |
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | site/
2 | venv/
3 | .cache/
4 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Extended Documentation
2 |
3 | This is where the extended documentation resides, hosted on GitHub Pages. We use [MkDocs](https://www.mkdocs.org/),
4 | [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/), and [mike](https://github.com/jimporter/mike).
5 |
6 | Documentation is currently built using Python 3.11, though it should work fine with older versions.
7 |
8 | ## Running locally
9 |
10 | One way is to just run `serve.sh`. Alternatively, the manual steps are, assuming your current working directory
11 | is the bottom repo:
12 |
13 | ```bash
14 | # Change directories to the documentation.
15 | cd docs/
16 |
17 | # Create and activate venv.
18 | python -m venv venv
19 | source venv/bin/activate
20 |
21 | # Install requirements
22 | pip install -r requirements.txt
23 |
24 | # Run mkdocs
25 | venv/bin/mkdocs serve
26 | ```
27 |
28 | ## Deploying
29 |
30 | Deploying is done via [mike](https://github.com/jimporter/mike) in order to get versioning. Typically,
31 | this is done through CI, but can be done manually if needed.
32 |
33 | ### Nightly docs
34 |
35 | ```bash
36 | cd docs
37 | mike deploy nightly --push
38 | ```
39 |
40 | ### Stable docs
41 |
42 | ```bash
43 | cd docs
44 |
45 | # Rename the previous stable version
46 | mike retitle --push stable $OLD_STABLE_VERSION
47 |
48 | # Set the newest version as the most recent stable version
49 | mike deploy --push --update-aliases $RELEASE_VERSION stable
50 |
51 | # Append a "(stable)" string to the end.
52 | mike retitle --push $RELEASE_VERSION "$RELEASE_VERSION (stable)"
53 | ```
54 |
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/basic.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/basic.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/battery.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/battery.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/config/disk-filtering/disk_name_filter.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/config/disk-filtering/disk_name_filter.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/config/disk-filtering/disk_name_mount_filter.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/config/disk-filtering/disk_name_mount_filter.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/config/disk-filtering/disk_no_filter.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/config/disk-filtering/disk_no_filter.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/config/layout/sample_layout.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/config/layout/sample_layout.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/cpu.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/cpu.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/disk.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/disk.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/memory.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/memory.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/network/network.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/network/network.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/network/network_old.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/network/network_old.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/process/process_default.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/process/process_default.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/process/process_full.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/process/process_full.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/process/process_grouped.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/process/process_grouped.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/process/process_kill_linux.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/process/process_kill_linux.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/process/process_kill_simple.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/process/process_kill_simple.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/process/process_sort_menu.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/process/process_sort_menu.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/process/process_tree.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/process/process_tree.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/process/search/cpu.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/process/search/cpu.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/process/search/or.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/process/search/or.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/process/search/quotes.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/process/search/quotes.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/process/search/regex.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/process/search/regex.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/process/search/search.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/process/search/search.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/temperature.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/temperature.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/troubleshooting/cmd_prompt_font.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/troubleshooting/cmd_prompt_font.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/troubleshooting/cmd_prompt_props.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/troubleshooting/cmd_prompt_props.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/troubleshooting/dots.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/troubleshooting/dots.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/troubleshooting/no_braille.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/troubleshooting/no_braille.webp
--------------------------------------------------------------------------------
/docs/content/assets/screenshots/troubleshooting/regedit_fonts.webp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ClementTsang/bottom/3d35d083470a14766c0b03b8add46aba485e3a52/docs/content/assets/screenshots/troubleshooting/regedit_fonts.webp
--------------------------------------------------------------------------------
/docs/content/configuration/config-file/cpu.md:
--------------------------------------------------------------------------------
1 | # CPU
2 |
3 | ## Default CPU Graph Selection
4 |
5 | You can configure which CPU graph is shown by default when starting up bottom by setting `cpu.default`.
6 |
7 | ```toml
8 | [cpu]
9 | # One of "all" (default), "average"/"avg"
10 | default = "average"
11 | ```
12 |
--------------------------------------------------------------------------------
/docs/content/configuration/config-file/data-filtering.md:
--------------------------------------------------------------------------------
1 | # Data Filtering
2 |
3 | !!! Warning
4 |
5 | This section is in progress, and is just copied from the old documentation.
6 |
7 | You can hide specific disks, temperature sensors, and networks by name in the config file via `disk.name_filter` and `disk.mount_filter`, `temperature.sensor_filter`, and `network.interface_filter` respectively. Regex (`regex = true`), case-sensitivity (`case_sensitive = true`), and matching only if the entire word matches (`whole_word = true`) are supported, but are off by default. Filters default to denying entries that match and can be toggled by setting `is_list_ignored` to `false` in the config file.
8 |
9 | For example, here's the disk widget with no filter:
10 |
11 | 
12 |
13 | The following in the config file would filter out some entries by disk name:
14 |
15 | ```toml
16 | [disk.name_filter]
17 | is_list_ignored = true
18 | list = ["/dev/sda"]
19 | regex = true
20 | case_sensitive = false
21 | whole_word = false
22 | ```
23 |
24 | 
25 |
26 | If there are two potentially conflicting filters (i.e. when you are using both a disk and mount filter), the filter that explicitly allows an entry takes precedence over a filter that explicitly denies one. For example, let's say we set a disk name filter accepting anything with `/dev/sda`, and a mount filter denying anything with `/mnt/.*` or `/`. To do so, we write in the config file:
27 |
28 | ```toml
29 | [disk.name_filter]
30 | is_list_ignored = false
31 | list = ["/dev/sda"]
32 | regex = true
33 | case_sensitive = false
34 | whole_word = false
35 |
36 | [disk.mount_filter]
37 | is_list_ignored = true
38 | list = ["/mnt/.*", "/"]
39 | regex = true
40 | case_sensitive = false
41 | whole_word = true
42 | ```
43 |
44 | This gives us:
45 |
46 | 
47 |
--------------------------------------------------------------------------------
/docs/content/configuration/config-file/index.md:
--------------------------------------------------------------------------------
1 | # Config File
2 |
3 | For persistent configuration, and for certain configuration options, bottom supports config files.
4 |
5 | ## Default Config File
6 |
7 | If no config file argument is given, it will automatically look for a config file at these locations:
8 |
9 | | OS | Default Config Location |
10 | | ------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
11 | | macOS | `$HOME/Library/Application Support/bottom/bottom.toml` `$HOME/.config/bottom/bottom.toml` `$XDG_CONFIG_HOME/bottom/bottom.toml` |
12 | | Linux | `$HOME/.config/bottom/bottom.toml` `$XDG_CONFIG_HOME/bottom/bottom.toml` |
13 | | Windows | `C:\Users\<USERNAME>\AppData\Roaming\bottom\bottom.toml` |
14 |
15 | If the config file doesn't exist at the path, bottom will automatically try to create a new config file at the location
16 | with default values.
17 |
18 | ## JSON Schema
19 |
20 | The configuration file also has [JSON Schema](https://json-schema.org/) support to make it easier to manage, if your
21 | IDE/editor supports it.
22 |
--------------------------------------------------------------------------------
/docs/content/configuration/config-file/layout.md:
--------------------------------------------------------------------------------
1 | # Layout
2 |
3 | !!! Warning
4 |
5 | This section is in progress, and is just copied from the old documentation.
6 |
7 | bottom supports customizable layouts via the config file. Currently, layouts are controlled by using TOML objects and arrays.
8 |
9 | For example, given the sample layout:
10 |
11 | ```toml
12 | [[row]]
13 | [[row.child]]
14 | type="cpu"
15 | [[row]]
16 | ratio=2
17 | [[row.child]]
18 | ratio=4
19 | type="mem"
20 | [[row.child]]
21 | ratio=3
22 | [[row.child.child]]
23 | type="temp"
24 | [[row.child.child]]
25 | type="disk"
26 | ```
27 |
28 | This would give a layout that has two rows, with a 1:2 ratio. The first row has only the CPU widget.
29 | The second row is split into two columns with a 4:3 ratio. The first column contains the memory widget.
30 | The second column is split into two rows with a 1:1 ratio. The first is the temperature widget, the second is the disk widget.
31 |
32 | This is what the layout would look like when run:
33 |
34 | 
35 |
36 | Each `[[row]]` represents a _row_ in the layout. A row can have any number of `child` values. Each `[[row.child]]`
37 | represents either a _column or a widget_. A column can have any number of `child` values as well. Each `[[row.child.child]]`
38 | represents a _widget_. A widget is represented by having a `type` field set to a string.
39 |
40 | The following `type` values are supported:
41 |
42 | | | |
43 | | -------------------------------- | ------------------------ |
44 | | `"cpu"` | CPU chart and legend |
45 | | `"mem", "memory"` | Memory chart |
46 | | `"net", "network"` | Network chart and legend |
47 | | `"proc", "process", "processes"` | Process table and search |
48 | | `"temp", "temperature"` | Temperature table |
49 | | `"disk"` | Disk table |
50 | | `"empty"` | An empty space |
51 | | `"batt", "battery"` | Battery statistics |
52 |
53 | Each component of the layout accepts a `ratio` value. If this is not set, it defaults to 1.
54 |
55 | Furthermore, you can have duplicate widgets.
56 |
57 | For an example, look at the [default config](https://github.com/ClementTsang/bottom/blob/main/sample_configs/default_config.toml), which contains the default layout.
58 |
--------------------------------------------------------------------------------
/docs/content/configuration/config-file/processes.md:
--------------------------------------------------------------------------------
1 | # Processes
2 |
3 | ## Columns
4 |
5 | You can configure which columns are shown by the process widget by setting the `columns` setting:
6 |
7 | ```toml
8 | [processes]
9 | # Pick which columns you want to use in any order.
10 | columns = ["cpu%", "mem%", "pid", "name", "read", "write", "tread", "twrite", "state", "user", "time", "gmem%", "gpu%"]
11 | ```
12 |
--------------------------------------------------------------------------------
/docs/content/contribution/development/build_process.md:
--------------------------------------------------------------------------------
1 | # Build Process
2 |
3 | !!! Warning
4 |
5 | This section is currently somewhat WIP.
6 |
7 | !!! Warning
8 |
9 | This section is intended for people who wish to work on/build/distribute bottom, not general users.
10 |
11 | ## Overview
12 |
13 | bottom manages its own binary builds for nightly and stable release purposes. The core build workflow is handled by [`build_releases.yml`](https://github.com/ClementTsang/bottom/blob/main/.github/workflows/build_releases.yml), called by a wrapper workflow for [nightly](https://github.com/ClementTsang/bottom/blob/main/.github/workflows/nightly.yml) and [stable](https://github.com/ClementTsang/bottom/blob/main/.github/workflows/deployment.yml) releases. Builds take place via GitHub Actions.
14 |
15 | The main things built are:
16 |
17 | - Binaries for various platforms
18 | - MSI installer for Windows
19 | - `.deb` package for Debian and its derivatives
20 |
21 | This documentation gives a high-level overview of the build process for each part. For the most up-to-date and detailed reference, definitely refer back to the [`build_releases.yml`](https://github.com/ClementTsang/bottom/blob/main/.github/workflows/build_releases.yml) file.
22 |
23 | ## Binaries
24 |
25 | Binaries are currently built for various targets. Note that not all of these are officially supported. The following general steps are performed:
26 |
27 | - Set up the Rust toolchain for the action runner.
28 | - Enable cache.
29 | - Build a release build with:
30 |
31 | - `--features deploy`, which enables only crates needed for release builds.
32 | - `--locked` to lock the dependency versions.
33 | - The following env variables set:
34 |
35 | - `BTM_GENERATE: true`
36 | - `COMPLETION_DIR: "target/tmp/bottom/completion/"`
37 | - `MANPAGE_DIR: "target/tmp/bottom/manpage/"`
38 |
39 | These generate the manpages and shell completions (see [Packaging](../packaging-and-distribution.md) for some more information).
40 |
41 | - Bundle the binaries and manpage/completions.
42 | - Cleanup.
43 |
44 | Some builds use [`cross`](https://github.com/cross-rs/cross) to do cross-compilation builds for architectures otherwise not natively supported by the runner.
45 |
46 | ## MSI
47 |
48 | This builds a full Windows installer using [`cargo-wix`](https://github.com/volks73/cargo-wix). This requires some setup beforehand with some dependencies:
49 |
50 | - Net-Framework-Core (handled by Powershell)
51 | - wixtoolset (handled by chocolatey)
52 | - Rust toolchain
53 |
54 | After that, cache is enabled, and `cargo wix` takes care of the rest.
55 |
56 | ## `.deb`
57 |
58 | Currently, `.deb` files are built for x86 and ARM architectures (`armv7`, `aarch64`). This is handled by [`cargo-deb`](https://crates.io/crates/cargo-deb).
59 |
60 | - For x86, this is handled natively with just `cargo-deb`.
61 | - For ARM, this uses a Docker container, [cargo-deb-arm](https://github.com/ClementTsang/cargo-deb-arm), which correctly sets the dependencies and architecture for the generated `.deb` file.
62 |
63 | There are additional checks via `dpkg` to ensure the architecture is correctly set.
64 |
--------------------------------------------------------------------------------
/docs/content/contribution/development/deploy_process.md:
--------------------------------------------------------------------------------
1 | # Deploy Process
2 |
3 | !!! Warning
4 |
5 | This section is currently WIP.
6 |
7 | !!! Warning
8 |
9 | This section is intended for people who wish to work on/build/distribute bottom, not general users.
10 |
11 | ## Overview
12 |
13 | bottom currently has two main deploy processes to worry about:
14 |
15 | - [Nightly](https://github.com/ClementTsang/bottom/blob/main/.github/workflows/nightly.yml): a daily (00:00 UTC) GitHub action to build binary/installer files, and upload them to the [nightly release](https://github.com/ClementTsang/bottom/releases/tag/nightly). It can also be triggered manually as either a proper nightly release or a mock release.
16 | - [Stable](https://github.com/ClementTsang/bottom/blob/main/.github/workflows/deployment.yml): a stable deployment, triggered manually or upon creation of a valid tag. This is a GitHub action that builds binary/installer files and uploads them to a new GitHub release.
17 |
18 | Furthermore, this workflow does not handle the following deployments, which must be manually handled:
19 |
20 | - [Chocolatey](https://community.chocolatey.org/packages/bottom)
21 | - [crates.io](https://crates.io/crates/bottom)
22 |
23 | ## Nightly
24 |
25 | This is, for the most part, automatic, though it can also be used as a way of testing build workflow changes and seeing if binaries can be successfully built at all against all the targets we want to build for.
26 |
27 | If one does not want to actually update the nightly release, and just wants to test the general builds and workflow, one can run the workflow manually on a branch of choice with "mock" set as the parameter. Changing it to anything else will trigger a non-mock run.
28 |
29 | ## Stable
30 |
31 | This can be manually triggered, though the general use-case is setting a tag of the form `x.y.z` (after checking everything is good, of course). For example:
32 |
33 | ```bash
34 | git tag 0.6.9 && git push origin 0.6.9
35 | ```
36 |
37 | This will automatically trigger the deployment workflow, and create a draft release with the files uploaded. One still needs to fill in the details and release it.
38 |
39 | Furthermore, there are some deployments that are handled by maintainers of bottom that this workflow does not automatically finish. These must be manually handled.
40 |
41 | ### Chocolatey
42 |
43 | Upon releasing on GitHub, [choco-bottom](https://github.com/ClementTsang/choco-bottom) will automatically be updated with a new PR with the correct deployment files for Chocolatey. Check the PR, merge it if it is correct, then pull locally and deploy following the instructions in the [README](https://github.com/ClementTsang/choco-bottom/blob/master/README.md). Make sure to test installation and running at least once before deploying!
44 |
45 | If done correctly, there should be a new build on Chocolatey, which will take some time to validate.
46 |
47 | ### crates.io
48 |
49 | Validate everything builds properly and works (you should have done this before releasing though). If good, then deploying on crates.io is as simple as:
50 |
51 | ```bash
52 | cargo publish
53 | ```
54 |
--------------------------------------------------------------------------------
/docs/content/contribution/development/dev_env.md:
--------------------------------------------------------------------------------
1 | # Development Environment
2 |
3 | !!! Warning
4 |
5 | This section is currently WIP.
6 |
7 | !!! Warning
8 |
9 | This section is intended for people who wish to work on/build/distribute bottom, not general users.
10 |
--------------------------------------------------------------------------------
/docs/content/contribution/development/logging.md:
--------------------------------------------------------------------------------
1 | # Logging
2 |
3 | !!! Warning
4 |
5 | This section is currently WIP.
6 |
7 | !!! Warning
8 |
9 | This section is intended for people who wish to work on/build/distribute bottom, not general users.
10 |
--------------------------------------------------------------------------------
/docs/content/contribution/development/testing.md:
--------------------------------------------------------------------------------
1 | # Testing
2 |
3 | !!! Warning
4 |
5 | This section is currently WIP.
6 |
7 | !!! Warning
8 |
9 | This section is intended for people who wish to work on/build/distribute bottom, not general users.
10 |
--------------------------------------------------------------------------------
/docs/content/contribution/documentation.md:
--------------------------------------------------------------------------------
1 | # Documentation
2 |
3 | ## When should documentation changes be done?
4 |
5 | - Whenever a new feature is added, a bug is fixed, or a breaking change is made, it should be documented where
6 | appropriate (ex: `README.md`, changelog, etc.)
7 | - New methods of installation are always appreciated and should be documented
8 |
9 | ## What pages need documentation?
10 |
11 | There are a few areas where documentation changes are often needed:
12 |
13 | - The [`README.md`](https://github.com/ClementTsang/bottom/blob/main/README.md)
14 | - The help menu inside of the application (located [here](https://github.com/ClementTsang/bottom/blob/main/src/constants.rs))
15 | - The [extended documentation](../index.md) (what you're reading right now)
16 | - The [`CHANGELOG.md`](https://github.com/ClementTsang/bottom/blob/main/CHANGELOG.md)
17 |
18 | ## How should I add/update documentation?
19 |
20 | 1. Fork the repository to make changes in.
21 |
22 | 2. Where you're adding documentation will probably affect what you need to do:
23 |
24 |
README.md or CHANGELOG.md
25 |
26 | For changes to [`README.md`](https://github.com/ClementTsang/bottom/blob/main/README.md) and [`CHANGELOG.md`](https://github.com/ClementTsang/bottom/blob/main/CHANGELOG.md), just follow the formatting provided and use any editor.
27 |
28 | Generally, changes to [`CHANGELOG.md`](https://github.com/ClementTsang/bottom/blob/main/CHANGELOG.md) will be handled
29 | by a maintainer, and the contents of the file should follow the [Keep a Changelog](https://keepachangelog.com/en/1.0.0/)
30 | format, as well as link to the relevant PR or issues.
31 |
32 |
Help menu
33 |
34 | For changes to the help menu, try to refer to the existing code within [`src/constants.rs`](https://github.com/ClementTsang/bottom/blob/main/src/constants.rs) on how the help menu is generated.
35 |
36 |
Extended documentation
37 |
38 | For changes to the extended documentation, you'll probably want at least Python 3.11 (older and newer versions
39 | should be fine), [MkDocs](https://www.mkdocs.org/), [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/),
40 | `mdx_truly_sane_lists`, and optionally [Mike](https://github.com/jimporter/mike) installed. These can help with
41 | validating your changes locally.
42 |
43 | You can do so through `pip` or your system's package managers. If you use `pip`, you can use venv to cleanly install
44 | the documentation dependencies:
45 |
46 | ```bash
47 | # Change directories to the documentation.
48 | cd docs/
49 |
50 | # Create venv, install the dependencies, and serve the page.
51 | ./serve.sh
52 | ```
53 |
54 | This will serve a local version of the docs that you can open on your browser. It will update as you make changes.
55 |
56 | 3. Once you have your documentation changes done, submit it as a pull request. For more information regarding that,
57 | refer to [Issues, Pull Requests, and Discussions](issues-and-pull-requests.md).
58 |
--------------------------------------------------------------------------------
/docs/content/contribution/issues-and-pull-requests.md:
--------------------------------------------------------------------------------
1 | # Issues, Pull Requests, and Discussions
2 |
3 | ## Discussions
4 |
5 | Discussions are open [in the repo](https://github.com/ClementTsang/bottom/discussions). As for the difference between discussions and issues:
6 |
7 | - Open an issue if you have enough information to properly fill out the details needed for a report or request.
8 | - Open a discussion otherwise (e.g. asking a question).
9 |
10 | ## Opening an issue
11 |
12 | ### Bug reports
13 |
14 | When filing a bug report, please use the [bug report template](https://github.com/ClementTsang/bottom/issues/new?assignees=&labels=bug&template=bug_report.md&title=) and fill in as much as you can. It is _incredibly_ difficult for a maintainer to fix a bug when it cannot be reproduced, and giving as much detail as possible generally helps to make it easier to reproduce the problem!
15 |
16 | ### Feature requests
17 |
18 | Please use the [feature request template](https://github.com/ClementTsang/bottom/issues/new?assignees=&labels=feature&template=feature_request.md&title=) and fill it out. Remember to give details about what the feature is along with why you think this suggestion will be useful.
19 |
20 | Also, please check whether an existing issue has covered your specific feature request!
21 |
22 | ## Pull requests
23 |
24 | The expected workflow for a pull request is:
25 |
26 | 1. Fork the project.
27 | 2. Make your changes.
28 | 3. Make any documentation changes if necessary - if you add a new feature, it'll probably need documentation changes. See [here](./documentation.md) for tips on documentation.
29 | 4. Commit and create a pull request to merge into the `main` branch. **Please fill out the pull request template**.
30 | 5. Ask a maintainer to review your pull request.
31 | - Check if the CI workflow passes. These consist of clippy lints, rustfmt checks, and basic tests. If you are a
32 | first-time contributor, you may need to wait for a maintainer to let CI run.
33 | - If changes are suggested or any comments are made, they should probably be addressed.
34 | 6. Once it looks good, it'll be merged! Note that _generally_, PRs are squashed to maintain repo cleanliness, though
35 | feel free to ask otherwise if that isn't preferable.
36 |
--------------------------------------------------------------------------------
/docs/content/contribution/packaging-and-distribution.md:
--------------------------------------------------------------------------------
1 | # Packaging and Distribution
2 |
3 | Package maintainers are always welcome and appreciated! Here's some info on how one can help with package distribution
4 | and bottom.
5 |
6 | ## Pre-built binaries
7 |
8 | The latest stable release can be found [here](https://github.com/ClementTsang/bottom/releases/latest), where you can
9 | find pre-built binaries in either a `tar.gz` or `zip` format. Binaries here also include automatically generated shell
10 | completion files for zsh, bash, fish, and Powershell, which you may want to also install during the packaging
11 | process.
12 |
13 | You can also find a nightly build in the [releases page](https://github.com/ClementTsang/bottom/releases), built every
14 | day at 00:00 UTC off of the `main` branch.
15 |
16 | In both cases, we use a combination of GitHub Actions and CirrusCI (mainly for FreeBSD and macOS M1) to create our
17 | release binaries. [`build_releases.yml`](https://github.com/ClementTsang/bottom/blob/main/.github/workflows/build_releases.yml)
18 | contains the GitHub Action workflow used to do both of these, if reference is needed.
19 |
20 | ## Building manually
21 |
22 | If you want to manually build bottom rather than distributing a pre-built binary, you'll need the most recent version
23 | of stable Rust, which you can get with:
24 |
25 | ```bash
26 | rustup update stable
27 | ```
28 |
29 | You'll then want to build with:
30 |
31 | ```bash
32 | cargo build --release --locked
33 | ```
34 |
35 | ### Manpage and completion generation
36 |
37 | bottom uses a [`build.rs`](https://github.com/ClementTsang/bottom/blob/main/build.rs) script to automatically generate
38 | a manpage and shell completions for the following shells:
39 |
40 | - Bash
41 | - Zsh
42 | - Fish
43 | - Powershell
44 | - Elvish
45 |
46 | If you want to generate manpages and/or completion files, set the `BTM_GENERATE` env var to a non-empty value. For
47 | example, run something like this:
48 |
49 | ```bash
50 | BTM_GENERATE=true cargo build --release --locked
51 | ```
52 |
53 | This will automatically generate completion and manpage files in `target/tmp/bottom/`. If you wish to regenerate the
54 | files, modify/delete either these files or set `BTM_GENERATE` to some other non-empty value to retrigger the build
55 | script.
56 |
57 | You may override the default directories used to generate both completion and manpage files by specifying the
58 | `COMPLETION_DIR` and `MANPAGE_DIR` environment variables respectively.
59 |
60 | For more information, you may want to look at either the [`build.rs`](https://github.com/ClementTsang/bottom/blob/main/build.rs)
61 | file or the [binary build CI workflow](https://github.com/ClementTsang/bottom/blob/main/.github/workflows/build_releases.yml).
62 |
63 | ## Adding an installation source
64 |
65 | Once you've finished your installation source, if you want to mention it in the main bottom repo, fork the repo and add
66 | the installation method and any details to the [`README.md`](https://github.com/ClementTsang/bottom/blob/main/README.md)
67 | file under the [Installation](https://github.com/ClementTsang/bottom#installation) section, as well as a corresponding
68 | table of contents entry. Once that's done, open a pull request - these will usually be approved of very quickly.
69 |
70 | You can find more info on the contribution process [here](issues-and-pull-requests.md#pull-requests).
71 |
--------------------------------------------------------------------------------
/docs/content/index.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Home
3 | ---
4 |
5 | # `bottom`
6 |
7 | A customizable cross-platform graphical process/system monitor for the terminal, supporting Linux, macOS, and Windows. Inspired by other tools like [gtop](https://github.com/aksakalli/gtop), [gotop](https://github.com/xxxserxxx/gotop), and [htop](https://github.com/htop-dev/htop).
8 |
9 | ---
10 |
11 | This site serves as extended documentation for bottom alongside the [`README.md`](https://github.com/ClementTsang/bottom#readme).
12 |
13 | !!! Warning
14 |
15 | Some areas of this site are still in progress and may be missing details. Feel free to suggest/contribute changes!
16 |
17 | ## Installation
18 |
19 | !!! Tip
20 |
21 | It's a good idea to first check out the [Support](support/official.md) page to see if your system is officially supported!
22 |
23 | !!! Tip
24 |
25 | If you're facing some issues during/after installation, check out the [Troubleshooting](troubleshooting.md) page for some common problems and solutions.
26 |
27 | To install bottom, refer to [the installation section of the `README.md`](https://github.com/ClementTsang/bottom#installation),
28 | which contains a list of all the installation methods.
29 |
30 | ## Usage and configuration
31 |
32 | The command to run bottom is `btm`.
33 |
34 | You can refer to the [usage](usage/general-usage.md) pages for more details on using bottom (e.g. keybinds, some features, a general overview of what each widget does).
35 |
36 | To configure bottom (e.g. how it behaves, how it looks, etc.) refer to the [command-line options page](configuration/command-line-options.md) for temporary settings, or [the config file page](configuration/config-file/index.md) for more permanent settings.
37 |
38 | ## Contribution
39 |
40 | New contributors are always welcome! See the [contribution](contribution/issues-and-pull-requests.md) section for how to contribute to
41 | bottom, whether it be filing issues, writing documentation, creating pull requests, etc.
42 |
--------------------------------------------------------------------------------
/docs/content/overrides/main.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 |
--------------------------------------------------------------------------------
/docs/content/stylesheets/extra.css:
--------------------------------------------------------------------------------
1 | :root {
2 | --md-primary-fg-color: #268bd2;
3 | --md-accent-fg-color: #81a1c1;
4 | }
5 |
6 | .md-typeset__table {
7 | min-width: 100%;
8 | }
9 |
10 | .md-typeset table:not([class]) {
11 | display: table;
12 | }
13 |
--------------------------------------------------------------------------------
/docs/content/support/official.md:
--------------------------------------------------------------------------------
1 | # Official support
2 |
3 | bottom _officially_ supports the following operating systems and corresponding architectures:
4 |
5 | - macOS (`x86_64`, `aarch64`)
6 | - Linux (`x86_64`, `i686`, `aarch64`)
7 | - Windows (`x86_64`, `i686`)
8 |
9 | These platforms are tested to work (with caveats, see below) and issues on these platforms will be fixed if possible.
10 |
11 | Furthermore, binaries are expected to be built and tested using the most recent version of stable Rust - if you are manually building
12 | bottom from the repo/source, then please try that as well.
13 |
14 | ## Known problems
15 |
16 | ### Linux
17 |
18 | - If you're using Linux via WSL or WSL2:
19 | - You may have issues with getting memory data.
20 | - Temperature sensors may not be correctly reported.
21 | - WSL2 will not match Windows' own Task Manager in terms of data.
22 |
23 | ### Windows
24 |
25 | - The temperature widget seems to require admin privileges in some cases to get data.
26 | - The battery widget seems to have issues with dual battery systems, like some Thinkpads.
27 | - If you are using WSL or WSL2:
28 | - You may have issues with getting memory data.
29 | - Temperature sensors may not be correctly reported.
30 | - WSL2 will not match Windows' own Task Manager in terms of data.
31 |
32 | ### macOS
33 |
34 | - The process widget may require elevated access (ex: `sudo btm`) to gather all data in some cases. _Please note that you should be certain that you trust any software you grant root privileges._
35 |
--------------------------------------------------------------------------------
/docs/content/support/unofficial.md:
--------------------------------------------------------------------------------
1 | # Unofficial support
2 |
3 | Systems and architectures that aren't officially supported may still work, but there are no guarantees on how much will
4 | work. For example, it might only compile, or it might run with bugs/broken features. Furthermore, while it will depend
5 | on the problem at the end of the day, _issues on unsupported platforms are likely to go unfixed_.
6 |
7 | Unofficially supported platforms known to compile/work:
8 |
9 | - FreeBSD
10 | - Linux on ARMv7 and ARMv6 (tested to compile in [CI](https://github.com/ClementTsang/bottom/blob/main/.github/workflows/ci.yml))
11 | - Linux on PowerPC 64 LE (tested to compile in [CI](https://github.com/ClementTsang/bottom/blob/main/.github/workflows/ci.yml))
12 | - Linux on RISC-V (tested to compile in [CI](https://github.com/ClementTsang/bottom/blob/main/.github/workflows/ci.yml), tested to run on an [Allwinner D1 Nezha](https://github.com/ClementTsang/bottom/issues/564))
13 |
14 | ## Known problems
15 |
16 | None at the moment.
17 |
--------------------------------------------------------------------------------
/docs/content/usage/autocomplete.md:
--------------------------------------------------------------------------------
1 | # Auto-Complete
2 |
3 | The release binaries in [the releases page](https://github.com/ClementTsang/bottom/releases) are packaged with
4 | shell auto-completion files for Bash, Zsh, fish, Powershell, Elvish, Fig, and Nushell. To install them:
5 |
6 | - For Bash, move `btm.bash` to `$XDG_CONFIG_HOME/bash_completion` or `/etc/bash_completion.d/`.
7 | - For Zsh, move `_btm` to one of your `$fpath` directories.
8 | - For fish, move `btm.fish` to `$HOME/.config/fish/completions/`.
9 | - For PowerShell, add `_btm.ps1` to your PowerShell profile.
10 | - For Elvish, the completion file is `btm.elv`.
11 | - For Fig, the completion file is `btm.ts`.
12 | - For Nushell, source `btm.nu`.
13 |
14 | The individual auto-completion files are also included in the stable/nightly releases as `completion.tar.gz` if needed.
15 |
--------------------------------------------------------------------------------
/docs/content/usage/basic-mode.md:
--------------------------------------------------------------------------------
1 | # Basic Mode
2 |
3 | Basic mode is a special layout that removes all of the graphs and provides an interface that resembles (a very stripped-down version of) htop.
4 |
5 |
6 |
7 |
8 |
9 | Basic mode can be enabled either through a command line flag:
10 |
11 | ```bash
12 | btm -b
13 |
14 | # or
15 |
16 | btm --basic
17 | ```
18 |
19 | or through the config:
20 |
21 | ```toml
22 | [flags]
23 | basic = true
24 | ```
25 |
26 | ## Notes
27 |
28 | In this mode, widgets that use tables (temperatures, processes, disks, and batteries) are only shown one at a time.
29 | One can switch between these widgets either by clicking the arrow buttons or by using the general widget selection shortcuts (for example, ++ctrl+left++ or ++H++)
30 | to switch which widget is shown.
31 |
32 | Also note that in this mode, widget expansion and custom layouts are disabled.
33 |
34 | ## Key bindings
35 |
36 | Basic mode follows the same key bindings as normal, barring widget expansion being disabled, and that the ++"%"++ key while selecting the memory widget toggles between total usage and percentage.
37 |
--------------------------------------------------------------------------------
/docs/content/usage/widgets/battery.md:
--------------------------------------------------------------------------------
1 | # Battery Widget
2 |
3 | !!! Warning
4 |
5 | The battery features are unavailable if the binary is compiled with the `battery` feature disabled or if there are no batteries on the system!
6 |
7 | The battery widget provides information about batteries on the system.
8 |
9 |
10 |
11 |
12 |
13 | The battery widget can be enabled through either the `--battery` flag, the `battery = true` option in a config file, or specifying the widget in a custom layout.
14 |
15 | ## Features
16 |
17 | The following data is displayed for batteries:
18 |
19 | - Charge percent
20 | - Consumption rate
21 | - Charging state
22 | - Time to empty/charge, based on the current state
23 | - Battery health percent
24 |
25 | The battery widget also supports devices with multiple batteries, and you can switch between them using the keyboard or the mouse.
26 |
27 | ## Key bindings
28 |
29 | Note that key bindings are generally case-sensitive.
30 |
31 | | Binding | Action |
32 | | ------------------------------------- | ---------------------------------------------------------- |
33 | | ++left++ ++h++ ++alt+h++ | Moves to the battery entry to the left of the current one |
34 | | ++right++ ++l++ ++alt+l++ | Moves to the battery entry to the right of the current one |
35 |
36 | ## Mouse bindings
37 |
38 | | Binding | Action |
39 | | ----------- | ----------------------- |
40 | | ++lbutton++ | Selects a battery entry |
41 |
--------------------------------------------------------------------------------
/docs/content/usage/widgets/cpu.md:
--------------------------------------------------------------------------------
1 | # CPU Widget
2 |
3 | The CPU widget displays a visual representation of CPU usage over a time range.
4 |
5 |
6 |
7 |
8 |
9 | ## Features
10 |
11 | The CPU widget is composed of two parts: the graph and the legend:
12 |
13 | - The graph displays the usage data for the currently selected entry as a percentage
14 | - The legend displays all available entries that can be displayed on the graph along with their last recorded use percentage (except for the "All" option)
15 |
16 | Users can scroll through the legend using either the keyboard or mouse to select which entry to display on the graph. The "All" option shows every entry
17 | at the same time, though this may get a bit hard to follow if you have a large number of cores/threads.
18 |
19 | One can also adjust the displayed time range through either the keyboard or mouse, with a range of 30s to 600s.
20 |
21 | ## Key bindings
22 |
23 | Note that key bindings are generally case-sensitive.
24 |
25 | ### Graph
26 |
27 | | Binding | Action |
28 | | --------- | --------------------------------------- |
29 | | ++plus++ | Zoom in on chart (decrease time range) |
30 | | ++minus++ | Zoom out on chart (increase time range) |
31 | | ++equal++ | Reset zoom |
32 |
33 | ### Legend
34 |
35 | | Binding | Action |
36 | | ------------------ | ------------------------------------- |
37 | | ++up++ , ++k++ | Move up within a widget |
38 | | ++down++ , ++j++ | Move down within a widget |
39 | | ++g+g++ , ++home++ | Jump to the first entry in the legend |
40 | | ++G++ , ++end++ | Jump to the last entry in the legend |
41 |
42 | ## Mouse bindings
43 |
44 | ### Graph
45 |
46 | | Binding | Action |
47 | | ------------ | -------------------------------------------------------------- |
48 | | ++"Scroll"++ | Scrolling up or down zooms in or out of the graph respectively |
49 |
50 | ### Legend
51 |
52 | | Binding | Action |
53 | | ------------ | ------------------------------------------------- |
54 | | ++"Scroll"++ | Scroll through options to display in the graph |
55 | | ++lbutton++ | Selects a CPU thread/average to show in the graph |
56 |
--------------------------------------------------------------------------------
/docs/content/usage/widgets/disk.md:
--------------------------------------------------------------------------------
1 | # Disk Widget
2 |
3 | The disk widget provides a table of useful disk and partition information, like I/O per second and total usage.
4 |
5 |
6 |
7 |
8 |
9 | ## Features
10 |
11 | The disk widget provides the following information:
12 |
13 | - Disk name
14 | - Disk mount location
15 | - Amount of space used
16 | - Amount of space left
17 | - Total amount of space
18 | - Percentage of space used
19 | - Read per second
20 | - Write per second
21 |
22 | ## Key bindings
23 |
24 | Note that key bindings are generally case-sensitive.
25 |
26 | | Binding | Action |
27 | | ------------------ | ------------------------------------------------------------------- |
28 | | ++up++ , ++k++ | Move up within a widget |
29 | | ++down++ , ++j++ | Move down within a widget |
30 | | ++g+g++ , ++home++ | Jump to the first entry in the table |
31 | | ++G++ , ++end++ | Jump to the last entry in the table |
32 | | ++d++ | Sort by disk, press again to reverse sorting order |
33 | | ++m++ | Sort by mount, press again to reverse sorting order |
34 | | ++u++ | Sort by amount used, press again to reverse sorting order |
35 | | ++n++ | Sort by amount free, press again to reverse sorting order |
36 | | ++t++ | Sort by total space available, press again to reverse sorting order |
37 | | ++p++ | Sort by percentage used, press again to reverse sorting order |
38 | | ++r++ | Sort by read rate, press again to reverse sorting order |
39 | | ++w++ | Sort by write rate, press again to reverse sorting order |
40 |
41 | ## Mouse bindings
42 |
43 | | Binding | Action |
44 | | ----------- | ----------------------------- |
45 | | ++lbutton++ | Selects an entry in the table |
46 |
--------------------------------------------------------------------------------
/docs/content/usage/widgets/memory.md:
--------------------------------------------------------------------------------
1 | # Memory Widget
2 |
3 | The memory widget provides a visual representation of RAM and swap usage over time.
4 |
5 |
6 |
7 |
8 |
9 | ## Features
10 |
11 | The legend displays the current usage in terms of percentage and actual usage in binary units (KiB, MiB, GiB, etc.).
12 | If the total RAM or swap available is 0, then it is automatically hidden from the legend and graph.
13 |
14 | One can also adjust the displayed time range through either the keyboard or mouse, with a range of 30s to 600s.
15 |
16 | This widget can also be configured to display Nvidia and AMD GPU memory usage (`--disable_gpu` on Linux/Windows to disable) or cache memory usage (`--enable_cache_memory`).
17 |
18 | ## Key bindings
19 |
20 | Note that key bindings are generally case-sensitive.
21 |
22 | | Binding | Action |
23 | | --------- | --------------------------------------- |
24 | | ++plus++ | Zoom in on chart (decrease time range) |
25 | | ++minus++ | Zoom out on chart (increase time range) |
26 | | ++equal++ | Reset zoom |
27 |
28 | ## Mouse bindings
29 |
30 | | Binding | Action |
31 | | ------------ | -------------------------------------------------------------- |
32 | | ++"Scroll"++ | Scrolling up or down zooms in or out of the graph respectively |
33 |
34 | ## Calculations
35 |
36 | Memory usage is calculated using the following formula based on values from `/proc/meminfo` (based on [htop's implementation](https://github.com/htop-dev/htop/blob/976c6123f41492aaf613b9d172eef1842fb7b0a3/linux/LinuxProcessList.c#L1584)):
37 |
38 | ```
39 | MemTotal - MemFree - Buffers - (Cached + SReclaimable - Shmem)
40 | ```
41 |
42 | You can find more info on `/proc/meminfo` and its fields [here](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-meminfo).
43 |
--------------------------------------------------------------------------------
/docs/content/usage/widgets/network.md:
--------------------------------------------------------------------------------
1 | # Network Widget
2 |
3 | The network widget provides a visual representation of network input and output per second, as well as noting the total amount
4 | received and transmitted.
5 |
6 |
7 |
8 |
9 |
10 | ## Features
11 |
12 | The legend displays the current reads and writes per second in bits, as well as the total amount read/written.
13 |
14 | The y-axis automatically scales based on shown read/write values, and by default, is a linear scale based on base-10 units (e.x. kilobit, gigabit, etc.).
15 | Through [configuration](../../configuration/command-line-options.md), the read/write per second unit can be changed to bytes, while the y-axis can be changed to a
16 | log scale and/or use base-2 units (e.x. kibibit, gibibit, etc.).
17 |
18 | One can also adjust the displayed time range through either the keyboard or mouse, with a range of 30s to 600s.
19 |
20 | ## Key bindings
21 |
22 | Note that key bindings are generally case-sensitive.
23 |
24 | | Binding | Action |
25 | | --------- | --------------------------------------- |
26 | | ++plus++ | Zoom in on chart (decrease time range) |
27 | | ++minus++ | Zoom out on chart (increase time range) |
28 | | ++equal++ | Reset zoom |
29 |
30 | ## Mouse bindings
31 |
32 | | Binding | Action |
33 | | ------------ | -------------------------------------------------------------- |
34 | | ++"Scroll"++ | Scrolling up or down zooms in or out of the graph respectively |
35 |
--------------------------------------------------------------------------------
/docs/content/usage/widgets/temperature.md:
--------------------------------------------------------------------------------
1 | # Temperature Widget
2 |
3 | The temperature widget provides a table of temperature sensors and their current temperature.
4 |
5 |
6 |
7 |
8 |
9 | ## Features
10 |
11 | The temperature widget provides the sensor name as well as its current temperature.
12 |
13 | This widget can also be configured to display Nvidia and AMD GPU temperatures (`--disable_gpu` on Linux/Windows to disable).
14 |
15 | ## Key bindings
16 |
17 | Note that key bindings are generally case-sensitive.
18 |
19 | | Binding | Action |
20 | | ------------------ | --------------------------------------------------------- |
21 | | ++up++ , ++k++ | Move up within a widget |
22 | | ++down++ , ++j++ | Move down within a widget |
23 | | ++g+g++ , ++home++ | Jump to the first entry in the table |
24 | | ++G++ , ++end++ | Jump to the last entry in the table |
25 | | ++t++ | Sort by temperature, press again to reverse sorting order |
26 | | ++s++ | Sort by sensor name, press again to reverse sorting order |
27 |
28 | ## Mouse bindings
29 |
30 | | Binding | Action |
31 | | ----------- | ----------------------------- |
32 | | ++lbutton++ | Selects an entry in the table |
33 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | mkdocs == 1.6.1
2 | mkdocs-material == 9.6.9
3 | mdx_truly_sane_lists == 1.3
4 | mike == 2.1.3
5 | mkdocs-git-revision-date-localized-plugin == 1.4.5
6 |
7 |
--------------------------------------------------------------------------------
/docs/serve.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Serve a local, auto-reloading copy of the documentation site, creating a
4 | # Python venv with the docs dependencies if one doesn't already exist.
5 | #
6 | # Usage: ./serve.sh [python-command]   (defaults to `python`)
7 | 
8 | set -e
9 | 
10 | VENV_PATH="./venv/"
11 | PYTHON_CMD=${1:-python}
12 | 
13 | # Only create the venv when it's missing; both paths previously duplicated
14 | # the install/serve steps, so they are now shared below.
15 | if [ ! -d "$VENV_PATH" ]; then
16 |     echo "venv not found, creating one using the command '${PYTHON_CMD}'...";
17 |     "$PYTHON_CMD" -m venv venv;
18 | else
19 |     echo "venv already found.";
20 | fi;
21 | 
22 | # Activate the venv, sync the documentation dependencies, and serve the docs.
23 | source ./venv/bin/activate;
24 | pip install --upgrade pip;
25 | pip install -r requirements.txt;
26 | ./venv/bin/mkdocs serve;
22 |
23 |
--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | reorder_imports = true
2 | reorder_modules = true
3 | merge_derives = true
4 | fn_params_layout = "Compressed"
5 | use_field_init_shorthand = true
6 | tab_spaces = 4
7 | max_width = 100
8 | style_edition = "2024"
9 |
10 | # Unstable options, disabled by default.
11 | # imports_granularity = "Crate"
12 | # group_imports = "StdExternalCrate"
13 | # wrap_comments = true
14 | # format_code_in_doc_comments = true
15 |
--------------------------------------------------------------------------------
/sample_configs/demo_config.toml:
--------------------------------------------------------------------------------
1 | [flags]
2 | avg_cpu = true
3 |
4 | # Temperature unit, one of "c" (Celsius), "f" (Fahrenheit), or "k" (Kelvin):
5 | temperature_type = "c"
6 |
7 | rate = 1000
8 | cpu_left_legend = false
9 | current_usage = false
10 | group_processes = false
11 | case_sensitive = false
12 | whole_word = false
13 | regex = true
14 | default_widget_type = "cpu"
15 | default_widget_count = 1
16 |
17 | [styles]
18 | theme = "gruvbox"
19 |
--------------------------------------------------------------------------------
/schema/README.md:
--------------------------------------------------------------------------------
1 | # Config JSON Schema
2 |
3 | ## Generation
4 |
5 | These are automatically generated from code using [`schemars`](https://github.com/GREsau/schemars). They're locked
6 | behind a feature flag to avoid building unnecessary code for release builds, and you can generate them like so:
7 |
8 | ```bash
9 | cargo run --features="generate_schema" -- --generate_schema > schema/nightly/bottom.json
10 | ```
11 |
12 | Alternatively, run the script in `scripts/schema/generate.sh`, which does this for you.
13 |
14 | ## Publication
15 |
16 | To publish these schemas, cut a new version by copying `nightly` to a new folder with a version number matching bottom's
17 | (e.g. v0.10 if bottom is on v0.10.x). Then, make a PR to [schemastore](https://github.com/SchemaStore/schemastore)
18 | updating the catalog.
19 |
20 | For more info, see the schemastore repo. An example PR can be found [here](https://github.com/SchemaStore/schemastore/pull/3571).
21 |
--------------------------------------------------------------------------------
/scripts/schema/bad_file.toml:
--------------------------------------------------------------------------------
1 | [flags]
2 | hide_avg_cpu = 'bad'
3 |
--------------------------------------------------------------------------------
/scripts/schema/generate.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Regenerate the nightly config JSON schema from code.
#
# Any arguments (e.g. a version string) are forwarded verbatim to the schema
# binary. Using "$@" instead of an unquoted $1 keeps arguments with spaces
# intact and passes nothing at all when no arguments are given.

set -e

cd "$(dirname "$0")";
cd ../..

cargo run --bin schema --features="generate_schema" -- "$@" > schema/nightly/bottom.json
9 |
--------------------------------------------------------------------------------
/scripts/schema/requirements.txt:
--------------------------------------------------------------------------------
1 | jsonschema-rs == 0.26.1
2 |
--------------------------------------------------------------------------------
/scripts/schema/validator.py:
--------------------------------------------------------------------------------
1 | #!/bin/python3
2 |
3 | # A simple script to validate that a schema is valid for a file.
4 |
5 | import argparse
6 | import tomllib
7 | import jsonschema_rs
8 | import re
9 | import traceback
10 |
11 |
def main():
    """Validate a TOML file against a JSON schema.

    Exits non-zero when validation fails unexpectedly (or, with
    --should_fail, when validation unexpectedly succeeds).
    """
    parser = argparse.ArgumentParser(
        description="Validates a file against a JSON schema"
    )
    parser.add_argument(
        "-f", "--file", type=str, required=True, help="The file to check."
    )
    parser.add_argument(
        "-s", "--schema", type=str, required=True, help="The schema to use."
    )
    parser.add_argument(
        "--uncomment",
        required=False,
        action="store_true",
        help="Uncomment the settings inside the file.",
    )
    parser.add_argument(
        "--should_fail",
        required=False,
        action="store_true",
        help="Whether the checked file should fail.",
    )
    args = parser.parse_args()

    file = args.file
    schema = args.schema
    should_fail = args.should_fail
    uncomment = args.uncomment

    with open(file, "rb") as f, open(schema) as s:
        try:
            validator = jsonschema_rs.validator_for(s.read())
        except Exception:
            # Exit with a non-zero code: previously a bare `exit()` returned 0
            # here, so an unparseable schema silently passed in CI.
            print("Couldn't create validator.")
            print(traceback.format_exc())
            exit(1)

        if uncomment:
            read_file = f.read().decode("utf-8")
            # Strip the leading '#' from commented-out settings/tables so the
            # example config's defaults are also validated.
            read_file = re.sub(r"^#([a-zA-Z\[])", r"\1", read_file, flags=re.MULTILINE)
            read_file = re.sub(
                r"^#(\s\s+)([a-zA-Z\[])", r"\2", read_file, flags=re.MULTILINE
            )
            print(f"uncommented file: \n{read_file}\n=====\n")

            toml_str = tomllib.loads(read_file)
        else:
            toml_str = tomllib.load(f)

        try:
            validator.validate(toml_str)
            if should_fail:
                print("Fail! Should have errored.")
                exit(1)
            else:
                print("All good!")
        except jsonschema_rs.ValidationError as err:
            print(f"Caught error: `{err}`")
            print(traceback.format_exc())

            if should_fail:
                print("Caught error, good!")
            else:
                print("Fail!")
                exit(1)


if __name__ == "__main__":
    main()
80 |
--------------------------------------------------------------------------------
/scripts/windows/choco/choco_packager.py:
--------------------------------------------------------------------------------
# Because choco is a special case and I'm too lazy to make my
# packaging script robust enough, so whatever, hard-code time.

import hashlib
import sys
from string import Template
import os

# Positional arguments, in order: 64-bit zip, version, nuspec template,
# ps1 template, output nuspec, output ps1, output ps1 directory.
args = sys.argv
if len(args) < 8:
    sys.exit(
        "Usage: choco_packager.py <64-bit zip> <version> <nuspec template> "
        "<ps1 template> <out nuspec> <out ps1> <out ps1 dir>"
    )

deployment_file_path_64 = args[1]
version = args[2]
nuspec_template = args[3]
ps1_template = args[4]
generated_nuspec = args[5]
generated_ps1 = args[6]
generated_ps1_dir = args[7]

print("Generating Chocolatey package for:")
print(" 64-bit: %s" % deployment_file_path_64)
print(" VERSION: %s" % version)
print(" NUSPEC TEMPLATE: %s" % nuspec_template)
print(" PS1 TEMPLATE: %s" % ps1_template)
print(" GENERATED NUSPEC: %s" % generated_nuspec)
print(" GENERATED PS1: %s" % generated_ps1)
print(" GENERATED PS1 DIR: %s" % generated_ps1_dir)

# Checksum of the release archive, embedded in the install script so choco
# can verify the download.
with open(deployment_file_path_64, "rb") as deployment_file_64:
    hash_64 = hashlib.sha1(deployment_file_64.read()).hexdigest()

print("Generated hash for 64-bit program: %s" % str(hash_64))

# Fill in the nuspec template and write it out.
with open(nuspec_template, "r") as template_file:
    template = Template(template_file.read())
    substitute = template.safe_substitute(version=version)
    print("\n================== Generated nuspec file ==================\n")
    print(substitute)
    print("\n============================================================\n")

    with open(generated_nuspec, "w") as generated_file:
        generated_file.write(substitute)

# exist_ok=True so re-running the packager doesn't crash if the directory
# is left over from a previous run.
os.makedirs(generated_ps1_dir, exist_ok=True)
with open(ps1_template, "r") as template_file:
    template = Template(template_file.read())
    substitute = template.safe_substitute(version=version, hash_64=hash_64)
    print(
        "\n================== Generated chocolatey-install file ==================\n"
    )
    print(substitute)
    print("\n============================================================\n")

    with open(generated_ps1, "w") as generated_file:
        generated_file.write(substitute)
54 |
--------------------------------------------------------------------------------
/scripts/windows/choco/chocolateyinstall.ps1.template:
--------------------------------------------------------------------------------
# Chocolatey install script for bottom. This file is a template: the
# `version` and `hash_64` placeholders below are filled in by
# scripts/windows/choco/choco_packager.py via Python's string.Template.
$ErrorActionPreference = 'Stop';
$toolsDir = "$(Split-Path -parent $MyInvocation.MyCommand.Definition)"
$url = 'https://github.com/ClementTsang/bottom/releases/download/$version/bottom_x86_64-pc-windows-msvc.zip'

$packageArgs = @{
packageName = $env:ChocolateyPackageName
softwareName = 'bottom'
unzipLocation = $toolsDir
fileType = 'exe'
url = $url
checksum = '$hash_64'
checksumType = 'sha1'

}
Install-ChocolateyZipPackage @packageArgs
16 |
--------------------------------------------------------------------------------
/src/app/data/mod.rs:
--------------------------------------------------------------------------------
1 | //! How we manage data internally.
2 |
3 | mod time_series;
4 | pub use time_series::{TimeSeriesData, Values};
5 |
6 | mod process;
7 | pub use process::ProcessData;
8 |
9 | mod store;
10 | pub use store::*;
11 |
12 | mod temperature;
13 | pub use temperature::*;
14 |
--------------------------------------------------------------------------------
/src/app/data/process.rs:
--------------------------------------------------------------------------------
1 | use std::{collections::BTreeMap, vec::Vec};
2 |
3 | use hashbrown::HashMap;
4 |
5 | use crate::collection::processes::{Pid, ProcessHarvest};
6 |
7 | #[derive(Clone, Debug, Default)]
8 | pub struct ProcessData {
9 | /// A PID to process data map.
10 | pub process_harvest: BTreeMap,
11 |
12 | /// A mapping between a process PID to any children process PIDs.
13 | pub process_parent_mapping: HashMap>,
14 |
15 | /// PIDs corresponding to processes that have no parents.
16 | pub orphan_pids: Vec,
17 | }
18 |
19 | impl ProcessData {
20 | pub(super) fn ingest(&mut self, list_of_processes: Vec) {
21 | self.process_parent_mapping.clear();
22 |
23 | // Reverse as otherwise the pid mappings are in the wrong order.
24 | list_of_processes.iter().rev().for_each(|process_harvest| {
25 | if let Some(parent_pid) = process_harvest.parent_pid {
26 | if let Some(entry) = self.process_parent_mapping.get_mut(&parent_pid) {
27 | entry.push(process_harvest.pid);
28 | } else {
29 | self.process_parent_mapping
30 | .insert(parent_pid, vec![process_harvest.pid]);
31 | }
32 | }
33 | });
34 |
35 | self.process_parent_mapping.shrink_to_fit();
36 |
37 | let process_pid_map = list_of_processes
38 | .into_iter()
39 | .map(|process| (process.pid, process))
40 | .collect();
41 | self.process_harvest = process_pid_map;
42 |
43 | // We collect all processes that either:
44 | // - Do not have a parent PID (that is, they are orphan processes)
45 | // - Have a parent PID but we don't have the parent (we promote them as orphans)
46 | self.orphan_pids = self
47 | .process_harvest
48 | .iter()
49 | .filter_map(|(pid, process_harvest)| match process_harvest.parent_pid {
50 | Some(parent_pid) if self.process_harvest.contains_key(&parent_pid) => None,
51 | _ => Some(*pid),
52 | })
53 | .collect();
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/src/app/data/temperature.rs:
--------------------------------------------------------------------------------
1 | //! Code around temperature data.
2 |
3 | use std::{fmt::Display, str::FromStr};
4 |
/// The unit to display temperatures in. Defaults to Celsius.
#[derive(Clone, Debug, Copy, PartialEq, Eq, Default)]
pub enum TemperatureType {
    #[default]
    Celsius,
    Kelvin,
    Fahrenheit,
}
12 |
13 | impl FromStr for TemperatureType {
14 | type Err = String;
15 |
16 | fn from_str(s: &str) -> Result {
17 | match s {
18 | "fahrenheit" | "f" => Ok(TemperatureType::Fahrenheit),
19 | "kelvin" | "k" => Ok(TemperatureType::Kelvin),
20 | "celsius" | "c" => Ok(TemperatureType::Celsius),
21 | _ => Err(format!(
22 | "'{s}' is an invalid temperature type, use one of: [kelvin, k, celsius, c, fahrenheit, f]."
23 | )),
24 | }
25 | }
26 | }
27 |
28 | impl TemperatureType {
29 | /// Given a temperature in Celsius, covert it if necessary for a different
30 | /// unit.
31 | pub fn convert_temp_unit(&self, celsius: f32) -> TypedTemperature {
32 | match self {
33 | TemperatureType::Celsius => TypedTemperature::Celsius(celsius.ceil() as u32),
34 | TemperatureType::Kelvin => TypedTemperature::Kelvin((celsius + 273.15).ceil() as u32),
35 | TemperatureType::Fahrenheit => {
36 | TypedTemperature::Fahrenheit(((celsius * (9.0 / 5.0)) + 32.0).ceil() as u32)
37 | }
38 | }
39 | }
40 | }
41 |
/// A temperature and its type.
#[derive(Debug, PartialEq, Clone, Eq, PartialOrd, Ord)]
pub enum TypedTemperature {
    /// A temperature in degrees Celsius.
    Celsius(u32),
    /// A temperature in kelvin.
    Kelvin(u32),
    /// A temperature in degrees Fahrenheit.
    Fahrenheit(u32),
}
49 |
50 | impl Display for TypedTemperature {
51 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
52 | match self {
53 | TypedTemperature::Celsius(val) => write!(f, "{val}°C"),
54 | TypedTemperature::Kelvin(val) => write!(f, "{val}K"),
55 | TypedTemperature::Fahrenheit(val) => write!(f, "{val}°F"),
56 | }
57 | }
58 | }
59 |
#[cfg(test)]
mod test {
    use super::*;

    /// Checks 100°C against all three target units.
    #[test]
    fn temp_conversions() {
        const TEMP: f32 = 100.0;

        assert_eq!(
            TemperatureType::Celsius.convert_temp_unit(TEMP),
            TypedTemperature::Celsius(TEMP as u32),
        );

        // 100°C = 373.15 K, rounded up by convert_temp_unit.
        assert_eq!(
            TemperatureType::Kelvin.convert_temp_unit(TEMP),
            TypedTemperature::Kelvin(373.15_f32.ceil() as u32)
        );

        // 100°C = 212°F exactly.
        assert_eq!(
            TemperatureType::Fahrenheit.convert_temp_unit(TEMP),
            TypedTemperature::Fahrenheit(212)
        );
    }
}
84 |
--------------------------------------------------------------------------------
/src/app/process_killer.rs:
--------------------------------------------------------------------------------
1 | //! This file is meant to house (OS specific) implementations on how to kill
2 | //! processes.
3 |
4 | use anyhow::bail;
5 | #[cfg(target_os = "windows")]
6 | use windows::Win32::{
7 | Foundation::{CloseHandle, HANDLE},
8 | System::Threading::{
9 | OpenProcess, PROCESS_QUERY_INFORMATION, PROCESS_TERMINATE, TerminateProcess,
10 | },
11 | };
12 |
13 | use crate::collection::processes::Pid;
14 |
15 | /// Based from [this SO answer](https://stackoverflow.com/a/55231715).
16 | #[cfg(target_os = "windows")]
17 | struct Process(HANDLE);
18 |
#[cfg(target_os = "windows")]
impl Process {
    /// Open a handle to the process with query + terminate rights.
    fn open(pid: u32) -> anyhow::Result<Self> {
        // SAFETY: Windows API call, tread carefully with the args.
        match unsafe { OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_TERMINATE, false, pid) } {
            Ok(process) => Ok(Process(process)),
            Err(_) => bail!("process may have already been terminated."),
        }
    }

    /// Terminate the process behind this handle (exit code 1), consuming the
    /// handle; `Drop` then closes it.
    fn kill(self) -> anyhow::Result<()> {
        // SAFETY: Windows API call, this is safe as we are passing in the handle.
        let result = unsafe { TerminateProcess(self.0, 1) };
        if result.is_err() {
            bail!("process may have already been terminated.");
        }

        Ok(())
    }
}
39 |
#[cfg(target_os = "windows")]
impl Drop for Process {
    // Always release the OS handle, regardless of how the Process is dropped.
    fn drop(&mut self) {
        // SAFETY: Windows API call, this is safe as we are passing in the handle.
        unsafe {
            let _ = CloseHandle(self.0);
        }
    }
}
49 |
/// Kills a process, given a PID, for windows.
#[cfg(target_os = "windows")]
pub fn kill_process_given_pid(pid: Pid) -> anyhow::Result<()> {
    // Open a handle with terminate rights, then terminate; the handle is
    // closed automatically via Drop.
    Process::open(pid as u32)?.kill()
}
58 |
/// Kills a process, given a PID, for UNIX.
///
/// `signal` is the numeric signal to send (e.g. 15 for SIGTERM). On failure,
/// the returned error includes the errno and a human-readable explanation.
#[cfg(target_family = "unix")]
pub fn kill_process_given_pid(pid: Pid, signal: usize) -> anyhow::Result<()> {
    // SAFETY: the signal should be valid, and we act properly on an error (exit
    // code not 0).
    let output = unsafe { libc::kill(pid, signal as i32) };

    if output != 0 {
        // We had an error...
        let err_code = std::io::Error::last_os_error().raw_os_error();
        // Map the errno values `kill(2)` can set to readable messages.
        let err = match err_code {
            Some(libc::ESRCH) => "the target process did not exist.",
            Some(libc::EPERM) => {
                "the calling process does not have the permissions to terminate the target process(es)."
            }
            Some(libc::EINVAL) => "an invalid signal was specified.",
            _ => "Unknown error occurred.",
        };

        if let Some(err_code) = err_code {
            bail!(format!("Error code {err_code} - {err}"))
        } else {
            bail!(format!("Error code unknown - {err}"))
        };
    }

    Ok(())
}
87 |
--------------------------------------------------------------------------------
/src/bin/main.rs:
--------------------------------------------------------------------------------
1 | use bottom::{reset_stdout, start_bottom};
2 |
fn main() -> anyhow::Result<()> {
    // Flipped by `start_bottom` (presumably once terminal state has been
    // modified — confirm in `start_bottom`); if an error occurs after that
    // point, stdout is reset before the error propagates out of main.
    let mut run_error_hook = false;

    start_bottom(&mut run_error_hook).inspect_err(|_| {
        if run_error_hook {
            reset_stdout();
        }
    })
}
12 |
--------------------------------------------------------------------------------
/src/bin/schema.rs:
--------------------------------------------------------------------------------
1 | #![cfg(feature = "generate_schema")]
2 |
3 | use bottom::{options::config, widgets};
4 | use clap::Parser;
5 | use itertools::Itertools;
6 | use serde_json::Value;
7 | use strum::VariantArray;
8 |
9 | #[derive(Parser)]
10 | struct SchemaOptions {
11 | /// The version of the schema.
12 | version: Option,
13 | }
14 |
/// Generate the JSON schema for bottom's config file and print it to stdout.
///
/// The schema is derived from `config::Config` via schemars, then patched:
/// the `ProcColumn` and `DiskColumn` enum values are replaced with the full
/// set of accepted schema names, and `$id`/`description`/`title` metadata is
/// filled in based on the requested schema version.
fn generate_schema(schema_options: SchemaOptions) -> anyhow::Result<()> {
    let mut schema = schemars::schema_for!(config::Config);
    {
        // TODO: Maybe make this case insensitive? See https://stackoverflow.com/a/68639341

        // Replace the generated `ProcColumn` enum with every accepted name,
        // sorted and deduplicated.
        match schema
            .as_object_mut()
            .unwrap()
            .get_mut("$defs")
            .unwrap()
            .get_mut("ProcColumn")
            .unwrap()
        {
            Value::Object(proc_columns) => {
                let enums = proc_columns.get_mut("enum").unwrap();
                *enums = widgets::ProcColumn::VARIANTS
                    .iter()
                    .flat_map(|var| var.get_schema_names())
                    .sorted()
                    .map(|v| serde_json::Value::String(v.to_string()))
                    .dedup()
                    .collect();
            }
            _ => anyhow::bail!("missing proc columns definition"),
        }

        // Same treatment for `DiskColumn`.
        match schema
            .as_object_mut()
            .unwrap()
            .get_mut("$defs")
            .unwrap()
            .get_mut("DiskColumn")
            .unwrap()
        {
            Value::Object(disk_columns) => {
                let enums = disk_columns.get_mut("enum").unwrap();
                *enums = widgets::DiskColumn::VARIANTS
                    .iter()
                    .flat_map(|var| var.get_schema_names())
                    .sorted()
                    .map(|v| serde_json::Value::String(v.to_string()))
                    .dedup()
                    .collect();
            }
            _ => anyhow::bail!("missing disk columns definition"),
        }
    }

    // Point `$id` at the versioned schema location in the repo.
    let version = schema_options.version.unwrap_or("nightly".to_string());
    schema.insert(
        "$id".into(),
        format!("https://github.com/ClementTsang/bottom/blob/main/schema/{version}/bottom.json")
            .into(),
    );

    // Link the matching docs: nightly docs for nightly, stable otherwise.
    schema.insert(
        "description".into(),
        format!(
            "https://bottom.pages.dev/{}/configuration/config-file/",
            if version == "nightly" {
                "nightly"
            } else {
                "stable"
            }
        )
        .into(),
    );

    schema.insert(
        "title".into(),
        format!("Schema for bottom's config file ({version})").into(),
    );

    println!("{}", serde_json::to_string_pretty(&schema).unwrap());

    Ok(())
}
92 |
93 | fn main() -> anyhow::Result<()> {
94 | let schema_options = SchemaOptions::parse();
95 | generate_schema(schema_options)?;
96 |
97 | Ok(())
98 | }
99 |
--------------------------------------------------------------------------------
/src/canvas/components.rs:
--------------------------------------------------------------------------------
1 | //! Lower-level components used throughout bottom.
2 |
3 | pub mod data_table;
4 | pub mod pipe_gauge;
5 | pub mod time_graph;
6 | pub mod widget_carousel;
7 |
--------------------------------------------------------------------------------
/src/canvas/components/data_table/data_type.rs:
--------------------------------------------------------------------------------
1 | use std::{borrow::Cow, num::NonZeroU16};
2 |
3 | use tui::widgets::Row;
4 |
5 | use super::{ColumnHeader, DataTableColumn};
6 | use crate::canvas::Painter;
7 |
8 | pub trait DataToCell
9 | where
10 | H: ColumnHeader,
11 | {
12 | /// Given data, a column, and its corresponding width, return the string in
13 | /// the cell that will be displayed in the
14 | /// [`DataTable`](super::DataTable).
15 | fn to_cell(&self, column: &H, calculated_width: NonZeroU16) -> Option>;
16 |
17 | /// Apply styling to the generated [`Row`] of cells.
18 | ///
19 | /// The default implementation just returns the `row` that is passed in.
20 | #[inline(always)]
21 | fn style_row<'a>(&self, row: Row<'a>, _painter: &Painter) -> Row<'a> {
22 | row
23 | }
24 |
25 | /// Returns the desired column widths in light of having seen data.
26 | fn column_widths>(data: &[Self], columns: &[C]) -> Vec
27 | where
28 | Self: Sized;
29 | }
30 |
--------------------------------------------------------------------------------
/src/canvas/components/data_table/props.rs:
--------------------------------------------------------------------------------
1 | use std::borrow::Cow;
2 |
/// Static display properties of a [`DataTable`](super::DataTable).
pub struct DataTableProps {
    /// An optional title for the table.
    pub title: Option<Cow<'static, str>>,

    /// The size of the gap between the header and rows.
    pub table_gap: u16,

    /// Whether this table determines column widths from left to right.
    pub left_to_right: bool,

    /// Whether this table is a basic table. This affects the borders.
    pub is_basic: bool,

    /// Whether to show the table scroll position.
    pub show_table_scroll_position: bool,

    /// Whether to show the current entry as highlighted when not focused.
    pub show_current_entry_when_unfocused: bool,
}
22 |
--------------------------------------------------------------------------------
/src/canvas/components/data_table/state.rs:
--------------------------------------------------------------------------------
1 | use std::num::NonZeroU16;
2 |
3 | use tui::{layout::Rect, widgets::TableState};
4 |
/// The direction of the most recent scroll action.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub enum ScrollDirection {
    // UP means scrolling up --- this usually DECREMENTS
    Up,

    // DOWN means scrolling down --- this usually INCREMENTS
    #[default]
    Down,
}
14 |
15 | /// Internal state representation of a [`DataTable`](super::DataTable).
16 | pub struct DataTableState {
17 | /// The index from where to start displaying the rows.
18 | pub display_start_index: usize,
19 |
20 | /// The current scroll position.
21 | pub current_index: usize,
22 |
23 | /// The direction of the last attempted scroll.
24 | pub scroll_direction: ScrollDirection,
25 |
26 | /// ratatui's internal table state.
27 | pub table_state: TableState,
28 |
29 | /// The calculated widths.
30 | pub calculated_widths: Vec,
31 |
32 | /// The current inner [`Rect`].
33 | pub inner_rect: Rect,
34 | }
35 |
36 | impl Default for DataTableState {
37 | fn default() -> Self {
38 | Self {
39 | display_start_index: 0,
40 | current_index: 0,
41 | scroll_direction: ScrollDirection::Down,
42 | calculated_widths: vec![],
43 | table_state: TableState::default(),
44 | inner_rect: Rect::default(),
45 | }
46 | }
47 | }
48 |
impl DataTableState {
    /// Gets the starting position of a table.
    ///
    /// Recomputes `display_start_index` (the first row rendered) from the
    /// current selection, the number of visible rows, and the last scroll
    /// direction, so that the selected row stays within the visible window.
    /// A forced redraw restarts the window from the top.
    pub fn get_start_position(&mut self, num_rows: usize, is_force_redraw: bool) {
        let start_index = if is_force_redraw {
            0
        } else {
            self.display_start_index
        };
        let current_scroll_position = self.current_index;
        let scroll_direction = self.scroll_direction;

        self.display_start_index = match scroll_direction {
            ScrollDirection::Down => {
                if current_scroll_position < start_index + num_rows {
                    // If, using the current scroll position, we can see the element
                    // (so within that and + num_rows) just reuse the current previously
                    // scrolled position.
                    start_index
                } else if current_scroll_position >= num_rows {
                    // If the current position past the last element visible in the list,
                    // then skip until we can see that element.
                    current_scroll_position - num_rows + 1
                } else {
                    // Else, if it is not past the last element visible, do not omit anything.
                    0
                }
            }
            ScrollDirection::Up => {
                if current_scroll_position <= start_index {
                    // If it's past the first element, then show from that element downwards
                    current_scroll_position
                } else if current_scroll_position >= start_index + num_rows {
                    // Selection fell below the window; clamp so it becomes the
                    // last visible row.
                    current_scroll_position - num_rows + 1
                } else {
                    // Selection is still inside the window; keep the window.
                    start_index
                }
            }
        };
    }
}
89 |
--------------------------------------------------------------------------------
/src/canvas/components/data_table/styling.rs:
--------------------------------------------------------------------------------
1 | use tui::{style::Style, widgets::BorderType};
2 |
3 | use crate::options::config::style::Styles;
4 |
/// The set of [`Style`]s used when drawing a data table's parts.
#[derive(Default)]
pub struct DataTableStyling {
    pub header_style: Style,
    pub border_style: Style,
    pub border_type: BorderType,
    pub highlighted_border_style: Style,
    pub text_style: Style,
    pub highlighted_text_style: Style,
    pub title_style: Style,
}
15 |
impl DataTableStyling {
    /// Build table styling from the application-wide [`Styles`] palette.
    pub fn from_palette(styles: &Styles) -> Self {
        Self {
            header_style: styles.table_header_style,
            border_style: styles.border_style,
            border_type: styles.border_type,
            highlighted_border_style: styles.highlighted_border_style,
            text_style: styles.text_style,
            highlighted_text_style: styles.selected_text_style,
            title_style: styles.widget_title_style,
        }
    }
}
29 |
--------------------------------------------------------------------------------
/src/canvas/dialogs.rs:
--------------------------------------------------------------------------------
1 | pub mod dd_dialog;
2 | pub mod help_dialog;
3 |
--------------------------------------------------------------------------------
/src/canvas/drawing_utils.rs:
--------------------------------------------------------------------------------
1 | use std::time::Instant;
2 |
3 | use tui::{
4 | layout::Rect,
5 | widgets::{Block, BorderType, Borders},
6 | };
7 |
8 | use super::SIDE_BORDERS;
9 |
10 | /// Determine whether a graph x-label should be hidden.
11 | pub fn should_hide_x_label(
12 | always_hide_time: bool, autohide_time: bool, timer: &mut Option, draw_loc: Rect,
13 | ) -> bool {
14 | use crate::constants::*;
15 |
16 | if always_hide_time || (autohide_time && timer.is_none()) {
17 | true
18 | } else if let Some(time) = timer {
19 | if Instant::now().duration_since(*time).as_millis() < AUTOHIDE_TIMEOUT_MILLISECONDS.into() {
20 | false
21 | } else {
22 | *timer = None;
23 | true
24 | }
25 | } else {
26 | draw_loc.height < TIME_LABEL_HEIGHT_LIMIT
27 | }
28 | }
29 |
30 | /// Return a widget block.
31 | pub fn widget_block(is_basic: bool, is_selected: bool, border_type: BorderType) -> Block<'static> {
32 | let mut block = Block::default().border_type(border_type);
33 |
34 | if is_basic {
35 | if is_selected {
36 | block = block.borders(SIDE_BORDERS);
37 | } else {
38 | block = block.borders(Borders::empty());
39 | }
40 | } else {
41 | block = block.borders(Borders::all());
42 | }
43 |
44 | block
45 | }
46 |
47 | /// Return a dialog block.
48 | pub fn dialog_block(border_type: BorderType) -> Block<'static> {
49 | Block::default()
50 | .border_type(border_type)
51 | .borders(Borders::all())
52 | }
53 |
#[cfg(test)]
mod test {

    use super::*;

    /// Exercises every branch of `should_hide_x_label`, including the
    /// timer-expiry path that clears the timer as a side effect.
    #[test]
    fn test_should_hide_x_label() {
        use std::time::{Duration, Instant};

        use tui::layout::Rect;

        use crate::constants::*;

        let rect = Rect::new(0, 0, 10, 10);
        // Short enough that the height check should trigger hiding.
        let small_rect = Rect::new(0, 0, 10, 6);

        let mut under_timer = Some(Instant::now());
        // A timer that started long enough ago to already be expired.
        let mut over_timer =
            Instant::now().checked_sub(Duration::from_millis(AUTOHIDE_TIMEOUT_MILLISECONDS + 100));

        assert!(should_hide_x_label(true, false, &mut None, rect));
        assert!(should_hide_x_label(false, true, &mut None, rect));
        assert!(should_hide_x_label(false, false, &mut None, small_rect));

        // An unexpired timer keeps the label visible and stays set.
        assert!(!should_hide_x_label(
            false,
            true,
            &mut under_timer,
            small_rect
        ));
        assert!(under_timer.is_some());

        // An expired timer hides the label and is cleared.
        assert!(should_hide_x_label(
            false,
            true,
            &mut over_timer,
            small_rect
        ));
        assert!(over_timer.is_none());
    }
}
95 |
--------------------------------------------------------------------------------
/src/canvas/widgets.rs:
--------------------------------------------------------------------------------
1 | pub mod cpu_basic;
2 | pub mod cpu_graph;
3 | pub mod disk_table;
4 | pub mod mem_basic;
5 | pub mod mem_graph;
6 | pub mod network_basic;
7 | pub mod network_graph;
8 | pub mod process_table;
9 | pub mod temperature_table;
10 |
11 | #[cfg(feature = "battery")]
12 | pub mod battery_display;
13 |
--------------------------------------------------------------------------------
/src/canvas/widgets/disk_table.rs:
--------------------------------------------------------------------------------
1 | use tui::{Frame, layout::Rect};
2 |
3 | use crate::{
4 | app,
5 | canvas::{
6 | Painter,
7 | components::data_table::{DrawInfo, SelectionState},
8 | },
9 | };
10 |
impl Painter {
    /// Draw the disk table widget identified by `widget_id` into `draw_loc`.
    /// Does nothing if no disk widget state exists for that id.
    pub fn draw_disk_table(
        &self, f: &mut Frame<'_>, app_state: &mut app::App, draw_loc: Rect, widget_id: u64,
    ) {
        // Recalculate column widths whenever widget bounds are (re)queried.
        let recalculate_column_widths = app_state.should_get_widget_bounds();
        if let Some(disk_widget_state) = app_state
            .states
            .disk_state
            .widget_states
            .get_mut(&widget_id)
        {
            // Whether this widget is currently the focused one.
            let is_on_widget = app_state.current_widget.widget_id == widget_id;

            let draw_info = DrawInfo {
                loc: draw_loc,
                force_redraw: app_state.is_force_redraw,
                recalculate_column_widths,
                selection_state: SelectionState::new(app_state.is_expanded, is_on_widget),
            };

            disk_widget_state.table.draw(
                f,
                &draw_info,
                app_state.widget_map.get_mut(&widget_id),
                self,
            );
        }
    }
}
40 |
--------------------------------------------------------------------------------
/src/canvas/widgets/network_basic.rs:
--------------------------------------------------------------------------------
1 | use tui::{
2 | Frame,
3 | layout::{Constraint, Direction, Layout, Rect},
4 | text::{Line, Span},
5 | widgets::{Block, Paragraph},
6 | };
7 |
8 | use crate::{
9 | app::App,
10 | canvas::{Painter, drawing_utils::widget_block},
11 | utils::data_units::{convert_bits, get_unit_prefix},
12 | };
13 |
impl Painter {
    /// Draw the "basic" network widget: current RX/TX readings on the left
    /// half and cumulative totals on the right half of `draw_loc`.
    pub fn draw_basic_network(
        &self, f: &mut Frame<'_>, app_state: &mut App, draw_loc: Rect, widget_id: u64,
    ) {
        // Split the area into two equal halves.
        let divided_loc = Layout::default()
            .direction(Direction::Horizontal)
            .constraints([Constraint::Percentage(50), Constraint::Percentage(50)])
            .split(draw_loc);

        // One-cell horizontal margin inside each half.
        let net_loc = Layout::default()
            .direction(Direction::Horizontal)
            .constraints([Constraint::Percentage(100)])
            .horizontal_margin(1)
            .split(divided_loc[0]);

        let total_loc = Layout::default()
            .direction(Direction::Horizontal)
            .constraints([Constraint::Percentage(100)])
            .horizontal_margin(1)
            .split(divided_loc[1]);

        // Draw highlighted side borders when this widget is focused.
        if app_state.current_widget.widget_id == widget_id {
            f.render_widget(
                widget_block(true, true, self.styles.border_type)
                    .border_style(self.styles.highlighted_border_style),
                draw_loc,
            );
        }

        // Convert raw harvest values via the unit helpers, honoring the
        // binary-prefix (KiB vs KB style) configuration.
        let use_binary_prefix = app_state.app_config_fields.network_use_binary_prefix;
        let network_data = &(app_state.data_store.get_data().network_harvest);
        let rx = get_unit_prefix(network_data.rx, use_binary_prefix);
        let tx = get_unit_prefix(network_data.tx, use_binary_prefix);
        let total_rx = convert_bits(network_data.total_rx, use_binary_prefix);
        let total_tx = convert_bits(network_data.total_tx, use_binary_prefix);

        // Each helper returns a (value, unit-suffix) pair.
        let rx_label = format!("RX: {:.1}{}", rx.0, rx.1);
        let tx_label = format!("TX: {:.1}{}", tx.0, tx.1);
        let total_rx_label = format!("Total RX: {:.1}{}", total_rx.0, total_rx.1);
        let total_tx_label = format!("Total TX: {:.1}{}", total_tx.0, total_tx.1);

        let net_text = vec![
            Line::from(Span::styled(rx_label, self.styles.rx_style)),
            Line::from(Span::styled(tx_label, self.styles.tx_style)),
        ];

        let total_net_text = vec![
            Line::from(Span::styled(total_rx_label, self.styles.total_rx_style)),
            Line::from(Span::styled(total_tx_label, self.styles.total_tx_style)),
        ];

        f.render_widget(Paragraph::new(net_text).block(Block::default()), net_loc[0]);

        f.render_widget(
            Paragraph::new(total_net_text).block(Block::default()),
            total_loc[0],
        );

        // Update draw loc in widget map
        if app_state.should_get_widget_bounds() {
            if let Some(widget) = app_state.widget_map.get_mut(&widget_id) {
                widget.top_left_corner = Some((draw_loc.x, draw_loc.y));
                widget.bottom_right_corner =
                    Some((draw_loc.x + draw_loc.width, draw_loc.y + draw_loc.height));
            }
        }
    }
}
82 |
--------------------------------------------------------------------------------
/src/canvas/widgets/temperature_table.rs:
--------------------------------------------------------------------------------
1 | use tui::{Frame, layout::Rect};
2 |
3 | use crate::{
4 | app,
5 | canvas::{
6 | Painter,
7 | components::data_table::{DrawInfo, SelectionState},
8 | },
9 | };
10 |
impl Painter {
    /// Draw the temperature table widget identified by `widget_id` into
    /// `draw_loc`. Does nothing if no matching widget state exists.
    pub fn draw_temp_table(
        &self, f: &mut Frame<'_>, app_state: &mut app::App, draw_loc: Rect, widget_id: u64,
    ) {
        // Recalculate column widths whenever widget bounds are (re)queried.
        let recalculate_column_widths = app_state.should_get_widget_bounds();
        if let Some(temp_widget_state) = app_state
            .states
            .temp_state
            .widget_states
            .get_mut(&widget_id)
        {
            // Whether this widget is currently the focused one.
            let is_on_widget = app_state.current_widget.widget_id == widget_id;

            let draw_info = DrawInfo {
                loc: draw_loc,
                force_redraw: app_state.is_force_redraw,
                recalculate_column_widths,
                selection_state: SelectionState::new(app_state.is_expanded, is_on_widget),
            };

            temp_widget_state.table.draw(
                f,
                &draw_info,
                app_state.widget_map.get_mut(&widget_id),
                self,
            );
        }
    }
}
40 |
--------------------------------------------------------------------------------
/src/collection/cpu.rs:
--------------------------------------------------------------------------------
1 | //! Data collection for CPU usage and load average.
2 |
3 | pub mod sysinfo;
4 | pub use self::sysinfo::*;
5 |
/// The load average over the last 1, 5, and 15 minutes.
pub type LoadAvgHarvest = [f32; 3];

/// Identifies which CPU a data point belongs to.
#[derive(Debug, Clone, Copy)]
pub enum CpuDataType {
    /// The average over all cores.
    Avg,
    /// A specific core, identified by index.
    Cpu(usize),
}

/// A single CPU usage reading.
#[derive(Debug, Clone)]
pub struct CpuData {
    /// Which CPU (or the average) this reading is for.
    pub data_type: CpuDataType,
    /// The usage value reported by the collector.
    pub cpu_usage: f64,
}

/// One harvest of CPU data: one entry per tracked CPU, optionally preceded by
/// the all-core average.
pub type CpuHarvest = Vec<CpuData>;
21 |
--------------------------------------------------------------------------------
/src/collection/cpu/sysinfo.rs:
--------------------------------------------------------------------------------
1 | //! CPU stats through sysinfo.
2 | //! Supports FreeBSD.
3 |
4 | use sysinfo::System;
5 |
6 | use super::{CpuData, CpuDataType, CpuHarvest};
7 | use crate::collection::error::CollectionResult;
8 |
9 | pub fn get_cpu_data_list(sys: &System, show_average_cpu: bool) -> CollectionResult {
10 | let mut cpus = vec![];
11 |
12 | if show_average_cpu {
13 | let cpu = sys.global_cpu_info();
14 |
15 | cpus.push(CpuData {
16 | data_type: CpuDataType::Avg,
17 | cpu_usage: cpu.cpu_usage() as f64,
18 | })
19 | }
20 |
21 | cpus.extend(
22 | sys.cpus()
23 | .iter()
24 | .enumerate()
25 | .map(|(i, cpu)| CpuData {
26 | data_type: CpuDataType::Cpu(i),
27 | cpu_usage: cpu.cpu_usage() as f64,
28 | })
29 | .collect::>(),
30 | );
31 |
32 | Ok(cpus)
33 | }
34 |
35 | #[cfg(target_family = "unix")]
36 | pub(crate) fn get_load_avg() -> crate::collection::cpu::LoadAvgHarvest {
37 | // The API for sysinfo apparently wants you to call it like this, rather than
38 | // using a &System.
39 | let sysinfo::LoadAvg { one, five, fifteen } = sysinfo::System::load_average();
40 |
41 | [one as f32, five as f32, fifteen as f32]
42 | }
43 |
--------------------------------------------------------------------------------
/src/collection/disks/freebsd.rs:
--------------------------------------------------------------------------------
1 | //! Disk stats for FreeBSD.
2 |
3 | use std::io;
4 |
5 | use hashbrown::HashMap;
6 | use serde::Deserialize;
7 |
8 | use super::{DiskHarvest, IoHarvest, keep_disk_entry};
9 | use crate::collection::{DataCollector, deserialize_xo, disks::IoData, error::CollectionResult};
10 |
11 | #[derive(Deserialize, Debug, Default)]
12 | #[serde(rename_all = "kebab-case")]
13 | struct StorageSystemInformation {
14 | filesystem: Vec,
15 | }
16 |
17 | #[derive(Deserialize, Debug)]
18 | #[serde(rename_all = "kebab-case")]
19 | struct FileSystem {
20 | name: String,
21 | total_blocks: u64,
22 | used_blocks: u64,
23 | available_blocks: u64,
24 | mounted_on: String,
25 | }
26 |
27 | pub fn get_io_usage() -> CollectionResult {
28 | // TODO: Should this (and other I/O collectors) fail fast? In general, should
29 | // collection ever fail fast?
30 | let mut io_harvest: HashMap> =
31 | get_disk_info().map(|storage_system_information| {
32 | storage_system_information
33 | .filesystem
34 | .into_iter()
35 | .map(|disk| (disk.name, None))
36 | .collect()
37 | })?;
38 |
39 | #[cfg(feature = "zfs")]
40 | {
41 | use crate::collection::disks::zfs_io_counters;
42 | if let Ok(zfs_io) = zfs_io_counters::zfs_io_stats() {
43 | for io in zfs_io.into_iter() {
44 | let mount_point = io.device_name().to_string_lossy();
45 | io_harvest.insert(
46 | mount_point.to_string(),
47 | Some(IoData {
48 | read_bytes: io.read_bytes(),
49 | write_bytes: io.write_bytes(),
50 | }),
51 | );
52 | }
53 | }
54 | }
55 | Ok(io_harvest)
56 | }
57 |
58 | pub fn get_disk_usage(collector: &DataCollector) -> CollectionResult> {
59 | let disk_filter = &collector.filters.disk_filter;
60 | let mount_filter = &collector.filters.mount_filter;
61 | let vec_disks: Vec = get_disk_info().map(|storage_system_information| {
62 | storage_system_information
63 | .filesystem
64 | .into_iter()
65 | .filter_map(|disk| {
66 | if keep_disk_entry(&disk.name, &disk.mounted_on, disk_filter, mount_filter) {
67 | Some(DiskHarvest {
68 | free_space: Some(disk.available_blocks * 1024),
69 | used_space: Some(disk.used_blocks * 1024),
70 | total_space: Some(disk.total_blocks * 1024),
71 | mount_point: disk.mounted_on,
72 | name: disk.name,
73 | })
74 | } else {
75 | None
76 | }
77 | })
78 | .collect()
79 | })?;
80 |
81 | Ok(vec_disks)
82 | }
83 |
84 | fn get_disk_info() -> io::Result {
85 | // TODO: Ideally we don't have to shell out to a new program.
86 | let output = std::process::Command::new("df")
87 | .args(["--libxo", "json", "-k", "-t", "ufs,msdosfs,zfs"])
88 | .output()?;
89 | deserialize_xo("storage-system-information", &output.stdout)
90 | }
91 |
--------------------------------------------------------------------------------
/src/collection/disks/io_counters.rs:
--------------------------------------------------------------------------------
1 | use std::ffi::OsStr;
2 |
/// Cumulative I/O counters (bytes read and written) for a single named device.
#[derive(Debug, Default)]
pub struct IoCounters {
    // The device's name.
    name: String,
    // Total bytes read from the device.
    read_bytes: u64,
    // Total bytes written to the device.
    write_bytes: u64,
}

impl IoCounters {
    /// Constructs a new [`IoCounters`] from a device name and byte counters.
    pub fn new(name: String, read_bytes: u64, write_bytes: u64) -> Self {
        Self { name, read_bytes, write_bytes }
    }

    /// The device's name as an [`OsStr`].
    pub(crate) fn device_name(&self) -> &OsStr {
        self.name.as_ref()
    }

    /// Total bytes read.
    pub(crate) fn read_bytes(&self) -> u64 {
        self.read_bytes
    }

    /// Total bytes written.
    pub(crate) fn write_bytes(&self) -> u64 {
        self.write_bytes
    }
}
31 |
--------------------------------------------------------------------------------
/src/collection/disks/other.rs:
--------------------------------------------------------------------------------
1 | //! Fallback disk info using sysinfo.
2 |
3 | use super::{DiskHarvest, keep_disk_entry};
4 | use crate::collection::DataCollector;
5 |
6 | pub(crate) fn get_disk_usage(collector: &DataCollector) -> anyhow::Result> {
7 | let disks = &collector.sys.disks;
8 | let disk_filter = &collector.filters.disk_filter;
9 | let mount_filter = &collector.filters.mount_filter;
10 |
11 | Ok(disks
12 | .iter()
13 | .filter_map(|disk| {
14 | let name = {
15 | let name = disk.name();
16 |
17 | if name.is_empty() {
18 | "No Name".to_string()
19 | } else {
20 | name.to_os_string()
21 | .into_string()
22 | .unwrap_or_else(|_| "Name Unavailable".to_string())
23 | }
24 | };
25 |
26 | let mount_point = disk
27 | .mount_point()
28 | .as_os_str()
29 | .to_os_string()
30 | .into_string()
31 | .unwrap_or_else(|_| "Mount Unavailable".to_string());
32 |
33 | if keep_disk_entry(&name, &mount_point, disk_filter, mount_filter) {
34 | let free_space = disk.available_space();
35 | let total_space = disk.total_space();
36 | let used_space = total_space - free_space;
37 |
38 | Some(DiskHarvest {
39 | name,
40 | mount_point,
41 | free_space: Some(free_space),
42 | used_space: Some(used_space),
43 | total_space: Some(total_space),
44 | })
45 | } else {
46 | None
47 | }
48 | })
49 | .collect())
50 | }
51 |
--------------------------------------------------------------------------------
/src/collection/disks/unix.rs:
--------------------------------------------------------------------------------
1 | //! Disk stats for Unix-like systems that aren't supported through other means.
2 | //! Officially, for now, this means Linux and macOS.
3 |
mod file_systems;

mod usage;

// Select the platform-specific disk backend at compile time.
cfg_if::cfg_if! {
    if #[cfg(target_os = "linux")] {
        // Linux gets its own implementation.
        mod linux;
        pub use linux::*;
    } else if #[cfg(target_os = "macos")] {
        // macOS uses the generic Unix partition code, plus IOKit-backed
        // I/O counters from the `macos` module.
        mod other;
        use other::*;

        mod macos;
        pub use macos::*;
    } else {
        // Any other Unix-like target gets the generic implementation only.
        mod other;
        use other::*;
    }
}
23 |
24 | use file_systems::*;
25 | use usage::*;
26 |
27 | use super::{DiskHarvest, keep_disk_entry};
28 | use crate::collection::DataCollector;
29 |
30 | /// Returns the disk usage of the mounted (and for now, physical) disks.
31 | pub fn get_disk_usage(collector: &DataCollector) -> anyhow::Result> {
32 | let disk_filter = &collector.filters.disk_filter;
33 | let mount_filter = &collector.filters.mount_filter;
34 | let mut vec_disks: Vec = Vec::new();
35 |
36 | for partition in physical_partitions()? {
37 | let name = partition.get_device_name();
38 | let mount_point = partition.mount_point().to_string_lossy().to_string();
39 |
40 | // Precedence ordering in the case where name and mount filters disagree,
41 | // "allow" takes precedence over "deny".
42 | //
43 | // For implementation, we do this as follows:
44 | // 1. Is the entry allowed through any filter? That is, does it match an entry
45 | // in a filter where `is_list_ignored` is `false`? If so, we always keep this
46 | // entry.
47 | // 2. Is the entry denied through any filter? That is, does it match an entry in
48 | // a filter where `is_list_ignored` is `true`? If so, we always deny this
49 | // entry.
50 | // 3. Anything else is allowed.
51 |
52 | if keep_disk_entry(&name, &mount_point, disk_filter, mount_filter) {
53 | // The usage line can fail in some cases (for example, if you use Void Linux +
54 | // LUKS, see https://github.com/ClementTsang/bottom/issues/419 for details).
55 | if let Ok(usage) = partition.usage() {
56 | let total = usage.total();
57 |
58 | vec_disks.push(DiskHarvest {
59 | free_space: Some(usage.free()),
60 | used_space: Some(total - usage.available()),
61 | total_space: Some(total),
62 | mount_point,
63 | name,
64 | });
65 | } else {
66 | vec_disks.push(DiskHarvest {
67 | free_space: None,
68 | used_space: None,
69 | total_space: None,
70 | mount_point,
71 | name,
72 | });
73 | }
74 | }
75 | }
76 |
77 | Ok(vec_disks)
78 | }
79 |
--------------------------------------------------------------------------------
/src/collection/disks/unix/linux/mod.rs:
--------------------------------------------------------------------------------
1 | mod counters;
2 | mod partition;
3 |
4 | pub use counters::*;
5 | pub(crate) use partition::*;
6 |
--------------------------------------------------------------------------------
/src/collection/disks/unix/macos/counters.rs:
--------------------------------------------------------------------------------
1 | //! Based on [heim's implementation](https://github.com/heim-rs/heim/blob/master/heim-disk/src/sys/macos/counters.rs).
2 |
3 | use super::io_kit::{self, get_dict, get_disks, get_i64, get_string};
4 | use crate::collection::disks::IoCounters;
5 |
6 | fn get_device_io(device: io_kit::IoObject) -> anyhow::Result {
7 | let parent = device.service_parent()?;
8 |
9 | // XXX: Re: Conform check being disabled.
10 | //
11 | // Okay, so this is weird.
12 | //
13 | // The problem is that if I have this check - this is what sources like psutil
14 | // use, for example (see https://github.com/giampaolo/psutil/blob/7eadee31db2f038763a3a6f978db1ea76bbc4674/psutil/_psutil_osx.c#LL1422C20-L1422C20)
15 | // then this will only return stuff like disk0.
16 | //
17 | // The problem with this is that there is *never* a disk0 *disk* entry to
18 | // correspond to this, so there will be entries like disk1 or whatnot.
19 | // Someone's done some digging on the gopsutil repo (https://github.com/shirou/gopsutil/issues/855#issuecomment-610016435), and it seems
20 | // like this is a consequence of how Apple does logical volumes.
21 | //
22 | // So with all that said, what I've found is that I *can* still get a mapping -
23 | // but I have to disable the conform check, which... is weird. I'm not sure
24 | // if this is valid at all. But it *does* seem to match Activity Monitor
25 | // with regards to disk activity, so... I guess we can leave this for
26 | // now...?
27 |
28 | // if !parent.conforms_to_block_storage_driver() {
29 | // anyhow::bail!("{parent:?}, the parent of {device:?} does not conform to
30 | // IOBlockStorageDriver") }
31 |
32 | let disk_props = device.properties()?;
33 | let parent_props = parent.properties()?;
34 |
35 | let name = get_string(&disk_props, "BSD Name")?;
36 | let stats = get_dict(&parent_props, "Statistics")?;
37 |
38 | let read_bytes = get_i64(&stats, "Bytes (Read)")? as u64;
39 | let write_bytes = get_i64(&stats, "Bytes (Write)")? as u64;
40 |
41 | // let read_count = stats.get_i64("Operations (Read)")? as u64;
42 | // let write_count = stats.get_i64("Operations (Write)")? as u64;
43 |
44 | Ok(IoCounters::new(name, read_bytes, write_bytes))
45 | }
46 |
47 | /// Returns an iterator of disk I/O stats. Pulls data through IOKit.
48 | pub fn io_stats() -> anyhow::Result> {
49 | Ok(get_disks()?.filter_map(|d| get_device_io(d).ok()).collect())
50 | }
51 |
--------------------------------------------------------------------------------
/src/collection/disks/unix/macos/io_kit.rs:
--------------------------------------------------------------------------------
1 | mod bindings;
2 | mod io_disks;
3 | mod io_iterator;
4 | mod io_object;
5 |
6 | pub use io_disks::get_disks;
7 | pub use io_iterator::*;
8 | pub use io_object::*;
9 |
--------------------------------------------------------------------------------
/src/collection/disks/unix/macos/io_kit/bindings.rs:
--------------------------------------------------------------------------------
1 | //! C FFI bindings for [IOKit](https://developer.apple.com/documentation/iokit/).
2 | //!
3 | //! Based on [heim](https://github.com/heim-rs/heim/blob/master/heim-common/src/sys/macos/iokit/io_master_port.rs)
4 | //! and [sysinfo's implementation](https://github.com/GuillaumeGomez/sysinfo/blob/master/src/apple/macos/ffi.rs).
5 | //!
6 | //! Ideally, we can remove this if sysinfo ever gains disk I/O capabilities.
7 |
8 | use core_foundation::{
9 | base::{CFAllocatorRef, mach_port_t},
10 | dictionary::CFMutableDictionaryRef,
11 | };
12 | use libc::c_char;
13 | use mach2::{kern_return::kern_return_t, port::MACH_PORT_NULL};
14 |
/// An IOKit object handle. IOKit represents kernel objects as mach ports.
#[expect(non_camel_case_types)]
pub type io_object_t = mach_port_t;

/// A handle to an iterator over IOKit objects; just an [`io_object_t`].
#[expect(non_camel_case_types)]
pub type io_iterator_t = io_object_t;
/// A handle to an entry in the I/O Registry; just an [`io_object_t`].
#[expect(non_camel_case_types)]
pub type io_registry_entry_t = io_object_t;

/// Option bits passed to various IOKit calls.
pub type IOOptionBits = u32;

/// See https://github.com/1kc/librazermacos/pull/27#issuecomment-1042368531.
#[expect(non_upper_case_globals)]
pub const kIOMasterPortDefault: mach_port_t = MACH_PORT_NULL;

/// The IOService registry plane name, NUL-terminated for FFI use.
#[expect(non_upper_case_globals)]
pub const kIOServicePlane: &str = "IOService\0";

/// The IOMedia class name, NUL-terminated for FFI use.
#[expect(non_upper_case_globals)]
pub const kIOMediaClass: &str = "IOMedia\0";

// SAFETY: Bindings like this are inherently unsafe. See [here](https://developer.apple.com/documentation/iokit) for
// more details.
unsafe extern "C" {

    /// Looks up registered IOService objects that match `matching`, returning
    /// an iterator over them via `existing`.
    pub fn IOServiceGetMatchingServices(
        mainPort: mach_port_t, matching: CFMutableDictionaryRef, existing: *mut io_iterator_t,
    ) -> kern_return_t;

    /// Creates a matching dictionary for a class name, for use with
    /// [`IOServiceGetMatchingServices`].
    pub fn IOServiceMatching(name: *const c_char) -> CFMutableDictionaryRef;

    /// Advances an IOKit iterator; returns 0 when exhausted.
    pub fn IOIteratorNext(iterator: io_iterator_t) -> io_object_t;

    /// Releases a reference to an IOKit object.
    pub fn IOObjectRelease(obj: io_object_t) -> kern_return_t;

    /// Gets the parent of a registry entry in the given plane.
    pub fn IORegistryEntryGetParentEntry(
        entry: io_registry_entry_t, plane: *const libc::c_char, parent: *mut io_registry_entry_t,
    ) -> kern_return_t;

    // pub fn IOObjectConformsTo(object: io_object_t, className: *const
    // libc::c_char) -> mach2::boolean::boolean_t;

    /// Creates a CF dictionary holding a registry entry's properties.
    pub fn IORegistryEntryCreateCFProperties(
        entry: io_registry_entry_t, properties: *mut CFMutableDictionaryRef,
        allocator: CFAllocatorRef, options: IOOptionBits,
    ) -> kern_return_t;

}
62 |
--------------------------------------------------------------------------------
/src/collection/disks/unix/macos/io_kit/io_disks.rs:
--------------------------------------------------------------------------------
1 | use anyhow::bail;
2 | use mach2::kern_return;
3 |
4 | use super::{IoIterator, bindings::*};
5 |
6 | pub fn get_disks() -> anyhow::Result {
7 | let mut media_iter: io_iterator_t = 0;
8 |
9 | // SAFETY: This is a safe syscall via IOKit, all the arguments should be safe.
10 | let result = unsafe {
11 | IOServiceGetMatchingServices(
12 | kIOMasterPortDefault,
13 | IOServiceMatching(kIOMediaClass.as_ptr().cast()),
14 | &mut media_iter,
15 | )
16 | };
17 |
18 | if result == kern_return::KERN_SUCCESS {
19 | Ok(media_iter.into())
20 | } else {
21 | bail!("IOServiceGetMatchingServices failed, error code {result}");
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/src/collection/disks/unix/macos/io_kit/io_iterator.rs:
--------------------------------------------------------------------------------
1 | //! Based on [heim's](https://github.com/heim-rs/heim/blob/master/heim-common/src/sys/macos/iokit/io_iterator.rs).
2 | //! implementation.
3 |
4 | use std::ops::{Deref, DerefMut};
5 |
6 | use mach2::kern_return;
7 |
8 | use super::{bindings::*, io_object::IoObject};
9 |
/// Safe wrapper around the IOKit `io_iterator_t` type.
/// Releases the underlying handle on drop.
#[derive(Debug)]
pub struct IoIterator(io_iterator_t);
13 |
14 | impl From for IoIterator {
15 | fn from(iter: io_iterator_t) -> IoIterator {
16 | IoIterator(iter)
17 | }
18 | }
19 |
// Deref to the raw handle so the wrapper can be passed straight to FFI calls.
impl Deref for IoIterator {
    type Target = io_iterator_t;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
27 |
// Mutable access to the raw handle, for FFI calls that take `*mut`.
impl DerefMut for IoIterator {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
33 |
34 | impl Iterator for IoIterator {
35 | type Item = IoObject;
36 |
37 | fn next(&mut self) -> Option {
38 | // Basically, we just stop when we hit 0.
39 |
40 | // SAFETY: IOKit call, the passed argument (an `io_iterator_t`) is what is
41 | // expected.
42 | match unsafe { IOIteratorNext(self.0) } {
43 | 0 => None,
44 | io_object => Some(IoObject::from(io_object)),
45 | }
46 | }
47 | }
48 |
impl Drop for IoIterator {
    fn drop(&mut self) {
        // SAFETY: IOKit call, the passed argument (an `io_iterator_t`) is what is
        // expected.
        let result = unsafe { IOObjectRelease(self.0) };
        // NOTE(review): this asserts in release builds too; a failed release
        // panics inside `drop`, which can abort the process — confirm intended.
        assert_eq!(result, kern_return::KERN_SUCCESS);
    }
}
57 |
--------------------------------------------------------------------------------
/src/collection/disks/unix/macos/mod.rs:
--------------------------------------------------------------------------------
1 | mod counters;
2 | mod io_kit;
3 |
4 | pub use counters::*;
5 |
--------------------------------------------------------------------------------
/src/collection/disks/unix/other/bindings.rs:
--------------------------------------------------------------------------------
1 | //! Based on [heim's](https://github.com/heim-rs/heim/blob/master/heim-disk/src/sys/unix/bindings/mod.rs)
2 | //! implementation.
3 |
4 | use std::io::Error;
5 |
// MNT_NOWAIT: return cached statfs information rather than blocking on each
// filesystem (see getfsstat(2)).
const MNT_NOWAIT: libc::c_int = 2;

// SAFETY: Bindings like this are inherently unsafe.
unsafe extern "C" {
    /// Returns statfs information for all mounted filesystems. With a null
    /// buffer, returns the number of mounted filesystems instead.
    fn getfsstat64(buf: *mut libc::statfs, bufsize: libc::c_int, flags: libc::c_int)
        -> libc::c_int;
}
13 |
14 | /// Returns all the mounts on the system at the moment.
15 | pub(crate) fn mounts() -> anyhow::Result> {
16 | // SAFETY: System API FFI call, arguments should be correct.
17 | let expected_len = unsafe { getfsstat64(std::ptr::null_mut(), 0, MNT_NOWAIT) };
18 |
19 | let mut mounts: Vec = Vec::with_capacity(expected_len as usize);
20 |
21 | // SAFETY: System API FFI call, arguments should be correct.
22 | let result = unsafe {
23 | getfsstat64(
24 | mounts.as_mut_ptr(),
25 | std::mem::size_of::() as libc::c_int * expected_len,
26 | MNT_NOWAIT,
27 | )
28 | };
29 |
30 | if result == -1 {
31 | Err(anyhow::Error::from(Error::last_os_error()).context("getfsstat64"))
32 | } else {
33 | debug_assert_eq!(
34 | expected_len, result,
35 | "Expected {expected_len} statfs entries, but instead got {result} entries",
36 | );
37 |
38 | // SAFETY: We have a debug assert check, and if `result` is not correct (-1), we
39 | // check against it. Otherwise, getfsstat64 should return the number of
40 | // statfs structures if it succeeded.
41 | //
42 | // Source: https://man.freebsd.org/cgi/man.cgi?query=getfsstat&sektion=2&format=html
43 | unsafe {
44 | mounts.set_len(result as usize);
45 | }
46 | Ok(mounts)
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/src/collection/disks/unix/other/mod.rs:
--------------------------------------------------------------------------------
1 | mod bindings;
2 | mod partition;
3 |
4 | pub(crate) use partition::*;
5 |
--------------------------------------------------------------------------------
/src/collection/disks/unix/other/partition.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | ffi::{CStr, CString},
3 | os::unix::prelude::OsStrExt,
4 | path::{Path, PathBuf},
5 | str::FromStr,
6 | };
7 |
8 | use anyhow::bail;
9 |
10 | use super::bindings;
11 | use crate::collection::disks::unix::{FileSystem, Usage};
12 |
/// A mounted partition: its backing device, mount point, and filesystem type.
pub(crate) struct Partition {
    // The device name, as reported by the mount table.
    device: String,
    // Where the partition is mounted.
    mount_point: PathBuf,
    // The partition's filesystem type.
    fs_type: FileSystem,
}
18 |
19 | impl Partition {
20 | /// Returns the mount point for this partition.
21 | #[inline]
22 | pub fn mount_point(&self) -> &Path {
23 | self.mount_point.as_path()
24 | }
25 |
26 | /// Returns the [`FileSystem`] of this partition.
27 | #[inline]
28 | pub fn fs_type(&self) -> &FileSystem {
29 | &self.fs_type
30 | }
31 |
32 | /// Returns the usage stats for this partition.
33 | pub fn usage(&self) -> anyhow::Result {
34 | let path = CString::new(self.mount_point().as_os_str().as_bytes())?;
35 | let mut vfs = std::mem::MaybeUninit::::uninit();
36 |
37 | // SAFETY: System API call. Arguments should be correct.
38 | let result = unsafe { libc::statvfs(path.as_ptr(), vfs.as_mut_ptr()) };
39 |
40 | if result == 0 {
41 | // SAFETY: We check that it succeeded (result is 0), which means vfs should be
42 | // populated.
43 | Ok(Usage::new(unsafe { vfs.assume_init() }))
44 | } else {
45 | bail!("statvfs failed to get the disk usage for disk {path:?}")
46 | }
47 | }
48 |
49 | /// Returns the device name.
50 | #[inline]
51 | pub fn get_device_name(&self) -> String {
52 | self.device.clone()
53 | }
54 | }
55 |
56 | fn partitions_iter() -> anyhow::Result> {
57 | let mounts = bindings::mounts()?;
58 |
59 | unsafe fn ptr_to_cow<'a>(ptr: *const i8) -> std::borrow::Cow<'a, str> {
60 | unsafe { CStr::from_ptr(ptr).to_string_lossy() }
61 | }
62 |
63 | Ok(mounts.into_iter().map(|stat| {
64 | // SAFETY: Should be a non-null pointer.
65 | let device = unsafe { ptr_to_cow(stat.f_mntfromname.as_ptr()).to_string() };
66 |
67 | let fs_type = {
68 | // SAFETY: Should be a non-null pointer.
69 | let fs_type_str = unsafe { ptr_to_cow(stat.f_fstypename.as_ptr()) };
70 | FileSystem::from_str(&fs_type_str).unwrap_or(FileSystem::Other(fs_type_str.to_string()))
71 | };
72 |
73 | let mount_point = {
74 | // SAFETY: Should be a non-null pointer.
75 | let path_str = unsafe { ptr_to_cow(stat.f_mntonname.as_ptr()).to_string() };
76 | PathBuf::from(path_str)
77 | };
78 |
79 | Partition {
80 | device,
81 | mount_point,
82 | fs_type,
83 | }
84 | }))
85 | }
86 |
87 | #[expect(dead_code)]
88 | /// Returns a [`Vec`] containing all partitions.
89 | pub(crate) fn partitions() -> anyhow::Result> {
90 | partitions_iter().map(|iter| iter.collect())
91 | }
92 |
93 | /// Returns a [`Vec`] containing all *physical* partitions. This is defined by
94 | /// [`FileSystem::is_physical()`].
95 | pub(crate) fn physical_partitions() -> anyhow::Result> {
96 | partitions_iter().map(|iter| {
97 | iter.filter(|partition| partition.fs_type().is_physical())
98 | .collect()
99 | })
100 | }
101 |
--------------------------------------------------------------------------------
/src/collection/disks/unix/usage.rs:
--------------------------------------------------------------------------------
/// A wrapper around a raw `statvfs` result, exposing byte-denominated usage
/// figures for a mounted filesystem.
pub struct Usage(libc::statvfs);

// Note that x86 returns `u32` values while x86-64 returns `u64`s, so we convert
// everything to `u64` for consistency.
#[expect(clippy::useless_conversion)]
impl Usage {
    /// Wraps a raw `statvfs` value.
    pub(crate) fn new(vfs: libc::statvfs) -> Self {
        Self(vfs)
    }

    /// Returns the total number of bytes available.
    pub fn total(&self) -> u64 {
        u64::from(self.0.f_blocks) * u64::from(self.0.f_frsize)
    }

    /// Returns the available number of bytes used. Note this is not necessarily
    /// the same as [`Usage::free`].
    ///
    /// NOTE(review): this reads `f_bfree` (free blocks including those reserved
    /// for root) while [`Usage::free`] reads `f_bavail` — inverted relative to
    /// the conventional statvfs naming. The caller in `unix.rs` computes
    /// `used = total - available()`, which relies on this reading `f_bfree`;
    /// confirm intent before renaming or "fixing".
    pub fn available(&self) -> u64 {
        u64::from(self.0.f_bfree) * u64::from(self.0.f_frsize)
    }

    #[expect(dead_code)]
    /// Returns the total number of bytes used. Equal to `total - available` on
    /// Unix.
    pub fn used(&self) -> u64 {
        let avail_to_root = u64::from(self.0.f_bfree) * u64::from(self.0.f_frsize);
        self.total() - avail_to_root
    }

    /// Returns the total number of bytes free. Note this is not necessarily the
    /// same as [`Usage::available`].
    pub fn free(&self) -> u64 {
        u64::from(self.0.f_bavail) * u64::from(self.0.f_frsize)
    }
}
36 |
--------------------------------------------------------------------------------
/src/collection/disks/windows.rs:
--------------------------------------------------------------------------------
1 | //! Disk stats via sysinfo.
2 |
3 | mod bindings;
4 |
5 | use bindings::*;
6 | use itertools::Itertools;
7 |
8 | use super::{DiskHarvest, keep_disk_entry};
9 | use crate::collection::{DataCollector, disks::IoCounters};
10 |
11 | /// Returns I/O stats.
12 | pub(crate) fn io_stats() -> anyhow::Result> {
13 | let volume_io = all_volume_io()?;
14 |
15 | Ok(volume_io
16 | .into_iter()
17 | .map_ok(|(performance, volume_name)| {
18 | let name = volume_name;
19 | let read_bytes = performance.BytesRead as u64;
20 | let write_bytes = performance.BytesWritten as u64;
21 |
22 | IoCounters::new(name, read_bytes, write_bytes)
23 | })
24 | .flatten()
25 | .collect::>())
26 | }
27 |
28 | pub(crate) fn get_disk_usage(collector: &DataCollector) -> anyhow::Result> {
29 | let disks = &collector.sys.disks;
30 | let disk_filter = &collector.filters.disk_filter;
31 | let mount_filter = &collector.filters.mount_filter;
32 |
33 | Ok(disks
34 | .iter()
35 | .filter_map(|disk| {
36 | let name = {
37 | let name = disk.name();
38 |
39 | if name.is_empty() {
40 | "No Name".to_string()
41 | } else {
42 | name.to_os_string()
43 | .into_string()
44 | .unwrap_or_else(|_| "Name Unavailable".to_string())
45 | }
46 | };
47 |
48 | let mount_point = disk
49 | .mount_point()
50 | .as_os_str()
51 | .to_os_string()
52 | .into_string()
53 | .unwrap_or_else(|_| "Mount Unavailable".to_string());
54 |
55 | let volume_name = volume_name_from_mount(&mount_point).ok();
56 |
57 | if keep_disk_entry(&name, &mount_point, disk_filter, mount_filter) {
58 | let free_space = disk.available_space();
59 | let total_space = disk.total_space();
60 | let used_space = total_space - free_space;
61 |
62 | Some(DiskHarvest {
63 | name,
64 | mount_point,
65 | volume_name,
66 | free_space: Some(free_space),
67 | used_space: Some(used_space),
68 | total_space: Some(total_space),
69 | })
70 | } else {
71 | None
72 | }
73 | })
74 | .collect())
75 | }
76 |
--------------------------------------------------------------------------------
/src/collection/error.rs:
--------------------------------------------------------------------------------
1 | use anyhow::anyhow;
2 |
/// An error to do with data collection.
///
/// See [`CollectionResult`] for the corresponding result alias.
#[derive(Debug)]
pub enum CollectionError {
    /// A general error to propagate back up. A wrapper around [`anyhow::Error`].
    General(anyhow::Error),

    /// The collection is unsupported.
    #[allow(
        dead_code,
        reason = "this is not used if everything is supported for the platform"
    )]
    Unsupported,
}
16 |
17 | impl std::fmt::Display for CollectionError {
18 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
19 | match self {
20 | CollectionError::General(err) => err.fmt(f),
21 | CollectionError::Unsupported => {
22 | write!(
23 | f,
24 | "bottom does not support this type of data collection for this platform."
25 | )
26 | }
27 | }
28 | }
29 | }
30 |
31 | impl std::error::Error for CollectionError {}
32 |
33 | /// A [`Result`] with the error type being a [`DataCollectionError`].
34 | pub(crate) type CollectionResult = Result;
35 |
36 | impl From for CollectionError {
37 | fn from(err: std::io::Error) -> Self {
38 | Self::General(err.into())
39 | }
40 | }
41 |
// Lets a static message be used directly as a collection error.
impl From<&'static str> for CollectionError {
    fn from(msg: &'static str) -> Self {
        Self::General(anyhow!(msg))
    }
}
47 |
--------------------------------------------------------------------------------
/src/collection/linux/utils.rs:
--------------------------------------------------------------------------------
1 | use std::{fs, path::Path};
2 |
/// Whether the temperature should *actually* be read during enumeration.
/// Will return false if the state is not D0/unknown, or if it does not support
/// `device/power_state`.
///
/// `path` is a path to the device itself (e.g. `/sys/class/hwmon/hwmon1/device`).
#[inline]
pub fn is_device_awake(device: &Path) -> bool {
    // Skip reading temperatures for devices in ACPI D3cold; a missing or
    // unreadable `power_state` file defaults to "awake".
    // Documented at https://www.kernel.org/doc/Documentation/ABI/testing/sysfs-devices-power_state
    let power_state = device.join("power_state");
    if !power_state.exists() {
        return true;
    }

    match fs::read_to_string(power_state) {
        Ok(state) => {
            // The zenpower3 kernel module (incorrectly?) reports "unknown",
            // causing this check to fail and temperatures to appear as zero
            // instead of having the file not exist.
            //
            // Their self-hosted git instance has disabled sign up, so this bug
            // can't be reported either.
            matches!(state.trim(), "D0" | "unknown")
        }
        Err(_) => true,
    }
}
31 |
--------------------------------------------------------------------------------
/src/collection/memory.rs:
--------------------------------------------------------------------------------
1 | //! Memory data collection.
2 |
3 | use std::num::NonZeroU64;
4 |
5 | #[cfg(not(target_os = "windows"))]
6 | pub(crate) use self::sysinfo::get_cache_usage;
7 | pub(crate) use self::sysinfo::{get_ram_usage, get_swap_usage};
8 |
9 | pub mod sysinfo;
10 |
11 | // cfg_if::cfg_if! {
12 | // if #[cfg(target_os = "windows")] {
13 | // mod windows;
14 | // pub(crate) use self::windows::get_committed_usage;
15 | // }
16 | // }
17 |
18 | #[cfg(feature = "zfs")]
19 | pub mod arc;
20 |
/// A memory usage reading: bytes used out of a non-zero total.
#[derive(Debug, Clone)]
pub struct MemData {
    /// Bytes currently in use.
    pub used_bytes: u64,
    /// Total bytes; non-zero so the percentage below is always well-defined.
    pub total_bytes: NonZeroU64,
}

impl MemData {
    /// Return the use percentage.
    #[inline]
    pub fn percentage(&self) -> f64 {
        (self.used_bytes as f64 / self.total_bytes.get() as f64) * 100.0
    }
}
37 |
--------------------------------------------------------------------------------
/src/collection/memory/arc.rs:
--------------------------------------------------------------------------------
1 | use super::MemData;
2 |
/// Return ARC usage.
///
/// On Linux this parses `size` and `c_max` out of
/// `/proc/spl/kstat/zfs/arcstats`; on FreeBSD it reads the
/// `kstat.zfs.misc.arcstats.*` sysctls. Returns `None` when the total is zero
/// or the stats are unavailable.
#[cfg(feature = "zfs")]
pub(crate) fn get_arc_usage() -> Option<MemData> {
    use std::num::NonZeroU64;

    let (mem_total, mem_used) = {
        cfg_if::cfg_if! {
            if #[cfg(target_os = "linux")] {
                // TODO: [OPT] is this efficient?
                use std::fs::read_to_string;
                if let Ok(arc_stats) = read_to_string("/proc/spl/kstat/zfs/arcstats") {
                    let mut mem_arc = 0;
                    let mut mem_total = 0;
                    let mut zfs_keys_read: u8 = 0;
                    const ZFS_KEYS_NEEDED: u8 = 2;

                    for line in arc_stats.lines() {
                        if let Some((label, value)) = line.split_once(' ') {
                            let to_write = match label {
                                "size" => &mut mem_arc,
                                "c_max" => &mut mem_total,
                                _ => {
                                    continue;
                                }
                            };

                            if let Some((_type, number)) = value.trim_start().rsplit_once(' ') {
                                // Parse the value, remember it's in bytes!
                                if let Ok(number) = number.parse::<u64>() {
                                    *to_write = number;
                                    // We only need a few keys, so we can bail early.
                                    zfs_keys_read += 1;
                                    if zfs_keys_read == ZFS_KEYS_NEEDED {
                                        break;
                                    }
                                }
                            }
                        }
                    }
                    (mem_total, mem_arc)
                } else {
                    (0, 0)
                }
            } else if #[cfg(target_os = "freebsd")] {
                use sysctl::Sysctl;
                if let (Ok(mem_arc_value), Ok(mem_sys_value)) = (
                    sysctl::Ctl::new("kstat.zfs.misc.arcstats.size"),
                    sysctl::Ctl::new("kstat.zfs.misc.arcstats.c_max"),
                ) {
                    if let (Ok(sysctl::CtlValue::U64(arc)), Ok(sysctl::CtlValue::Ulong(mem))) =
                        (mem_arc_value.value(), mem_sys_value.value())
                    {
                        (mem, arc)
                    } else {
                        (0, 0)
                    }
                } else {
                    (0, 0)
                }
            } else {
                (0, 0)
            }
        }
    };

    // A zero total means no usable ARC stats; report as absent.
    NonZeroU64::new(mem_total).map(|total_bytes| MemData {
        total_bytes,
        used_bytes: mem_used,
    })
}
73 |
--------------------------------------------------------------------------------
/src/collection/memory/sysinfo.rs:
--------------------------------------------------------------------------------
1 | //! Collecting memory data using sysinfo.
2 |
3 | use std::num::NonZeroU64;
4 |
5 | use sysinfo::System;
6 |
7 | use crate::collection::memory::MemData;
8 |
9 | #[inline]
10 | fn get_usage(used: u64, total: u64) -> Option {
11 | NonZeroU64::new(total).map(|total_bytes| MemData {
12 | total_bytes,
13 | used_bytes: used,
14 | })
15 | }
16 |
17 | /// Returns RAM usage.
18 | pub(crate) fn get_ram_usage(sys: &System) -> Option {
19 | get_usage(sys.used_memory(), sys.total_memory())
20 | }
21 |
22 | /// Returns SWAP usage.
23 | pub(crate) fn get_swap_usage(sys: &System) -> Option {
24 | get_usage(sys.used_swap(), sys.total_swap())
25 | }
26 |
27 | /// Returns cache usage. sysinfo has no way to do this directly but it should
28 | /// equal the difference between the available and free memory. Free memory is
29 | /// defined as memory not containing any data, which means cache and buffer
30 | /// memory are not "free". Available memory is defined as memory able
31 | /// to be allocated by processes, which includes cache and buffer memory. On
32 | /// Windows, this will always be 0. For more information, see [docs](https://docs.rs/sysinfo/latest/sysinfo/struct.System.html#method.available_memory)
33 | /// and [memory explanation](https://askubuntu.com/questions/867068/what-is-available-memory-while-using-free-command)
34 | #[cfg(not(target_os = "windows"))]
35 | pub(crate) fn get_cache_usage(sys: &System) -> Option {
36 | let mem_used = sys.available_memory().saturating_sub(sys.free_memory());
37 | let mem_total = sys.total_memory();
38 |
39 | get_usage(mem_used, mem_total)
40 | }
41 |
--------------------------------------------------------------------------------
/src/collection/memory/windows.rs:
--------------------------------------------------------------------------------
1 | use std::mem::{size_of, zeroed};
2 |
3 | use windows::Win32::System::ProcessStatus::{GetPerformanceInfo, PERFORMANCE_INFORMATION};
4 |
5 | use crate::collection::memory::MemHarvest;
6 |
7 | const PERFORMANCE_INFORMATION_SIZE: u32 = size_of::() as _;
8 |
9 | /// Get the committed memory usage.
10 | ///
11 | /// Code based on [sysinfo's](https://github.com/GuillaumeGomez/sysinfo/blob/6f8178495adcf3ca4696a9ec548586cf6a621bc8/src/windows/system.rs#L169).
12 | pub(crate) fn get_committed_usage() -> Option {
13 | // SAFETY: The safety invariant is that we only touch what's in `perf_info` if it succeeds, and that
14 | // the bindings are "safe" to use with how we call them.
15 | unsafe {
16 | let mut perf_info: PERFORMANCE_INFORMATION = zeroed();
17 | if GetPerformanceInfo(&mut perf_info, PERFORMANCE_INFORMATION_SIZE).is_ok() {
18 | let page_size = perf_info.PageSize;
19 |
20 | let committed_total = page_size.saturating_mul(perf_info.CommitLimit) as u64;
21 | let committed_used = page_size.saturating_mul(perf_info.CommitTotal) as u64;
22 |
23 | Some(MemHarvest {
24 | used_bytes: committed_used,
25 | total_bytes: committed_total,
26 | use_percent: Some(committed_used as f64 / committed_total as f64 * 100.0),
27 | })
28 | } else {
29 | None
30 | }
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/collection/network.rs:
--------------------------------------------------------------------------------
1 | //! Data collection for network usage/IO.
2 |
3 | pub mod sysinfo;
4 | pub use self::sysinfo::*;
5 |
/// All units in bits.
#[derive(Default, Clone, Debug)]
pub struct NetworkHarvest {
    /// Receive rate, in bits per second.
    pub rx: u64,
    /// Transmit rate, in bits per second.
    pub tx: u64,
    /// Cumulative bits received.
    pub total_rx: u64,
    /// Cumulative bits transmitted.
    pub total_tx: u64,
}

impl NetworkHarvest {
    /// Reset the per-second rates to zero; the cumulative totals are kept.
    pub fn first_run_cleanup(&mut self) {
        self.rx = 0;
        self.tx = 0;
    }
}
21 |
--------------------------------------------------------------------------------
/src/collection/network/sysinfo.rs:
--------------------------------------------------------------------------------
1 | //! Gets network data via sysinfo.
2 |
3 | use std::time::Instant;
4 |
5 | use sysinfo::Networks;
6 |
7 | use super::NetworkHarvest;
8 | use crate::app::filter::Filter;
9 |
10 | // TODO: Eventually make it so that this thing also takes individual usage into
11 | // account, so we can show per-interface!
12 | pub fn get_network_data(
13 | networks: &Networks, prev_net_access_time: Instant, prev_net_rx: &mut u64,
14 | prev_net_tx: &mut u64, curr_time: Instant, filter: &Option,
15 | ) -> NetworkHarvest {
16 | let mut total_rx: u64 = 0;
17 | let mut total_tx: u64 = 0;
18 |
19 | for (name, network) in networks {
20 | let to_keep = if let Some(filter) = filter {
21 | filter.should_keep(name)
22 | } else {
23 | true
24 | };
25 |
26 | if to_keep {
27 | total_rx += network.total_received() * 8;
28 | total_tx += network.total_transmitted() * 8;
29 | }
30 | }
31 |
32 | let elapsed_time = curr_time.duration_since(prev_net_access_time).as_secs_f64();
33 |
34 | let (rx, tx) = if elapsed_time == 0.0 {
35 | (0, 0)
36 | } else {
37 | (
38 | ((total_rx.saturating_sub(*prev_net_rx)) as f64 / elapsed_time) as u64,
39 | ((total_tx.saturating_sub(*prev_net_tx)) as f64 / elapsed_time) as u64,
40 | )
41 | };
42 |
43 | *prev_net_rx = total_rx;
44 | *prev_net_tx = total_tx;
45 | NetworkHarvest {
46 | rx,
47 | tx,
48 | total_rx,
49 | total_tx,
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/src/collection/processes/freebsd.rs:
--------------------------------------------------------------------------------
1 | //! Process data collection for FreeBSD. Uses sysinfo.
2 |
3 | use std::{io, process::Command};
4 |
5 | use hashbrown::HashMap;
6 | use serde::{Deserialize, Deserializer};
7 |
8 | use crate::collection::{Pid, deserialize_xo, processes::UnixProcessExt};
9 |
10 | #[derive(Deserialize, Debug, Default)]
11 | #[serde(rename_all = "kebab-case")]
12 | struct ProcessInformation {
13 | process: Vec,
14 | }
15 |
/// One row of `ps` output, i.e. one process.
#[derive(Deserialize, Debug)]
#[serde(rename_all = "kebab-case")]
struct ProcessRow {
    // The pid arrives as a string in the JSON; parse it via the custom deserializer.
    #[serde(deserialize_with = "pid")]
    pid: i32,
    // Likewise, percent-CPU arrives as a string.
    #[serde(deserialize_with = "percent_cpu")]
    percent_cpu: f64,
}
24 |
/// FreeBSD-specific process collection behavior, provided via its
/// `UnixProcessExt` implementation below.
pub(crate) struct FreeBSDProcessExt;
26 |
27 | impl UnixProcessExt for FreeBSDProcessExt {
28 | #[inline]
29 | fn has_backup_proc_cpu_fn() -> bool {
30 | true
31 | }
32 |
33 | fn backup_proc_cpu(pids: &[Pid]) -> io::Result> {
34 | if pids.is_empty() {
35 | return Ok(HashMap::new());
36 | }
37 |
38 | let output = Command::new("ps")
39 | .args(["--libxo", "json", "-o", "pid,pcpu", "-p"])
40 | .args(pids.iter().map(i32::to_string))
41 | .output()?;
42 |
43 | deserialize_xo("process-information", &output.stdout).map(
44 | |process_info: ProcessInformation| {
45 | process_info
46 | .process
47 | .into_iter()
48 | .map(|row| (row.pid, row.percent_cpu))
49 | .collect()
50 | },
51 | )
52 | }
53 | }
54 |
55 | fn pid<'de, D>(deserializer: D) -> Result
56 | where
57 | D: Deserializer<'de>,
58 | {
59 | let s = String::deserialize(deserializer)?;
60 | s.parse().map_err(serde::de::Error::custom)
61 | }
62 |
63 | fn percent_cpu<'de, D>(deserializer: D) -> Result
64 | where
65 | D: Deserializer<'de>,
66 | {
67 | let s = String::deserialize(deserializer)?;
68 | s.parse().map_err(serde::de::Error::custom)
69 | }
70 |
--------------------------------------------------------------------------------
/src/collection/processes/macos.rs:
--------------------------------------------------------------------------------
1 | //! Process data collection for macOS. Uses sysinfo and custom bindings.
2 |
3 | mod sysctl_bindings;
4 |
5 | use std::{io, process::Command};
6 |
7 | use hashbrown::HashMap;
8 | use itertools::Itertools;
9 |
10 | use super::UnixProcessExt;
11 | use crate::collection::Pid;
12 |
/// macOS-specific process collection behavior, provided via its
/// `UnixProcessExt` implementation below.
pub(crate) struct MacOSProcessExt;
14 |
15 | impl UnixProcessExt for MacOSProcessExt {
16 | #[inline]
17 | fn has_backup_proc_cpu_fn() -> bool {
18 | true
19 | }
20 |
21 | fn backup_proc_cpu(pids: &[Pid]) -> io::Result> {
22 | let output = Command::new("ps")
23 | .args(["-o", "pid=,pcpu=", "-p"])
24 | .arg(
25 | // Has to look like this since otherwise, it you hit a `unstable_name_collisions`
26 | // warning.
27 | Itertools::intersperse(pids.iter().map(i32::to_string), ",".to_string())
28 | .collect::(),
29 | )
30 | .output()?;
31 | let mut result = HashMap::new();
32 | String::from_utf8_lossy(&output.stdout)
33 | .split_whitespace()
34 | .chunks(2)
35 | .into_iter()
36 | .for_each(|chunk| {
37 | let chunk: Vec<&str> = chunk.collect();
38 | if chunk.len() != 2 {
39 | panic!("Unexpected 'ps' output");
40 | }
41 | let pid = chunk[0].parse();
42 | let usage = chunk[1].parse();
43 | if let (Ok(pid), Ok(usage)) = (pid, usage) {
44 | result.insert(pid, usage);
45 | }
46 | });
47 | Ok(result)
48 | }
49 |
50 | fn parent_pid(process_val: &sysinfo::Process) -> Option {
51 | process_val
52 | .parent()
53 | .map(|p| p.as_u32() as _)
54 | .or_else(|| fallback_macos_ppid(process_val.pid().as_u32() as _))
55 | }
56 | }
57 |
58 | fn fallback_macos_ppid(pid: Pid) -> Option {
59 | sysctl_bindings::kinfo_process(pid)
60 | .map(|kinfo| kinfo.kp_eproc.e_ppid)
61 | .ok()
62 | }
63 |
--------------------------------------------------------------------------------
/src/collection/processes/unix.rs:
--------------------------------------------------------------------------------
1 | //! Unix-specific parts of process collection.
2 |
3 | mod user_table;
4 |
5 | use cfg_if::cfg_if;
6 | pub use user_table::*;
7 |
8 | cfg_if! {
9 | if #[cfg(all(target_family = "unix", not(target_os = "linux")))] {
10 | mod process_ext;
11 | pub(crate) use process_ext::*;
12 |
13 | use super::ProcessHarvest;
14 |
15 | use crate::collection::{DataCollector, processes::*};
16 | use crate::collection::error::CollectionResult;
17 |
18 | pub fn sysinfo_process_data(collector: &mut DataCollector) -> CollectionResult> {
19 | let sys = &collector.sys.system;
20 | let use_current_cpu_total = collector.use_current_cpu_total;
21 | let unnormalized_cpu = collector.unnormalized_cpu;
22 | let total_memory = collector.total_memory();
23 | let user_table = &mut collector.user_table;
24 |
25 | cfg_if! {
26 | if #[cfg(target_os = "macos")] {
27 | MacOSProcessExt::sysinfo_process_data(sys, use_current_cpu_total, unnormalized_cpu, total_memory, user_table)
28 | } else if #[cfg(target_os = "freebsd")] {
29 | FreeBSDProcessExt::sysinfo_process_data(sys, use_current_cpu_total, unnormalized_cpu, total_memory, user_table)
30 | } else {
31 | GenericProcessExt::sysinfo_process_data(sys, use_current_cpu_total, unnormalized_cpu, total_memory, user_table)
32 | }
33 | }
34 | }
35 |
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/src/collection/processes/unix/user_table.rs:
--------------------------------------------------------------------------------
1 | use hashbrown::HashMap;
2 |
3 | use crate::collection::error::{CollectionError, CollectionResult};
4 |
5 | #[derive(Debug, Default)]
6 | pub struct UserTable {
7 | pub uid_user_mapping: HashMap,
8 | }
9 |
10 | impl UserTable {
11 | pub fn get_uid_to_username_mapping(&mut self, uid: libc::uid_t) -> CollectionResult {
12 | if let Some(user) = self.uid_user_mapping.get(&uid) {
13 | Ok(user.clone())
14 | } else {
15 | // SAFETY: getpwuid returns a null pointer if no passwd entry is found for the uid.
16 | let passwd = unsafe { libc::getpwuid(uid) };
17 |
18 | if passwd.is_null() {
19 | Err("passwd is inaccessible".into())
20 | } else {
21 | // SAFETY: We return early if passwd is null.
22 | let username = unsafe { std::ffi::CStr::from_ptr((*passwd).pw_name) }
23 | .to_str()
24 | .map_err(|err| CollectionError::General(err.into()))?
25 | .to_string();
26 | self.uid_user_mapping.insert(uid, username.clone());
27 |
28 | Ok(username)
29 | }
30 | }
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/src/collection/temperature.rs:
--------------------------------------------------------------------------------
1 | //! Data collection for temperature metrics.
2 | //!
3 | //! For Linux, this is handled by custom code.
4 | //! For everything else, this is handled by sysinfo.
5 |
6 | cfg_if::cfg_if! {
7 | if #[cfg(target_os = "linux")] {
8 | pub mod linux;
9 | pub use self::linux::*;
10 | } else if #[cfg(any(target_os = "freebsd", target_os = "macos", target_os = "windows", target_os = "android", target_os = "ios"))] {
11 | pub mod sysinfo;
12 | pub use self::sysinfo::*;
13 | }
14 | }
15 |
/// Data for a single temperature sensor.
#[derive(Default, Debug, Clone)]
pub struct TempSensorData {
    /// The name of the sensor.
    pub name: String,

    /// The temperature in Celsius, if a reading is available.
    pub temperature: Option<f32>,
}
24 |
--------------------------------------------------------------------------------
/src/collection/temperature/sysinfo.rs:
--------------------------------------------------------------------------------
1 | //! Gets temperature data via sysinfo.
2 |
3 | use anyhow::Result;
4 |
5 | use super::TempSensorData;
6 | use crate::app::filter::Filter;
7 |
8 | pub fn get_temperature_data(
9 | components: &sysinfo::Components, filter: &Option,
10 | ) -> Result