├── .github └── workflows │ └── CICD.yml ├── .gitignore ├── .pre-commit-config.yaml ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── build.rs ├── ci ├── before_deploy.ps1 ├── before_deploy.sh ├── how2publish.txt ├── install.sh └── script.sh ├── completions ├── _dust ├── _dust.ps1 ├── dust.bash ├── dust.elv └── dust.fish ├── config └── config.toml ├── man-page └── dust.1 ├── media └── snap.png ├── src ├── cli.rs ├── config.rs ├── dir_walker.rs ├── display.rs ├── display_node.rs ├── filter.rs ├── filter_type.rs ├── main.rs ├── node.rs ├── platform.rs ├── progress.rs └── utils.rs └── tests ├── test_dir └── many │ ├── a_file │ └── hello_file ├── test_dir2 ├── dir │ └── hello ├── dir_name_clash ├── dir_substring │ └── hello └── long_dir_name_what_a_very_long_dir_name_what_happens_when_this_goes_over_80_characters_i_wonder ├── test_dir_hidden_entries ├── .hidden_file └── .secret ├── test_dir_matching ├── andy │ └── dup_name │ │ └── hello └── dave │ └── dup_name │ └── hello ├── test_dir_unicode ├── ラウトは難しいです!.japan └── 👩.unicode ├── test_exact_output.rs ├── test_flags.rs ├── tests.rs └── tests_symlinks.rs /.github/workflows/CICD.yml: -------------------------------------------------------------------------------- 1 | name: CICD 2 | 3 | # spell-checker:ignore CICD CODECOV MSVC MacOS Peltoche SHAs buildable clippy esac fakeroot gnueabihf halium libssl mkdir musl popd printf pushd rustfmt softprops toolchain 4 | 5 | env: 6 | PROJECT_NAME: dust 7 | PROJECT_DESC: "du + rust = dust" 8 | PROJECT_AUTH: "bootandy" 9 | RUST_MIN_SRV: "1.31.0" 10 | 11 | on: [push, pull_request] 12 | 13 | jobs: 14 | style: 15 | name: Style 16 | runs-on: ${{ matrix.job.os }} 17 | strategy: 18 | fail-fast: false 19 | matrix: 20 | job: 21 | - { os: ubuntu-latest } 22 | - { os: macos-latest } 23 | - { os: windows-latest } 24 | steps: 25 | - uses: actions/checkout@v1 26 | - name: Initialize workflow variables 27 | id: vars 28 | shell: bash 29 | run: | 30 | # 'windows-latest' `cargo fmt` is 
bugged for this project (see reasons @ GH:rust-lang/rustfmt #3324, #3590, #3688 ; waiting for repair) 31 | JOB_DO_FORMAT_TESTING="true" 32 | case ${{ matrix.job.os }} in windows-latest) unset JOB_DO_FORMAT_TESTING ;; esac; 33 | echo set-output name=JOB_DO_FORMAT_TESTING::${JOB_DO_FORMAT_TESTING:-/false} 34 | echo ::set-output name=JOB_DO_FORMAT_TESTING::${JOB_DO_FORMAT_TESTING} 35 | # target-specific options 36 | # * CARGO_FEATURES_OPTION 37 | CARGO_FEATURES_OPTION='' ; 38 | if [ -n "${{ matrix.job.features }}" ]; then CARGO_FEATURES_OPTION='--features "${{ matrix.job.features }}"' ; fi 39 | echo set-output name=CARGO_FEATURES_OPTION::${CARGO_FEATURES_OPTION} 40 | echo ::set-output name=CARGO_FEATURES_OPTION::${CARGO_FEATURES_OPTION} 41 | - name: Install `rust` toolchain 42 | uses: actions-rs/toolchain@v1 43 | with: 44 | toolchain: stable 45 | override: true 46 | profile: minimal # minimal component installation (ie, no documentation) 47 | components: rustfmt, clippy 48 | - name: Install wget for Windows 49 | if: matrix.job.os == 'windows-latest' 50 | run: choco install wget --no-progress 51 | - name: typos-action 52 | uses: crate-ci/typos@v1.28.4 53 | - name: "`fmt` testing" 54 | if: steps.vars.outputs.JOB_DO_FORMAT_TESTING 55 | uses: actions-rs/cargo@v1 56 | with: 57 | command: fmt 58 | args: --all -- --check 59 | - name: "`clippy` testing" 60 | if: success() || failure() # run regardless of prior step ("`fmt` testing") success/failure 61 | uses: actions-rs/cargo@v1 62 | with: 63 | command: clippy 64 | args: ${{ matrix.job.cargo-options }} ${{ steps.vars.outputs.CARGO_FEATURES_OPTION }} -- -D warnings 65 | 66 | min_version: 67 | name: MinSRV # Minimum supported rust version 68 | runs-on: ubuntu-latest 69 | steps: 70 | - uses: actions/checkout@v1 71 | - name: Install `rust` toolchain (v${{ env.RUST_MIN_SRV }}) 72 | uses: actions-rs/toolchain@v1 73 | with: 74 | toolchain: ${{ env.RUST_MIN_SRV }} 75 | profile: minimal # minimal component installation (ie, no 
documentation) 76 | - name: Test 77 | uses: actions-rs/cargo@v1 78 | with: 79 | command: test 80 | 81 | build: 82 | name: Build 83 | runs-on: ${{ matrix.job.os }} 84 | strategy: 85 | fail-fast: false 86 | matrix: 87 | job: 88 | # { os, target, cargo-options, features, use-cross, toolchain } 89 | - { 90 | os: ubuntu-latest, 91 | target: aarch64-unknown-linux-gnu, 92 | use-cross: use-cross, 93 | } 94 | - { 95 | os: ubuntu-latest, 96 | target: aarch64-unknown-linux-musl, 97 | use-cross: use-cross, 98 | } 99 | - { 100 | os: ubuntu-latest, 101 | target: arm-unknown-linux-gnueabihf, 102 | use-cross: use-cross, 103 | } 104 | - { 105 | os: ubuntu-latest, 106 | target: arm-unknown-linux-musleabi, 107 | use-cross: use-cross, 108 | } 109 | - { 110 | os: ubuntu-latest, 111 | target: i686-unknown-linux-gnu, 112 | use-cross: use-cross, 113 | } 114 | - { 115 | os: ubuntu-latest, 116 | target: i686-unknown-linux-musl, 117 | use-cross: use-cross, 118 | } 119 | - { 120 | os: ubuntu-latest, 121 | target: x86_64-unknown-linux-gnu, 122 | use-cross: use-cross, 123 | } 124 | - { 125 | os: ubuntu-latest, 126 | target: x86_64-unknown-linux-musl, 127 | use-cross: use-cross, 128 | } 129 | - { os: macos-latest, target: x86_64-apple-darwin } 130 | - { os: windows-latest, target: i686-pc-windows-gnu } 131 | - { os: windows-latest, target: i686-pc-windows-msvc } 132 | - { os: windows-latest, target: x86_64-pc-windows-gnu } ## !maint: [rivy; 2020-01-21] may break due to rust bug; follow possible solution from GH:rust-lang/rust#47048 (refs: GH:rust-lang/rust#47048 , GH:rust-lang/rust#53454 , GH:bike-barn/hermit#172 ) 133 | - { os: windows-latest, target: x86_64-pc-windows-msvc } 134 | steps: 135 | - uses: actions/checkout@v1 136 | - name: Install any prerequisites 137 | shell: bash 138 | run: | 139 | case ${{ matrix.job.target }} in 140 | arm-unknown-linux-gnueabihf) sudo apt-get -y update ; sudo apt-get -y install gcc-arm-linux-gnueabihf ;; 141 | aarch64-unknown-linux-gnu) sudo apt-get -y update 
; sudo apt-get -y install binutils-aarch64-linux-gnu ;; 142 | esac 143 | - name: Initialize workflow variables 144 | id: vars 145 | shell: bash 146 | run: | 147 | # toolchain 148 | TOOLCHAIN="stable" ## default to "stable" toolchain 149 | # * specify alternate TOOLCHAIN for *-pc-windows-gnu targets; gnu targets on Windows are broken for the standard *-pc-windows-msvc toolchain (refs: , , ) 150 | case ${{ matrix.job.target }} in *-pc-windows-gnu) TOOLCHAIN="stable-${{ matrix.job.target }}" ;; esac; 151 | # * use requested TOOLCHAIN if specified 152 | if [ -n "${{ matrix.job.toolchain }}" ]; then TOOLCHAIN="${{ matrix.job.toolchain }}" ; fi 153 | echo set-output name=TOOLCHAIN::${TOOLCHAIN} 154 | echo ::set-output name=TOOLCHAIN::${TOOLCHAIN} 155 | # staging directory 156 | STAGING='_staging' 157 | echo set-output name=STAGING::${STAGING} 158 | echo ::set-output name=STAGING::${STAGING} 159 | # determine EXE suffix 160 | EXE_suffix="" ; case ${{ matrix.job.target }} in *-pc-windows-*) EXE_suffix=".exe" ;; esac; 161 | echo set-output name=EXE_suffix::${EXE_suffix} 162 | echo ::set-output name=EXE_suffix::${EXE_suffix} 163 | # parse commit reference info 164 | REF_NAME=${GITHUB_REF#refs/*/} 165 | unset REF_BRANCH ; case ${GITHUB_REF} in refs/heads/*) REF_BRANCH=${GITHUB_REF#refs/heads/} ;; esac; 166 | unset REF_TAG ; case ${GITHUB_REF} in refs/tags/*) REF_TAG=${GITHUB_REF#refs/tags/} ;; esac; 167 | REF_SHAS=${GITHUB_SHA:0:8} 168 | echo set-output name=REF_NAME::${REF_NAME} 169 | echo set-output name=REF_BRANCH::${REF_BRANCH} 170 | echo set-output name=REF_TAG::${REF_TAG} 171 | echo set-output name=REF_SHAS::${REF_SHAS} 172 | echo ::set-output name=REF_NAME::${REF_NAME} 173 | echo ::set-output name=REF_BRANCH::${REF_BRANCH} 174 | echo ::set-output name=REF_TAG::${REF_TAG} 175 | echo ::set-output name=REF_SHAS::${REF_SHAS} 176 | # parse target 177 | unset TARGET_ARCH ; case ${{ matrix.job.target }} in arm-unknown-linux-gnueabihf) TARGET_ARCH=arm ;; aarch-*) 
TARGET_ARCH=aarch64 ;; i686-*) TARGET_ARCH=i686 ;; x86_64-*) TARGET_ARCH=x86_64 ;; esac; 178 | echo set-output name=TARGET_ARCH::${TARGET_ARCH} 179 | echo ::set-output name=TARGET_ARCH::${TARGET_ARCH} 180 | unset TARGET_OS ; case ${{ matrix.job.target }} in *-linux-*) TARGET_OS=linux ;; *-apple-*) TARGET_OS=macos ;; *-windows-*) TARGET_OS=windows ;; esac; 181 | echo set-output name=TARGET_OS::${TARGET_OS} 182 | echo ::set-output name=TARGET_OS::${TARGET_OS} 183 | # package name 184 | PKG_suffix=".tar.gz" ; case ${{ matrix.job.target }} in *-pc-windows-*) PKG_suffix=".zip" ;; esac; 185 | PKG_BASENAME=${PROJECT_NAME}-${REF_TAG:-$REF_SHAS}-${{ matrix.job.target }} 186 | PKG_NAME=${PKG_BASENAME}${PKG_suffix} 187 | echo set-output name=PKG_suffix::${PKG_suffix} 188 | echo set-output name=PKG_BASENAME::${PKG_BASENAME} 189 | echo set-output name=PKG_NAME::${PKG_NAME} 190 | echo ::set-output name=PKG_suffix::${PKG_suffix} 191 | echo ::set-output name=PKG_BASENAME::${PKG_BASENAME} 192 | echo ::set-output name=PKG_NAME::${PKG_NAME} 193 | # deployable tag? 
(ie, leading "vM" or "M"; M == version number) 194 | unset DEPLOY ; if [[ $REF_TAG =~ ^[vV]?[0-9].* ]]; then DEPLOY='true' ; fi 195 | echo set-output name=DEPLOY::${DEPLOY:-/false} 196 | echo ::set-output name=DEPLOY::${DEPLOY} 197 | # target-specific options 198 | # * CARGO_FEATURES_OPTION 199 | CARGO_FEATURES_OPTION='' ; 200 | if [ -n "${{ matrix.job.features }}" ]; then CARGO_FEATURES_OPTION='--features "${{ matrix.job.features }}"' ; fi 201 | echo set-output name=CARGO_FEATURES_OPTION::${CARGO_FEATURES_OPTION} 202 | echo ::set-output name=CARGO_FEATURES_OPTION::${CARGO_FEATURES_OPTION} 203 | # * CARGO_USE_CROSS (truthy) 204 | CARGO_USE_CROSS='true' ; case '${{ matrix.job.use-cross }}' in ''|0|f|false|n|no) unset CARGO_USE_CROSS ;; esac; 205 | echo set-output name=CARGO_USE_CROSS::${CARGO_USE_CROSS:-/false} 206 | echo ::set-output name=CARGO_USE_CROSS::${CARGO_USE_CROSS} 207 | # # * `arm` cannot be tested on ubuntu-* hosts (b/c testing is currently primarily done via comparison of target outputs with built-in outputs and the `arm` target is not executable on the host) 208 | JOB_DO_TESTING="true" 209 | case ${{ matrix.job.target }} in arm-*|aarch64-*) unset JOB_DO_TESTING ;; esac; 210 | echo set-output name=JOB_DO_TESTING::${JOB_DO_TESTING:-/false} 211 | echo ::set-output name=JOB_DO_TESTING::${JOB_DO_TESTING} 212 | # # * test only binary for arm-type targets 213 | unset CARGO_TEST_OPTIONS 214 | unset CARGO_TEST_OPTIONS ; case ${{ matrix.job.target }} in arm-*|aarch64-*) CARGO_TEST_OPTIONS="--bin ${PROJECT_NAME}" ;; esac; 215 | echo set-output name=CARGO_TEST_OPTIONS::${CARGO_TEST_OPTIONS} 216 | echo ::set-output name=CARGO_TEST_OPTIONS::${CARGO_TEST_OPTIONS} 217 | # * strip executable? 
218 | STRIP="strip" ; case ${{ matrix.job.target }} in arm-unknown-linux-gnueabihf) STRIP="arm-linux-gnueabihf-strip" ;; *-pc-windows-msvc) STRIP="" ;; aarch64-unknown-linux-gnu) STRIP="aarch64-linux-gnu-strip" ;; aarch64-unknown-linux-musl) STRIP="" ;; armv7-unknown-linux-musleabi) STRIP="" ;; arm-unknown-linux-musleabi) STRIP="" ;; esac; 219 | 220 | 221 | echo set-output name=STRIP::${STRIP} 222 | echo ::set-output name=STRIP::${STRIP} 223 | - name: Create all needed build/work directories 224 | shell: bash 225 | run: | 226 | mkdir -p '${{ steps.vars.outputs.STAGING }}' 227 | mkdir -p '${{ steps.vars.outputs.STAGING }}/${{ steps.vars.outputs.PKG_BASENAME }}' 228 | - name: rust toolchain ~ install 229 | uses: actions-rs/toolchain@v1 230 | with: 231 | toolchain: ${{ steps.vars.outputs.TOOLCHAIN }} 232 | target: ${{ matrix.job.target }} 233 | override: true 234 | profile: minimal # minimal component installation (ie, no documentation) 235 | - name: Info 236 | shell: bash 237 | run: | 238 | gcc --version || true 239 | rustup -V 240 | rustup toolchain list 241 | rustup default 242 | cargo -V 243 | rustc -V 244 | - name: Build 245 | uses: actions-rs/cargo@v1 246 | with: 247 | use-cross: ${{ steps.vars.outputs.CARGO_USE_CROSS }} 248 | command: build 249 | args: --release --target=${{ matrix.job.target }} ${{ matrix.job.cargo-options }} ${{ steps.vars.outputs.CARGO_FEATURES_OPTION }} 250 | - name: Install cargo-deb 251 | uses: actions-rs/cargo@v1 252 | with: 253 | command: install 254 | args: cargo-deb 255 | if: matrix.job.target == 'i686-unknown-linux-musl' || matrix.job.target == 'x86_64-unknown-linux-musl' 256 | - name: Build deb 257 | uses: actions-rs/cargo@v1 258 | with: 259 | command: deb 260 | args: --no-build --target=${{ matrix.job.target }} 261 | if: matrix.job.target == 'i686-unknown-linux-musl' || matrix.job.target == 'x86_64-unknown-linux-musl' 262 | - name: Test 263 | uses: actions-rs/cargo@v1 264 | with: 265 | use-cross: ${{ 
steps.vars.outputs.CARGO_USE_CROSS }} 266 | command: test 267 | args: --target=${{ matrix.job.target }} ${{ steps.vars.outputs.CARGO_TEST_OPTIONS}} ${{ matrix.job.cargo-options }} ${{ steps.vars.outputs.CARGO_FEATURES_OPTION }} 268 | - name: Archive executable artifacts 269 | uses: actions/upload-artifact@master 270 | with: 271 | name: ${{ env.PROJECT_NAME }}-${{ matrix.job.target }} 272 | path: target/${{ matrix.job.target }}/release/${{ env.PROJECT_NAME }}${{ steps.vars.outputs.EXE_suffix }} 273 | - name: Archive deb artifacts 274 | uses: actions/upload-artifact@master 275 | with: 276 | name: ${{ env.PROJECT_NAME }}-${{ matrix.job.target }}.deb 277 | path: target/${{ matrix.job.target }}/debian 278 | if: matrix.job.target == 'i686-unknown-linux-musl' || matrix.job.target == 'x86_64-unknown-linux-musl' 279 | - name: Package 280 | shell: bash 281 | run: | 282 | # binary 283 | cp 'target/${{ matrix.job.target }}/release/${{ env.PROJECT_NAME }}${{ steps.vars.outputs.EXE_suffix }}' '${{ steps.vars.outputs.STAGING }}/${{ steps.vars.outputs.PKG_BASENAME }}/' 284 | # `strip` binary (if needed) 285 | if [ -n "${{ steps.vars.outputs.STRIP }}" ]; then "${{ steps.vars.outputs.STRIP }}" '${{ steps.vars.outputs.STAGING }}/${{ steps.vars.outputs.PKG_BASENAME }}/${{ env.PROJECT_NAME }}${{ steps.vars.outputs.EXE_suffix }}' ; fi 286 | # README and LICENSE 287 | cp README.md '${{ steps.vars.outputs.STAGING }}/${{ steps.vars.outputs.PKG_BASENAME }}/' 288 | cp LICENSE '${{ steps.vars.outputs.STAGING }}/${{ steps.vars.outputs.PKG_BASENAME }}/' 289 | # base compressed package 290 | pushd '${{ steps.vars.outputs.STAGING }}/' >/dev/null 291 | case ${{ matrix.job.target }} in 292 | *-pc-windows-*) 7z -y a '${{ steps.vars.outputs.PKG_NAME }}' '${{ steps.vars.outputs.PKG_BASENAME }}'/* | tail -2 ;; 293 | *) tar czf '${{ steps.vars.outputs.PKG_NAME }}' '${{ steps.vars.outputs.PKG_BASENAME }}'/* ;; 294 | esac; 295 | popd >/dev/null 296 | - name: Publish 297 | uses: 
softprops/action-gh-release@v1 298 | if: steps.vars.outputs.DEPLOY 299 | with: 300 | files: | 301 | ${{ steps.vars.outputs.STAGING }}/${{ steps.vars.outputs.PKG_NAME }} 302 | target/${{ matrix.job.target }}/debian/*.deb 303 | 304 | env: 305 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 306 | 307 | ## fix! [rivy; 2020-01-22] `cargo tarpaulin` is unable to test this repo at the moment; alternate recipe or another testing framework? 308 | # coverage: 309 | # name: Code Coverage 310 | # runs-on: ${{ matrix.job.os }} 311 | # strategy: 312 | # fail-fast: true 313 | # matrix: 314 | # # job: [ { os: ubuntu-latest }, { os: macos-latest }, { os: windows-latest } ] 315 | # job: [ { os: ubuntu-latest } ] ## cargo-tarpaulin is currently only available on linux 316 | # steps: 317 | # - uses: actions/checkout@v1 318 | # # - name: Reattach HEAD ## may be needed for accurate code coverage info 319 | # # run: git checkout ${{ github.head_ref }} 320 | # - name: Initialize workflow variables 321 | # id: vars 322 | # shell: bash 323 | # run: | 324 | # # staging directory 325 | # STAGING='_staging' 326 | # echo set-output name=STAGING::${STAGING} 327 | # echo ::set-output name=STAGING::${STAGING} 328 | # # check for CODECOV_TOKEN availability (work-around for inaccessible 'secrets' object for 'if'; see ) 329 | # unset HAS_CODECOV_TOKEN 330 | # if [ -n $CODECOV_TOKEN ]; then HAS_CODECOV_TOKEN='true' ; fi 331 | # echo set-output name=HAS_CODECOV_TOKEN::${HAS_CODECOV_TOKEN} 332 | # echo ::set-output name=HAS_CODECOV_TOKEN::${HAS_CODECOV_TOKEN} 333 | # env: 334 | # CODECOV_TOKEN: "${{ secrets.CODECOV_TOKEN }}" 335 | # - name: Create all needed build/work directories 336 | # shell: bash 337 | # run: | 338 | # mkdir -p '${{ steps.vars.outputs.STAGING }}/work' 339 | # - name: Install required packages 340 | # run: | 341 | # sudo apt-get -y install libssl-dev 342 | # pushd '${{ steps.vars.outputs.STAGING }}/work' >/dev/null 343 | # wget --no-verbose 
https://github.com/xd009642/tarpaulin/releases/download/0.9.3/cargo-tarpaulin-0.9.3-travis.tar.gz 344 | # tar xf cargo-tarpaulin-0.9.3-travis.tar.gz 345 | # cp cargo-tarpaulin "$(dirname -- "$(which cargo)")"/ 346 | # popd >/dev/null 347 | # - name: Generate coverage 348 | # run: | 349 | # cargo tarpaulin --out Xml 350 | # - name: Upload coverage results (CodeCov.io) 351 | # # CODECOV_TOKEN (aka, "Repository Upload Token" for REPO from CodeCov.io) ## set via REPO/Settings/Secrets 352 | # # if: secrets.CODECOV_TOKEN (not supported {yet?}; see ) 353 | # if: steps.vars.outputs.HAS_CODECOV_TOKEN 354 | # run: | 355 | # # CodeCov.io 356 | # cargo tarpaulin --out Xml 357 | # bash <(curl -s https://codecov.io/bash) 358 | # env: 359 | # CODECOV_TOKEN: "${{ secrets.CODECOV_TOKEN }}" 360 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # These are backup files generated by rustfmt 6 | **/*.rs.bk 7 | *.swp 8 | .vscode/* 9 | *.idea/* 10 | 11 | #ignore macos files 12 | .DS_Store -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/doublify/pre-commit-rust 3 | rev: v1.0 4 | hooks: 5 | - id: cargo-check 6 | stages: [commit] 7 | - id: fmt 8 | stages: [commit] 9 | - id: clippy 10 | args: [--all-targets, --all-features] 11 | stages: [commit] 12 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "du-dust" 3 | description = "A more intuitive version of du" 4 | version = "1.2.1" 5 | authors = ["bootandy ", "nebkor "] 6 | edition = "2024" 7 | readme 
= "README.md" 8 | 9 | documentation = "https://github.com/bootandy/dust" 10 | homepage = "https://github.com/bootandy/dust" 11 | repository = "https://github.com/bootandy/dust" 12 | 13 | keywords = ["du", "command-line", "disk", "disk-usage"] 14 | categories = ["command-line-utilities"] 15 | license = "Apache-2.0" 16 | 17 | [badges] 18 | travis-ci = { repository = "https://travis-ci.org/bootandy/dust" } 19 | 20 | [[bin]] 21 | name = "dust" 22 | path = "src/main.rs" 23 | 24 | [profile.release] 25 | codegen-units = 1 26 | lto = true 27 | strip = true 28 | 29 | [dependencies] 30 | ansi_term = "0.12" 31 | clap = { version = "4.4", features = ["derive"] } 32 | lscolors = "0.13" 33 | terminal_size = "0.2" 34 | unicode-width = "0.1" 35 | rayon = "1" 36 | thousands = "0.2" 37 | stfu8 = "0.2" 38 | regex = "1" 39 | config-file = "0.2" 40 | serde = { version = "1.0", features = ["derive"] } 41 | serde_json = "1.0" 42 | directories = "4" 43 | sysinfo = "0.27" 44 | ctrlc = "3.4" 45 | chrono = "0.4" 46 | 47 | [target.'cfg(not(target_has_atomic = "64"))'.dependencies] 48 | portable-atomic = "1.4" 49 | 50 | [target.'cfg(windows)'.dependencies] 51 | winapi-util = "0.1" 52 | filesize = "0.2.0" 53 | 54 | [dev-dependencies] 55 | assert_cmd = "2" 56 | tempfile = "=3" 57 | 58 | [build-dependencies] 59 | clap = { version = "4.4", features = ["derive"] } 60 | clap_complete = "4.4" 61 | clap_mangen = "0.2" 62 | 63 | [[test]] 64 | name = "integration" 65 | path = "tests/tests.rs" 66 | 67 | [package.metadata.binstall] 68 | pkg-url = "{ repo }/releases/download/v{ version }/dust-v{ version }-{ target }{ archive-suffix }" 69 | bin-dir = "dust-v{ version }-{ target }/{ bin }{ binary-ext }" 70 | 71 | [package.metadata.deb] 72 | section = "utils" 73 | assets = [ 74 | [ 75 | "target/release/dust", 76 | "usr/bin/", 77 | "755", 78 | ], 79 | [ 80 | "LICENSE", 81 | "usr/share/doc/du-dust/", 82 | "644", 83 | ], 84 | [ 85 | "README.md", 86 | "usr/share/doc/du-dust/README", 87 | "644", 88 | ], 89 | [ 90 
| "man-page/dust.1", 91 | "usr/share/man/man1/dust.1", 92 | "644", 93 | ], 94 | [ 95 | "completions/dust.bash", 96 | "usr/share/bash-completion/completions/dust", 97 | "644", 98 | ], 99 | ] 100 | extended-description = """\ 101 | Dust is meant to give you an instant overview of which directories are using 102 | disk space without requiring sort or head. Dust will print a maximum of one 103 | 'Did not have permissions message'. 104 | """ 105 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [2023] [andrew boot] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://github.com/bootandy/dust/actions/workflows/CICD.yml/badge.svg)](https://github.com/bootandy/dust/actions) 2 | 3 | 4 | # Dust 5 | 6 | du + rust = dust. Like du but more intuitive. 7 | 8 | # Why 9 | 10 | Because I want an easy way to see where my disk is being used. 11 | 12 | # Demo 13 | 14 | ![Example](media/snap.png) 15 | 16 | ## Install 17 | 18 | #### Cargo Packaging status 19 | 20 | - `cargo install du-dust` 21 | 22 | #### 🍺 Homebrew (Mac OS) 23 | 24 | - `brew install dust` 25 | 26 | #### 🍺 Homebrew (Linux) 27 | 28 | - `brew install dust` 29 | 30 | #### [Snap](https://ubuntu.com/core/services/guide/snaps-intro) Ubuntu and [supported systems](https://snapcraft.io/docs/installing-snapd) 31 | 32 | - `snap install dust` 33 | 34 | Note: `dust` installed through `snap` can only access files stored in the `/home` directory. See daniejstriata/dust-snap#2 for more information. 
35 | 36 | #### [Pacstall](https://github.com/pacstall/pacstall) (Debian/Ubuntu) 37 | 38 | - `pacstall -I dust-bin` 39 | 40 | #### Anaconda (conda-forge) 41 | 42 | - `conda install -c conda-forge dust` 43 | 44 | #### [deb-get](https://github.com/wimpysworld/deb-get) (Debian/Ubuntu) 45 | 46 | - `deb-get install du-dust` 47 | 48 | #### [x-cmd](https://www.x-cmd.com/pkg/#VPContent) 49 | 50 | - `x env use dust` 51 | 52 | #### Windows: 53 | 54 | - `scoop install dust` 55 | - Windows GNU version - works 56 | - Windows MSVC - requires: [VCRUNTIME140.dll](https://docs.microsoft.com/en-gb/cpp/windows/latest-supported-vc-redist?view=msvc-170) 57 | 58 | #### Download 59 | 60 | - Download Linux/Mac binary from [Releases](https://github.com/bootandy/dust/releases) 61 | - unzip file: `tar -xvf _downloaded_file.tar.gz` 62 | - move file to executable path: `sudo mv dust /usr/local/bin/` 63 | 64 | ## Overview 65 | 66 | Dust is meant to give you an instant overview of which directories are using disk space without requiring sort or head. Dust will print a maximum of one 'Did not have permissions message'. 67 | 68 | Dust will list a slightly-less-than-the-terminal-height number of the biggest subdirectories or files and will smartly recurse down the tree to find the larger ones. There is no need for a '-d' flag or a '-h' flag. The largest subdirectories will be colored. 69 | 70 | The different colors on the bars: These represent the combined tree hierarchy & disk usage. The shades of grey are used to indicate which parent folder a subfolder belongs to. For instance, look at the above screenshot. `.steam` is a folder taking 44% of the space. From the `.steam` bar is a light grey line that goes up. All these folders are inside `.steam` so if you delete `.steam` all that stuff will be gone too. 
71 | 72 | ## Usage 73 | 74 | ``` 75 | Usage: dust 76 | Usage: dust 77 | Usage: dust 78 | Usage: dust -p (full-path - Show fullpath of the subdirectories) 79 | Usage: dust -s (apparent-size - shows the length of the file as opposed to the amount of disk space it uses) 80 | Usage: dust -n 30 (Shows 30 directories instead of the default [default is terminal height]) 81 | Usage: dust -d 3 (Shows 3 levels of subdirectories) 82 | Usage: dust -D (Show only directories (eg dust -D)) 83 | Usage: dust -F (Show only files - finds your largest files) 84 | Usage: dust -r (reverse order of output) 85 | Usage: dust -o si/b/kb/kib/mb/mib/gb/gib (si - prints sizes in powers of 1000. Others print size in that format). 86 | Usage: dust -X ignore (ignore all files and directories with the name 'ignore') 87 | Usage: dust -x (Only show directories on the same filesystem) 88 | Usage: dust -b (Do not show percentages or draw ASCII bars) 89 | Usage: dust -B (--bars-on-right - Percent bars moved to right side of screen) 90 | Usage: dust -i (Do not show hidden files) 91 | Usage: dust -c (No colors [monochrome]) 92 | Usage: dust -C (Force colors) 93 | Usage: dust -f (Count files instead of diskspace) 94 | Usage: dust -t (Group by filetype) 95 | Usage: dust -z 10M (min-size, Only include files larger than 10M) 96 | Usage: dust -e regex (Only include files matching this regex (eg dust -e "\.png$" would match png files)) 97 | Usage: dust -v regex (Exclude files matching this regex (eg dust -v "\.png$" would ignore png files)) 98 | Usage: dust -L (dereference-links - Treat sym links as directories and go into them) 99 | Usage: dust -P (Disable the progress indicator) 100 | Usage: dust -R (For screen readers. Removes bars/symbols. Adds new column: depth level. 
(May want to use -p for full path too)) 101 | Usage: dust -S (Custom Stack size - Use if you see: 'fatal runtime error: stack overflow' (default allocation: low memory=1048576, high memory=1073741824)"), 102 | Usage: dust --skip-total (No total row will be displayed) 103 | Usage: dust -z 40000/30MB/20kib (Exclude output files/directories below size 40000 bytes / 30MB / 20KiB) 104 | Usage: dust -j (Prints JSON representation of directories, try: dust -j | jq) 105 | Usage: dust --files0-from=FILE (Reads null-terminated file paths from FILE); If FILE is - then read from stdin 106 | Usage: dust --collapse=node-modules will keep the node-modules folder collapsed in display instead of recursively opening it 107 | ``` 108 | 109 | ## Config file 110 | 111 | Dust has a config file where the above options can be set. 112 | Either: `~/.config/dust/config.toml` or `~/.dust.toml` 113 | ``` 114 | $ cat ~/.config/dust/config.toml 115 | reverse=true 116 | ``` 117 | 118 | ## Alternatives 119 | 120 | - [NCDU](https://dev.yorhel.nl/ncdu) 121 | - [dutree](https://github.com/nachoparker/dutree) 122 | - [dua](https://github.com/Byron/dua-cli/) 123 | - [pdu](https://github.com/KSXGitHub/parallel-disk-usage) 124 | - [dirstat-rs](https://github.com/scullionw/dirstat-rs) 125 | - du -d 1 -h | sort -h 126 | 127 | Note: Apparent-size is calculated slightly differently in dust to gdu. In dust each hard link is counted as using file_length space. In gdu only the first entry is counted. 
128 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | use clap::CommandFactory; 2 | use clap_complete::{generate_to, shells::*}; 3 | use clap_mangen::Man; 4 | use std::fs::File; 5 | use std::io::Error; 6 | use std::path::Path; 7 | 8 | include!("src/cli.rs"); 9 | 10 | fn main() -> Result<(), Error> { 11 | let outdir = "completions"; 12 | let app_name = "dust"; 13 | let mut cmd = Cli::command(); 14 | 15 | generate_to(Bash, &mut cmd, app_name, outdir)?; 16 | generate_to(Zsh, &mut cmd, app_name, outdir)?; 17 | generate_to(Fish, &mut cmd, app_name, outdir)?; 18 | generate_to(PowerShell, &mut cmd, app_name, outdir)?; 19 | generate_to(Elvish, &mut cmd, app_name, outdir)?; 20 | 21 | let file = Path::new("man-page").join("dust.1"); 22 | std::fs::create_dir_all("man-page")?; 23 | let mut file = File::create(file)?; 24 | 25 | Man::new(cmd).render(&mut file)?; 26 | 27 | Ok(()) 28 | } 29 | -------------------------------------------------------------------------------- /ci/before_deploy.ps1: -------------------------------------------------------------------------------- 1 | # This script takes care of packaging the build artifacts that will go in the 2 | # release zipfile 3 | 4 | $SRC_DIR = $PWD.Path 5 | $STAGE = [System.Guid]::NewGuid().ToString() 6 | 7 | Set-Location $ENV:Temp 8 | New-Item -Type Directory -Name $STAGE 9 | Set-Location $STAGE 10 | 11 | $ZIP = "$SRC_DIR\$($Env:CRATE_NAME)-$($Env:APPVEYOR_REPO_TAG_NAME)-$($Env:TARGET).zip" 12 | 13 | # TODO Update this to package the right artifacts 14 | Copy-Item "$SRC_DIR\target\$($Env:TARGET)\release\dust" '.\' 15 | 16 | 7z a "$ZIP" * 17 | 18 | Push-AppveyorArtifact "$ZIP" 19 | 20 | Remove-Item *.* -Force 21 | Set-Location .. 
22 | Remove-Item $STAGE 23 | Set-Location $SRC_DIR 24 | -------------------------------------------------------------------------------- /ci/before_deploy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This script takes care of building your crate and packaging it for release 3 | 4 | set -ex 5 | 6 | main() { 7 | local src=$(pwd) \ 8 | stage= 9 | 10 | case $TRAVIS_OS_NAME in 11 | linux) 12 | stage=$(mktemp -d) 13 | ;; 14 | osx) 15 | stage=$(mktemp -d -t tmp) 16 | ;; 17 | esac 18 | 19 | test -f Cargo.lock || cargo generate-lockfile 20 | 21 | # TODO Update this to build the artifacts that matter to you 22 | cross rustc --bin dust --target $TARGET --release -- -C lto 23 | 24 | # TODO Update this to package the right artifacts 25 | cp target/$TARGET/release/dust $stage/ 26 | 27 | cd $stage 28 | tar czf $src/$CRATE_NAME-$TRAVIS_TAG-$TARGET.tar.gz * 29 | cd $src 30 | 31 | rm -rf $stage 32 | } 33 | 34 | main 35 | -------------------------------------------------------------------------------- /ci/how2publish.txt: -------------------------------------------------------------------------------- 1 | # ----------- To do a release --------- 2 | 3 | # ----------- Pre release --------- 4 | # Compare times of runs to check no drastic slow down: 5 | # hyperfine 'target/release/dust /home/andy' 6 | # hyperfine 'dust /home/andy' 7 | 8 | # ----------- Release --------- 9 | # inc version in cargo.toml 10 | # cargo build --release 11 | # commit changed files 12 | # merge to master in github 13 | 14 | # tag a commit and push (increment version in Cargo.toml first): 15 | # git tag v0.4.5 16 | # git push origin v0.4.5 17 | 18 | # cargo publish to put it in crates.io 19 | 20 | # Optional: To install locally 21 | #cargo install --path . 
22 | -------------------------------------------------------------------------------- /ci/install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -ex 3 | 4 | main() { 5 | local target= 6 | if [ $TRAVIS_OS_NAME = linux ]; then 7 | target=x86_64-unknown-linux-musl 8 | sort=sort 9 | else 10 | target=x86_64-apple-darwin 11 | sort=gsort # for `sort --sort-version`, from brew's coreutils. 12 | fi 13 | 14 | # This fetches latest stable release 15 | local tag=$(git ls-remote --tags --refs --exit-code https://github.com/japaric/cross \ 16 | | cut -d/ -f3 \ 17 | | grep -E '^v[0.1.0-9.]+$' \ 18 | | $sort --version-sort \ 19 | | tail -n1) 20 | curl -LSfs https://japaric.github.io/trust/install.sh | \ 21 | sh -s -- \ 22 | --force \ 23 | --git japaric/cross \ 24 | --tag $tag \ 25 | --target $target 26 | } 27 | 28 | main 29 | -------------------------------------------------------------------------------- /ci/script.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This script takes care of testing your crate 3 | 4 | set -ex 5 | 6 | # TODO This is the "test phase", tweak it as you see fit 7 | main() { 8 | cross build --target $TARGET 9 | cross build --target $TARGET --release 10 | 11 | if [ ! 
-z $DISABLE_TESTS ]; then 12 | return 13 | fi 14 | 15 | cross test --target $TARGET 16 | cross test --target $TARGET --release 17 | 18 | cross run --target $TARGET 19 | cross run --target $TARGET --release 20 | } 21 | 22 | # we don't run the "test phase" when doing deploys 23 | if [ -z $TRAVIS_TAG ]; then 24 | main 25 | fi 26 | -------------------------------------------------------------------------------- /completions/_dust: -------------------------------------------------------------------------------- 1 | #compdef dust 2 | 3 | autoload -U is-at-least 4 | 5 | _dust() { 6 | typeset -A opt_args 7 | typeset -a _arguments_options 8 | local ret=1 9 | 10 | if is-at-least 5.2; then 11 | _arguments_options=(-s -S -C) 12 | else 13 | _arguments_options=(-s -C) 14 | fi 15 | 16 | local context curcontext="$curcontext" state line 17 | _arguments "${_arguments_options[@]}" : \ 18 | '-d+[Depth to show]:DEPTH:_default' \ 19 | '--depth=[Depth to show]:DEPTH:_default' \ 20 | '-T+[Number of threads to use]:THREADS:_default' \ 21 | '--threads=[Number of threads to use]:THREADS:_default' \ 22 | '--config=[Specify a config file to use]:FILE:_files' \ 23 | '-n+[Number of lines of output to show. (Default is terminal_height - 10)]:NUMBER:_default' \ 24 | '--number-of-lines=[Number of lines of output to show. 
(Default is terminal_height - 10)]:NUMBER:_default' \ 25 | '*-X+[Exclude any file or directory with this path]:PATH:_files' \ 26 | '*--ignore-directory=[Exclude any file or directory with this path]:PATH:_files' \ 27 | '-I+[Exclude any file or directory with a regex matching that listed in this file, the file entries will be added to the ignore regexs provided by --invert_filter]:FILE:_files' \ 28 | '--ignore-all-in-file=[Exclude any file or directory with a regex matching that listed in this file, the file entries will be added to the ignore regexs provided by --invert_filter]:FILE:_files' \ 29 | '-z+[Minimum size file to include in output]:MIN_SIZE:_default' \ 30 | '--min-size=[Minimum size file to include in output]:MIN_SIZE:_default' \ 31 | '(-e --filter -t --file-types)*-v+[Exclude filepaths matching this regex. To ignore png files type\: -v "\\.png\$"]:REGEX:_default' \ 32 | '(-e --filter -t --file-types)*--invert-filter=[Exclude filepaths matching this regex. To ignore png files type\: -v "\\.png\$"]:REGEX:_default' \ 33 | '(-t --file-types)*-e+[Only include filepaths matching this regex. For png files type\: -e "\\.png\$"]:REGEX:_default' \ 34 | '(-t --file-types)*--filter=[Only include filepaths matching this regex. For png files type\: -e "\\.png\$"]:REGEX:_default' \ 35 | '-w+[Specify width of output overriding the auto detection of terminal width]:WIDTH:_default' \ 36 | '--terminal-width=[Specify width of output overriding the auto detection of terminal width]:WIDTH:_default' \ 37 | '-o+[Changes output display size. si will print sizes in powers of 1000. b k m g t kb mb gb tb will print the whole tree in that size]:FORMAT:((si\:"SI prefix (powers of 1000)" 38 | b\:"byte (B)" 39 | k\:"kibibyte (KiB)" 40 | m\:"mebibyte (MiB)" 41 | g\:"gibibyte (GiB)" 42 | t\:"tebibyte (TiB)" 43 | kb\:"kilobyte (kB)" 44 | mb\:"megabyte (MB)" 45 | gb\:"gigabyte (GB)" 46 | tb\:"terabyte (TB)"))' \ 47 | '--output-format=[Changes output display size. 
si will print sizes in powers of 1000. b k m g t kb mb gb tb will print the whole tree in that size]:FORMAT:((si\:"SI prefix (powers of 1000)" 48 | b\:"byte (B)" 49 | k\:"kibibyte (KiB)" 50 | m\:"mebibyte (MiB)" 51 | g\:"gibibyte (GiB)" 52 | t\:"tebibyte (TiB)" 53 | kb\:"kilobyte (kB)" 54 | mb\:"megabyte (MB)" 55 | gb\:"gigabyte (GB)" 56 | tb\:"terabyte (TB)"))' \ 57 | '-S+[Specify memory to use as stack size - use if you see\: '\''fatal runtime error\: stack overflow'\'' (default low memory=1048576, high memory=1073741824)]:STACK_SIZE:_default' \ 58 | '--stack-size=[Specify memory to use as stack size - use if you see\: '\''fatal runtime error\: stack overflow'\'' (default low memory=1048576, high memory=1073741824)]:STACK_SIZE:_default' \ 59 | '-M+[+/-n matches files modified more/less than n days ago , and n matches files modified exactly n days ago, days are rounded down.That is +n => (−∞, curr−(n+1)), n => \[curr−(n+1), curr−n), and -n => (𝑐𝑢𝑟𝑟−𝑛, +∞)]:MTIME:_default' \ 60 | '--mtime=[+/-n matches files modified more/less than n days ago , and n matches files modified exactly n days ago, days are rounded down.That is +n => (−∞, curr−(n+1)), n => \[curr−(n+1), curr−n), and -n => (𝑐𝑢𝑟𝑟−𝑛, +∞)]:MTIME:_default' \ 61 | '-A+[just like -mtime, but based on file access time]:ATIME:_default' \ 62 | '--atime=[just like -mtime, but based on file access time]:ATIME:_default' \ 63 | '-y+[just like -mtime, but based on file change time]:CTIME:_default' \ 64 | '--ctime=[just like -mtime, but based on file change time]:CTIME:_default' \ 65 | '--files0-from=[run dust on NUL-terminated file names specified in file; if argument is -, then read names from standard input]:FILES0_FROM:_files' \ 66 | '*--collapse=[Keep these directories collapsed]:COLLAPSE:_files' \ 67 | '-m+[Directory '\''size'\'' is max filetime of child files instead of disk size. 
while a/c/m for last accessed/changed/modified time]:FILETIME:((a\:"last accessed time" 68 | c\:"last changed time" 69 | m\:"last modified time"))' \ 70 | '--filetime=[Directory '\''size'\'' is max filetime of child files instead of disk size. while a/c/m for last accessed/changed/modified time]:FILETIME:((a\:"last accessed time" 71 | c\:"last changed time" 72 | m\:"last modified time"))' \ 73 | '-p[Subdirectories will not have their path shortened]' \ 74 | '--full-paths[Subdirectories will not have their path shortened]' \ 75 | '-L[dereference sym links - Treat sym links as directories and go into them]' \ 76 | '--dereference-links[dereference sym links - Treat sym links as directories and go into them]' \ 77 | '-x[Only count the files and directories on the same filesystem as the supplied directory]' \ 78 | '--limit-filesystem[Only count the files and directories on the same filesystem as the supplied directory]' \ 79 | '-s[Use file length instead of blocks]' \ 80 | '--apparent-size[Use file length instead of blocks]' \ 81 | '-r[Print tree upside down (biggest highest)]' \ 82 | '--reverse[Print tree upside down (biggest highest)]' \ 83 | '-c[No colors will be printed (Useful for commands like\: watch)]' \ 84 | '--no-colors[No colors will be printed (Useful for commands like\: watch)]' \ 85 | '-C[Force colors print]' \ 86 | '--force-colors[Force colors print]' \ 87 | '-b[No percent bars or percentages will be displayed]' \ 88 | '--no-percent-bars[No percent bars or percentages will be displayed]' \ 89 | '-B[percent bars moved to right side of screen]' \ 90 | '--bars-on-right[percent bars moved to right side of screen]' \ 91 | '-R[For screen readers. Removes bars. Adds new column\: depth level (May want to use -p too for full path)]' \ 92 | '--screen-reader[For screen readers. Removes bars. 
Adds new column\: depth level (May want to use -p too for full path)]' \ 93 | '--skip-total[No total row will be displayed]' \ 94 | '-f[Directory '\''size'\'' is number of child files instead of disk size]' \ 95 | '--filecount[Directory '\''size'\'' is number of child files instead of disk size]' \ 96 | '-i[Do not display hidden files]' \ 97 | '--ignore-hidden[Do not display hidden files]' \ 98 | '(-d --depth -D --only-dir)-t[show only these file types]' \ 99 | '(-d --depth -D --only-dir)--file-types[show only these file types]' \ 100 | '-P[Disable the progress indication]' \ 101 | '--no-progress[Disable the progress indication]' \ 102 | '--print-errors[Print path with errors]' \ 103 | '(-F --only-file -t --file-types)-D[Only directories will be displayed]' \ 104 | '(-F --only-file -t --file-types)--only-dir[Only directories will be displayed]' \ 105 | '(-D --only-dir)-F[Only files will be displayed. (Finds your largest files)]' \ 106 | '(-D --only-dir)--only-file[Only files will be displayed. 
(Finds your largest files)]' \ 107 | '-j[Output the directory tree as json to the current directory]' \ 108 | '--output-json[Output the directory tree as json to the current directory]' \ 109 | '-h[Print help (see more with '\''--help'\'')]' \ 110 | '--help[Print help (see more with '\''--help'\'')]' \ 111 | '-V[Print version]' \ 112 | '--version[Print version]' \ 113 | '*::params -- Input files or directories:_files' \ 114 | && ret=0 115 | } 116 | 117 | (( $+functions[_dust_commands] )) || 118 | _dust_commands() { 119 | local commands; commands=() 120 | _describe -t commands 'dust commands' commands "$@" 121 | } 122 | 123 | if [ "$funcstack[1]" = "_dust" ]; then 124 | _dust "$@" 125 | else 126 | compdef _dust dust 127 | fi 128 | -------------------------------------------------------------------------------- /completions/_dust.ps1: -------------------------------------------------------------------------------- 1 | 2 | using namespace System.Management.Automation 3 | using namespace System.Management.Automation.Language 4 | 5 | Register-ArgumentCompleter -Native -CommandName 'dust' -ScriptBlock { 6 | param($wordToComplete, $commandAst, $cursorPosition) 7 | 8 | $commandElements = $commandAst.CommandElements 9 | $command = @( 10 | 'dust' 11 | for ($i = 1; $i -lt $commandElements.Count; $i++) { 12 | $element = $commandElements[$i] 13 | if ($element -isnot [StringConstantExpressionAst] -or 14 | $element.StringConstantType -ne [StringConstantType]::BareWord -or 15 | $element.Value.StartsWith('-') -or 16 | $element.Value -eq $wordToComplete) { 17 | break 18 | } 19 | $element.Value 20 | }) -join ';' 21 | 22 | $completions = @(switch ($command) { 23 | 'dust' { 24 | [CompletionResult]::new('-d', '-d', [CompletionResultType]::ParameterName, 'Depth to show') 25 | [CompletionResult]::new('--depth', '--depth', [CompletionResultType]::ParameterName, 'Depth to show') 26 | [CompletionResult]::new('-T', '-T ', [CompletionResultType]::ParameterName, 'Number of threads to use') 27 | 
[CompletionResult]::new('--threads', '--threads', [CompletionResultType]::ParameterName, 'Number of threads to use') 28 | [CompletionResult]::new('--config', '--config', [CompletionResultType]::ParameterName, 'Specify a config file to use') 29 | [CompletionResult]::new('-n', '-n', [CompletionResultType]::ParameterName, 'Number of lines of output to show. (Default is terminal_height - 10)') 30 | [CompletionResult]::new('--number-of-lines', '--number-of-lines', [CompletionResultType]::ParameterName, 'Number of lines of output to show. (Default is terminal_height - 10)') 31 | [CompletionResult]::new('-X', '-X ', [CompletionResultType]::ParameterName, 'Exclude any file or directory with this path') 32 | [CompletionResult]::new('--ignore-directory', '--ignore-directory', [CompletionResultType]::ParameterName, 'Exclude any file or directory with this path') 33 | [CompletionResult]::new('-I', '-I ', [CompletionResultType]::ParameterName, 'Exclude any file or directory with a regex matching that listed in this file, the file entries will be added to the ignore regexs provided by --invert_filter') 34 | [CompletionResult]::new('--ignore-all-in-file', '--ignore-all-in-file', [CompletionResultType]::ParameterName, 'Exclude any file or directory with a regex matching that listed in this file, the file entries will be added to the ignore regexs provided by --invert_filter') 35 | [CompletionResult]::new('-z', '-z', [CompletionResultType]::ParameterName, 'Minimum size file to include in output') 36 | [CompletionResult]::new('--min-size', '--min-size', [CompletionResultType]::ParameterName, 'Minimum size file to include in output') 37 | [CompletionResult]::new('-v', '-v', [CompletionResultType]::ParameterName, 'Exclude filepaths matching this regex. To ignore png files type: -v "\.png$"') 38 | [CompletionResult]::new('--invert-filter', '--invert-filter', [CompletionResultType]::ParameterName, 'Exclude filepaths matching this regex. 
To ignore png files type: -v "\.png$"') 39 | [CompletionResult]::new('-e', '-e', [CompletionResultType]::ParameterName, 'Only include filepaths matching this regex. For png files type: -e "\.png$"') 40 | [CompletionResult]::new('--filter', '--filter', [CompletionResultType]::ParameterName, 'Only include filepaths matching this regex. For png files type: -e "\.png$"') 41 | [CompletionResult]::new('-w', '-w', [CompletionResultType]::ParameterName, 'Specify width of output overriding the auto detection of terminal width') 42 | [CompletionResult]::new('--terminal-width', '--terminal-width', [CompletionResultType]::ParameterName, 'Specify width of output overriding the auto detection of terminal width') 43 | [CompletionResult]::new('-o', '-o', [CompletionResultType]::ParameterName, 'Changes output display size. si will print sizes in powers of 1000. b k m g t kb mb gb tb will print the whole tree in that size') 44 | [CompletionResult]::new('--output-format', '--output-format', [CompletionResultType]::ParameterName, 'Changes output display size. si will print sizes in powers of 1000. 
b k m g t kb mb gb tb will print the whole tree in that size') 45 | [CompletionResult]::new('-S', '-S ', [CompletionResultType]::ParameterName, 'Specify memory to use as stack size - use if you see: ''fatal runtime error: stack overflow'' (default low memory=1048576, high memory=1073741824)') 46 | [CompletionResult]::new('--stack-size', '--stack-size', [CompletionResultType]::ParameterName, 'Specify memory to use as stack size - use if you see: ''fatal runtime error: stack overflow'' (default low memory=1048576, high memory=1073741824)') 47 | [CompletionResult]::new('-M', '-M ', [CompletionResultType]::ParameterName, '+/-n matches files modified more/less than n days ago , and n matches files modified exactly n days ago, days are rounded down.That is +n => (−∞, curr−(n+1)), n => [curr−(n+1), curr−n), and -n => (𝑐𝑢𝑟𝑟−𝑛, +∞)') 48 | [CompletionResult]::new('--mtime', '--mtime', [CompletionResultType]::ParameterName, '+/-n matches files modified more/less than n days ago , and n matches files modified exactly n days ago, days are rounded down.That is +n => (−∞, curr−(n+1)), n => [curr−(n+1), curr−n), and -n => (𝑐𝑢𝑟𝑟−𝑛, +∞)') 49 | [CompletionResult]::new('-A', '-A ', [CompletionResultType]::ParameterName, 'just like -mtime, but based on file access time') 50 | [CompletionResult]::new('--atime', '--atime', [CompletionResultType]::ParameterName, 'just like -mtime, but based on file access time') 51 | [CompletionResult]::new('-y', '-y', [CompletionResultType]::ParameterName, 'just like -mtime, but based on file change time') 52 | [CompletionResult]::new('--ctime', '--ctime', [CompletionResultType]::ParameterName, 'just like -mtime, but based on file change time') 53 | [CompletionResult]::new('--files0-from', '--files0-from', [CompletionResultType]::ParameterName, 'run dust on NUL-terminated file names specified in file; if argument is -, then read names from standard input') 54 | [CompletionResult]::new('--collapse', '--collapse', [CompletionResultType]::ParameterName, 
'Keep these directories collapsed') 55 | [CompletionResult]::new('-m', '-m', [CompletionResultType]::ParameterName, 'Directory ''size'' is max filetime of child files instead of disk size. while a/c/m for last accessed/changed/modified time') 56 | [CompletionResult]::new('--filetime', '--filetime', [CompletionResultType]::ParameterName, 'Directory ''size'' is max filetime of child files instead of disk size. while a/c/m for last accessed/changed/modified time') 57 | [CompletionResult]::new('-p', '-p', [CompletionResultType]::ParameterName, 'Subdirectories will not have their path shortened') 58 | [CompletionResult]::new('--full-paths', '--full-paths', [CompletionResultType]::ParameterName, 'Subdirectories will not have their path shortened') 59 | [CompletionResult]::new('-L', '-L ', [CompletionResultType]::ParameterName, 'dereference sym links - Treat sym links as directories and go into them') 60 | [CompletionResult]::new('--dereference-links', '--dereference-links', [CompletionResultType]::ParameterName, 'dereference sym links - Treat sym links as directories and go into them') 61 | [CompletionResult]::new('-x', '-x', [CompletionResultType]::ParameterName, 'Only count the files and directories on the same filesystem as the supplied directory') 62 | [CompletionResult]::new('--limit-filesystem', '--limit-filesystem', [CompletionResultType]::ParameterName, 'Only count the files and directories on the same filesystem as the supplied directory') 63 | [CompletionResult]::new('-s', '-s', [CompletionResultType]::ParameterName, 'Use file length instead of blocks') 64 | [CompletionResult]::new('--apparent-size', '--apparent-size', [CompletionResultType]::ParameterName, 'Use file length instead of blocks') 65 | [CompletionResult]::new('-r', '-r', [CompletionResultType]::ParameterName, 'Print tree upside down (biggest highest)') 66 | [CompletionResult]::new('--reverse', '--reverse', [CompletionResultType]::ParameterName, 'Print tree upside down (biggest highest)') 67 | 
[CompletionResult]::new('-c', '-c', [CompletionResultType]::ParameterName, 'No colors will be printed (Useful for commands like: watch)') 68 | [CompletionResult]::new('--no-colors', '--no-colors', [CompletionResultType]::ParameterName, 'No colors will be printed (Useful for commands like: watch)') 69 | [CompletionResult]::new('-C', '-C ', [CompletionResultType]::ParameterName, 'Force colors print') 70 | [CompletionResult]::new('--force-colors', '--force-colors', [CompletionResultType]::ParameterName, 'Force colors print') 71 | [CompletionResult]::new('-b', '-b', [CompletionResultType]::ParameterName, 'No percent bars or percentages will be displayed') 72 | [CompletionResult]::new('--no-percent-bars', '--no-percent-bars', [CompletionResultType]::ParameterName, 'No percent bars or percentages will be displayed') 73 | [CompletionResult]::new('-B', '-B ', [CompletionResultType]::ParameterName, 'percent bars moved to right side of screen') 74 | [CompletionResult]::new('--bars-on-right', '--bars-on-right', [CompletionResultType]::ParameterName, 'percent bars moved to right side of screen') 75 | [CompletionResult]::new('-R', '-R ', [CompletionResultType]::ParameterName, 'For screen readers. Removes bars. Adds new column: depth level (May want to use -p too for full path)') 76 | [CompletionResult]::new('--screen-reader', '--screen-reader', [CompletionResultType]::ParameterName, 'For screen readers. Removes bars. 
Adds new column: depth level (May want to use -p too for full path)') 77 | [CompletionResult]::new('--skip-total', '--skip-total', [CompletionResultType]::ParameterName, 'No total row will be displayed') 78 | [CompletionResult]::new('-f', '-f', [CompletionResultType]::ParameterName, 'Directory ''size'' is number of child files instead of disk size') 79 | [CompletionResult]::new('--filecount', '--filecount', [CompletionResultType]::ParameterName, 'Directory ''size'' is number of child files instead of disk size') 80 | [CompletionResult]::new('-i', '-i', [CompletionResultType]::ParameterName, 'Do not display hidden files') 81 | [CompletionResult]::new('--ignore-hidden', '--ignore-hidden', [CompletionResultType]::ParameterName, 'Do not display hidden files') 82 | [CompletionResult]::new('-t', '-t', [CompletionResultType]::ParameterName, 'show only these file types') 83 | [CompletionResult]::new('--file-types', '--file-types', [CompletionResultType]::ParameterName, 'show only these file types') 84 | [CompletionResult]::new('-P', '-P ', [CompletionResultType]::ParameterName, 'Disable the progress indication') 85 | [CompletionResult]::new('--no-progress', '--no-progress', [CompletionResultType]::ParameterName, 'Disable the progress indication') 86 | [CompletionResult]::new('--print-errors', '--print-errors', [CompletionResultType]::ParameterName, 'Print path with errors') 87 | [CompletionResult]::new('-D', '-D ', [CompletionResultType]::ParameterName, 'Only directories will be displayed') 88 | [CompletionResult]::new('--only-dir', '--only-dir', [CompletionResultType]::ParameterName, 'Only directories will be displayed') 89 | [CompletionResult]::new('-F', '-F ', [CompletionResultType]::ParameterName, 'Only files will be displayed. (Finds your largest files)') 90 | [CompletionResult]::new('--only-file', '--only-file', [CompletionResultType]::ParameterName, 'Only files will be displayed. 
(Finds your largest files)') 91 | [CompletionResult]::new('-j', '-j', [CompletionResultType]::ParameterName, 'Output the directory tree as json to the current directory') 92 | [CompletionResult]::new('--output-json', '--output-json', [CompletionResultType]::ParameterName, 'Output the directory tree as json to the current directory') 93 | [CompletionResult]::new('-h', '-h', [CompletionResultType]::ParameterName, 'Print help (see more with ''--help'')') 94 | [CompletionResult]::new('--help', '--help', [CompletionResultType]::ParameterName, 'Print help (see more with ''--help'')') 95 | [CompletionResult]::new('-V', '-V ', [CompletionResultType]::ParameterName, 'Print version') 96 | [CompletionResult]::new('--version', '--version', [CompletionResultType]::ParameterName, 'Print version') 97 | break 98 | } 99 | }) 100 | 101 | $completions.Where{ $_.CompletionText -like "$wordToComplete*" } | 102 | Sort-Object -Property ListItemText 103 | } 104 | -------------------------------------------------------------------------------- /completions/dust.bash: -------------------------------------------------------------------------------- 1 | _dust() { 2 | local i cur prev opts cmd 3 | COMPREPLY=() 4 | cur="${COMP_WORDS[COMP_CWORD]}" 5 | prev="${COMP_WORDS[COMP_CWORD-1]}" 6 | cmd="" 7 | opts="" 8 | 9 | for i in ${COMP_WORDS[@]} 10 | do 11 | case "${cmd},${i}" in 12 | ",$1") 13 | cmd="dust" 14 | ;; 15 | *) 16 | ;; 17 | esac 18 | done 19 | 20 | case "${cmd}" in 21 | dust) 22 | opts="-d -T -n -p -X -I -L -x -s -r -c -C -b -B -z -R -f -i -v -e -t -w -P -D -F -o -S -j -M -A -y -m -h -V --depth --threads --config --number-of-lines --full-paths --ignore-directory --ignore-all-in-file --dereference-links --limit-filesystem --apparent-size --reverse --no-colors --force-colors --no-percent-bars --bars-on-right --min-size --screen-reader --skip-total --filecount --ignore-hidden --invert-filter --filter --file-types --terminal-width --no-progress --print-errors --only-dir --only-file 
--output-format --stack-size --output-json --mtime --atime --ctime --files0-from --collapse --filetime --help --version [PATH]..." 23 | if [[ ${cur} == -* || ${COMP_CWORD} -eq 1 ]] ; then 24 | COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) 25 | return 0 26 | fi 27 | case "${prev}" in 28 | --depth) 29 | COMPREPLY=($(compgen -f "${cur}")) 30 | return 0 31 | ;; 32 | -d) 33 | COMPREPLY=($(compgen -f "${cur}")) 34 | return 0 35 | ;; 36 | --threads) 37 | COMPREPLY=($(compgen -f "${cur}")) 38 | return 0 39 | ;; 40 | -T) 41 | COMPREPLY=($(compgen -f "${cur}")) 42 | return 0 43 | ;; 44 | --config) 45 | local oldifs 46 | if [ -n "${IFS+x}" ]; then 47 | oldifs="$IFS" 48 | fi 49 | IFS=$'\n' 50 | COMPREPLY=($(compgen -f "${cur}")) 51 | if [ -n "${oldifs+x}" ]; then 52 | IFS="$oldifs" 53 | fi 54 | if [[ "${BASH_VERSINFO[0]}" -ge 4 ]]; then 55 | compopt -o filenames 56 | fi 57 | return 0 58 | ;; 59 | --number-of-lines) 60 | COMPREPLY=($(compgen -f "${cur}")) 61 | return 0 62 | ;; 63 | -n) 64 | COMPREPLY=($(compgen -f "${cur}")) 65 | return 0 66 | ;; 67 | --ignore-directory) 68 | COMPREPLY=($(compgen -f "${cur}")) 69 | return 0 70 | ;; 71 | -X) 72 | COMPREPLY=($(compgen -f "${cur}")) 73 | return 0 74 | ;; 75 | --ignore-all-in-file) 76 | local oldifs 77 | if [ -n "${IFS+x}" ]; then 78 | oldifs="$IFS" 79 | fi 80 | IFS=$'\n' 81 | COMPREPLY=($(compgen -f "${cur}")) 82 | if [ -n "${oldifs+x}" ]; then 83 | IFS="$oldifs" 84 | fi 85 | if [[ "${BASH_VERSINFO[0]}" -ge 4 ]]; then 86 | compopt -o filenames 87 | fi 88 | return 0 89 | ;; 90 | -I) 91 | local oldifs 92 | if [ -n "${IFS+x}" ]; then 93 | oldifs="$IFS" 94 | fi 95 | IFS=$'\n' 96 | COMPREPLY=($(compgen -f "${cur}")) 97 | if [ -n "${oldifs+x}" ]; then 98 | IFS="$oldifs" 99 | fi 100 | if [[ "${BASH_VERSINFO[0]}" -ge 4 ]]; then 101 | compopt -o filenames 102 | fi 103 | return 0 104 | ;; 105 | --min-size) 106 | COMPREPLY=($(compgen -f "${cur}")) 107 | return 0 108 | ;; 109 | -z) 110 | COMPREPLY=($(compgen -f "${cur}")) 111 | return 0 
112 | ;; 113 | --invert-filter) 114 | COMPREPLY=($(compgen -f "${cur}")) 115 | return 0 116 | ;; 117 | -v) 118 | COMPREPLY=($(compgen -f "${cur}")) 119 | return 0 120 | ;; 121 | --filter) 122 | COMPREPLY=($(compgen -f "${cur}")) 123 | return 0 124 | ;; 125 | -e) 126 | COMPREPLY=($(compgen -f "${cur}")) 127 | return 0 128 | ;; 129 | --terminal-width) 130 | COMPREPLY=($(compgen -f "${cur}")) 131 | return 0 132 | ;; 133 | -w) 134 | COMPREPLY=($(compgen -f "${cur}")) 135 | return 0 136 | ;; 137 | --output-format) 138 | COMPREPLY=($(compgen -W "si b k m g t kb mb gb tb" -- "${cur}")) 139 | return 0 140 | ;; 141 | -o) 142 | COMPREPLY=($(compgen -W "si b k m g t kb mb gb tb" -- "${cur}")) 143 | return 0 144 | ;; 145 | --stack-size) 146 | COMPREPLY=($(compgen -f "${cur}")) 147 | return 0 148 | ;; 149 | -S) 150 | COMPREPLY=($(compgen -f "${cur}")) 151 | return 0 152 | ;; 153 | --mtime) 154 | COMPREPLY=($(compgen -f "${cur}")) 155 | return 0 156 | ;; 157 | -M) 158 | COMPREPLY=($(compgen -f "${cur}")) 159 | return 0 160 | ;; 161 | --atime) 162 | COMPREPLY=($(compgen -f "${cur}")) 163 | return 0 164 | ;; 165 | -A) 166 | COMPREPLY=($(compgen -f "${cur}")) 167 | return 0 168 | ;; 169 | --ctime) 170 | COMPREPLY=($(compgen -f "${cur}")) 171 | return 0 172 | ;; 173 | -y) 174 | COMPREPLY=($(compgen -f "${cur}")) 175 | return 0 176 | ;; 177 | --files0-from) 178 | COMPREPLY=($(compgen -f "${cur}")) 179 | return 0 180 | ;; 181 | --collapse) 182 | COMPREPLY=($(compgen -f "${cur}")) 183 | return 0 184 | ;; 185 | --filetime) 186 | COMPREPLY=($(compgen -W "a c m" -- "${cur}")) 187 | return 0 188 | ;; 189 | -m) 190 | COMPREPLY=($(compgen -W "a c m" -- "${cur}")) 191 | return 0 192 | ;; 193 | *) 194 | COMPREPLY=() 195 | ;; 196 | esac 197 | COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) 198 | return 0 199 | ;; 200 | esac 201 | } 202 | 203 | if [[ "${BASH_VERSINFO[0]}" -eq 4 && "${BASH_VERSINFO[1]}" -ge 4 || "${BASH_VERSINFO[0]}" -gt 4 ]]; then 204 | complete -F _dust -o nosort -o 
bashdefault -o default dust 205 | else 206 | complete -F _dust -o bashdefault -o default dust 207 | fi 208 | -------------------------------------------------------------------------------- /completions/dust.elv: -------------------------------------------------------------------------------- 1 | 2 | use builtin; 3 | use str; 4 | 5 | set edit:completion:arg-completer[dust] = {|@words| 6 | fn spaces {|n| 7 | builtin:repeat $n ' ' | str:join '' 8 | } 9 | fn cand {|text desc| 10 | edit:complex-candidate $text &display=$text' '(spaces (- 14 (wcswidth $text)))$desc 11 | } 12 | var command = 'dust' 13 | for word $words[1..-1] { 14 | if (str:has-prefix $word '-') { 15 | break 16 | } 17 | set command = $command';'$word 18 | } 19 | var completions = [ 20 | &'dust'= { 21 | cand -d 'Depth to show' 22 | cand --depth 'Depth to show' 23 | cand -T 'Number of threads to use' 24 | cand --threads 'Number of threads to use' 25 | cand --config 'Specify a config file to use' 26 | cand -n 'Number of lines of output to show. (Default is terminal_height - 10)' 27 | cand --number-of-lines 'Number of lines of output to show. (Default is terminal_height - 10)' 28 | cand -X 'Exclude any file or directory with this path' 29 | cand --ignore-directory 'Exclude any file or directory with this path' 30 | cand -I 'Exclude any file or directory with a regex matching that listed in this file, the file entries will be added to the ignore regexs provided by --invert_filter' 31 | cand --ignore-all-in-file 'Exclude any file or directory with a regex matching that listed in this file, the file entries will be added to the ignore regexs provided by --invert_filter' 32 | cand -z 'Minimum size file to include in output' 33 | cand --min-size 'Minimum size file to include in output' 34 | cand -v 'Exclude filepaths matching this regex. To ignore png files type: -v "\.png$"' 35 | cand --invert-filter 'Exclude filepaths matching this regex. 
To ignore png files type: -v "\.png$"' 36 | cand -e 'Only include filepaths matching this regex. For png files type: -e "\.png$"' 37 | cand --filter 'Only include filepaths matching this regex. For png files type: -e "\.png$"' 38 | cand -w 'Specify width of output overriding the auto detection of terminal width' 39 | cand --terminal-width 'Specify width of output overriding the auto detection of terminal width' 40 | cand -o 'Changes output display size. si will print sizes in powers of 1000. b k m g t kb mb gb tb will print the whole tree in that size' 41 | cand --output-format 'Changes output display size. si will print sizes in powers of 1000. b k m g t kb mb gb tb will print the whole tree in that size' 42 | cand -S 'Specify memory to use as stack size - use if you see: ''fatal runtime error: stack overflow'' (default low memory=1048576, high memory=1073741824)' 43 | cand --stack-size 'Specify memory to use as stack size - use if you see: ''fatal runtime error: stack overflow'' (default low memory=1048576, high memory=1073741824)' 44 | cand -M '+/-n matches files modified more/less than n days ago , and n matches files modified exactly n days ago, days are rounded down.That is +n => (−∞, curr−(n+1)), n => [curr−(n+1), curr−n), and -n => (𝑐𝑢𝑟𝑟−𝑛, +∞)' 45 | cand --mtime '+/-n matches files modified more/less than n days ago , and n matches files modified exactly n days ago, days are rounded down.That is +n => (−∞, curr−(n+1)), n => [curr−(n+1), curr−n), and -n => (𝑐𝑢𝑟𝑟−𝑛, +∞)' 46 | cand -A 'just like -mtime, but based on file access time' 47 | cand --atime 'just like -mtime, but based on file access time' 48 | cand -y 'just like -mtime, but based on file change time' 49 | cand --ctime 'just like -mtime, but based on file change time' 50 | cand --files0-from 'run dust on NUL-terminated file names specified in file; if argument is -, then read names from standard input' 51 | cand --collapse 'Keep these directories collapsed' 52 | cand -m 'Directory ''size'' is max 
filetime of child files instead of disk size. while a/c/m for last accessed/changed/modified time' 53 | cand --filetime 'Directory ''size'' is max filetime of child files instead of disk size. while a/c/m for last accessed/changed/modified time' 54 | cand -p 'Subdirectories will not have their path shortened' 55 | cand --full-paths 'Subdirectories will not have their path shortened' 56 | cand -L 'dereference sym links - Treat sym links as directories and go into them' 57 | cand --dereference-links 'dereference sym links - Treat sym links as directories and go into them' 58 | cand -x 'Only count the files and directories on the same filesystem as the supplied directory' 59 | cand --limit-filesystem 'Only count the files and directories on the same filesystem as the supplied directory' 60 | cand -s 'Use file length instead of blocks' 61 | cand --apparent-size 'Use file length instead of blocks' 62 | cand -r 'Print tree upside down (biggest highest)' 63 | cand --reverse 'Print tree upside down (biggest highest)' 64 | cand -c 'No colors will be printed (Useful for commands like: watch)' 65 | cand --no-colors 'No colors will be printed (Useful for commands like: watch)' 66 | cand -C 'Force colors print' 67 | cand --force-colors 'Force colors print' 68 | cand -b 'No percent bars or percentages will be displayed' 69 | cand --no-percent-bars 'No percent bars or percentages will be displayed' 70 | cand -B 'percent bars moved to right side of screen' 71 | cand --bars-on-right 'percent bars moved to right side of screen' 72 | cand -R 'For screen readers. Removes bars. Adds new column: depth level (May want to use -p too for full path)' 73 | cand --screen-reader 'For screen readers. Removes bars. 
Adds new column: depth level (May want to use -p too for full path)' 74 | cand --skip-total 'No total row will be displayed' 75 | cand -f 'Directory ''size'' is number of child files instead of disk size' 76 | cand --filecount 'Directory ''size'' is number of child files instead of disk size' 77 | cand -i 'Do not display hidden files' 78 | cand --ignore-hidden 'Do not display hidden files' 79 | cand -t 'show only these file types' 80 | cand --file-types 'show only these file types' 81 | cand -P 'Disable the progress indication' 82 | cand --no-progress 'Disable the progress indication' 83 | cand --print-errors 'Print path with errors' 84 | cand -D 'Only directories will be displayed' 85 | cand --only-dir 'Only directories will be displayed' 86 | cand -F 'Only files will be displayed. (Finds your largest files)' 87 | cand --only-file 'Only files will be displayed. (Finds your largest files)' 88 | cand -j 'Output the directory tree as json to the current directory' 89 | cand --output-json 'Output the directory tree as json to the current directory' 90 | cand -h 'Print help (see more with ''--help'')' 91 | cand --help 'Print help (see more with ''--help'')' 92 | cand -V 'Print version' 93 | cand --version 'Print version' 94 | } 95 | ] 96 | $completions[$command] 97 | } 98 | -------------------------------------------------------------------------------- /completions/dust.fish: -------------------------------------------------------------------------------- 1 | complete -c dust -s d -l depth -d 'Depth to show' -r 2 | complete -c dust -s T -l threads -d 'Number of threads to use' -r 3 | complete -c dust -l config -d 'Specify a config file to use' -r -F 4 | complete -c dust -s n -l number-of-lines -d 'Number of lines of output to show. 
(Default is terminal_height - 10)' -r 5 | complete -c dust -s X -l ignore-directory -d 'Exclude any file or directory with this path' -r -F 6 | complete -c dust -s I -l ignore-all-in-file -d 'Exclude any file or directory with a regex matching that listed in this file, the file entries will be added to the ignore regexs provided by --invert_filter' -r -F 7 | complete -c dust -s z -l min-size -d 'Minimum size file to include in output' -r 8 | complete -c dust -s v -l invert-filter -d 'Exclude filepaths matching this regex. To ignore png files type: -v "\\.png$"' -r 9 | complete -c dust -s e -l filter -d 'Only include filepaths matching this regex. For png files type: -e "\\.png$"' -r 10 | complete -c dust -s w -l terminal-width -d 'Specify width of output overriding the auto detection of terminal width' -r 11 | complete -c dust -s o -l output-format -d 'Changes output display size. si will print sizes in powers of 1000. b k m g t kb mb gb tb will print the whole tree in that size' -r -f -a "si\t'SI prefix (powers of 1000)' 12 | b\t'byte (B)' 13 | k\t'kibibyte (KiB)' 14 | m\t'mebibyte (MiB)' 15 | g\t'gibibyte (GiB)' 16 | t\t'tebibyte (TiB)' 17 | kb\t'kilobyte (kB)' 18 | mb\t'megabyte (MB)' 19 | gb\t'gigabyte (GB)' 20 | tb\t'terabyte (TB)'" 21 | complete -c dust -s S -l stack-size -d 'Specify memory to use as stack size - use if you see: \'fatal runtime error: stack overflow\' (default low memory=1048576, high memory=1073741824)' -r 22 | complete -c dust -s M -l mtime -d '+/-n matches files modified more/less than n days ago , and n matches files modified exactly n days ago, days are rounded down.That is +n => (−∞, curr−(n+1)), n => [curr−(n+1), curr−n), and -n => (𝑐𝑢𝑟𝑟−𝑛, +∞)' -r 23 | complete -c dust -s A -l atime -d 'just like -mtime, but based on file access time' -r 24 | complete -c dust -s y -l ctime -d 'just like -mtime, but based on file change time' -r 25 | complete -c dust -l files0-from -d 'run dust on NUL-terminated file names specified in file; if 
argument is -, then read names from standard input' -r -F 26 | complete -c dust -l collapse -d 'Keep these directories collapsed' -r -F 27 | complete -c dust -s m -l filetime -d 'Directory \'size\' is max filetime of child files instead of disk size. while a/c/m for last accessed/changed/modified time' -r -f -a "a\t'last accessed time' 28 | c\t'last changed time' 29 | m\t'last modified time'" 30 | complete -c dust -s p -l full-paths -d 'Subdirectories will not have their path shortened' 31 | complete -c dust -s L -l dereference-links -d 'dereference sym links - Treat sym links as directories and go into them' 32 | complete -c dust -s x -l limit-filesystem -d 'Only count the files and directories on the same filesystem as the supplied directory' 33 | complete -c dust -s s -l apparent-size -d 'Use file length instead of blocks' 34 | complete -c dust -s r -l reverse -d 'Print tree upside down (biggest highest)' 35 | complete -c dust -s c -l no-colors -d 'No colors will be printed (Useful for commands like: watch)' 36 | complete -c dust -s C -l force-colors -d 'Force colors print' 37 | complete -c dust -s b -l no-percent-bars -d 'No percent bars or percentages will be displayed' 38 | complete -c dust -s B -l bars-on-right -d 'percent bars moved to right side of screen' 39 | complete -c dust -s R -l screen-reader -d 'For screen readers. Removes bars. 
Adds new column: depth level (May want to use -p too for full path)' 40 | complete -c dust -l skip-total -d 'No total row will be displayed' 41 | complete -c dust -s f -l filecount -d 'Directory \'size\' is number of child files instead of disk size' 42 | complete -c dust -s i -l ignore-hidden -d 'Do not display hidden files' 43 | complete -c dust -s t -l file-types -d 'show only these file types' 44 | complete -c dust -s P -l no-progress -d 'Disable the progress indication' 45 | complete -c dust -l print-errors -d 'Print path with errors' 46 | complete -c dust -s D -l only-dir -d 'Only directories will be displayed' 47 | complete -c dust -s F -l only-file -d 'Only files will be displayed. (Finds your largest files)' 48 | complete -c dust -s j -l output-json -d 'Output the directory tree as json to the current directory' 49 | complete -c dust -s h -l help -d 'Print help (see more with \'--help\')' 50 | complete -c dust -s V -l version -d 'Print version' 51 | -------------------------------------------------------------------------------- /config/config.toml: -------------------------------------------------------------------------------- 1 | # Sample Config file, works with toml and yaml 2 | # Place in either: 3 | # ~/.config/dust/config.toml 4 | # ~/.dust.toml 5 | 6 | # Print tree upside down (biggest highest) 7 | reverse=true 8 | 9 | # Subdirectories will not have their path shortened 10 | display-full-paths=true 11 | 12 | # Use file length instead of blocks 13 | display-apparent-size=true 14 | 15 | # No colors will be printed 16 | no-colors=true 17 | 18 | # No percent bars or percentages will be displayed 19 | no-bars=true 20 | 21 | # No total row will be displayed 22 | skip-total=true 23 | 24 | # Do not display hidden files 25 | ignore-hidden=true 26 | 27 | # print sizes in powers of 1000 (e.g., 1.1G) 28 | output-format="si" -------------------------------------------------------------------------------- /man-page/dust.1: 
-------------------------------------------------------------------------------- 1 | .ie \n(.g .ds Aq \(aq 2 | .el .ds Aq ' 3 | .TH Dust 1 "Dust 1.2.1" 4 | .SH NAME 5 | Dust \- Like du but more intuitive 6 | .SH SYNOPSIS 7 | \fBdust\fR [\fB\-d\fR|\fB\-\-depth\fR] [\fB\-T\fR|\fB\-\-threads\fR] [\fB\-\-config\fR] [\fB\-n\fR|\fB\-\-number\-of\-lines\fR] [\fB\-p\fR|\fB\-\-full\-paths\fR] [\fB\-X\fR|\fB\-\-ignore\-directory\fR] [\fB\-I\fR|\fB\-\-ignore\-all\-in\-file\fR] [\fB\-L\fR|\fB\-\-dereference\-links\fR] [\fB\-x\fR|\fB\-\-limit\-filesystem\fR] [\fB\-s\fR|\fB\-\-apparent\-size\fR] [\fB\-r\fR|\fB\-\-reverse\fR] [\fB\-c\fR|\fB\-\-no\-colors\fR] [\fB\-C\fR|\fB\-\-force\-colors\fR] [\fB\-b\fR|\fB\-\-no\-percent\-bars\fR] [\fB\-B\fR|\fB\-\-bars\-on\-right\fR] [\fB\-z\fR|\fB\-\-min\-size\fR] [\fB\-R\fR|\fB\-\-screen\-reader\fR] [\fB\-\-skip\-total\fR] [\fB\-f\fR|\fB\-\-filecount\fR] [\fB\-i\fR|\fB\-\-ignore\-hidden\fR] [\fB\-v\fR|\fB\-\-invert\-filter\fR] [\fB\-e\fR|\fB\-\-filter\fR] [\fB\-t\fR|\fB\-\-file\-types\fR] [\fB\-w\fR|\fB\-\-terminal\-width\fR] [\fB\-P\fR|\fB\-\-no\-progress\fR] [\fB\-\-print\-errors\fR] [\fB\-D\fR|\fB\-\-only\-dir\fR] [\fB\-F\fR|\fB\-\-only\-file\fR] [\fB\-o\fR|\fB\-\-output\-format\fR] [\fB\-S\fR|\fB\-\-stack\-size\fR] [\fB\-j\fR|\fB\-\-output\-json\fR] [\fB\-M\fR|\fB\-\-mtime\fR] [\fB\-A\fR|\fB\-\-atime\fR] [\fB\-y\fR|\fB\-\-ctime\fR] [\fB\-\-files0\-from\fR] [\fB\-\-collapse\fR] [\fB\-m\fR|\fB\-\-filetime\fR] [\fB\-h\fR|\fB\-\-help\fR] [\fB\-V\fR|\fB\-\-version\fR] [\fIPATH\fR] 8 | .SH DESCRIPTION 9 | Like du but more intuitive 10 | .SH OPTIONS 11 | .TP 12 | \fB\-d\fR, \fB\-\-depth\fR=\fIDEPTH\fR 13 | Depth to show 14 | .TP 15 | \fB\-T\fR, \fB\-\-threads\fR=\fITHREADS\fR 16 | Number of threads to use 17 | .TP 18 | \fB\-\-config\fR=\fIFILE\fR 19 | Specify a config file to use 20 | .TP 21 | \fB\-n\fR, \fB\-\-number\-of\-lines\fR=\fINUMBER\fR 22 | Number of lines of output to show. 
(Default is terminal_height \- 10) 23 | .TP 24 | \fB\-p\fR, \fB\-\-full\-paths\fR 25 | Subdirectories will not have their path shortened 26 | .TP 27 | \fB\-X\fR, \fB\-\-ignore\-directory\fR=\fIPATH\fR 28 | Exclude any file or directory with this path 29 | .TP 30 | \fB\-I\fR, \fB\-\-ignore\-all\-in\-file\fR=\fIFILE\fR 31 | Exclude any file or directory with a regex matching that listed in this file, the file entries will be added to the ignore regexs provided by \-\-invert_filter 32 | .TP 33 | \fB\-L\fR, \fB\-\-dereference\-links\fR 34 | dereference sym links \- Treat sym links as directories and go into them 35 | .TP 36 | \fB\-x\fR, \fB\-\-limit\-filesystem\fR 37 | Only count the files and directories on the same filesystem as the supplied directory 38 | .TP 39 | \fB\-s\fR, \fB\-\-apparent\-size\fR 40 | Use file length instead of blocks 41 | .TP 42 | \fB\-r\fR, \fB\-\-reverse\fR 43 | Print tree upside down (biggest highest) 44 | .TP 45 | \fB\-c\fR, \fB\-\-no\-colors\fR 46 | No colors will be printed (Useful for commands like: watch) 47 | .TP 48 | \fB\-C\fR, \fB\-\-force\-colors\fR 49 | Force colors print 50 | .TP 51 | \fB\-b\fR, \fB\-\-no\-percent\-bars\fR 52 | No percent bars or percentages will be displayed 53 | .TP 54 | \fB\-B\fR, \fB\-\-bars\-on\-right\fR 55 | percent bars moved to right side of screen 56 | .TP 57 | \fB\-z\fR, \fB\-\-min\-size\fR=\fIMIN_SIZE\fR 58 | Minimum size file to include in output 59 | .TP 60 | \fB\-R\fR, \fB\-\-screen\-reader\fR 61 | For screen readers. Removes bars. Adds new column: depth level (May want to use \-p too for full path) 62 | .TP 63 | \fB\-\-skip\-total\fR 64 | No total row will be displayed 65 | .TP 66 | \fB\-f\fR, \fB\-\-filecount\fR 67 | Directory \*(Aqsize\*(Aq is number of child files instead of disk size 68 | .TP 69 | \fB\-i\fR, \fB\-\-ignore\-hidden\fR 70 | Do not display hidden files 71 | .TP 72 | \fB\-v\fR, \fB\-\-invert\-filter\fR=\fIREGEX\fR 73 | Exclude filepaths matching this regex. 
To ignore png files type: \-v "\\.png$" 74 | .TP 75 | \fB\-e\fR, \fB\-\-filter\fR=\fIREGEX\fR 76 | Only include filepaths matching this regex. For png files type: \-e "\\.png$" 77 | .TP 78 | \fB\-t\fR, \fB\-\-file\-types\fR 79 | show only these file types 80 | .TP 81 | \fB\-w\fR, \fB\-\-terminal\-width\fR=\fIWIDTH\fR 82 | Specify width of output overriding the auto detection of terminal width 83 | .TP 84 | \fB\-P\fR, \fB\-\-no\-progress\fR 85 | Disable the progress indication 86 | .TP 87 | \fB\-\-print\-errors\fR 88 | Print path with errors 89 | .TP 90 | \fB\-D\fR, \fB\-\-only\-dir\fR 91 | Only directories will be displayed 92 | .TP 93 | \fB\-F\fR, \fB\-\-only\-file\fR 94 | Only files will be displayed. (Finds your largest files) 95 | .TP 96 | \fB\-o\fR, \fB\-\-output\-format\fR=\fIFORMAT\fR 97 | Changes output display size. si will print sizes in powers of 1000. b k m g t kb mb gb tb will print the whole tree in that size 98 | .br 99 | 100 | .br 101 | \fIPossible values:\fR 102 | .RS 14 103 | .IP \(bu 2 104 | si: SI prefix (powers of 1000) 105 | .IP \(bu 2 106 | b: byte (B) 107 | .IP \(bu 2 108 | k: kibibyte (KiB) 109 | .IP \(bu 2 110 | m: mebibyte (MiB) 111 | .IP \(bu 2 112 | g: gibibyte (GiB) 113 | .IP \(bu 2 114 | t: tebibyte (TiB) 115 | .IP \(bu 2 116 | kb: kilobyte (kB) 117 | .IP \(bu 2 118 | mb: megabyte (MB) 119 | .IP \(bu 2 120 | gb: gigabyte (GB) 121 | .IP \(bu 2 122 | tb: terabyte (TB) 123 | .RE 124 | .TP 125 | \fB\-S\fR, \fB\-\-stack\-size\fR=\fISTACK_SIZE\fR 126 | Specify memory to use as stack size \- use if you see: \*(Aqfatal runtime error: stack overflow\*(Aq (default low memory=1048576, high memory=1073741824) 127 | .TP 128 | \fB\-j\fR, \fB\-\-output\-json\fR 129 | Output the directory tree as json to the current directory 130 | .TP 131 | \fB\-M\fR, \fB\-\-mtime\fR=\fIMTIME\fR 132 | +/\-n matches files modified more/less than n days ago , and n matches files modified exactly n days ago, days are rounded down.That is +n => (−∞, curr−(n+1)), n => 
[curr−(n+1), curr−n), and \-n => (𝑐𝑢𝑟𝑟−𝑛, +∞) 133 | .TP 134 | \fB\-A\fR, \fB\-\-atime\fR=\fIATIME\fR 135 | just like \-mtime, but based on file access time 136 | .TP 137 | \fB\-y\fR, \fB\-\-ctime\fR=\fICTIME\fR 138 | just like \-mtime, but based on file change time 139 | .TP 140 | \fB\-\-files0\-from\fR=\fIFILES0_FROM\fR 141 | run dust on NUL\-terminated file names specified in file; if argument is \-, then read names from standard input 142 | .TP 143 | \fB\-\-collapse\fR=\fICOLLAPSE\fR 144 | Keep these directories collapsed 145 | .TP 146 | \fB\-m\fR, \fB\-\-filetime\fR=\fIFILETIME\fR 147 | Directory \*(Aqsize\*(Aq is max filetime of child files instead of disk size. while a/c/m for last accessed/changed/modified time 148 | .br 149 | 150 | .br 151 | \fIPossible values:\fR 152 | .RS 14 153 | .IP \(bu 2 154 | a: last accessed time 155 | .IP \(bu 2 156 | c: last changed time 157 | .IP \(bu 2 158 | m: last modified time 159 | .RE 160 | .TP 161 | \fB\-h\fR, \fB\-\-help\fR 162 | Print help (see a summary with \*(Aq\-h\*(Aq) 163 | .TP 164 | \fB\-V\fR, \fB\-\-version\fR 165 | Print version 166 | .TP 167 | [\fIPATH\fR] 168 | Input files or directories 169 | .SH VERSION 170 | v1.2.1 171 | -------------------------------------------------------------------------------- /media/snap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bootandy/dust/9b2dc4655df00696d025ee083cc3dc0c684a7a68/media/snap.png -------------------------------------------------------------------------------- /src/cli.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | use clap::{Parser, ValueEnum, ValueHint}; 4 | 5 | // For single thread mode set this variable on your command line: 6 | // export RAYON_NUM_THREADS=1 7 | 8 | /// Like du but more intuitive 9 | #[derive(Debug, Parser)] 10 | #[command(name("Dust"), version)] 11 | pub struct Cli { 12 | /// Depth to show 13 | 
#[arg(short, long)] 14 | pub depth: Option, 15 | 16 | /// Number of threads to use 17 | #[arg(short('T'), long)] 18 | pub threads: Option, 19 | 20 | /// Specify a config file to use 21 | #[arg(long, value_name("FILE"), value_hint(ValueHint::FilePath))] 22 | pub config: Option, 23 | 24 | /// Number of lines of output to show. (Default is terminal_height - 10) 25 | #[arg(short, long, value_name("NUMBER"))] 26 | pub number_of_lines: Option, 27 | 28 | /// Subdirectories will not have their path shortened 29 | #[arg(short('p'), long)] 30 | pub full_paths: bool, 31 | 32 | /// Exclude any file or directory with this path 33 | #[arg(short('X'), long, value_name("PATH"), value_hint(ValueHint::AnyPath))] 34 | pub ignore_directory: Option>, 35 | 36 | /// Exclude any file or directory with a regex matching that listed in this 37 | /// file, the file entries will be added to the ignore regexs provided by 38 | /// --invert_filter 39 | #[arg(short('I'), long, value_name("FILE"), value_hint(ValueHint::FilePath))] 40 | pub ignore_all_in_file: Option, 41 | 42 | /// dereference sym links - Treat sym links as directories and go into them 43 | #[arg(short('L'), long)] 44 | pub dereference_links: bool, 45 | 46 | /// Only count the files and directories on the same filesystem as the 47 | /// supplied directory 48 | #[arg(short('x'), long)] 49 | pub limit_filesystem: bool, 50 | 51 | /// Use file length instead of blocks 52 | #[arg(short('s'), long)] 53 | pub apparent_size: bool, 54 | 55 | /// Print tree upside down (biggest highest) 56 | #[arg(short, long)] 57 | pub reverse: bool, 58 | 59 | /// No colors will be printed (Useful for commands like: watch) 60 | #[arg(short('c'), long)] 61 | pub no_colors: bool, 62 | 63 | /// Force colors print 64 | #[arg(short('C'), long)] 65 | pub force_colors: bool, 66 | 67 | /// No percent bars or percentages will be displayed 68 | #[arg(short('b'), long)] 69 | pub no_percent_bars: bool, 70 | 71 | /// percent bars moved to right side of screen 72 | 
#[arg(short('B'), long)] 73 | pub bars_on_right: bool, 74 | 75 | /// Minimum size file to include in output 76 | #[arg(short('z'), long)] 77 | pub min_size: Option, 78 | 79 | /// For screen readers. Removes bars. Adds new column: depth level (May want 80 | /// to use -p too for full path) 81 | #[arg(short('R'), long)] 82 | pub screen_reader: bool, 83 | 84 | /// No total row will be displayed 85 | #[arg(long)] 86 | pub skip_total: bool, 87 | 88 | /// Directory 'size' is number of child files instead of disk size 89 | #[arg(short, long)] 90 | pub filecount: bool, 91 | 92 | /// Do not display hidden files 93 | // Do not use 'h' this is used by 'help' 94 | #[arg(short, long)] 95 | pub ignore_hidden: bool, 96 | 97 | /// Exclude filepaths matching this regex. To ignore png files type: -v 98 | /// "\.png$" 99 | #[arg( 100 | short('v'), 101 | long, 102 | value_name("REGEX"), 103 | conflicts_with("filter"), 104 | conflicts_with("file_types") 105 | )] 106 | pub invert_filter: Option>, 107 | 108 | /// Only include filepaths matching this regex. For png files type: -e 109 | /// "\.png$" 110 | #[arg(short('e'), long, value_name("REGEX"), conflicts_with("file_types"))] 111 | pub filter: Option>, 112 | 113 | /// show only these file types 114 | #[arg(short('t'), long, conflicts_with("depth"), conflicts_with("only_dir"))] 115 | pub file_types: bool, 116 | 117 | /// Specify width of output overriding the auto detection of terminal width 118 | #[arg(short('w'), long, value_name("WIDTH"))] 119 | pub terminal_width: Option, 120 | 121 | /// Disable the progress indication. 122 | #[arg(short('P'), long)] 123 | pub no_progress: bool, 124 | 125 | /// Print path with errors. 126 | #[arg(long)] 127 | pub print_errors: bool, 128 | 129 | /// Only directories will be displayed. 130 | #[arg( 131 | short('D'), 132 | long, 133 | conflicts_with("only_file"), 134 | conflicts_with("file_types") 135 | )] 136 | pub only_dir: bool, 137 | 138 | /// Only files will be displayed. 
(Finds your largest files) 139 | #[arg(short('F'), long, conflicts_with("only_dir"))] 140 | pub only_file: bool, 141 | 142 | /// Changes output display size. si will print sizes in powers of 1000. b k 143 | /// m g t kb mb gb tb will print the whole tree in that size. 144 | #[arg(short, long, value_enum, value_name("FORMAT"), ignore_case(true))] 145 | pub output_format: Option, 146 | 147 | /// Specify memory to use as stack size - use if you see: 'fatal runtime 148 | /// error: stack overflow' (default low memory=1048576, high 149 | /// memory=1073741824) 150 | #[arg(short('S'), long)] 151 | pub stack_size: Option, 152 | 153 | /// Input files or directories. 154 | #[arg(value_name("PATH"), value_hint(ValueHint::AnyPath))] 155 | pub params: Option>, 156 | 157 | /// Output the directory tree as json to the current directory 158 | #[arg(short('j'), long)] 159 | pub output_json: bool, 160 | 161 | /// +/-n matches files modified more/less than n days ago , and n matches 162 | /// files modified exactly n days ago, days are rounded down.That is +n => 163 | /// (−∞, curr−(n+1)), n => [curr−(n+1), curr−n), and -n => (𝑐𝑢𝑟𝑟−𝑛, +∞) 164 | #[arg(short('M'), long, allow_hyphen_values(true))] 165 | pub mtime: Option, 166 | 167 | /// just like -mtime, but based on file access time 168 | #[arg(short('A'), long, allow_hyphen_values(true))] 169 | pub atime: Option, 170 | 171 | /// just like -mtime, but based on file change time 172 | #[arg(short('y'), long, allow_hyphen_values(true))] 173 | pub ctime: Option, 174 | 175 | /// run dust on NUL-terminated file names specified in file; if argument is 176 | /// -, then read names from standard input 177 | #[arg(long, value_hint(ValueHint::AnyPath))] 178 | pub files0_from: Option, 179 | 180 | /// Keep these directories collapsed 181 | #[arg(long, value_hint(ValueHint::AnyPath))] 182 | pub collapse: Option>, 183 | 184 | /// Directory 'size' is max filetime of child files instead of disk size. 
185 | /// while a/c/m for last accessed/changed/modified time 186 | #[arg(short('m'), long, value_enum)] 187 | pub filetime: Option, 188 | } 189 | 190 | #[derive(Clone, Copy, Debug, ValueEnum)] 191 | #[value(rename_all = "lower")] 192 | pub enum OutputFormat { 193 | /// SI prefix (powers of 1000) 194 | SI, 195 | 196 | /// byte (B) 197 | B, 198 | 199 | /// kibibyte (KiB) 200 | #[value(name = "k", alias("kib"))] 201 | KiB, 202 | 203 | /// mebibyte (MiB) 204 | #[value(name = "m", alias("mib"))] 205 | MiB, 206 | 207 | /// gibibyte (GiB) 208 | #[value(name = "g", alias("gib"))] 209 | GiB, 210 | 211 | /// tebibyte (TiB) 212 | #[value(name = "t", alias("tib"))] 213 | TiB, 214 | 215 | /// kilobyte (kB) 216 | KB, 217 | 218 | /// megabyte (MB) 219 | MB, 220 | 221 | /// gigabyte (GB) 222 | GB, 223 | 224 | /// terabyte (TB) 225 | TB, 226 | } 227 | 228 | impl fmt::Display for OutputFormat { 229 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 230 | match self { 231 | Self::SI => write!(f, "si"), 232 | Self::B => write!(f, "b"), 233 | Self::KiB => write!(f, "k"), 234 | Self::MiB => write!(f, "m"), 235 | Self::GiB => write!(f, "g"), 236 | Self::TiB => write!(f, "t"), 237 | Self::KB => write!(f, "kb"), 238 | Self::MB => write!(f, "mb"), 239 | Self::GB => write!(f, "gb"), 240 | Self::TB => write!(f, "tb"), 241 | } 242 | } 243 | } 244 | 245 | #[derive(Clone, Copy, Debug, ValueEnum)] 246 | pub enum FileTime { 247 | /// last accessed time 248 | #[value(name = "a", alias("accessed"))] 249 | Accessed, 250 | 251 | /// last changed time 252 | #[value(name = "c", alias("changed"))] 253 | Changed, 254 | 255 | /// last modified time 256 | #[value(name = "m", alias("modified"))] 257 | Modified, 258 | } 259 | -------------------------------------------------------------------------------- /src/config.rs: -------------------------------------------------------------------------------- 1 | use crate::node::FileTime; 2 | use chrono::{Local, TimeZone}; 3 | use 
config_file::FromConfigFile; 4 | use regex::Regex; 5 | use serde::Deserialize; 6 | use std::path::Path; 7 | use std::path::PathBuf; 8 | 9 | use crate::cli::Cli; 10 | use crate::dir_walker::Operator; 11 | use crate::display::get_number_format; 12 | 13 | pub static DAY_SECONDS: i64 = 24 * 60 * 60; 14 | 15 | #[derive(Deserialize, Default)] 16 | #[serde(rename_all = "kebab-case")] 17 | pub struct Config { 18 | pub display_full_paths: Option, 19 | pub display_apparent_size: Option, 20 | pub reverse: Option, 21 | pub no_colors: Option, 22 | pub force_colors: Option, 23 | pub no_bars: Option, 24 | pub skip_total: Option, 25 | pub screen_reader: Option, 26 | pub ignore_hidden: Option, 27 | pub output_format: Option, 28 | pub min_size: Option, 29 | pub only_dir: Option, 30 | pub only_file: Option, 31 | pub disable_progress: Option, 32 | pub depth: Option, 33 | pub bars_on_right: Option, 34 | pub stack_size: Option, 35 | pub threads: Option, 36 | pub output_json: Option, 37 | pub print_errors: Option, 38 | pub files0_from: Option, 39 | } 40 | 41 | impl Config { 42 | pub fn get_files_from(&self, options: &Cli) -> Option { 43 | let from_file = &options.files0_from; 44 | match from_file { 45 | None => self.files0_from.as_ref().map(|x| x.to_string()), 46 | Some(x) => Some(x.to_string()), 47 | } 48 | } 49 | pub fn get_no_colors(&self, options: &Cli) -> bool { 50 | Some(true) == self.no_colors || options.no_colors 51 | } 52 | pub fn get_force_colors(&self, options: &Cli) -> bool { 53 | Some(true) == self.force_colors || options.force_colors 54 | } 55 | pub fn get_disable_progress(&self, options: &Cli) -> bool { 56 | Some(true) == self.disable_progress || options.no_progress 57 | } 58 | pub fn get_apparent_size(&self, options: &Cli) -> bool { 59 | Some(true) == self.display_apparent_size || options.apparent_size 60 | } 61 | pub fn get_ignore_hidden(&self, options: &Cli) -> bool { 62 | Some(true) == self.ignore_hidden || options.ignore_hidden 63 | } 64 | pub fn get_full_paths(&self, 
options: &Cli) -> bool { 65 | Some(true) == self.display_full_paths || options.full_paths 66 | } 67 | pub fn get_reverse(&self, options: &Cli) -> bool { 68 | Some(true) == self.reverse || options.reverse 69 | } 70 | pub fn get_no_bars(&self, options: &Cli) -> bool { 71 | Some(true) == self.no_bars || options.no_percent_bars 72 | } 73 | pub fn get_output_format(&self, options: &Cli) -> String { 74 | let out_fmt = options.output_format; 75 | (match out_fmt { 76 | None => match &self.output_format { 77 | None => "".to_string(), 78 | Some(x) => x.to_string(), 79 | }, 80 | Some(x) => x.to_string(), 81 | }) 82 | .to_lowercase() 83 | } 84 | 85 | pub fn get_filetime(&self, options: &Cli) -> Option { 86 | options.filetime.map(FileTime::from) 87 | } 88 | 89 | pub fn get_skip_total(&self, options: &Cli) -> bool { 90 | Some(true) == self.skip_total || options.skip_total 91 | } 92 | pub fn get_screen_reader(&self, options: &Cli) -> bool { 93 | Some(true) == self.screen_reader || options.screen_reader 94 | } 95 | pub fn get_depth(&self, options: &Cli) -> usize { 96 | if let Some(v) = options.depth { 97 | return v; 98 | } 99 | 100 | self.depth.unwrap_or(usize::MAX) 101 | } 102 | pub fn get_min_size(&self, options: &Cli) -> Option { 103 | let size_from_param = options.min_size.as_ref(); 104 | self._get_min_size(size_from_param) 105 | } 106 | fn _get_min_size(&self, min_size: Option<&String>) -> Option { 107 | let size_from_param = min_size.and_then(|a| convert_min_size(a)); 108 | 109 | if size_from_param.is_none() { 110 | self.min_size 111 | .as_ref() 112 | .and_then(|a| convert_min_size(a.as_ref())) 113 | } else { 114 | size_from_param 115 | } 116 | } 117 | pub fn get_only_dir(&self, options: &Cli) -> bool { 118 | Some(true) == self.only_dir || options.only_dir 119 | } 120 | 121 | pub fn get_print_errors(&self, options: &Cli) -> bool { 122 | Some(true) == self.print_errors || options.print_errors 123 | } 124 | pub fn get_only_file(&self, options: &Cli) -> bool { 125 | Some(true) 
== self.only_file || options.only_file 126 | } 127 | pub fn get_bars_on_right(&self, options: &Cli) -> bool { 128 | Some(true) == self.bars_on_right || options.bars_on_right 129 | } 130 | pub fn get_custom_stack_size(&self, options: &Cli) -> Option { 131 | let from_cmd_line = options.stack_size; 132 | if from_cmd_line.is_none() { 133 | self.stack_size 134 | } else { 135 | from_cmd_line 136 | } 137 | } 138 | pub fn get_threads(&self, options: &Cli) -> Option { 139 | let from_cmd_line = options.threads; 140 | if from_cmd_line.is_none() { 141 | self.threads 142 | } else { 143 | from_cmd_line 144 | } 145 | } 146 | pub fn get_output_json(&self, options: &Cli) -> bool { 147 | Some(true) == self.output_json || options.output_json 148 | } 149 | 150 | pub fn get_modified_time_operator(&self, options: &Cli) -> Option<(Operator, i64)> { 151 | get_filter_time_operator(options.mtime.as_ref(), get_current_date_epoch_seconds()) 152 | } 153 | 154 | pub fn get_accessed_time_operator(&self, options: &Cli) -> Option<(Operator, i64)> { 155 | get_filter_time_operator(options.atime.as_ref(), get_current_date_epoch_seconds()) 156 | } 157 | 158 | pub fn get_changed_time_operator(&self, options: &Cli) -> Option<(Operator, i64)> { 159 | get_filter_time_operator(options.ctime.as_ref(), get_current_date_epoch_seconds()) 160 | } 161 | } 162 | 163 | fn get_current_date_epoch_seconds() -> i64 { 164 | // calculate current date epoch seconds 165 | let now = Local::now(); 166 | let current_date = now.date_naive(); 167 | 168 | let current_date_time = current_date.and_hms_opt(0, 0, 0).unwrap(); 169 | Local 170 | .from_local_datetime(¤t_date_time) 171 | .unwrap() 172 | .timestamp() 173 | } 174 | 175 | fn get_filter_time_operator( 176 | option_value: Option<&String>, 177 | current_date_epoch_seconds: i64, 178 | ) -> Option<(Operator, i64)> { 179 | match option_value { 180 | Some(val) => { 181 | let time = current_date_epoch_seconds 182 | - val 183 | .parse::() 184 | .unwrap_or_else(|_| panic!("invalid 
data format")) 185 | .abs() 186 | * DAY_SECONDS; 187 | match val.chars().next().expect("Value should not be empty") { 188 | '+' => Some((Operator::LessThan, time - DAY_SECONDS)), 189 | '-' => Some((Operator::GreaterThan, time)), 190 | _ => Some((Operator::Equal, time - DAY_SECONDS)), 191 | } 192 | } 193 | None => None, 194 | } 195 | } 196 | 197 | fn convert_min_size(input: &str) -> Option { 198 | let re = Regex::new(r"([0-9]+)(\w*)").unwrap(); 199 | 200 | if let Some(cap) = re.captures(input) { 201 | let (_, [digits, letters]) = cap.extract(); 202 | 203 | // Failure to parse should be impossible due to regex match 204 | let digits_as_usize: Option = digits.parse().ok(); 205 | 206 | match digits_as_usize { 207 | Some(parsed_digits) => { 208 | let number_format = get_number_format(&letters.to_lowercase()); 209 | match number_format { 210 | Some((multiple, _)) => Some(parsed_digits * (multiple as usize)), 211 | None => { 212 | if letters.is_empty() { 213 | Some(parsed_digits) 214 | } else { 215 | eprintln!("Ignoring invalid min-size: {input}"); 216 | None 217 | } 218 | } 219 | } 220 | } 221 | None => None, 222 | } 223 | } else { 224 | None 225 | } 226 | } 227 | 228 | fn get_config_locations(base: &Path) -> Vec { 229 | vec![ 230 | base.join(".dust.toml"), 231 | base.join(".config").join("dust").join("config.toml"), 232 | ] 233 | } 234 | 235 | pub fn get_config(conf_path: Option<&String>) -> Config { 236 | match conf_path { 237 | Some(path_str) => { 238 | let path = Path::new(path_str); 239 | if path.exists() { 240 | match Config::from_config_file(path) { 241 | Ok(config) => return config, 242 | Err(e) => { 243 | eprintln!("Ignoring invalid config file '{}': {}", &path.display(), e) 244 | } 245 | } 246 | } else { 247 | eprintln!("Config file {:?} doesn't exists", &path.display()); 248 | } 249 | } 250 | None => { 251 | if let Some(home) = directories::BaseDirs::new() { 252 | for path in get_config_locations(home.home_dir()) { 253 | if path.exists() { 254 | if let 
Ok(config) = Config::from_config_file(&path) { 255 | return config; 256 | } 257 | } 258 | } 259 | } 260 | } 261 | } 262 | Config { 263 | ..Default::default() 264 | } 265 | } 266 | 267 | #[cfg(test)] 268 | mod tests { 269 | #[allow(unused_imports)] 270 | use super::*; 271 | use chrono::{Datelike, Timelike}; 272 | use clap::Parser; 273 | 274 | #[test] 275 | fn test_get_current_date_epoch_seconds() { 276 | let epoch_seconds = get_current_date_epoch_seconds(); 277 | let dt = Local.timestamp_opt(epoch_seconds, 0).unwrap(); 278 | 279 | assert_eq!(dt.hour(), 0); 280 | assert_eq!(dt.minute(), 0); 281 | assert_eq!(dt.second(), 0); 282 | assert_eq!(dt.date_naive().day(), Local::now().date_naive().day()); 283 | assert_eq!(dt.date_naive().month(), Local::now().date_naive().month()); 284 | assert_eq!(dt.date_naive().year(), Local::now().date_naive().year()); 285 | } 286 | 287 | #[test] 288 | fn test_conversion() { 289 | assert_eq!(convert_min_size("55"), Some(55)); 290 | assert_eq!(convert_min_size("12344321"), Some(12344321)); 291 | assert_eq!(convert_min_size("95RUBBISH"), None); 292 | assert_eq!(convert_min_size("10Ki"), Some(10 * 1024)); 293 | assert_eq!(convert_min_size("10MiB"), Some(10 * 1024usize.pow(2))); 294 | assert_eq!(convert_min_size("10M"), Some(10 * 1024usize.pow(2))); 295 | assert_eq!(convert_min_size("10Mb"), Some(10 * 1000usize.pow(2))); 296 | assert_eq!(convert_min_size("2Gi"), Some(2 * 1024usize.pow(3))); 297 | } 298 | 299 | #[test] 300 | fn test_min_size_from_config_applied_or_overridden() { 301 | let c = Config { 302 | min_size: Some("1KiB".to_owned()), 303 | ..Default::default() 304 | }; 305 | assert_eq!(c._get_min_size(None), Some(1024)); 306 | assert_eq!(c._get_min_size(Some(&"2KiB".into())), Some(2048)); 307 | 308 | assert_eq!(c._get_min_size(Some(&"1kb".into())), Some(1000)); 309 | assert_eq!(c._get_min_size(Some(&"2KB".into())), Some(2000)); 310 | } 311 | 312 | #[test] 313 | fn test_get_depth() { 314 | // No config and no flag. 
315 | let c = Config::default(); 316 | let args = get_args(vec![]); 317 | assert_eq!(c.get_depth(&args), usize::MAX); 318 | 319 | // Config is not defined and flag is defined. 320 | let c = Config::default(); 321 | let args = get_args(vec!["dust", "--depth", "5"]); 322 | assert_eq!(c.get_depth(&args), 5); 323 | 324 | // Config is defined and flag is not defined. 325 | let c = Config { 326 | depth: Some(3), 327 | ..Default::default() 328 | }; 329 | let args = get_args(vec![]); 330 | assert_eq!(c.get_depth(&args), 3); 331 | 332 | // Both config and flag are defined. 333 | let c = Config { 334 | depth: Some(3), 335 | ..Default::default() 336 | }; 337 | let args = get_args(vec!["dust", "--depth", "5"]); 338 | assert_eq!(c.get_depth(&args), 5); 339 | } 340 | 341 | fn get_args(args: Vec<&str>) -> Cli { 342 | Cli::parse_from(args) 343 | } 344 | 345 | #[test] 346 | fn test_get_filetime() { 347 | // No config and no flag. 348 | let c = Config::default(); 349 | let args = get_filetime_args(vec!["dust"]); 350 | assert_eq!(c.get_filetime(&args), None); 351 | 352 | // Config is not defined and flag is defined as access time 353 | let c = Config::default(); 354 | let args = get_filetime_args(vec!["dust", "--filetime", "a"]); 355 | assert_eq!(c.get_filetime(&args), Some(FileTime::Accessed)); 356 | 357 | let c = Config::default(); 358 | let args = get_filetime_args(vec!["dust", "--filetime", "accessed"]); 359 | assert_eq!(c.get_filetime(&args), Some(FileTime::Accessed)); 360 | 361 | // Config is not defined and flag is defined as modified time 362 | let c = Config::default(); 363 | let args = get_filetime_args(vec!["dust", "--filetime", "m"]); 364 | assert_eq!(c.get_filetime(&args), Some(FileTime::Modified)); 365 | 366 | let c = Config::default(); 367 | let args = get_filetime_args(vec!["dust", "--filetime", "modified"]); 368 | assert_eq!(c.get_filetime(&args), Some(FileTime::Modified)); 369 | 370 | // Config is not defined and flag is defined as changed time 371 | let c = 
Config::default(); 372 | let args = get_filetime_args(vec!["dust", "--filetime", "c"]); 373 | assert_eq!(c.get_filetime(&args), Some(FileTime::Changed)); 374 | 375 | let c = Config::default(); 376 | let args = get_filetime_args(vec!["dust", "--filetime", "changed"]); 377 | assert_eq!(c.get_filetime(&args), Some(FileTime::Changed)); 378 | } 379 | 380 | fn get_filetime_args(args: Vec<&str>) -> Cli { 381 | Cli::parse_from(args) 382 | } 383 | } 384 | -------------------------------------------------------------------------------- /src/dir_walker.rs: -------------------------------------------------------------------------------- 1 | use std::cmp::Ordering; 2 | use std::fs; 3 | use std::io::Error; 4 | use std::sync::Arc; 5 | use std::sync::Mutex; 6 | 7 | use crate::node::Node; 8 | use crate::progress::ORDERING; 9 | use crate::progress::Operation; 10 | use crate::progress::PAtomicInfo; 11 | use crate::progress::RuntimeErrors; 12 | use crate::utils::is_filtered_out_due_to_file_time; 13 | use crate::utils::is_filtered_out_due_to_invert_regex; 14 | use crate::utils::is_filtered_out_due_to_regex; 15 | use rayon::iter::ParallelBridge; 16 | use rayon::prelude::ParallelIterator; 17 | use regex::Regex; 18 | use std::path::Path; 19 | use std::path::PathBuf; 20 | 21 | use std::collections::HashSet; 22 | 23 | use crate::node::build_node; 24 | use std::fs::DirEntry; 25 | 26 | use crate::node::FileTime; 27 | use crate::platform::get_metadata; 28 | 29 | #[derive(Debug)] 30 | pub enum Operator { 31 | Equal = 0, 32 | LessThan = 1, 33 | GreaterThan = 2, 34 | } 35 | 36 | pub struct WalkData<'a> { 37 | pub ignore_directories: HashSet, 38 | pub filter_regex: &'a [Regex], 39 | pub invert_filter_regex: &'a [Regex], 40 | pub allowed_filesystems: HashSet, 41 | pub filter_modified_time: Option<(Operator, i64)>, 42 | pub filter_accessed_time: Option<(Operator, i64)>, 43 | pub filter_changed_time: Option<(Operator, i64)>, 44 | pub use_apparent_size: bool, 45 | pub by_filecount: bool, 46 | pub 
by_filetime: &'a Option<FileTime>,
    pub ignore_hidden: bool,
    pub follow_links: bool,
    pub progress_data: Arc<PAtomicInfo>,
    pub errors: Arc<Mutex<RuntimeErrors>>,
}

/// Walks every requested root and returns the resulting trees with
/// duplicate inodes removed (unless apparent size is requested).
pub fn walk_it(dirs: HashSet<PathBuf>, walk_data: &WalkData) -> Vec<Node> {
    let mut inodes = HashSet::new();
    let top_level_nodes: Vec<_> = dirs
        .into_iter()
        .filter_map(|d| {
            let prog_data = &walk_data.progress_data;
            prog_data.clear_state(&d);
            let node = walk(d, walk_data, 0)?;

            prog_data.state.store(Operation::PREPARING, ORDERING);

            clean_inodes(node, &mut inodes, walk_data)
        })
        .collect();
    top_level_nodes
}

// Remove files which have the same inode, we don't want to double count them.
fn clean_inodes(
    x: Node,
    inodes: &mut HashSet<(u64, u64)>,
    walk_data: &WalkData,
) -> Option<Node> {
    if !walk_data.use_apparent_size {
        if let Some(id) = x.inode_device {
            if !inodes.insert(id) {
                return None;
            }
        }
    }

    // Sort Nodes so iteration order is predictable
    let mut tmp: Vec<_> = x.children;
    tmp.sort_by(sort_by_inode);
    let new_children: Vec<_> = tmp
        .into_iter()
        .filter_map(|c| clean_inodes(c, inodes, walk_data))
        .collect();

    let actual_size = if walk_data.by_filetime.is_some() {
        // If by_filetime is Some, directory 'size' is the maximum filetime among child files instead of disk size
        new_children
            .iter()
            .map(|c| c.size)
            .chain(std::iter::once(x.size))
            .max()
            .unwrap_or(0)
    } else {
        // If by_filetime is None, directory 'size' is the sum of disk sizes or file counts of child files
        x.size + new_children.iter().map(|c| c.size).sum::<u64>()
    };

    Some(Node {
        name: x.name,
        size: actual_size,
        children: new_children,
        inode_device: x.inode_device,
        depth: x.depth,
    })
}

/// Total order over nodes: by device, then inode, then name; nodes without
/// an inode sort first.
fn sort_by_inode(a: &Node, b: &Node) -> std::cmp::Ordering {
    // Sorting by inode is quicker than by sorting by name/size
    match (a.inode_device, b.inode_device) {
        (Some(x), Some(y)) => {
            if x.0 != y.0 {
                x.0.cmp(&y.0)
            } else if x.1 != y.1 {
                x.1.cmp(&y.1)
            } else {
                a.name.cmp(&b.name)
            }
        }
        (Some(_), None) => Ordering::Greater,
        (None, Some(_)) => Ordering::Less,
        (None, None) => a.name.cmp(&b.name),
    }
}

// Check if `path` is inside ignored directory
fn is_ignored_path(path: &Path, walk_data: &WalkData) -> bool {
    if walk_data.ignore_directories.contains(path) {
        return true;
    }

    // Entry is inside an ignored absolute path
    // Absolute paths should be canonicalized before being added to `WalkData.ignore_directories`
    for ignored_path in walk_data.ignore_directories.iter() {
        if !ignored_path.is_absolute() {
            continue;
        }
        let absolute_entry_path = std::fs::canonicalize(path).unwrap_or_default();
        if absolute_entry_path.starts_with(ignored_path) {
            return true;
        }
    }

    false
}

/// True when `entry` should be excluded from the walk: ignored path, wrong
/// filesystem, filtered by time or regex, or a hidden file when hiding.
fn ignore_file(entry: &DirEntry, walk_data: &WalkData) -> bool {
    if is_ignored_path(&entry.path(), walk_data) {
        return true;
    }

    let is_dot_file = entry.file_name().to_str().unwrap_or("").starts_with('.');
    let follow_links = walk_data.follow_links && entry.file_type().is_ok_and(|ft| ft.is_symlink());

    if !walk_data.allowed_filesystems.is_empty() {
        let size_inode_device = get_metadata(entry.path(), false, follow_links);
        if let Some((_size, Some((_id, dev)), _gunk)) = size_inode_device {
            if !walk_data.allowed_filesystems.contains(&dev) {
                return true;
            }
        }
    }
    if walk_data.filter_accessed_time.is_some()
        || walk_data.filter_modified_time.is_some()
        || walk_data.filter_changed_time.is_some()
    {
        let size_inode_device = get_metadata(entry.path(), false, follow_links);
        if let Some((_, _, (modified_time, accessed_time, changed_time))) = size_inode_device {
            if entry.path().is_file()
                && [
                    (&walk_data.filter_modified_time, modified_time),
                    (&walk_data.filter_accessed_time, accessed_time),
                    (&walk_data.filter_changed_time, changed_time),
                ]
                .iter()
                .any(|(filter_time, actual_time)| {
                    is_filtered_out_due_to_file_time(filter_time, *actual_time)
                })
            {
                return true;
            }
        }
    }

    // Keeping `walk_data.filter_regex.is_empty()` is important for performance reasons, it stops unnecessary work
    if !walk_data.filter_regex.is_empty()
        && entry.path().is_file()
        && is_filtered_out_due_to_regex(walk_data.filter_regex, &entry.path())
    {
        return true;
    }

    if !walk_data.invert_filter_regex.is_empty()
        && entry.path().is_file()
        && is_filtered_out_due_to_invert_regex(walk_data.invert_filter_regex, &entry.path())
    {
        return true;
    }

    is_dot_file && walk_data.ignore_hidden
}

/// Recursively walks `dir`, building a `Node` tree. Directory children are
/// processed in parallel via rayon's `par_bridge`.
fn walk(dir: PathBuf, walk_data: &WalkData, depth: usize) -> Option<Node> {
    let prog_data = &walk_data.progress_data;
    let errors = &walk_data.errors;

    let children = if dir.is_dir() {
        let read_dir = fs::read_dir(&dir);
        match read_dir {
            Ok(entries) => {
                entries
                    .into_iter()
                    .par_bridge()
                    .filter_map(|entry| {
                        match entry {
                            Ok(ref entry) => {
                                // uncommenting the below line gives simpler code but
                                // rayon doesn't parallelize as well giving a 3X performance drop
                                // hence we unravel the recursion a bit

                                // return walk(entry.path(), walk_data, depth)

                                if !ignore_file(entry, walk_data) {
                                    if let Ok(data) = entry.file_type() {
                                        if data.is_dir()
                                            || (walk_data.follow_links && data.is_symlink())
                                        {
                                            return walk(entry.path(), walk_data, depth + 1);
                                        }

                                        let node = build_node(
                                            entry.path(),
                                            vec![],
                                            data.is_symlink(),
                                            data.is_file(),
                                            depth,
                                            walk_data,
                                        );

                                        prog_data.num_files.fetch_add(1, ORDERING);
                                        if let Some(ref file) = node {
                                            prog_data
                                                .total_file_size
                                                .fetch_add(file.size, ORDERING);
                                        }

                                        return node;
                                    }
                                }
                            }
                            Err(ref failed) => {
                                if handle_error_and_retry(failed, &dir, walk_data) {
                                    return walk(dir.clone(), walk_data, depth);
                                }
                            }
                        }
                        None
                    })
                    .collect()
            }
            Err(failed) => {
                if handle_error_and_retry(&failed, &dir, walk_data) {
                    return walk(dir, walk_data, depth);
                } else {
                    vec![]
                }
            }
        }
    } else {
        if !dir.is_file() {
            let mut editable_error = errors.lock().unwrap();
            let bad_file = dir.as_os_str().to_string_lossy().into();
            editable_error.file_not_found.insert(bad_file);
        }
        vec![]
    };
    let is_symlink = if walk_data.follow_links {
        match fs::symlink_metadata(&dir) {
            Ok(metadata) => metadata.file_type().is_symlink(),
            Err(_) => false,
        }
    } else {
        false
    };
    build_node(dir, children, is_symlink, false, depth, walk_data)
}

/// Records the I/O error; returns true when the caller should retry the
/// read (transient `Interrupted` errors, up to 3 times).
fn handle_error_and_retry(failed: &Error, dir: &Path, walk_data: &WalkData) -> bool {
    let mut editable_error = walk_data.errors.lock().unwrap();
    match failed.kind() {
        std::io::ErrorKind::PermissionDenied => {
            editable_error
                .no_permissions
                .insert(dir.to_string_lossy().into());
        }
        std::io::ErrorKind::InvalidInput => {
            editable_error
                .no_permissions
                .insert(dir.to_string_lossy().into());
        }
        std::io::ErrorKind::NotFound => {
            editable_error.file_not_found.insert(failed.to_string());
        }
        std::io::ErrorKind::Interrupted => {
            editable_error.interrupted_error += 1;
            if editable_error.interrupted_error > 3 {
                panic!("Multiple Interrupted Errors occurred while scanning filesystem. Aborting");
            } else {
                return true;
            }
        }
        _ => {
            editable_error.unknown_error.insert(failed.to_string());
        }
    }
    false
}

mod tests {

    #[allow(unused_imports)]
    use super::*;

    #[cfg(test)]
    fn create_node() -> Node {
        Node {
            name: PathBuf::new(),
            size: 10,
            children: vec![],
            inode_device: Some((5, 6)),
            depth: 0,
        }
    }

    #[cfg(test)]
    fn create_walker<'a>(use_apparent_size: bool) -> WalkData<'a> {
        use crate::PIndicator;
        let indicator = PIndicator::build_me();
        WalkData {
            ignore_directories: HashSet::new(),
            filter_regex: &[],
            invert_filter_regex: &[],
            allowed_filesystems: HashSet::new(),
            filter_modified_time: Some((Operator::GreaterThan, 0)),
            filter_accessed_time: Some((Operator::GreaterThan, 0)),
            filter_changed_time: Some((Operator::GreaterThan, 0)),
            use_apparent_size,
            by_filecount: false,
            by_filetime: &None,
            ignore_hidden: false,
            follow_links: false,
            progress_data: indicator.data.clone(),
            errors: Arc::new(Mutex::new(RuntimeErrors::default())),
        }
    }

    #[test]
    #[allow(clippy::redundant_clone)]
    fn test_should_ignore_file() {
        let mut inodes = HashSet::new();
        let n = create_node();
        let walkdata = create_walker(false);

        // First time we insert the node
        assert_eq!(
            clean_inodes(n.clone(), &mut inodes, &walkdata),
            Some(n.clone())
        );

        // Second time is a duplicate - we ignore it
        assert_eq!(clean_inodes(n.clone(), &mut inodes, &walkdata), None);
    }

    #[test]
    #[allow(clippy::redundant_clone)]
    fn test_should_not_ignore_files_if_using_apparent_size() {
        let mut inodes = HashSet::new();
        let n = create_node();
        let
walkdata = create_walker(true); 382 | 383 | // If using apparent size we include Nodes, even if duplicate inodes 384 | assert_eq!( 385 | clean_inodes(n.clone(), &mut inodes, &walkdata), 386 | Some(n.clone()) 387 | ); 388 | assert_eq!( 389 | clean_inodes(n.clone(), &mut inodes, &walkdata), 390 | Some(n.clone()) 391 | ); 392 | } 393 | 394 | #[test] 395 | fn test_total_ordering_of_sort_by_inode() { 396 | use std::str::FromStr; 397 | 398 | let a = Node { 399 | name: PathBuf::from_str("a").unwrap(), 400 | size: 0, 401 | children: vec![], 402 | inode_device: Some((3, 66310)), 403 | depth: 0, 404 | }; 405 | 406 | let b = Node { 407 | name: PathBuf::from_str("b").unwrap(), 408 | size: 0, 409 | children: vec![], 410 | inode_device: None, 411 | depth: 0, 412 | }; 413 | 414 | let c = Node { 415 | name: PathBuf::from_str("c").unwrap(), 416 | size: 0, 417 | children: vec![], 418 | inode_device: Some((1, 66310)), 419 | depth: 0, 420 | }; 421 | 422 | assert_eq!(sort_by_inode(&a, &b), Ordering::Greater); 423 | assert_eq!(sort_by_inode(&a, &c), Ordering::Greater); 424 | assert_eq!(sort_by_inode(&c, &b), Ordering::Greater); 425 | 426 | assert_eq!(sort_by_inode(&b, &a), Ordering::Less); 427 | assert_eq!(sort_by_inode(&c, &a), Ordering::Less); 428 | assert_eq!(sort_by_inode(&b, &c), Ordering::Less); 429 | } 430 | } 431 | -------------------------------------------------------------------------------- /src/display_node.rs: -------------------------------------------------------------------------------- 1 | use std::cell::RefCell; 2 | use std::path::PathBuf; 3 | 4 | use serde::ser::SerializeStruct; 5 | use serde::{Serialize, Serializer}; 6 | 7 | use crate::display::human_readable_number; 8 | 9 | #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)] 10 | pub struct DisplayNode { 11 | // Note: the order of fields in important here, for PartialEq and PartialOrd 12 | pub size: u64, 13 | pub name: PathBuf, 14 | pub children: Vec, 15 | } 16 | 17 | impl DisplayNode { 18 | pub fn 
num_siblings(&self) -> u64 { 19 | self.children.len() as u64 20 | } 21 | 22 | pub fn get_children_from_node(&self, is_reversed: bool) -> impl Iterator { 23 | // we box to avoid the clippy lint warning 24 | let out: Box> = if is_reversed { 25 | Box::new(self.children.iter().rev()) 26 | } else { 27 | Box::new(self.children.iter()) 28 | }; 29 | out 30 | } 31 | } 32 | 33 | // Only used for -j 'json' flag combined with -o 'output_type' flag 34 | // Used to pass the output_type into the custom Serde serializer 35 | thread_local! { 36 | pub static OUTPUT_TYPE: RefCell = const { RefCell::new(String::new()) }; 37 | } 38 | 39 | /* 40 | We need the custom Serialize incase someone uses the -o flag to pass a custom output type in 41 | (show size in Mb / Gb etc). 42 | Sadly this also necessitates a global variable OUTPUT_TYPE as we can not pass the output_type flag 43 | into the serialize method 44 | */ 45 | impl Serialize for DisplayNode { 46 | fn serialize(&self, serializer: S) -> Result 47 | where 48 | S: Serializer, 49 | { 50 | let readable_size = OUTPUT_TYPE 51 | .with(|output_type| human_readable_number(self.size, output_type.borrow().as_str())); 52 | let mut state = serializer.serialize_struct("DisplayNode", 2)?; 53 | state.serialize_field("size", &(readable_size))?; 54 | state.serialize_field("name", &self.name)?; 55 | state.serialize_field("children", &self.children)?; 56 | state.end() 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/filter.rs: -------------------------------------------------------------------------------- 1 | use stfu8::encode_u8; 2 | 3 | use crate::display::get_printable_name; 4 | use crate::display_node::DisplayNode; 5 | use crate::node::FileTime; 6 | use crate::node::Node; 7 | use std::collections::BinaryHeap; 8 | use std::collections::HashMap; 9 | use std::collections::HashSet; 10 | use std::path::Path; 11 | use std::path::PathBuf; 12 | 13 | pub struct AggregateData { 14 | pub min_size: 
Option<usize>,
    pub only_dir: bool,
    pub only_file: bool,
    pub number_of_lines: usize,
    pub depth: usize,
    pub using_a_filter: bool,
    pub short_paths: bool,
}

/// Selects the biggest entries from the walked trees and assembles them
/// into a single `DisplayNode` tree (adding a synthetic "(total)" root when
/// several top-level paths were given). Returns `None` for empty input.
pub fn get_biggest(
    top_level_nodes: Vec<Node>,
    display_data: AggregateData,
    by_filetime: &Option<FileTime>,
    keep_collapsed: HashSet<PathBuf>,
) -> Option<DisplayNode> {
    if top_level_nodes.is_empty() {
        // perhaps change this, bring back Error object?
        return None;
    }
    let mut heap = BinaryHeap::new();
    let number_top_level_nodes = top_level_nodes.len();
    let root;

    if number_top_level_nodes > 1 {
        let size = if by_filetime.is_some() {
            top_level_nodes
                .iter()
                .map(|node| node.size)
                .max()
                .unwrap_or(0)
        } else {
            top_level_nodes.iter().map(|node| node.size).sum()
        };

        let nodes = handle_duplicate_top_level_names(top_level_nodes, display_data.short_paths);

        root = Node {
            name: PathBuf::from("(total)"),
            size,
            children: nodes,
            inode_device: None,
            depth: 0,
        };

        // Always include the base nodes if we add a 'parent' (total) node
        heap = always_add_children(&display_data, &root, heap);
    } else {
        root = top_level_nodes.into_iter().next().unwrap();
        heap = add_children(&display_data, &root, heap);
    }

    Some(fill_remaining_lines(
        heap,
        &root,
        display_data,
        keep_collapsed,
    ))
}

/// Pops the largest nodes off the heap until `number_of_lines` are chosen,
/// then rebuilds a display tree containing only those nodes.
pub fn fill_remaining_lines<'a>(
    mut heap: BinaryHeap<&'a Node>,
    root: &'a Node,
    display_data: AggregateData,
    keep_collapsed: HashSet<PathBuf>,
) -> DisplayNode {
    let mut allowed_nodes = HashMap::new();

    while allowed_nodes.len() < display_data.number_of_lines {
        let line = heap.pop();
        match line {
            Some(line) => {
                // If we are not doing only_file OR if we are doing
                // only_file and it has no children (ie is a file not a dir)
                if !display_data.only_file || line.children.is_empty() {
                    allowed_nodes.insert(line.name.as_path(), line);
                }
                if !keep_collapsed.contains(&line.name) {
                    heap = add_children(&display_data, line, heap);
                }
            }
            None => break,
        }
    }

    if display_data.only_file {
        flat_rebuilder(allowed_nodes, root)
    } else {
        recursive_rebuilder(&allowed_nodes, root)
    }
}

/// Pushes children onto the heap only while within the depth limit.
fn add_children<'a>(
    display_data: &AggregateData,
    file_or_folder: &'a Node,
    heap: BinaryHeap<&'a Node>,
) -> BinaryHeap<&'a Node> {
    if display_data.depth > file_or_folder.depth {
        always_add_children(display_data, file_or_folder, heap)
    } else {
        heap
    }
}

/// Pushes children onto the heap, applying min-size and only-dir filters.
fn always_add_children<'a>(
    display_data: &AggregateData,
    file_or_folder: &'a Node,
    mut heap: BinaryHeap<&'a Node>,
) -> BinaryHeap<&'a Node> {
    heap.extend(
        file_or_folder
            .children
            .iter()
            .filter(|c| match display_data.min_size {
                Some(ms) => c.size > ms as u64,
                None => !display_data.using_a_filter || c.name.is_file() || c.size > 0,
            })
            .filter(|c| {
                if display_data.only_dir {
                    c.name.is_dir()
                } else {
                    true
                }
            }),
    );
    heap
}

// Finds children of current, if in allowed_nodes adds them as children to new DisplayNode
fn recursive_rebuilder(allowed_nodes: &HashMap<&Path, &Node>, current: &Node) -> DisplayNode {
    let new_children: Vec<_> = current
        .children
        .iter()
        .filter(|c| allowed_nodes.contains_key(c.name.as_path()))
        .map(|c| recursive_rebuilder(allowed_nodes, c))
        .collect();

    build_display_node(new_children, current)
}

// Applies all allowed nodes as children to current node
fn flat_rebuilder(allowed_nodes: HashMap<&Path, &Node>, current: &Node) -> DisplayNode {
    let new_children: Vec<DisplayNode> = allowed_nodes
        .into_values()
        .map(|v| DisplayNode {
            name: v.name.clone(),
            size: v.size,
            children: vec![],
        })
        .collect::<Vec<DisplayNode>>();
    build_display_node(new_children, current)
}

/// Sorts children largest-first and wraps them under `current`.
fn build_display_node(mut new_children: Vec<DisplayNode>, current: &Node) -> DisplayNode {
    new_children.sort_by(|lhs, rhs| lhs.cmp(rhs).reverse());
    DisplayNode {
        name: current.name.clone(),
        size: current.size,
        children: new_children,
    }
}

/// True when two top-level nodes would print with the same short name.
fn names_have_dup(top_level_nodes: &[Node]) -> bool {
    let mut stored = HashSet::new();
    for node in top_level_nodes {
        let name = get_printable_name(&node.name, true);
        if stored.contains(&name) {
            return true;
        }
        stored.insert(name);
    }
    false
}

/// Disambiguates clashing top-level names by appending successive parent
/// directory names, walking up at most 10 levels.
fn handle_duplicate_top_level_names(top_level_nodes: Vec<Node>, short_paths: bool) -> Vec<Node> {
    // If we have top level names that are the same - we need to tweak them:
    if short_paths && names_have_dup(&top_level_nodes) {
        let mut new_top_nodes = top_level_nodes.clone();
        let mut dir_walk_up_count = 0;

        while names_have_dup(&new_top_nodes) && dir_walk_up_count < 10 {
            dir_walk_up_count += 1;
            let mut newer = vec![];

            for node in new_top_nodes.iter() {
                let mut folders = node.name.iter().rev();
                // Get parent folder (if second time round get grandparent and so on)
                for _ in 0..dir_walk_up_count {
                    folders.next();
                }
                match folders.next() {
                    // Add (parent_name) to path of Node
                    Some(data) => {
                        let parent = encode_u8(data.as_encoded_bytes());
                        let current_node = node.name.display();
                        let n = Node {
                            name: PathBuf::from(format!("{current_node}({parent})")),
                            size: node.size,
                            children: node.children.clone(),
                            inode_device: node.inode_device,
                            depth: node.depth,
                        };
                        newer.push(n)
                    }
                    // Node does not have a parent
                    None => newer.push(node.clone()),
                }
            }
            new_top_nodes = newer;
        }
        new_top_nodes
    } else {
        top_level_nodes
    }
}

// -------------------- /src/filter_type.rs --------------------

use crate::display_node::DisplayNode;
use crate::node::FileTime;
use crate::node::Node;
use std::collections::HashMap;
use std::ffi::OsStr;
use std::path::PathBuf;

/// Cumulative size for one file extension; ordering is by size then name.
#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct ExtensionNode<'a> {
    size: u64,
    extension: Option<&'a OsStr>,
}

/// Aggregates all files by extension and returns the top `n` groups under a
/// "(total)" root; the remainder is folded into an "(others)" entry.
pub fn get_all_file_types(
    top_level_nodes: &[Node],
    n: usize,
    by_filetime: &Option<FileTime>,
) -> Option<DisplayNode> {
    let ext_nodes = {
        let mut extension_cumulative_sizes = HashMap::new();
        build_by_all_file_types(top_level_nodes, &mut extension_cumulative_sizes);

        let mut extension_cumulative_sizes: Vec<ExtensionNode<'_>> = extension_cumulative_sizes
            .iter()
            .map(|(&extension, &size)| ExtensionNode { extension, size })
            .collect();

        extension_cumulative_sizes.sort_by(|lhs, rhs| lhs.cmp(rhs).reverse());

        extension_cumulative_sizes
    };

    let mut ext_nodes_iter = ext_nodes.iter();

    // First, collect the first N - 1 nodes...
    let mut displayed: Vec<DisplayNode> = ext_nodes_iter
        .by_ref()
        .take(if n > 1 { n - 1 } else { 1 })
        .map(|node| DisplayNode {
            name: PathBuf::from(
                node.extension
                    .map(|ext| format!(".{}", ext.to_string_lossy()))
                    .unwrap_or_else(|| "(no extension)".to_owned()),
            ),
            size: node.size,
            children: vec![],
        })
        .collect();

    // ...then, aggregate the remaining nodes (if any) into a single "(others)" node
    if ext_nodes_iter.len() > 0 {
        let actual_size = if by_filetime.is_some() {
            ext_nodes_iter.map(|node| node.size).max().unwrap_or(0)
        } else {
            ext_nodes_iter.map(|node| node.size).sum()
        };
        displayed.push(DisplayNode {
            name: PathBuf::from("(others)"),
            size: actual_size,
            children: vec![],
        });
    }

    let actual_size: u64 = if by_filetime.is_some() {
        displayed.iter().map(|node| node.size).max().unwrap_or(0)
    } else {
        displayed.iter().map(|node| node.size).sum()
    };

    let result = DisplayNode {
        name: PathBuf::from("(total)"),
        size: actual_size,
        children: displayed,
    };

    Some(result)
}

/// Walks the tree, accumulating each file's size into its extension bucket.
fn build_by_all_file_types<'a>(
    top_level_nodes: &'a [Node],
    counter: &mut HashMap<Option<&'a OsStr>, u64>,
) {
    for node in top_level_nodes {
        if node.name.is_file() {
            let ext = node.name.extension();
            let cumulative_size = counter.entry(ext).or_default();
            *cumulative_size += node.size;
        }
        build_by_all_file_types(&node.children, counter)
    }
}

// -------------------- /src/main.rs --------------------

mod cli;
mod config;
mod dir_walker;
mod display;
mod display_node;
mod filter;
mod filter_type;
mod node;
mod platform;
mod progress;
mod utils;

use crate::cli::Cli;
use
crate::progress::RuntimeErrors; 15 | use clap::Parser; 16 | use dir_walker::WalkData; 17 | use display::InitialDisplayData; 18 | use filter::AggregateData; 19 | use progress::PIndicator; 20 | use regex::Error; 21 | use std::collections::HashSet; 22 | use std::env; 23 | use std::fs::read_to_string; 24 | use std::io; 25 | use std::panic; 26 | use std::process; 27 | use std::sync::Arc; 28 | use std::sync::Mutex; 29 | use sysinfo::{System, SystemExt}; 30 | use utils::canonicalize_absolute_path; 31 | 32 | use self::display::draw_it; 33 | use config::get_config; 34 | use dir_walker::walk_it; 35 | use display_node::OUTPUT_TYPE; 36 | use filter::get_biggest; 37 | use filter_type::get_all_file_types; 38 | use regex::Regex; 39 | use std::cmp::max; 40 | use std::path::PathBuf; 41 | use terminal_size::{Height, Width, terminal_size}; 42 | use utils::get_filesystem_devices; 43 | use utils::simplify_dir_names; 44 | 45 | static DEFAULT_NUMBER_OF_LINES: usize = 30; 46 | static DEFAULT_TERMINAL_WIDTH: usize = 80; 47 | 48 | fn should_init_color(no_color: bool, force_color: bool) -> bool { 49 | if force_color { 50 | return true; 51 | } 52 | if no_color { 53 | return false; 54 | } 55 | // check if NO_COLOR is set 56 | // https://no-color.org/ 57 | if env::var_os("NO_COLOR").is_some() { 58 | return false; 59 | } 60 | if terminal_size().is_none() { 61 | // we are not in a terminal, color may not be needed 62 | return false; 63 | } 64 | // we are in a terminal 65 | #[cfg(windows)] 66 | { 67 | // Required for windows 10 68 | // Fails to resolve for windows 8 so disable color 69 | match ansi_term::enable_ansi_support() { 70 | Ok(_) => true, 71 | Err(_) => { 72 | eprintln!("This version of Windows does not support ANSI colors"); 73 | false 74 | } 75 | } 76 | } 77 | #[cfg(not(windows))] 78 | { 79 | true 80 | } 81 | } 82 | 83 | fn get_height_of_terminal() -> usize { 84 | terminal_size() 85 | // Windows CI runners detect a terminal height of 0 86 | .map(|(_, Height(h))| max(h.into(), 
DEFAULT_NUMBER_OF_LINES))
        .unwrap_or(DEFAULT_NUMBER_OF_LINES)
        - 10
}

/// Detected terminal width, with a floor on Windows where CI runners report
/// a very low width; falls back to a default when not attached to a terminal.
fn get_width_of_terminal() -> usize {
    terminal_size()
        .map(|(Width(w), _)| match cfg!(windows) {
            // Windows CI runners detect a very low terminal width
            true => max(w.into(), DEFAULT_TERMINAL_WIDTH),
            false => w.into(),
        })
        .unwrap_or(DEFAULT_TERMINAL_WIDTH)
}

/// Compile a list of user-supplied regex patterns.
///
/// A pattern that fails to compile is fatal: we report it and exit. The old
/// message said "Ignoring bad value" which contradicted the actual behavior
/// (the process exits); the message now matches what happens.
fn get_regex_value(maybe_value: Option<&Vec<String>>) -> Vec<Regex> {
    maybe_value
        .unwrap_or(&Vec::new())
        .iter()
        .map(|reg| {
            Regex::new(reg).unwrap_or_else(|err| {
                eprintln!("Invalid regex {err:?}, exiting");
                process::exit(1)
            })
        })
        .collect()
}

fn main() {
    let options = Cli::parse();
    let config = get_config(options.config.as_ref());

    let errors = RuntimeErrors::default();
    let error_listen_for_ctrlc = Arc::new(Mutex::new(errors));
    let errors_for_rayon = error_listen_for_ctrlc.clone();

    ctrlc::set_handler(move || {
        println!("\nAborting");
        process::exit(1);
    })
    .expect("Error setting Ctrl-C handler");

    // Targets come from --files-from ("-" means stdin), a file, positional
    // args, or default to the current directory.
    let target_dirs = match config.get_files_from(&options) {
        Some(path) => {
            if path == "-" {
                let mut targets_to_add = io::stdin()
                    .lines()
                    .map_while(Result::ok)
                    .collect::<Vec<String>>();

                if targets_to_add.is_empty() {
                    eprintln!("No input provided, defaulting to current directory");
                    targets_to_add.push(".".to_owned());
                }
                targets_to_add
            } else {
                // read file
                match read_to_string(path) {
                    Ok(file_content) => file_content.lines().map(|x| x.to_string()).collect(),
                    Err(e) => {
                        eprintln!("Error reading file: {e}");
                        vec![".".to_owned()]
                    }
                }
            }
        }
        None => match options.params {
            Some(ref values) => values.clone(),
            None => vec![".".to_owned()],
        },
    };

    let summarize_file_types
= options.file_types;

    let filter_regexs = get_regex_value(options.filter.as_ref());
    let invert_filter_regexs = get_regex_value(options.invert_filter.as_ref());

    // Explicit --terminal-width wins over the detected width.
    let terminal_width: usize = match options.terminal_width {
        Some(val) => val,
        None => get_width_of_terminal(),
    };

    let depth = config.get_depth(&options);

    // If depth is set, then we set the default number_of_lines to be max
    // instead of screen height
    let number_of_lines = match options.number_of_lines {
        Some(val) => val,
        None => {
            if depth != usize::MAX {
                usize::MAX
            } else {
                get_height_of_terminal()
            }
        }
    };

    let is_colors = should_init_color(
        config.get_no_colors(&options),
        config.get_force_colors(&options),
    );

    let ignore_directories = match options.ignore_directory {
        Some(ref values) => values
            .iter()
            .map(PathBuf::from)
            .map(canonicalize_absolute_path)
            .collect::<Vec<PathBuf>>(),
        None => vec![],
    };

    // Extra ignore patterns read from a file. A missing/unreadable file used
    // to hit a raw `unwrap` panic; report it cleanly and exit instead.
    let ignore_from_file_result = match options.ignore_all_in_file {
        Some(ref val) => match read_to_string(val) {
            Ok(content) => content
                .lines()
                .map(Regex::new)
                .collect::<Vec<Result<Regex, Error>>>(),
            Err(e) => {
                eprintln!("Error reading ignore file: {e}");
                process::exit(1)
            }
        },
        None => vec![],
    };
    // Lines that are not valid regexes are silently dropped (original behavior).
    let ignore_from_file = ignore_from_file_result
        .into_iter()
        .filter_map(|x| x.ok())
        .collect::<Vec<Regex>>();

    let invert_filter_regexs = invert_filter_regexs
        .into_iter()
        .chain(ignore_from_file)
        .collect::<Vec<Regex>>();

    let by_filecount = options.filecount;
    let by_filetime = config.get_filetime(&options);
    let limit_filesystem = options.limit_filesystem;
    let follow_links = options.dereference_links;

    // When -x/--limit-filesystem is set, record the device ids of the target
    // filesystems so the walker can refuse to cross mount points.
    let allowed_filesystems = limit_filesystem
        .then(|| get_filesystem_devices(&target_dirs, follow_links))
        .unwrap_or_default();
    let simplified_dirs = simplify_dir_names(&target_dirs);

let ignored_full_path: HashSet = ignore_directories 227 | .into_iter() 228 | .flat_map(|x| simplified_dirs.iter().map(move |d| d.join(&x))) 229 | .collect(); 230 | 231 | let output_format = config.get_output_format(&options); 232 | 233 | let ignore_hidden = config.get_ignore_hidden(&options); 234 | 235 | let mut indicator = PIndicator::build_me(); 236 | if !config.get_disable_progress(&options) { 237 | indicator.spawn(output_format.clone()) 238 | } 239 | 240 | let keep_collapsed: HashSet = match options.collapse { 241 | Some(ref collapse) => { 242 | let mut combined_dirs = HashSet::new(); 243 | for collapse_dir in collapse { 244 | for target_dir in target_dirs.iter() { 245 | combined_dirs.insert(PathBuf::from(target_dir).join(collapse_dir)); 246 | } 247 | } 248 | combined_dirs 249 | } 250 | None => HashSet::new(), 251 | }; 252 | 253 | let filter_modified_time = config.get_modified_time_operator(&options); 254 | let filter_accessed_time = config.get_accessed_time_operator(&options); 255 | let filter_changed_time = config.get_changed_time_operator(&options); 256 | 257 | let walk_data = WalkData { 258 | ignore_directories: ignored_full_path, 259 | filter_regex: &filter_regexs, 260 | invert_filter_regex: &invert_filter_regexs, 261 | allowed_filesystems, 262 | filter_modified_time, 263 | filter_accessed_time, 264 | filter_changed_time, 265 | use_apparent_size: config.get_apparent_size(&options), 266 | by_filecount, 267 | by_filetime: &by_filetime, 268 | ignore_hidden, 269 | follow_links, 270 | progress_data: indicator.data.clone(), 271 | errors: errors_for_rayon, 272 | }; 273 | let threads_to_use = config.get_threads(&options); 274 | let stack_size = config.get_custom_stack_size(&options); 275 | init_rayon(&stack_size, &threads_to_use); 276 | 277 | let top_level_nodes = walk_it(simplified_dirs, &walk_data); 278 | 279 | let tree = match summarize_file_types { 280 | true => get_all_file_types(&top_level_nodes, number_of_lines, &by_filetime), 281 | false => { 282 | let 
agg_data = AggregateData { 283 | min_size: config.get_min_size(&options), 284 | only_dir: config.get_only_dir(&options), 285 | only_file: config.get_only_file(&options), 286 | number_of_lines, 287 | depth, 288 | using_a_filter: !filter_regexs.is_empty() || !invert_filter_regexs.is_empty(), 289 | short_paths: !config.get_full_paths(&options), 290 | }; 291 | get_biggest(top_level_nodes, agg_data, &by_filetime, keep_collapsed) 292 | } 293 | }; 294 | 295 | // Must have stopped indicator before we print to stderr 296 | indicator.stop(); 297 | 298 | let print_errors = config.get_print_errors(&options); 299 | print_any_errors(print_errors, walk_data.errors); 300 | 301 | if let Some(root_node) = tree { 302 | if config.get_output_json(&options) { 303 | OUTPUT_TYPE.with(|wrapped| { 304 | wrapped.replace(output_format); 305 | }); 306 | println!("{}", serde_json::to_string(&root_node).unwrap()); 307 | } else { 308 | let idd = InitialDisplayData { 309 | short_paths: !config.get_full_paths(&options), 310 | is_reversed: !config.get_reverse(&options), 311 | colors_on: is_colors, 312 | by_filecount, 313 | by_filetime, 314 | is_screen_reader: config.get_screen_reader(&options), 315 | output_format, 316 | bars_on_right: config.get_bars_on_right(&options), 317 | }; 318 | 319 | draw_it( 320 | idd, 321 | config.get_no_bars(&options), 322 | terminal_width, 323 | &root_node, 324 | config.get_skip_total(&options), 325 | ) 326 | } 327 | } 328 | } 329 | 330 | fn print_any_errors(print_errors: bool, errors: Arc>) { 331 | let final_errors = errors.lock().unwrap(); 332 | if !final_errors.file_not_found.is_empty() { 333 | let err = final_errors 334 | .file_not_found 335 | .iter() 336 | .map(|a| a.as_ref()) 337 | .collect::>() 338 | .join(", "); 339 | eprintln!("No such file or directory: {}", err); 340 | } 341 | if !final_errors.no_permissions.is_empty() { 342 | if print_errors { 343 | let err = final_errors 344 | .no_permissions 345 | .iter() 346 | .map(|a| a.as_ref()) 347 | .collect::>() 348 | 
.join(", ");
            eprintln!("Did not have permissions for directories: {}", err);
        } else {
            eprintln!(
                "Did not have permissions for all directories (add --print-errors to see errors)"
            );
        }
    }
    if !final_errors.unknown_error.is_empty() {
        let err = final_errors
            .unknown_error
            .iter()
            .map(|a| a.as_ref())
            .collect::<Vec<&str>>()
            .join(", ");
        eprintln!("Unknown Error: {}", err);
    }
}

/// Initialise the global rayon thread pool (64-bit targets only).
fn init_rayon(stack_size: &Option<usize>, threads: &Option<usize>) {
    // Rayon seems to raise this error on 32-bit builds
    // The global thread pool has not been initialized.: ThreadPoolBuildError { kind: GlobalPoolAlreadyInitialized }
    if cfg!(target_pointer_width = "64") {
        // `catch_unwind` guards against panics inside rayon, but
        // `build_thread_pool` can also fail by *returning* `Err` — the
        // previous code only checked the panic case and silently discarded
        // a `ThreadPoolBuildError`. Treat both as initialization failure.
        match panic::catch_unwind(|| build_thread_pool(*stack_size, *threads)) {
            Ok(Ok(())) => {}
            _ => eprintln!("Problem initializing rayon, try: export RAYON_NUM_THREADS=1"),
        }
    }
}

/// Configure the global rayon pool, honouring an explicit stack size and
/// thread count when provided.
fn build_thread_pool(
    stack: Option<usize>,
    threads: Option<usize>,
) -> Result<(), rayon::ThreadPoolBuildError> {
    let mut pool = rayon::ThreadPoolBuilder::new();

    if let Some(thread_count) = threads {
        pool = pool.num_threads(thread_count);
    }

    let stack_size = match stack {
        Some(s) => Some(s),
        None => {
            // Use a larger (1 GiB) worker stack when there is enough free
            // memory, to survive very deeply nested directory trees.
            let large_stack = usize::pow(1024, 3);
            let mut s = System::new();
            s.refresh_memory();
            let available = s.available_memory();
            // usize -> u64 conversion cannot fail on the 64-bit targets
            // this code runs on (see `init_rayon`).
            if available > large_stack.try_into().unwrap() {
                Some(large_stack)
            } else {
                None
            }
        }
    };
    if let Some(stack_size_param) = stack_size {
        pool = pool.stack_size(stack_size_param);
    }
    pool.build_global()
}

// -------------------- /src/node.rs:
// --------------------------------------------------------------------------
use crate::dir_walker::WalkData;
use crate::platform::get_metadata;
use crate::utils::is_filtered_out_due_to_file_time;
use crate::utils::is_filtered_out_due_to_invert_regex;
use crate::utils::is_filtered_out_due_to_regex;

use std::cmp::Ordering;
use std::path::PathBuf;

/// A single entry in the walked directory tree.
/// NOTE: equality deliberately ignores `inode_device` and `depth` — see the
/// manual `PartialEq` impl below.
#[derive(Debug, Eq, Clone)]
pub struct Node {
    pub name: PathBuf,
    pub size: u64,
    pub children: Vec<Node>,
    pub inode_device: Option<(u64, u64)>,
    pub depth: usize,
}

/// Which file timestamp `--filetime` mode reports.
#[derive(Debug, PartialEq)]
pub enum FileTime {
    Modified,
    Accessed,
    Changed,
}

impl From<crate::cli::FileTime> for FileTime {
    fn from(time: crate::cli::FileTime) -> Self {
        match time {
            crate::cli::FileTime::Modified => Self::Modified,
            crate::cli::FileTime::Accessed => Self::Accessed,
            crate::cli::FileTime::Changed => Self::Changed,
        }
    }
}

/// Build a `Node` for `dir`, deciding its "size" according to the active
/// mode: 0 when filtered out, 1 in file-count mode, a timestamp in
/// file-time mode, otherwise the (apparent or allocated) byte size.
/// Returns `None` when metadata cannot be read.
#[allow(clippy::too_many_arguments)]
pub fn build_node(
    dir: PathBuf,
    children: Vec<Node>,
    is_symlink: bool,
    is_file: bool,
    depth: usize,
    walk_data: &WalkData,
) -> Option<Node> {
    let use_apparent_size = walk_data.use_apparent_size;
    let by_filecount = walk_data.by_filecount;
    let by_filetime = &walk_data.by_filetime;

    get_metadata(
        &dir,
        use_apparent_size,
        walk_data.follow_links && is_symlink,
    )
    .map(|data| {
        let inode_device = data.1;

        let size = if is_filtered_out_due_to_regex(walk_data.filter_regex, &dir)
            || is_filtered_out_due_to_invert_regex(walk_data.invert_filter_regex, &dir)
            || by_filecount && !is_file
            || [
                (&walk_data.filter_modified_time, data.2.0),
                (&walk_data.filter_accessed_time, data.2.1),
                (&walk_data.filter_changed_time, data.2.2),
            ]
            .iter()
            .any(|(filter_time, actual_time)| {
                is_filtered_out_due_to_file_time(filter_time, *actual_time)
            }) {
            0
        } else if by_filecount {
            1
        } else if let Some(file_time) = by_filetime {
            // `if let` replaces the previous `is_some()` check + inner match
            // that needed an unreachable `None` arm.
            match file_time {
                FileTime::Modified => data.2.0.unsigned_abs(),
                FileTime::Accessed => data.2.1.unsigned_abs(),
                FileTime::Changed => data.2.2.unsigned_abs(),
            }
        } else {
            data.0
        };

        Node {
            name: dir,
            size,
            children,
            inode_device,
            depth,
        }
    })
}

impl PartialEq for Node {
    // inode_device/depth intentionally excluded from equality.
    fn eq(&self, other: &Self) -> bool {
        self.name == other.name && self.size == other.size && self.children == other.children
    }
}

impl Ord for Node {
    fn cmp(&self, other: &Self) -> Ordering {
        self.size
            .cmp(&other.size)
            .then_with(|| self.name.cmp(&other.name))
            .then_with(|| self.children.cmp(&other.children))
    }
}

impl PartialOrd for Node {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

// -------------------- /src/platform.rs --------------------

#[allow(unused_imports)]
use std::fs;

use std::path::Path;

#[cfg(target_family = "unix")]
fn get_block_size() -> u64 {
    // All os specific implementations of MetadataExt seem to define a block as 512 bytes
    // https://doc.rust-lang.org/std/os/linux/fs/trait.MetadataExt.html#tymethod.st_blocks
    512
}

type InodeAndDevice = (u64, u64);
type FileTime = (i64, i64, i64);

/// Returns (size, inode/device, (mtime, atime, ctime)) for `path`, or `None`
/// when metadata cannot be read.
#[cfg(target_family = "unix")]
pub fn get_metadata<P: AsRef<Path>>(
    path: P,
    use_apparent_size: bool,
    follow_links: bool,
) -> Option<(u64, Option<InodeAndDevice>, FileTime)> {
    use std::os::unix::fs::MetadataExt;
    let metadata = if follow_links {
        path.as_ref().metadata()
    } else {
        path.as_ref().symlink_metadata()
    };
match metadata { 29 | Ok(md) => { 30 | let file_size = md.len(); 31 | if use_apparent_size { 32 | Some(( 33 | file_size, 34 | Some((md.ino(), md.dev())), 35 | (md.mtime(), md.atime(), md.ctime()), 36 | )) 37 | } else { 38 | // On NTFS mounts, the reported block count can be unexpectedly large. 39 | // To avoid overestimating disk usage, cap the allocated size to what the 40 | // file should occupy based on the file system I/O block size (blksize). 41 | // Related: https://github.com/bootandy/dust/issues/295 42 | let blksize = md.blksize(); 43 | let target_size = file_size.div_ceil(blksize) * blksize; 44 | let reported_size = md.blocks() * get_block_size(); 45 | 46 | // File systems can pre-allocate more space for a file than what would be necessary 47 | let pre_allocation_buffer = blksize * 65536; 48 | let max_size = target_size + pre_allocation_buffer; 49 | let allocated_size = if reported_size > max_size { 50 | target_size 51 | } else { 52 | reported_size 53 | }; 54 | Some(( 55 | allocated_size, 56 | Some((md.ino(), md.dev())), 57 | (md.mtime(), md.atime(), md.ctime()), 58 | )) 59 | } 60 | } 61 | Err(_e) => None, 62 | } 63 | } 64 | 65 | #[cfg(target_family = "windows")] 66 | pub fn get_metadata>( 67 | path: P, 68 | use_apparent_size: bool, 69 | follow_links: bool, 70 | ) -> Option<(u64, Option, FileTime)> { 71 | // On windows opening the file to get size, file ID and volume can be very 72 | // expensive because 1) it causes a few system calls, and more importantly 2) it can cause 73 | // windows defender to scan the file. 74 | // Therefore we try to avoid doing that for common cases, mainly those of 75 | // plain files: 76 | 77 | // The idea is to make do with the file size that we get from the OS for 78 | // free as part of iterating a folder. Therefore we want to make sure that 79 | // it makes sense to use that free size information: 80 | 81 | // Volume boundaries: 82 | // The user can ask us not to cross volume boundaries. 
If the DirEntry is a 83 | // plain file and not a reparse point or other non-trivial stuff, we assume 84 | // that the file is located on the same volume as the directory that 85 | // contains it. 86 | 87 | // File ID: 88 | // This optimization does deprive us of access to a file ID. As a 89 | // workaround, we just make one up that hopefully does not collide with real 90 | // file IDs. 91 | // Hard links: Unresolved. We don't get inode/file index, so hard links 92 | // count once for each link. Hopefully they are not too commonly in use on 93 | // windows. 94 | 95 | // Size: 96 | // We assume (naively?) that for the common cases the free size info is the 97 | // same as one would get by doing the expensive thing. Sparse, encrypted and 98 | // compressed files are not included in the common cases, as one can imagine 99 | // there being more than one view on their size. 100 | 101 | // Savings in orders of magnitude in terms of time, io and cpu have been 102 | // observed on hdd, windows 10, some 100Ks files taking up some hundreds of 103 | // GBs: 104 | // Consistently opening the file: 30 minutes. 105 | // With this optimization: 8 sec.
106 | 107 | use std::io; 108 | use winapi_util::Handle; 109 | fn handle_from_path_limited(path: &Path) -> io::Result { 110 | use std::fs::OpenOptions; 111 | use std::os::windows::fs::OpenOptionsExt; 112 | const FILE_READ_ATTRIBUTES: u32 = 0x0080; 113 | 114 | // So, it seems that it does does have to be that expensive to open 115 | // files to get their info: Avoiding opening the file with the full 116 | // GENERIC_READ is key: 117 | 118 | // https://docs.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights: 119 | // "For example, a Windows file object maps the GENERIC_READ bit to the 120 | // READ_CONTROL and SYNCHRONIZE standard access rights and to the 121 | // FILE_READ_DATA, FILE_READ_EA, and FILE_READ_ATTRIBUTES 122 | // object-specific access rights" 123 | 124 | // The flag FILE_READ_DATA seems to be the expensive one, so we'll avoid 125 | // that, and a most of the other ones. Simply because it seems that we 126 | // don't need them. 127 | 128 | let file = OpenOptions::new() 129 | .access_mode(FILE_READ_ATTRIBUTES) 130 | .open(path)?; 131 | Ok(Handle::from_file(file)) 132 | } 133 | 134 | fn get_metadata_expensive( 135 | path: &Path, 136 | use_apparent_size: bool, 137 | ) -> Option<(u64, Option, FileTime)> { 138 | use winapi_util::file::information; 139 | 140 | let h = handle_from_path_limited(path).ok()?; 141 | let info = information(&h).ok()?; 142 | 143 | if use_apparent_size { 144 | use filesize::PathExt; 145 | Some(( 146 | path.size_on_disk().ok()?, 147 | Some((info.file_index(), info.volume_serial_number())), 148 | ( 149 | info.last_write_time().unwrap() as i64, 150 | info.last_access_time().unwrap() as i64, 151 | info.creation_time().unwrap() as i64, 152 | ), 153 | )) 154 | } else { 155 | Some(( 156 | info.file_size(), 157 | Some((info.file_index(), info.volume_serial_number())), 158 | ( 159 | info.last_write_time().unwrap() as i64, 160 | info.last_access_time().unwrap() as i64, 161 | info.creation_time().unwrap() as i64, 162 | ), 163 | )) 
164 | } 165 | } 166 | 167 | use std::os::windows::fs::MetadataExt; 168 | let path = path.as_ref(); 169 | let metadata = if follow_links { 170 | path.metadata() 171 | } else { 172 | path.symlink_metadata() 173 | }; 174 | match metadata { 175 | Ok(ref md) => { 176 | const FILE_ATTRIBUTE_ARCHIVE: u32 = 0x20; 177 | const FILE_ATTRIBUTE_READONLY: u32 = 0x01; 178 | const FILE_ATTRIBUTE_HIDDEN: u32 = 0x02; 179 | const FILE_ATTRIBUTE_SYSTEM: u32 = 0x04; 180 | const FILE_ATTRIBUTE_NORMAL: u32 = 0x80; 181 | const FILE_ATTRIBUTE_DIRECTORY: u32 = 0x10; 182 | const FILE_ATTRIBUTE_SPARSE_FILE: u32 = 0x00000200; 183 | const FILE_ATTRIBUTE_PINNED: u32 = 0x00080000; 184 | const FILE_ATTRIBUTE_UNPINNED: u32 = 0x00100000; 185 | const FILE_ATTRIBUTE_RECALL_ON_OPEN: u32 = 0x00040000; 186 | const FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS: u32 = 0x00400000; 187 | const FILE_ATTRIBUTE_OFFLINE: u32 = 0x00001000; 188 | // normally FILE_ATTRIBUTE_SPARSE_FILE would be enough, however Windows sometimes likes to mask it out. 
see: https://stackoverflow.com/q/54560454 189 | const IS_PROBABLY_ONEDRIVE: u32 = FILE_ATTRIBUTE_SPARSE_FILE 190 | | FILE_ATTRIBUTE_PINNED 191 | | FILE_ATTRIBUTE_UNPINNED 192 | | FILE_ATTRIBUTE_RECALL_ON_OPEN 193 | | FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS 194 | | FILE_ATTRIBUTE_OFFLINE; 195 | let attr_filtered = md.file_attributes() 196 | & !(FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_READONLY | FILE_ATTRIBUTE_SYSTEM); 197 | if ((attr_filtered & FILE_ATTRIBUTE_ARCHIVE) != 0 198 | || (attr_filtered & FILE_ATTRIBUTE_DIRECTORY) != 0 199 | || md.file_attributes() == FILE_ATTRIBUTE_NORMAL) 200 | && !((attr_filtered & IS_PROBABLY_ONEDRIVE != 0) && use_apparent_size) 201 | { 202 | Some(( 203 | md.len(), 204 | None, 205 | ( 206 | md.last_write_time() as i64, 207 | md.last_access_time() as i64, 208 | md.creation_time() as i64, 209 | ), 210 | )) 211 | } else { 212 | get_metadata_expensive(path, use_apparent_size) 213 | } 214 | } 215 | _ => get_metadata_expensive(path, use_apparent_size), 216 | } 217 | } 218 | -------------------------------------------------------------------------------- /src/progress.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::HashSet, 3 | io::Write, 4 | path::Path, 5 | sync::{ 6 | Arc, RwLock, 7 | atomic::{AtomicU8, AtomicUsize, Ordering}, 8 | mpsc::{self, RecvTimeoutError, Sender}, 9 | }, 10 | thread::JoinHandle, 11 | time::Duration, 12 | }; 13 | 14 | #[cfg(not(target_has_atomic = "64"))] 15 | use portable_atomic::AtomicU64; 16 | #[cfg(target_has_atomic = "64")] 17 | use std::sync::atomic::AtomicU64; 18 | 19 | use crate::display::human_readable_number; 20 | 21 | /* -------------------------------------------------------------------------- */ 22 | 23 | pub const ORDERING: Ordering = Ordering::Relaxed; 24 | 25 | const SPINNER_SLEEP_TIME: u64 = 100; 26 | const PROGRESS_CHARS: [char; 4] = ['-', '\\', '|', '/']; 27 | const PROGRESS_CHARS_LEN: usize = PROGRESS_CHARS.len(); 28 | 29 | pub trait 
ThreadSyncTrait { 30 | fn set(&self, val: T); 31 | fn get(&self) -> T; 32 | } 33 | 34 | #[derive(Default)] 35 | pub struct ThreadStringWrapper { 36 | inner: RwLock, 37 | } 38 | 39 | impl ThreadSyncTrait for ThreadStringWrapper { 40 | fn set(&self, val: String) { 41 | *self.inner.write().unwrap() = val; 42 | } 43 | 44 | fn get(&self) -> String { 45 | (*self.inner.read().unwrap()).clone() 46 | } 47 | } 48 | 49 | /* -------------------------------------------------------------------------- */ 50 | 51 | // creating an enum this way allows to have simpler syntax compared to a Mutex or a RwLock 52 | #[allow(non_snake_case)] 53 | pub mod Operation { 54 | pub const INDEXING: u8 = 0; 55 | pub const PREPARING: u8 = 1; 56 | } 57 | 58 | #[derive(Default)] 59 | pub struct PAtomicInfo { 60 | pub num_files: AtomicUsize, 61 | pub total_file_size: AtomicU64, 62 | pub state: AtomicU8, 63 | pub current_path: ThreadStringWrapper, 64 | } 65 | 66 | impl PAtomicInfo { 67 | pub fn clear_state(&self, dir: &Path) { 68 | self.state.store(Operation::INDEXING, ORDERING); 69 | let dir_name = dir.to_string_lossy().to_string(); 70 | self.current_path.set(dir_name); 71 | self.total_file_size.store(0, ORDERING); 72 | self.num_files.store(0, ORDERING); 73 | } 74 | } 75 | 76 | #[derive(Default)] 77 | pub struct RuntimeErrors { 78 | pub no_permissions: HashSet, 79 | pub file_not_found: HashSet, 80 | pub unknown_error: HashSet, 81 | pub interrupted_error: i32, 82 | } 83 | 84 | /* -------------------------------------------------------------------------- */ 85 | 86 | fn format_preparing_str(prog_char: char, data: &PAtomicInfo, output_display: &str) -> String { 87 | let path_in = data.current_path.get(); 88 | let size = human_readable_number(data.total_file_size.load(ORDERING), output_display); 89 | format!("Preparing: {path_in} {size} ... 
{prog_char}") 90 | } 91 | 92 | fn format_indexing_str(prog_char: char, data: &PAtomicInfo, output_display: &str) -> String { 93 | let path_in = data.current_path.get(); 94 | let file_count = data.num_files.load(ORDERING); 95 | let size = human_readable_number(data.total_file_size.load(ORDERING), output_display); 96 | let file_str = format!("{file_count} files, {size}"); 97 | format!("Indexing: {path_in} {file_str} ... {prog_char}") 98 | } 99 | 100 | pub struct PIndicator { 101 | pub thread: Option<(Sender<()>, JoinHandle<()>)>, 102 | pub data: Arc, 103 | } 104 | 105 | impl PIndicator { 106 | pub fn build_me() -> Self { 107 | Self { 108 | thread: None, 109 | data: Arc::new(PAtomicInfo { 110 | ..Default::default() 111 | }), 112 | } 113 | } 114 | 115 | pub fn spawn(&mut self, output_display: String) { 116 | let data = self.data.clone(); 117 | let (stop_handler, receiver) = mpsc::channel::<()>(); 118 | 119 | let time_info_thread = std::thread::spawn(move || { 120 | let mut progress_char_i: usize = 0; 121 | let mut stderr = std::io::stderr(); 122 | let mut msg = "".to_string(); 123 | 124 | // While the timeout triggers we go round the loop 125 | // If we disconnect or the sender sends its message we exit the while loop 126 | while let Err(RecvTimeoutError::Timeout) = 127 | receiver.recv_timeout(Duration::from_millis(SPINNER_SLEEP_TIME)) 128 | { 129 | // Clear the text written by 'write!'& Return at the start of line 130 | let clear = format!("\r{:width$}", " ", width = msg.len()); 131 | write!(stderr, "{clear}").unwrap(); 132 | let prog_char = PROGRESS_CHARS[progress_char_i]; 133 | 134 | msg = match data.state.load(ORDERING) { 135 | Operation::INDEXING => format_indexing_str(prog_char, &data, &output_display), 136 | Operation::PREPARING => format_preparing_str(prog_char, &data, &output_display), 137 | _ => panic!("Unknown State"), 138 | }; 139 | 140 | write!(stderr, "\r{msg}").unwrap(); 141 | stderr.flush().unwrap(); 142 | 143 | progress_char_i += 1; 144 | 
progress_char_i %= PROGRESS_CHARS_LEN; 145 | } 146 | 147 | let clear = format!("\r{:width$}", " ", width = msg.len()); 148 | write!(stderr, "{clear}").unwrap(); 149 | write!(stderr, "\r").unwrap(); 150 | stderr.flush().unwrap(); 151 | }); 152 | self.thread = Some((stop_handler, time_info_thread)) 153 | } 154 | 155 | pub fn stop(self) { 156 | if let Some((stop_handler, thread)) = self.thread { 157 | stop_handler.send(()).unwrap(); 158 | thread.join().unwrap(); 159 | } 160 | } 161 | } 162 | -------------------------------------------------------------------------------- /src/utils.rs: -------------------------------------------------------------------------------- 1 | use platform::get_metadata; 2 | use std::collections::HashSet; 3 | use std::path::{Path, PathBuf}; 4 | 5 | use crate::config::DAY_SECONDS; 6 | 7 | use crate::dir_walker::Operator; 8 | use crate::platform; 9 | use regex::Regex; 10 | 11 | pub fn simplify_dir_names>(dirs: &[P]) -> HashSet { 12 | let mut top_level_names: HashSet = HashSet::with_capacity(dirs.len()); 13 | 14 | for t in dirs { 15 | let top_level_name = normalize_path(t); 16 | let mut can_add = true; 17 | let mut to_remove: Vec = Vec::new(); 18 | 19 | for tt in top_level_names.iter() { 20 | if is_a_parent_of(&top_level_name, tt) { 21 | to_remove.push(tt.to_path_buf()); 22 | } else if is_a_parent_of(tt, &top_level_name) { 23 | can_add = false; 24 | } 25 | } 26 | for r in to_remove { 27 | top_level_names.remove(&r); 28 | } 29 | if can_add { 30 | top_level_names.insert(top_level_name); 31 | } 32 | } 33 | 34 | top_level_names 35 | } 36 | 37 | pub fn get_filesystem_devices>(paths: &[P], follow_links: bool) -> HashSet { 38 | use std::fs; 39 | // Gets the device ids for the filesystems which are used by the argument paths 40 | paths 41 | .iter() 42 | .filter_map(|p| { 43 | let follow_links = if follow_links { 44 | // slow path: If dereference-links is set, then we check if the file is a symbolic link 45 | match fs::symlink_metadata(p) { 46 | 
Ok(metadata) => metadata.file_type().is_symlink(), 47 | Err(_) => false, 48 | } 49 | } else { 50 | false 51 | }; 52 | match get_metadata(p, false, follow_links) { 53 | Some((_size, Some((_id, dev)), _time)) => Some(dev), 54 | _ => None, 55 | } 56 | }) 57 | .collect() 58 | } 59 | 60 | pub fn normalize_path>(path: P) -> PathBuf { 61 | // normalize path ... 62 | // 1. removing repeated separators 63 | // 2. removing interior '.' ("current directory") path segments 64 | // 3. removing trailing extra separators and '.' ("current directory") path segments 65 | // * `Path.components()` does all the above work; ref: 66 | // 4. changing to os preferred separator (automatically done by recollecting components back into a PathBuf) 67 | path.as_ref().components().collect() 68 | } 69 | 70 | // Canonicalize the path only if it is an absolute path 71 | pub fn canonicalize_absolute_path(path: PathBuf) -> PathBuf { 72 | if !path.is_absolute() { 73 | return path; 74 | } 75 | match std::fs::canonicalize(&path) { 76 | Ok(canonicalized_path) => canonicalized_path, 77 | Err(_) => path, 78 | } 79 | } 80 | 81 | pub fn is_filtered_out_due_to_regex(filter_regex: &[Regex], dir: &Path) -> bool { 82 | if filter_regex.is_empty() { 83 | false 84 | } else { 85 | filter_regex 86 | .iter() 87 | .all(|f| !f.is_match(&dir.as_os_str().to_string_lossy())) 88 | } 89 | } 90 | 91 | pub fn is_filtered_out_due_to_file_time( 92 | filter_time: &Option<(Operator, i64)>, 93 | actual_time: i64, 94 | ) -> bool { 95 | match filter_time { 96 | None => false, 97 | Some((Operator::Equal, bound_time)) => { 98 | !(actual_time >= *bound_time && actual_time < *bound_time + DAY_SECONDS) 99 | } 100 | Some((Operator::GreaterThan, bound_time)) => actual_time < *bound_time, 101 | Some((Operator::LessThan, bound_time)) => actual_time > *bound_time, 102 | } 103 | } 104 | 105 | pub fn is_filtered_out_due_to_invert_regex(filter_regex: &[Regex], dir: &Path) -> bool { 106 | filter_regex 107 | .iter() 108 | .any(|f| 
f.is_match(&dir.as_os_str().to_string_lossy())) 109 | } 110 | 111 | fn is_a_parent_of>(parent: P, child: P) -> bool { 112 | let parent = parent.as_ref(); 113 | let child = child.as_ref(); 114 | child.starts_with(parent) && !parent.starts_with(child) 115 | } 116 | 117 | mod tests { 118 | #[allow(unused_imports)] 119 | use super::*; 120 | 121 | #[test] 122 | fn test_simplify_dir() { 123 | let mut correct = HashSet::new(); 124 | correct.insert(PathBuf::from("a")); 125 | assert_eq!(simplify_dir_names(&["a"]), correct); 126 | } 127 | 128 | #[test] 129 | fn test_simplify_dir_rm_subdir() { 130 | let mut correct = HashSet::new(); 131 | correct.insert(["a", "b"].iter().collect::()); 132 | assert_eq!(simplify_dir_names(&["a/b/c", "a/b", "a/b/d/f"]), correct); 133 | assert_eq!(simplify_dir_names(&["a/b", "a/b/c", "a/b/d/f"]), correct); 134 | } 135 | 136 | #[test] 137 | fn test_simplify_dir_duplicates() { 138 | let mut correct = HashSet::new(); 139 | correct.insert(["a", "b"].iter().collect::()); 140 | correct.insert(PathBuf::from("c")); 141 | assert_eq!( 142 | simplify_dir_names(&[ 143 | "a/b", 144 | "a/b//", 145 | "a/././b///", 146 | "c", 147 | "c/", 148 | "c/.", 149 | "c/././", 150 | "c/././." 
151 | ]), 152 | correct 153 | ); 154 | } 155 | #[test] 156 | fn test_simplify_dir_rm_subdir_and_not_substrings() { 157 | let mut correct = HashSet::new(); 158 | correct.insert(PathBuf::from("b")); 159 | correct.insert(["c", "a", "b"].iter().collect::()); 160 | correct.insert(["a", "b"].iter().collect::()); 161 | assert_eq!(simplify_dir_names(&["a/b", "c/a/b/", "b"]), correct); 162 | } 163 | 164 | #[test] 165 | fn test_simplify_dir_dots() { 166 | let mut correct = HashSet::new(); 167 | correct.insert(PathBuf::from("src")); 168 | assert_eq!(simplify_dir_names(&["src/."]), correct); 169 | } 170 | 171 | #[test] 172 | fn test_simplify_dir_substring_names() { 173 | let mut correct = HashSet::new(); 174 | correct.insert(PathBuf::from("src")); 175 | correct.insert(PathBuf::from("src_v2")); 176 | assert_eq!(simplify_dir_names(&["src/", "src_v2"]), correct); 177 | } 178 | 179 | #[test] 180 | fn test_is_a_parent_of() { 181 | assert!(is_a_parent_of("/usr", "/usr/andy")); 182 | assert!(is_a_parent_of("/usr", "/usr/andy/i/am/descendant")); 183 | assert!(!is_a_parent_of("/usr", "/usr/.")); 184 | assert!(!is_a_parent_of("/usr", "/usr/")); 185 | assert!(!is_a_parent_of("/usr", "/usr")); 186 | assert!(!is_a_parent_of("/usr/", "/usr")); 187 | assert!(!is_a_parent_of("/usr/andy", "/usr")); 188 | assert!(!is_a_parent_of("/usr/andy", "/usr/sibling")); 189 | assert!(!is_a_parent_of("/usr/folder", "/usr/folder_not_a_child")); 190 | } 191 | 192 | #[test] 193 | fn test_is_a_parent_of_root() { 194 | assert!(is_a_parent_of("/", "/usr/andy")); 195 | assert!(is_a_parent_of("/", "/usr")); 196 | assert!(!is_a_parent_of("/", "/")); 197 | } 198 | } 199 | -------------------------------------------------------------------------------- /tests/test_dir/many/a_file: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bootandy/dust/9b2dc4655df00696d025ee083cc3dc0c684a7a68/tests/test_dir/many/a_file 
-------------------------------------------------------------------------------- /tests/test_dir/many/hello_file: -------------------------------------------------------------------------------- 1 | hello 2 | -------------------------------------------------------------------------------- /tests/test_dir2/dir/hello: -------------------------------------------------------------------------------- 1 | hello -------------------------------------------------------------------------------- /tests/test_dir2/dir_name_clash: -------------------------------------------------------------------------------- 1 | hello -------------------------------------------------------------------------------- /tests/test_dir2/dir_substring/hello: -------------------------------------------------------------------------------- 1 | hello 2 | -------------------------------------------------------------------------------- /tests/test_dir2/long_dir_name_what_a_very_long_dir_name_what_happens_when_this_goes_over_80_characters_i_wonder: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bootandy/dust/9b2dc4655df00696d025ee083cc3dc0c684a7a68/tests/test_dir2/long_dir_name_what_a_very_long_dir_name_what_happens_when_this_goes_over_80_characters_i_wonder -------------------------------------------------------------------------------- /tests/test_dir_hidden_entries/.hidden_file: -------------------------------------------------------------------------------- 1 | something 2 | .secret 3 | -------------------------------------------------------------------------------- /tests/test_dir_hidden_entries/.secret: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bootandy/dust/9b2dc4655df00696d025ee083cc3dc0c684a7a68/tests/test_dir_hidden_entries/.secret -------------------------------------------------------------------------------- /tests/test_dir_matching/andy/dup_name/hello: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/bootandy/dust/9b2dc4655df00696d025ee083cc3dc0c684a7a68/tests/test_dir_matching/andy/dup_name/hello -------------------------------------------------------------------------------- /tests/test_dir_matching/dave/dup_name/hello: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bootandy/dust/9b2dc4655df00696d025ee083cc3dc0c684a7a68/tests/test_dir_matching/dave/dup_name/hello -------------------------------------------------------------------------------- /tests/test_dir_unicode/ラウトは難しいです!.japan: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bootandy/dust/9b2dc4655df00696d025ee083cc3dc0c684a7a68/tests/test_dir_unicode/ラウトは難しいです!.japan -------------------------------------------------------------------------------- /tests/test_dir_unicode/👩.unicode: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bootandy/dust/9b2dc4655df00696d025ee083cc3dc0c684a7a68/tests/test_dir_unicode/👩.unicode -------------------------------------------------------------------------------- /tests/test_exact_output.rs: -------------------------------------------------------------------------------- 1 | use assert_cmd::Command; 2 | use std::ffi::OsStr; 3 | use std::process::Output; 4 | use std::sync::Once; 5 | use std::{io, str}; 6 | 7 | static INIT: Once = Once::new(); 8 | static UNREADABLE_DIR_PATH: &str = "/tmp/unreadable_dir"; 9 | 10 | /** 11 | * This file contains tests that verify the exact output of the command. 
12 | * This output differs on Linux / Mac so the tests are harder to write and debug 13 | * Windows is ignored here because the results vary by host making exact testing impractical 14 | * 15 | * Despite the above problems, these tests are good as they are the closest to 'the real thing'. 16 | */ 17 | 18 | // Warning: File sizes differ on both platform and on the format of the disk. 19 | /// Copy to /tmp dir - we assume that the formatting of the /tmp partition 20 | /// is consistent. If the tests fail your /tmp filesystem probably differs 21 | fn copy_test_data(dir: &str) { 22 | // First remove the existing directory - just in case it is there and has incorrect data 23 | let last_slash = dir.rfind('/').unwrap(); 24 | let last_part_of_dir = dir.chars().skip(last_slash).collect::(); 25 | let _ = Command::new("rm") 26 | .arg("-rf") 27 | .arg("/tmp/".to_owned() + &*last_part_of_dir) 28 | .ok(); 29 | 30 | let _ = Command::new("cp") 31 | .arg("-r") 32 | .arg(dir) 33 | .arg("/tmp/") 34 | .ok() 35 | .map_err(|err| eprintln!("Error copying directory for test setup\n{:?}", err)); 36 | } 37 | 38 | fn create_unreadable_directory() -> io::Result<()> { 39 | #[cfg(unix)] 40 | { 41 | use std::fs; 42 | use std::fs::Permissions; 43 | use std::os::unix::fs::PermissionsExt; 44 | fs::create_dir_all(UNREADABLE_DIR_PATH)?; 45 | fs::set_permissions(UNREADABLE_DIR_PATH, Permissions::from_mode(0))?; 46 | } 47 | Ok(()) 48 | } 49 | 50 | fn initialize() { 51 | INIT.call_once(|| { 52 | copy_test_data("tests/test_dir"); 53 | copy_test_data("tests/test_dir2"); 54 | copy_test_data("tests/test_dir_unicode"); 55 | 56 | if let Err(e) = create_unreadable_directory() { 57 | panic!("Failed to create unreadable directory: {}", e); 58 | } 59 | }); 60 | } 61 | 62 | fn run_cmd>(command_args: &[T]) -> Output { 63 | initialize(); 64 | let mut to_run = &mut Command::cargo_bin("dust").unwrap(); 65 | for p in command_args { 66 | to_run = to_run.arg(p); 67 | } 68 | to_run.unwrap() 69 | } 70 | 71 | fn 
exact_stdout_test>(command_args: &[T], valid_stdout: Vec) { 72 | let to_run = run_cmd(command_args); 73 | 74 | let stdout_output = str::from_utf8(&to_run.stdout).unwrap().to_owned(); 75 | let will_fail = valid_stdout.iter().any(|i| stdout_output.contains(i)); 76 | if !will_fail { 77 | eprintln!( 78 | "output(stdout):\n{}\ndoes not contain any of:\n{}", 79 | stdout_output, 80 | valid_stdout.join("\n\n") 81 | ); 82 | } 83 | assert!(will_fail); 84 | } 85 | 86 | fn exact_stderr_test>(command_args: &[T], valid_stderr: String) { 87 | let to_run = run_cmd(command_args); 88 | 89 | let stderr_output = str::from_utf8(&to_run.stderr).unwrap().trim(); 90 | assert_eq!(stderr_output, valid_stderr); 91 | } 92 | 93 | // "windows" result data can vary by host (size seems to be variable by one byte); fix code vs test and re-enable 94 | #[cfg_attr(target_os = "windows", ignore)] 95 | #[test] 96 | pub fn test_main_basic() { 97 | // -c is no color mode - This makes testing much simpler 98 | exact_stdout_test(&["-c", "-B", "/tmp/test_dir/"], main_output()); 99 | } 100 | 101 | #[cfg_attr(target_os = "windows", ignore)] 102 | #[test] 103 | pub fn test_main_multi_arg() { 104 | let command_args = [ 105 | "-c", 106 | "-B", 107 | "/tmp/test_dir/many/", 108 | "/tmp/test_dir", 109 | "/tmp/test_dir", 110 | ]; 111 | exact_stdout_test(&command_args, main_output()); 112 | } 113 | 114 | fn main_output() -> Vec { 115 | // Some linux currently thought to be Manjaro, Arch 116 | // Although probably depends on how drive is formatted 117 | let mac_and_some_linux = r#" 118 | 0B ┌── a_file │░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░█ │ 0% 119 | 4.0K ├── hello_file│█████████████████████████████████████████████████ │ 100% 120 | 4.0K ┌─┴ many │█████████████████████████████████████████████████ │ 100% 121 | 4.0K ┌─┴ test_dir │█████████████████████████████████████████████████ │ 100% 122 | "# 123 | .trim() 124 | .to_string(); 125 | 126 | let ubuntu = r#" 127 | 0B ┌── a_file │ 
░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░█ │ 0% 128 | 4.0K ├── hello_file│ ░░░░░░░░░░░░░░░░█████████████████ │ 33% 129 | 8.0K ┌─┴ many │ █████████████████████████████████ │ 67% 130 | 12K ┌─┴ test_dir │█████████████████████████████████████████████████ │ 100% 131 | "# 132 | .trim() 133 | .to_string(); 134 | 135 | vec![mac_and_some_linux, ubuntu] 136 | } 137 | 138 | #[cfg_attr(target_os = "windows", ignore)] 139 | #[test] 140 | pub fn test_main_long_paths() { 141 | let command_args = ["-c", "-p", "-B", "/tmp/test_dir/"]; 142 | exact_stdout_test(&command_args, main_output_long_paths()); 143 | } 144 | 145 | fn main_output_long_paths() -> Vec { 146 | let mac_and_some_linux = r#" 147 | 0B ┌── /tmp/test_dir/many/a_file │░░░░░░░░░░░░░░░░░░░░░░░░░░░░░█ │ 0% 148 | 4.0K ├── /tmp/test_dir/many/hello_file│██████████████████████████████ │ 100% 149 | 4.0K ┌─┴ /tmp/test_dir/many │██████████████████████████████ │ 100% 150 | 4.0K ┌─┴ /tmp/test_dir │██████████████████████████████ │ 100% 151 | "# 152 | .trim() 153 | .to_string(); 154 | let ubuntu = r#" 155 | 0B ┌── /tmp/test_dir/many/a_file │ ░░░░░░░░░░░░░░░░░░░░█ │ 0% 156 | 4.0K ├── /tmp/test_dir/many/hello_file│ ░░░░░░░░░░███████████ │ 33% 157 | 8.0K ┌─┴ /tmp/test_dir/many │ █████████████████████ │ 67% 158 | 12K ┌─┴ /tmp/test_dir │██████████████████████████████ │ 100% 159 | "# 160 | .trim() 161 | .to_string(); 162 | vec![mac_and_some_linux, ubuntu] 163 | } 164 | 165 | // Check against directories and files whose names are substrings of each other 166 | #[cfg_attr(target_os = "windows", ignore)] 167 | #[test] 168 | pub fn test_substring_of_names_and_long_names() { 169 | let command_args = ["-c", "-B", "/tmp/test_dir2"]; 170 | exact_stdout_test(&command_args, no_substring_of_names_output()); 171 | } 172 | 173 | fn no_substring_of_names_output() -> Vec { 174 | let ubuntu = " 175 | 0B ┌── long_dir_name_what_a_very_long_dir_name_what_happens_when_this_goes.. 
176 | 4.0K ├── dir_name_clash 177 | 4.0K │ ┌── hello 178 | 8.0K ├─┴ dir 179 | 4.0K │ ┌── hello 180 | 8.0K ├─┴ dir_substring 181 | 24K ┌─┴ test_dir2 182 | " 183 | .trim() 184 | .into(); 185 | 186 | let mac_and_some_linux = " 187 | 0B ┌── long_dir_name_what_a_very_long_dir_name_what_happens_when_this_goes.. 188 | 4.0K │ ┌── hello 189 | 4.0K ├─┴ dir 190 | 4.0K ├── dir_name_clash 191 | 4.0K │ ┌── hello 192 | 4.0K ├─┴ dir_substring 193 | 12K ┌─┴ test_dir2 194 | " 195 | .trim() 196 | .into(); 197 | vec![mac_and_some_linux, ubuntu] 198 | } 199 | 200 | #[cfg_attr(target_os = "windows", ignore)] 201 | #[test] 202 | pub fn test_unicode_directories() { 203 | let command_args = ["-c", "-B", "/tmp/test_dir_unicode"]; 204 | exact_stdout_test(&command_args, unicode_dir()); 205 | } 206 | 207 | fn unicode_dir() -> Vec { 208 | // The way unicode & asian characters are rendered on the terminal should make this line up 209 | let ubuntu = " 210 | 0B ┌── ラウトは難しいです!.japan│ █ │ 0% 211 | 0B ├── 👩.unicode │ █ │ 0% 212 | 4.0K ┌─┴ test_dir_unicode │███████████████████████████████████ │ 100% 213 | " 214 | .trim() 215 | .into(); 216 | 217 | let mac_and_some_linux = " 218 | 0B ┌── ラウトは難しいです!.japan│ █ │ 0% 219 | 0B ├── 👩.unicode │ █ │ 0% 220 | 0B ┌─┴ test_dir_unicode │ █ │ 0% 221 | " 222 | .trim() 223 | .into(); 224 | vec![mac_and_some_linux, ubuntu] 225 | } 226 | 227 | #[cfg_attr(target_os = "windows", ignore)] 228 | #[test] 229 | pub fn test_apparent_size() { 230 | let command_args = ["-c", "-s", "-b", "/tmp/test_dir"]; 231 | exact_stdout_test(&command_args, apparent_size_output()); 232 | } 233 | 234 | fn apparent_size_output() -> Vec { 235 | // The apparent directory sizes are too unpredictable and system dependent to try and match 236 | let one_space_before = r#" 237 | 0B ┌── a_file 238 | 6B ├── hello_file 239 | "# 240 | .trim() 241 | .to_string(); 242 | 243 | let two_space_before = r#" 244 | 0B ┌── a_file 245 | 6B ├── hello_file 246 | "# 247 | .trim() 248 | .to_string(); 249 | 250 | 
vec![one_space_before, two_space_before] 251 | } 252 | 253 | #[cfg_attr(target_os = "windows", ignore)] 254 | #[test] 255 | pub fn test_permission_normal() { 256 | let command_args = [UNREADABLE_DIR_PATH]; 257 | let permission_msg = 258 | r#"Did not have permissions for all directories (add --print-errors to see errors)"# 259 | .trim() 260 | .to_string(); 261 | exact_stderr_test(&command_args, permission_msg); 262 | } 263 | 264 | #[cfg_attr(target_os = "windows", ignore)] 265 | #[test] 266 | pub fn test_permission_flag() { 267 | // add the flag to CLI 268 | let command_args = ["--print-errors", UNREADABLE_DIR_PATH]; 269 | let permission_msg = format!( 270 | "Did not have permissions for directories: {}", 271 | UNREADABLE_DIR_PATH 272 | ); 273 | exact_stderr_test(&command_args, permission_msg); 274 | } 275 | -------------------------------------------------------------------------------- /tests/test_flags.rs: -------------------------------------------------------------------------------- 1 | use assert_cmd::Command; 2 | use std::ffi::OsStr; 3 | use std::str; 4 | 5 | /** 6 | * This file contains tests that test a substring of the output using '.contains' 7 | * 8 | * These tests should be the same cross platform 9 | */ 10 | 11 | fn build_command>(command_args: Vec) -> String { 12 | let mut cmd = &mut Command::cargo_bin("dust").unwrap(); 13 | // Hide progress bar 14 | cmd = cmd.arg("-P"); 15 | 16 | for p in command_args { 17 | cmd = cmd.arg(p); 18 | } 19 | let finished = &cmd.unwrap(); 20 | let stderr = str::from_utf8(&finished.stderr).unwrap(); 21 | assert_eq!(stderr, ""); 22 | 23 | str::from_utf8(&finished.stdout).unwrap().into() 24 | } 25 | 26 | // We can at least test the file names are there 27 | #[test] 28 | pub fn test_basic_output() { 29 | let output = build_command(vec!["tests/test_dir/"]); 30 | 31 | assert!(output.contains(" ┌─┴ ")); 32 | assert!(output.contains("test_dir ")); 33 | assert!(output.contains(" ┌─┴ ")); 34 | assert!(output.contains("many ")); 35 
| assert!(output.contains(" ├── ")); 36 | assert!(output.contains("hello_file")); 37 | assert!(output.contains(" ┌── ")); 38 | assert!(output.contains("a_file ")); 39 | } 40 | 41 | #[test] 42 | pub fn test_output_no_bars_means_no_excess_spaces() { 43 | let output = build_command(vec!["-b", "tests/test_dir/"]); 44 | // If bars are not being shown we don't need to pad the output with spaces 45 | assert!(output.contains("many")); 46 | assert!(!output.contains("many ")); 47 | } 48 | 49 | #[test] 50 | pub fn test_reverse_flag() { 51 | let output = build_command(vec!["-r", "-c", "tests/test_dir/"]); 52 | assert!(output.contains(" └─┬ test_dir ")); 53 | assert!(output.contains(" └─┬ many ")); 54 | assert!(output.contains(" ├── hello_file")); 55 | assert!(output.contains(" └── a_file ")); 56 | } 57 | 58 | #[test] 59 | pub fn test_d_flag_works() { 60 | // We should see the top level directory but not the sub dirs / files: 61 | let output = build_command(vec!["-d", "1", "tests/test_dir/"]); 62 | assert!(!output.contains("hello_file")); 63 | } 64 | 65 | #[test] 66 | pub fn test_threads_flag_works() { 67 | let output = build_command(vec!["-T", "1", "tests/test_dir/"]); 68 | assert!(output.contains("hello_file")); 69 | } 70 | 71 | #[test] 72 | pub fn test_d_flag_works_and_still_recurses_down() { 73 | // We had a bug where running with '-d 1' would stop at the first directory and the code 74 | // would fail to recurse down 75 | let output = build_command(vec!["-d", "1", "-f", "-c", "tests/test_dir2/"]); 76 | assert!(output.contains("1 ┌── dir")); 77 | assert!(output.contains("4 ┌─┴ test_dir2")); 78 | } 79 | 80 | // Check against directories and files whose names are substrings of each other 81 | #[test] 82 | pub fn test_ignore_dir() { 83 | let output = build_command(vec!["-c", "-X", "dir_substring", "tests/test_dir2/"]); 84 | assert!(!output.contains("dir_substring")); 85 | } 86 | 87 | #[test] 88 | pub fn test_ignore_all_in_file() { 89 | let output = build_command(vec![ 90 | 
"-c", 91 | "-I", 92 | "tests/test_dir_hidden_entries/.hidden_file", 93 | "tests/test_dir_hidden_entries/", 94 | ]); 95 | assert!(output.contains(" test_dir_hidden_entries")); 96 | assert!(!output.contains(".secret")); 97 | } 98 | 99 | #[test] 100 | pub fn test_with_bad_param() { 101 | let mut cmd = Command::cargo_bin("dust").unwrap(); 102 | let result = cmd.arg("bad_place").unwrap(); 103 | let stderr = str::from_utf8(&result.stderr).unwrap(); 104 | assert!(stderr.contains("No such file or directory")); 105 | } 106 | 107 | #[test] 108 | pub fn test_hidden_flag() { 109 | // Check we can see the hidden file normally 110 | let output = build_command(vec!["-c", "tests/test_dir_hidden_entries/"]); 111 | assert!(output.contains(".hidden_file")); 112 | assert!(output.contains("┌─┴ test_dir_hidden_entries")); 113 | 114 | // Check that adding the '-h' flag causes us to not see hidden files 115 | let output = build_command(vec!["-c", "-i", "tests/test_dir_hidden_entries/"]); 116 | assert!(!output.contains(".hidden_file")); 117 | assert!(output.contains("┌── test_dir_hidden_entries")); 118 | } 119 | 120 | #[test] 121 | pub fn test_number_of_files() { 122 | // Check we can see the hidden file normally 123 | let output = build_command(vec!["-c", "-f", "tests/test_dir"]); 124 | assert!(output.contains("1 ┌── a_file ")); 125 | assert!(output.contains("1 ├── hello_file")); 126 | assert!(output.contains("2 ┌─┴ many")); 127 | assert!(output.contains("2 ┌─┴ test_dir")); 128 | } 129 | 130 | #[test] 131 | pub fn test_show_files_by_type() { 132 | // Check we can list files by type 133 | let output = build_command(vec!["-c", "-t", "tests"]); 134 | assert!(output.contains(" .unicode")); 135 | assert!(output.contains(" .japan")); 136 | assert!(output.contains(" .rs")); 137 | assert!(output.contains(" (no extension)")); 138 | assert!(output.contains("┌─┴ (total)")); 139 | } 140 | 141 | #[test] 142 | #[cfg(target_family = "unix")] 143 | pub fn test_show_files_only() { 144 | let output = 
build_command(vec!["-c", "-F", "tests/test_dir"]); 145 | assert!(output.contains("a_file")); 146 | assert!(output.contains("hello_file")); 147 | assert!(!output.contains("many")); 148 | } 149 | 150 | #[test] 151 | pub fn test_output_skip_total() { 152 | let output = build_command(vec![ 153 | "--skip-total", 154 | "tests/test_dir/many/hello_file", 155 | "tests/test_dir/many/a_file", 156 | ]); 157 | assert!(output.contains("hello_file")); 158 | assert!(!output.contains("(total)")); 159 | } 160 | 161 | #[test] 162 | pub fn test_output_screen_reader() { 163 | let output = build_command(vec!["--screen-reader", "-c", "tests/test_dir/"]); 164 | println!("{}", output); 165 | assert!(output.contains("test_dir 0")); 166 | assert!(output.contains("many 1")); 167 | assert!(output.contains("hello_file 2")); 168 | assert!(output.contains("a_file 2")); 169 | 170 | // Verify no 'symbols' reported by screen reader 171 | assert!(!output.contains('│')); 172 | 173 | for block in ['█', '▓', '▒', '░'] { 174 | assert!(!output.contains(block)); 175 | } 176 | } 177 | 178 | #[test] 179 | pub fn test_show_files_by_regex_match_lots() { 180 | // Check we can see '.rs' files in the tests directory 181 | let output = build_command(vec!["-c", "-e", "\\.rs$", "tests"]); 182 | assert!(output.contains(" ┌─┴ tests")); 183 | assert!(!output.contains("0B ┌── tests")); 184 | assert!(!output.contains("0B ┌─┴ tests")); 185 | } 186 | 187 | #[test] 188 | pub fn test_show_files_by_regex_match_nothing() { 189 | // Check there are no files named: '.match_nothing' in the tests directory 190 | let output = build_command(vec!["-c", "-e", "match_nothing$", "tests"]); 191 | assert!(output.contains("0B ┌── tests")); 192 | } 193 | 194 | #[test] 195 | pub fn test_show_files_by_regex_match_multiple() { 196 | let output = build_command(vec![ 197 | "-c", 198 | "-e", 199 | "test_dir_hidden", 200 | "-e", 201 | "test_dir2", 202 | "-n", 203 | "100", 204 | "tests", 205 | ]); 206 | assert!(output.contains("test_dir2")); 207 | 
assert!(output.contains("test_dir_hidden")); 208 | assert!(!output.contains("many")); // We do not find the 'many' folder in the 'test_dir' folder 209 | } 210 | 211 | #[test] 212 | pub fn test_show_files_by_invert_regex() { 213 | let output = build_command(vec!["-c", "-f", "-v", "e", "tests/test_dir2"]); 214 | // There are 0 files without 'e' in the name 215 | assert!(output.contains("0 ┌── test_dir2")); 216 | 217 | let output = build_command(vec!["-c", "-f", "-v", "a", "tests/test_dir2"]); 218 | // There are 2 files without 'a' in the name 219 | assert!(output.contains("2 ┌─┴ test_dir2")); 220 | 221 | // There are 4 files in the test_dir2 hierarchy 222 | let output = build_command(vec!["-c", "-f", "-v", "match_nothing$", "tests/test_dir2"]); 223 | assert!(output.contains("4 ┌─┴ test_dir2")); 224 | } 225 | 226 | #[test] 227 | pub fn test_show_files_by_invert_regex_match_multiple() { 228 | // We ignore test_dir2 & test_dir_unicode, leaving the test_dir folder 229 | // which has the 'many' folder inside 230 | let output = build_command(vec![ 231 | "-c", 232 | "-v", 233 | "test_dir2", 234 | "-v", 235 | "test_dir_unicode", 236 | "-n", 237 | "100", 238 | "tests", 239 | ]); 240 | assert!(!output.contains("test_dir2")); 241 | assert!(!output.contains("test_dir_unicode")); 242 | assert!(output.contains("many")); 243 | } 244 | 245 | #[test] 246 | pub fn test_no_color() { 247 | let output = build_command(vec!["-c"]); 248 | // Red is 31 249 | assert!(!output.contains("\x1B[31m")); 250 | assert!(!output.contains("\x1B[0m")); 251 | } 252 | 253 | #[test] 254 | pub fn test_force_color() { 255 | let output = build_command(vec!["-C"]); 256 | // Red is 31 257 | assert!(output.contains("\x1B[31m")); 258 | assert!(output.contains("\x1B[0m")); 259 | } 260 | 261 | #[test] 262 | pub fn test_collapse() { 263 | let output = build_command(vec!["--collapse", "many", "tests/test_dir/"]); 264 | assert!(output.contains("many")); 265 | assert!(!output.contains("hello_file")); 266 | } 267 | 268 | 
#[test] 269 | pub fn test_handle_duplicate_names() { 270 | // Check that even if we run on a multiple directories with the same name 271 | // we still show the distinct parent dir in the output 272 | let output = build_command(vec![ 273 | "tests/test_dir_matching/dave/dup_name", 274 | "tests/test_dir_matching/andy/dup_name", 275 | "ci", 276 | ]); 277 | assert!(output.contains("andy")); 278 | assert!(output.contains("dave")); 279 | assert!(output.contains("ci")); 280 | assert!(output.contains("dup_name")); 281 | assert!(!output.contains("test_dir_matching")); 282 | } 283 | -------------------------------------------------------------------------------- /tests/tests.rs: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /tests/tests_symlinks.rs: -------------------------------------------------------------------------------- 1 | use assert_cmd::Command; 2 | use std::fs::File; 3 | use std::io::Write; 4 | use std::path::PathBuf; 5 | use std::str; 6 | 7 | use tempfile::Builder; 8 | use tempfile::TempDir; 9 | 10 | // File sizes differ on both platform and on the format of the disk. 
11 | // Windows: `ln` is not usually an available command; creation of symbolic links requires special enhanced permissions 12 | 13 | fn build_temp_file(dir: &TempDir) -> PathBuf { 14 | let file_path = dir.path().join("notes.txt"); 15 | let mut file = File::create(&file_path).unwrap(); 16 | writeln!(file, "I am a temp file").unwrap(); 17 | file_path 18 | } 19 | 20 | fn link_it(link_path: PathBuf, file_path_s: &str, is_soft: bool) -> String { 21 | let link_name_s = link_path.to_str().unwrap(); 22 | let mut c = Command::new("ln"); 23 | if is_soft { 24 | c.arg("-s"); 25 | } 26 | c.arg(file_path_s); 27 | c.arg(link_name_s); 28 | assert!(c.output().is_ok()); 29 | link_name_s.into() 30 | } 31 | 32 | #[cfg_attr(target_os = "windows", ignore)] 33 | #[test] 34 | pub fn test_soft_sym_link() { 35 | let dir = Builder::new().tempdir().unwrap(); 36 | let file = build_temp_file(&dir); 37 | let dir_s = dir.path().to_str().unwrap(); 38 | let file_path_s = file.to_str().unwrap(); 39 | 40 | let link_name = dir.path().join("the_link"); 41 | let link_name_s = link_it(link_name, file_path_s, true); 42 | 43 | let c = format!(" ├── {}", link_name_s); 44 | let b = format!(" ┌── {}", file_path_s); 45 | let a = format!("─┴ {}", dir_s); 46 | 47 | let mut cmd = Command::cargo_bin("dust").unwrap(); 48 | // Mac test runners create long filenames in tmp directories 49 | let output = cmd 50 | .args(["-p", "-c", "-s", "-w", "999", dir_s]) 51 | .unwrap() 52 | .stdout; 53 | 54 | let output = str::from_utf8(&output).unwrap(); 55 | 56 | assert!(output.contains(a.as_str())); 57 | assert!(output.contains(b.as_str())); 58 | assert!(output.contains(c.as_str())); 59 | } 60 | 61 | #[cfg_attr(target_os = "windows", ignore)] 62 | #[test] 63 | pub fn test_hard_sym_link() { 64 | let dir = Builder::new().tempdir().unwrap(); 65 | let file = build_temp_file(&dir); 66 | let dir_s = dir.path().to_str().unwrap(); 67 | let file_path_s = file.to_str().unwrap(); 68 | 69 | let link_name = dir.path().join("the_link"); 70 | 
link_it(link_name, file_path_s, false); 71 | 72 | let file_output = format!(" ┌── {}", file_path_s); 73 | let dirs_output = format!("─┴ {}", dir_s); 74 | 75 | let mut cmd = Command::cargo_bin("dust").unwrap(); 76 | // Mac test runners create long filenames in tmp directories 77 | let output = cmd.args(["-p", "-c", "-w", "999", dir_s]).unwrap().stdout; 78 | 79 | // The link should not appear in the output because multiple inodes are now ordered 80 | // then filtered. 81 | let output = str::from_utf8(&output).unwrap(); 82 | assert!(output.contains(dirs_output.as_str())); 83 | assert!(output.contains(file_output.as_str())); 84 | } 85 | 86 | #[cfg_attr(target_os = "windows", ignore)] 87 | #[test] 88 | pub fn test_hard_sym_link_no_dup_multi_arg() { 89 | let dir = Builder::new().tempdir().unwrap(); 90 | let dir_link = Builder::new().tempdir().unwrap(); 91 | let file = build_temp_file(&dir); 92 | let dir_s = dir.path().to_str().unwrap(); 93 | let dir_link_s = dir_link.path().to_str().unwrap(); 94 | let file_path_s = file.to_str().unwrap(); 95 | 96 | let link_name = dir_link.path().join("the_link"); 97 | let link_name_s = link_it(link_name, file_path_s, false); 98 | 99 | let mut cmd = Command::cargo_bin("dust").unwrap(); 100 | 101 | // Mac test runners create long filenames in tmp directories 102 | let output = cmd 103 | .args(["-p", "-c", "-w", "999", "-b", dir_link_s, dir_s]) 104 | .unwrap() 105 | .stdout; 106 | 107 | // The link or the file should appear but not both 108 | let output = str::from_utf8(&output).unwrap(); 109 | let has_file_only = output.contains(file_path_s) && !output.contains(&link_name_s); 110 | let has_link_only = !output.contains(file_path_s) && output.contains(&link_name_s); 111 | assert!(has_file_only || has_link_only); 112 | } 113 | 114 | #[cfg_attr(target_os = "windows", ignore)] 115 | #[test] 116 | pub fn test_recursive_sym_link() { 117 | let dir = Builder::new().tempdir().unwrap(); 118 | let dir_s = dir.path().to_str().unwrap(); 119 | 120 | let 
link_name = dir.path().join("the_link"); 121 | let link_name_s = link_it(link_name, dir_s, true); 122 | 123 | let a = format!("─┬ {}", dir_s); 124 | let b = format!(" └── {}", link_name_s); 125 | 126 | let mut cmd = Command::cargo_bin("dust").unwrap(); 127 | let output = cmd 128 | .arg("-p") 129 | .arg("-c") 130 | .arg("-r") 131 | .arg("-s") 132 | .arg("-w") 133 | .arg("999") 134 | .arg(dir_s) 135 | .unwrap() 136 | .stdout; 137 | let output = str::from_utf8(&output).unwrap(); 138 | 139 | assert!(output.contains(a.as_str())); 140 | assert!(output.contains(b.as_str())); 141 | } 142 | --------------------------------------------------------------------------------