├── .gitignore ├── dub.selections.json ├── ci ├── docker │ ├── Dockerfile │ ├── setup-ldc.sh │ ├── setup-ldc-runtime.sh │ └── setup.sh ├── build-docker.sh ├── tests │ ├── nixos-test.nix │ └── manual-test-script.sh └── build-inside-docker.sh ├── dub.sdl ├── dub-lock.json ├── .github └── workflows │ ├── test.yml │ └── build.yml ├── flake.lock ├── btdu.1 ├── flake.nix ├── source └── btdu │ ├── alloc.d │ ├── impexp.d │ ├── proto.d │ ├── ui │ ├── deletion.d │ └── curses.d │ ├── common.d │ ├── subproc.d │ ├── state.d │ ├── sample.d │ ├── main.d │ └── paths.d ├── ARCHITECTURE.md ├── CONCEPTS.md ├── README.md └── COPYING /.gitignore: -------------------------------------------------------------------------------- 1 | /btdu 2 | /btdu-static-* 3 | /btdu-glibc-* 4 | -------------------------------------------------------------------------------- /dub.selections.json: -------------------------------------------------------------------------------- 1 | { 2 | "fileVersion": 1, 3 | "versions": { 4 | "ae": "0.0.3672", 5 | "btrfs": "0.0.21", 6 | "ncurses": "1.0.0", 7 | "emsi_containers": "0.9.0" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /ci/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:bookworm-20251020 2 | 3 | ARG BTDU_ARCH 4 | 5 | COPY setup.sh /root/setup.sh 6 | RUN /root/setup.sh 7 | 8 | COPY setup-ldc.sh /root/setup-ldc.sh 9 | RUN /root/setup-ldc.sh 10 | 11 | COPY setup-ldc-runtime.sh /root/setup-ldc-runtime.sh 12 | RUN /root/setup-ldc-runtime.sh 13 | -------------------------------------------------------------------------------- /dub.sdl: -------------------------------------------------------------------------------- 1 | name "btdu" 2 | description "sampling disk usage profiler for btrfs" 3 | license "GPL v2" 4 | targetType "executable" 5 | 6 | toolchainRequirements frontend=">=2.097" 7 | 8 | dependency "ae" version="==0.0.3672" 9 | dependency "btrfs" version="~>0.0.21" 10 | dependency "ncurses" version="~>1.0.0" 11 | dependency "emsi_containers" version="==0.9.0" 12 | -------------------------------------------------------------------------------- /ci/docker/setup-ldc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eEuo pipefail 3 | 4 | ldc_ver=1.27.1 5 | 6 | host_arch=$(uname -m) 7 | 8 | cd /tmp 9 | 10 | arch=$host_arch 11 | name=ldc2-$ldc_ver-linux-"$arch" 12 | filename="$name".tar.xz 13 | test -f "$filename" || curl --location --fail --remote-name https://github.com/ldc-developers/ldc/releases/download/v$ldc_ver/"$filename" 14 | test -d "$name" || tar axf "$filename" 15 | 16 | ln -s ldc2-$ldc_ver-linux-"$host_arch" ldc2-host 17 | -------------------------------------------------------------------------------- /ci/build-docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eEuo pipefail 3 | 4 | cd "$(dirname "$0")" 5 | 6 | docker=${DOCKER-docker} 7 | 8 | arches=("$@") 9 | if [[ ${#arches[@]} -eq 0 ]] 10 | then 11 | arches=("$(uname -m)") 12 | printf 'No architectures specified, building for host architecture.\n' 13 | fi 14 | 15 | for arch in "${arches[@]}" 16 | do 17 | "$docker" build --build-arg BTDU_ARCH="$arch" -t btdu-"$arch" docker 18 | "$docker" run --rm -v "$(cd .. 
&& pwd)":/btdu --env BTDU_ARCH="$arch" btdu-"$arch" /btdu/ci/build-inside-docker.sh 19 | done 20 | -------------------------------------------------------------------------------- /dub-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": { 3 | "ae": { 4 | "version": "0.0.3672", 5 | "sha256": "1did2l2bdkjjhwlrwy9ynbcgx1d2kmhzclq4ihbiw7qrpp2bbh5v" 6 | }, 7 | "btrfs": { 8 | "version": "0.0.21", 9 | "sha256": "0j7ksngwn8kilbql6p7hh5jvmrxqsr7pniw2n1qlrwdsms6qv72m" 10 | }, 11 | "ncurses": { 12 | "version": "1.0.0", 13 | "sha256": "0ivl88vp2dy9rpv6x3f9jlyqa7aps2x1kkyx80w2d4vcs31pzmb2" 14 | }, 15 | "emsi_containers": { 16 | "version": "0.9.0", 17 | "sha256": "1viz1fjh6jhfvl0d25bb1q7aclm1hrs0d7hhcx1d9c0gg5k6lcpm" 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /ci/docker/setup-ldc-runtime.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eEuo pipefail 3 | 4 | host_arch=$(uname -m) 5 | target_arch=$BTDU_ARCH 6 | 7 | cd /tmp 8 | 9 | args=( 10 | env 11 | ) 12 | 13 | target_api=gnu 14 | case "$target_arch" in 15 | arm) 16 | target_api=gnueabihf 17 | ;; 18 | *) 19 | esac 20 | 21 | if [[ "$host_arch" != "$target_arch" ]]; then 22 | args+=(CC="$target_arch"-linux-"$target_api"-gcc) 23 | fi 24 | 25 | args+=( 26 | ldc2-host/bin/ldc-build-runtime 27 | --dFlags="-mtriple=$target_arch-linux-$target_api" 28 | --dFlags="-flto=full" 29 | --dFlags="-O" 30 | --dFlags="--release" 31 | BUILD_SHARED_LIBS=OFF 32 | ) ; "${args[@]}" 33 | 34 | -------------------------------------------------------------------------------- /ci/tests/nixos-test.nix: -------------------------------------------------------------------------------- 1 | { lib, pkgs, btdu }: 2 | 3 | pkgs.testers.nixosTest { 4 | name = "btdu-integration"; 5 | 6 | meta = with lib.maintainers; { 7 | maintainers = [ ]; 8 | }; 9 | 10 | nodes.machine = { config, pkgs, ... 
}: { 11 | # Enable btrfs module 12 | boot.supportedFilesystems = [ "btrfs" ]; 13 | 14 | # Install btdu 15 | environment.systemPackages = [ btdu ]; 16 | 17 | # Create virtual disks for btrfs and ext4 testing 18 | virtualisation = { 19 | emptyDiskImages = [ 1024 1024 ]; # 1GB for btrfs/ext4, 1GB for multi-device/RAID 20 | memorySize = 2048; 21 | }; 22 | }; 23 | 24 | testScript = builtins.readFile ./test_script.py; 25 | } 26 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | on: [push, pull_request] 3 | 4 | jobs: 5 | test: 6 | timeout-minutes: 60 7 | runs-on: ubuntu-24.04 8 | 9 | steps: 10 | - name: Checkout repository 11 | uses: actions/checkout@v4 12 | 13 | - name: Install Nix 14 | uses: DeterminateSystems/nix-installer-action@v18 15 | with: 16 | extra-conf: | 17 | extra-experimental-features = nix-command flakes 18 | 19 | - name: Setup Nix cache 20 | uses: DeterminateSystems/magic-nix-cache-action@v11 21 | 22 | - name: Build btdu 23 | run: nix build --show-trace --print-build-logs 24 | 25 | - name: Run flake checks 26 | run: nix flake check --show-trace --print-build-logs 27 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | on: [push, pull_request] 3 | jobs: 4 | build: 5 | runs-on: ubuntu-24.04 6 | strategy: 7 | matrix: 8 | arch: [x86_64, arm, aarch64] 9 | steps: 10 | - uses: actions/checkout@v2 11 | with: 12 | submodules: true 13 | - name: Build 14 | run: | 15 | ci/build-docker.sh ${{ matrix.arch }} 16 | - name: Generate checksums 17 | run: | 18 | for f in btdu-*-${{ matrix.arch }} ; do 19 | sha256sum "$f" | tee "$f".sha256sum 20 | done 21 | - name: Upload binary 22 | uses: actions/upload-artifact@v4 23 | with: 24 | name: btdu-${{ matrix.arch }} 25 | path: | 26 | btdu-*-${{ matrix.arch }} 27 | btdu-*-${{ matrix.arch }}.sha256sum 28 | -------------------------------------------------------------------------------- /ci/docker/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eEuo pipefail 3 | 4 | target_arch=$BTDU_ARCH 5 | 6 | # Translate LDC target architecture to Debian architecture 7 | target_api=gnu 8 | case "$target_arch" in 9 | x86_64) 10 | target_debian_arch=amd64 11 | ;; 12 | arm) 13 | target_debian_arch=armhf 14 | target_api=gnueabihf 15 | ;; 16 | aarch64) 17 | target_debian_arch=arm64 18 | ;; 19 | *) 20 | esac 21 | 22 | host_arch=$(uname -m) 23 | 24 | if [[ "$target_arch" != "$host_arch" ]] 25 | then 26 | dpkg --add-architecture "$target_debian_arch" 27 | fi 28 | 29 | apt-get update 30 | 31 | packages=( 32 | jq # To parse `dub --describe` output 33 | xz-utils # To unpack LDC archives 34 | libxml2 # Needed by LDC 35 | curl # To download LDC; Needed by Dub 36 | cmake # To rebuild the LDC runtime 37 | ) 38 | 39 | if [[ "$target_arch" == "$host_arch" ]] 40 | then 41 | packages+=( 42 | gcc 43 | ) 44 | else 45 | packages+=( 46 | gcc-"${target_arch/_/-}"-linux-"$target_api" 47 | ) 48 | fi 49 | packages+=( 50 | binutils-"${target_arch/_/-}"-linux-"$target_api" 51 | libncurses-dev:"$target_debian_arch" 52 | libz-dev:"$target_debian_arch" 53 | libtinfo-dev:"$target_debian_arch" 54 | ) 55 | 56 | apt-get install -y "${packages[@]}" 57 | 58 | mkdir /btdu 59 | 
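Taken together, the ci/ files above form btdu's cross-compilation pipeline: ci/build-docker.sh builds one Debian-based image per requested architecture (running setup.sh, setup-ldc.sh and setup-ldc-runtime.sh during the image build), then runs ci/build-inside-docker.sh in that image with the source tree mounted at /btdu. A minimal sketch of driving it by hand, assuming Docker is available and the scripts keep the paths shown above (this mirrors what .github/workflows/build.yml does for each matrix entry):

    # Build for the host architecture only (the script's default when no
    # architectures are given):
    ci/build-docker.sh

    # Cross-build for specific architectures; the resulting binaries are picked up
    # by the btdu-*-<arch> globs used in .github/workflows/build.yml:
    ci/build-docker.sh x86_64 aarch64

    # build-docker.sh honours $DOCKER, so a compatible CLI such as podman can be
    # substituted for docker:
    DOCKER=podman ci/build-docker.sh arm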
-------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "flake-utils": { 4 | "inputs": { 5 | "systems": "systems" 6 | }, 7 | "locked": { 8 | "lastModified": 1731533236, 9 | "narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=", 10 | "owner": "numtide", 11 | "repo": "flake-utils", 12 | "rev": "11707dc2f618dd54ca8739b309ec4fc024de578b", 13 | "type": "github" 14 | }, 15 | "original": { 16 | "owner": "numtide", 17 | "repo": "flake-utils", 18 | "type": "github" 19 | } 20 | }, 21 | "nixpkgs": { 22 | "locked": { 23 | "lastModified": 1761907660, 24 | "narHash": "sha256-kJ8lIZsiPOmbkJypG+B5sReDXSD1KGu2VEPNqhRa/ew=", 25 | "owner": "NixOS", 26 | "repo": "nixpkgs", 27 | "rev": "2fb006b87f04c4d3bdf08cfdbc7fab9c13d94a15", 28 | "type": "github" 29 | }, 30 | "original": { 31 | "owner": "NixOS", 32 | "ref": "nixos-unstable", 33 | "repo": "nixpkgs", 34 | "type": "github" 35 | } 36 | }, 37 | "root": { 38 | "inputs": { 39 | "flake-utils": "flake-utils", 40 | "nixpkgs": "nixpkgs" 41 | } 42 | }, 43 | "systems": { 44 | "locked": { 45 | "lastModified": 1681028828, 46 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 47 | "owner": "nix-systems", 48 | "repo": "default", 49 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 50 | "type": "github" 51 | }, 52 | "original": { 53 | "owner": "nix-systems", 54 | "repo": "default", 55 | "type": "github" 56 | } 57 | } 58 | }, 59 | "root": "root", 60 | "version": 7 61 | } 62 | -------------------------------------------------------------------------------- /ci/build-inside-docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eEuo pipefail 3 | 4 | cd "$(dirname "$0")"/.. 5 | 6 | PATH=/tmp/ldc2-host/bin:$PATH 7 | 8 | host_arch=$(uname -m) 9 | target_arch=$BTDU_ARCH 10 | 11 | target_api=gnu 12 | case "$target_arch" in 13 | arm) 14 | target_api=gnueabihf 15 | ;; 16 | *) 17 | esac 18 | 19 | if [[ "$target_arch" == "$host_arch" ]] 20 | then 21 | gnu_prefix= 22 | else 23 | gnu_prefix="$target_arch"-linux-"$target_api"- 24 | fi 25 | 26 | cat >> /tmp/ldc2-host/etc/ldc2.conf < and contributors: 93 | 94 | .I https://github.com/CyberShadow/btdu/graphs/contributors 95 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | description = "btdu - sampling disk usage profiler for btrfs"; 3 | 4 | inputs = { 5 | nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; 6 | flake-utils.url = "github:numtide/flake-utils"; 7 | }; 8 | 9 | outputs = { self, nixpkgs, flake-utils }: 10 | flake-utils.lib.eachDefaultSystem (system: 11 | let 12 | pkgs = nixpkgs.legacyPackages.${system}; 13 | 14 | # Filter source to only include files needed for building 15 | # This prevents rebuilds when test files or docs change 16 | btduSrc = pkgs.lib.cleanSourceWith { 17 | src = ./.; 18 | filter = path: type: 19 | let 20 | baseName = baseNameOf path; 21 | relPath = pkgs.lib.removePrefix (toString ./. 
+ "/") (toString path); 22 | in 23 | # Include source directory and all its contents 24 | (relPath == "source" || pkgs.lib.hasPrefix "source/" relPath) || 25 | # Include build configuration 26 | (baseName == "dub.sdl"); 27 | }; 28 | 29 | # Build btdu from local source 30 | btdu = pkgs.buildDubPackage { 31 | pname = "btdu"; 32 | version = "0.6.1"; 33 | 34 | src = btduSrc; 35 | 36 | dubLock = ./dub-lock.json; 37 | 38 | buildInputs = with pkgs; [ 39 | ncurses 40 | zlib 41 | ]; 42 | 43 | installPhase = '' 44 | runHook preInstall 45 | install -Dm755 btdu -t $out/bin 46 | # Generate man page from btdu itself 47 | ./btdu --man "" > btdu.1 48 | install -Dm644 btdu.1 -t $out/share/man/man1 49 | runHook postInstall 50 | ''; 51 | 52 | meta = with pkgs.lib; { 53 | description = "Sampling disk usage profiler for btrfs"; 54 | homepage = "https://github.com/CyberShadow/btdu"; 55 | license = licenses.gpl2Only; 56 | platforms = platforms.linux; 57 | mainProgram = "btdu"; 58 | }; 59 | }; 60 | in 61 | { 62 | packages = { 63 | default = btdu; 64 | btdu = btdu; 65 | }; 66 | 67 | apps.default = { 68 | type = "app"; 69 | program = "${btdu}/bin/btdu"; 70 | }; 71 | 72 | devShells.default = pkgs.mkShell { 73 | buildInputs = with pkgs; [ 74 | dmd 75 | dub 76 | ncurses 77 | zlib 78 | ]; 79 | }; 80 | 81 | # Integration tests as checks (only on Linux systems) 82 | checks = pkgs.lib.optionalAttrs pkgs.stdenv.isLinux { 83 | integration = import ./ci/tests/nixos-test.nix { 84 | inherit (pkgs) lib; 85 | inherit pkgs; 86 | inherit btdu; 87 | }; 88 | }; 89 | } 90 | ); 91 | } 92 | -------------------------------------------------------------------------------- /source/btdu/alloc.d: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2020, 2021, 2023, 2025 Vladimir Panteleev 3 | * 4 | * This program is free software; you can redistribute it and/or 5 | * modify it under the terms of the GNU General Public 6 | * License v2 as published by the Free Software Foundation. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 | * General Public License for more details. 12 | * 13 | * You should have received a copy of the GNU General Public 14 | * License along with this program; if not, write to the 15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 | * Boston, MA 021110-1307, USA. 17 | */ 18 | 19 | /// Memory allocation 20 | module btdu.alloc; 21 | 22 | import core.exception : onOutOfMemoryError; 23 | 24 | import std.algorithm.comparison : max; 25 | import std.experimental.allocator.building_blocks.allocator_list; 26 | import std.experimental.allocator.building_blocks.null_allocator; 27 | import std.experimental.allocator.building_blocks.region; 28 | import std.experimental.allocator.mallocator; 29 | import std.experimental.allocator.mmap_allocator; 30 | import std.experimental.allocator; 31 | import std.traits; 32 | 33 | /// Allocator to use for objects with infinite lifetime, which will never be freed. 34 | alias GrowAllocator = AllocatorList!((n) => Region!MmapAllocator(max(n, 1024 * 4096)), NullAllocator); 35 | CheckedAllocator!GrowAllocator growAllocator; 36 | 37 | /// Casual allocator which supports deallocation. 38 | alias CasualAllocator = CheckedAllocator!Mallocator; 39 | 40 | /// Wrapper allocator which calls a function when memory allocation fails. 
41 | /// Because downstream code doesn't check for nulls, this allows better error messages 42 | /// should btdu run out of memory. (Mainly this is useful for testing and profiling.) 43 | struct CheckedAllocator(ParentAllocator, alias onFail = onOutOfMemoryError) 44 | { 45 | import std.traits : hasMember; 46 | import std.typecons : Ternary; 47 | 48 | static if (stateSize!ParentAllocator) 49 | ParentAllocator parent; 50 | else 51 | { 52 | alias parent = ParentAllocator.instance; 53 | static CheckedAllocator instance; 54 | } 55 | 56 | private T check(T)(T value) { if (!value) onFail(); return value; } 57 | 58 | void[] allocate(size_t n) { return check(parent.allocate(n)); } 59 | bool reallocate(ref void[] b, size_t s) { return check(parent.reallocate(b, s)); } 60 | 61 | // Note: we can't use `alias this` because we need to intercept allocateZeroed, 62 | // but we can't do that because it's package(std). 63 | 64 | enum alignment = ParentAllocator.alignment; 65 | 66 | size_t goodAllocSize(size_t n) { return parent.goodAllocSize(n); } 67 | 68 | static if (hasMember!(ParentAllocator, "expand")) 69 | bool expand(ref void[] b, size_t delta) { return parent.expand(b, delta); } 70 | 71 | static if (hasMember!(ParentAllocator, "owns")) 72 | Ternary owns(void[] b) { return parent.owns(b); } 73 | 74 | static if (hasMember!(ParentAllocator, "deallocate")) 75 | bool deallocate(void[] b) { return parent.deallocate(b); } 76 | 77 | static if (hasMember!(ParentAllocator, "deallocateAll")) 78 | bool deallocateAll() { return parent.deallocateAll(); } 79 | 80 | static if (hasMember!(ParentAllocator, "empty")) 81 | pure nothrow @safe @nogc Ternary empty() const { return parent.empty; } 82 | } 83 | 84 | /// Reusable appender. 85 | template StaticAppender(T) 86 | if (!hasIndirections!T) 87 | { 88 | import ae.utils.appender : FastAppender; 89 | alias StaticAppender = FastAppender!(T, Mallocator); 90 | } 91 | 92 | /// Typed slab allocator for objects that need efficient iteration. 93 | /// Allocates objects in large contiguous slabs linked together. 94 | /// Overhead: one pointer per slab (~4MB), plus fixed global state. 95 | struct SlabAllocator(T, size_t slabSize = 4 * 1024 * 1024) 96 | { 97 | enum itemsPerSlab = (slabSize - (Slab*).sizeof) / T.sizeof; 98 | static assert(itemsPerSlab > 0, "Type too large for slab allocator"); 99 | 100 | struct Slab 101 | { 102 | T[itemsPerSlab] items; 103 | Slab* next; 104 | } 105 | 106 | static assert(Slab.sizeof <= slabSize); 107 | 108 | Slab* firstSlab; 109 | Slab* currentSlab; 110 | size_t currentIndex; 111 | 112 | /// Allocate a new item, returns a pointer to uninitialized memory. 113 | T* allocate() 114 | { 115 | if (!currentSlab || currentIndex >= itemsPerSlab) 116 | { 117 | auto mem = MmapAllocator.instance.allocate(Slab.sizeof); 118 | if (!mem) 119 | onOutOfMemoryError(); 120 | auto newSlab = cast(Slab*) mem.ptr; 121 | newSlab.next = null; 122 | if (currentSlab) 123 | currentSlab.next = newSlab; 124 | else 125 | firstSlab = newSlab; 126 | currentSlab = newSlab; 127 | currentIndex = 0; 128 | } 129 | return ¤tSlab.items[currentIndex++]; 130 | } 131 | 132 | /// Iterate over all allocated items. 133 | int opApply(scope int delegate(ref T) dg) 134 | { 135 | auto lastSlab = currentSlab; 136 | auto lastIndex = currentIndex; 137 | for (auto slab = firstSlab; slab; slab = slab.next) 138 | { 139 | auto count = (slab is lastSlab) ? lastIndex : itemsPerSlab; 140 | foreach (ref item; slab.items[0 .. 
count]) 141 | if (auto r = dg(item)) 142 | return r; 143 | } 144 | return 0; 145 | } 146 | 147 | /// Number of allocated items. 148 | size_t length() const 149 | { 150 | if (!firstSlab) 151 | return 0; 152 | size_t count = currentIndex; 153 | for (const(Slab)* slab = firstSlab; slab !is currentSlab; slab = slab.next) 154 | count += itemsPerSlab; 155 | return count; 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /source/btdu/impexp.d: -------------------------------------------------------------------------------- 1 | module btdu.impexp; 2 | 3 | import std.algorithm.comparison : max; 4 | import std.algorithm.iteration : filter, map; 5 | import std.algorithm.sorting : sort; 6 | import std.array : array; 7 | import std.conv : to; 8 | import std.math : ceil; 9 | import std.process : environment; 10 | import std.stdio : File, stdout; 11 | 12 | import core.lifetime : move; 13 | 14 | import ae.sys.data; 15 | import ae.sys.datamm; 16 | import ae.utils.json; 17 | 18 | import btdu.common : humanSize, pointerWriter; 19 | import btdu.paths; 20 | import btdu.state; 21 | 22 | alias imported = btdu.state.imported; 23 | 24 | /// Serialized 25 | struct SerializedState 26 | { 27 | bool expert; 28 | @JSONOptional bool physical; 29 | string fsPath; 30 | ulong totalSize; 31 | BrowserPath* root; 32 | } 33 | 34 | void importData(string path) 35 | { 36 | __gshared Data importData; // Keep memory-mapped file alive, as directory names may reference it 37 | 38 | importData = mapFile(path, MmMode.read); 39 | auto json = cast(string)importData.unsafeContents; 40 | 41 | debug importing = true; 42 | auto s = json.jsonParse!SerializedState(); 43 | 44 | expert = s.expert; 45 | physical = s.physical; 46 | fsPath = s.fsPath; 47 | totalSize = s.totalSize; 48 | move(*s.root, browserRoot); 49 | 50 | browserRoot.resetParents(); 51 | debug importing = false; 52 | imported = true; 53 | } 54 | 55 | void exportData(string path) 56 | { 57 | SerializedState s; 58 | s.expert = expert; 59 | s.physical = physical; 60 | s.fsPath = fsPath; 61 | s.totalSize = totalSize; 62 | s.root = &browserRoot; 63 | 64 | alias LockingBinaryWriter = typeof(File.lockingBinaryWriter()); 65 | alias JsonFileSerializer = CustomJsonSerializer!(JsonWriter!LockingBinaryWriter); 66 | 67 | { 68 | JsonFileSerializer j; 69 | auto file = path == "-" ? stdout : File(path, "wb"); 70 | j.writer.output = file.lockingBinaryWriter; 71 | j.put(s); 72 | } 73 | } 74 | 75 | void exportDu() 76 | { 77 | ulong blockSize = { 78 | // As in du(1) 79 | if ("POSIXLY_CORRECT" in environment) 80 | return 512; 81 | foreach (name; ["BTDU_BLOCK_SIZE", "DU_BLOCK_SIZE", "BLOCK_SIZE", "BLOCKSIZE"]) 82 | if (auto value = environment.get(name)) 83 | return value.to!ulong; 84 | return 1024; 85 | }(); 86 | 87 | auto totalSamples = browserRoot.getSamples(SampleType.represented); 88 | 89 | void visit(BrowserPath* path) 90 | { 91 | for (auto child = path.firstChild; child; child = child.nextSibling) 92 | visit(child); 93 | 94 | auto samples = path.getSamples(SampleType.represented); 95 | auto size = ceil(samples * real(totalSize) / totalSamples / blockSize).to!ulong; 96 | stdout.writefln("%d\t%s%s", size, fsPath, path.pointerWriter); 97 | } 98 | if (totalSamples) 99 | visit(&browserRoot); 100 | } 101 | 102 | /// Print a pretty tree of the biggest nodes to stdout. 103 | /// In non-expert mode: size and path columns. 104 | /// In expert mode: represented, distributed, exclusive, shared size columns. 
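/// Only children accounting for at least ~1% of the total are printed, largest first.
/// A sketch of the non-expert output (sizes are illustrative; the exact formatting
/// comes from the writefln calls below):
///
///     /mnt/data (~40.0 GiB)
///     ├── backups (~25.1 GiB)
///     └── home (~14.5 GiB)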
105 | void exportHuman() 106 | { 107 | auto totalSamples = browserRoot.getSamples(SampleType.represented); 108 | if (totalSamples == 0) 109 | return; 110 | 111 | // Threshold: 1% of total size 112 | auto threshold = totalSamples / 100; 113 | if (threshold == 0) 114 | threshold = 1; 115 | 116 | // Calculate size from samples 117 | real sizeFromSamples(double samples) 118 | { 119 | return samples * real(totalSize) / totalSamples; 120 | } 121 | 122 | // Collect and print nodes recursively 123 | void visit(BrowserPath* path, string indent, bool isLast) 124 | { 125 | // Get samples for represented size (primary sort/filter) 126 | auto samples = path.getSamples(SampleType.represented); 127 | 128 | // Skip nodes below threshold (but always show root) 129 | if (path !is &browserRoot && samples < threshold) 130 | return; 131 | 132 | string prefix, childIndent, label; 133 | if (path is &browserRoot) 134 | { 135 | prefix = ""; 136 | childIndent = ""; 137 | label = fsPath; 138 | } 139 | else 140 | { 141 | prefix = indent ~ (isLast ? "└── " : "├── "); 142 | childIndent = indent ~ (isLast ? " " : "│ "); 143 | label = path.humanName.to!string; 144 | } 145 | 146 | // Format and print the line 147 | if (expert) 148 | { 149 | // Expert mode: four size columns 150 | auto represented = sizeFromSamples(samples); 151 | auto distributed = sizeFromSamples(path.getDistributedSamples()); 152 | auto exclusive = sizeFromSamples(path.getSamples(SampleType.exclusive)); 153 | auto shared_ = sizeFromSamples(path.getSamples(SampleType.shared_)); 154 | 155 | stdout.writefln(" ~%s ~%s ~%s ~%s %s%s", 156 | humanSize(represented, true), 157 | humanSize(distributed, true), 158 | humanSize(exclusive, true), 159 | humanSize(shared_, true), 160 | prefix, 161 | label, 162 | ); 163 | } 164 | else 165 | { 166 | // Non-expert mode: single size column 167 | auto size = sizeFromSamples(samples); 168 | stdout.writefln("%s%s (~%s)", prefix, label, humanSize(size, false)); 169 | } 170 | 171 | // Collect children that pass threshold 172 | BrowserPath*[] children; 173 | for (auto child = path.firstChild; child; child = child.nextSibling) 174 | if (child.getSamples(SampleType.represented) >= threshold) 175 | children ~= child; 176 | 177 | // Sort children by size (largest first) 178 | children.sort!((a, b) => a.getSamples(SampleType.represented) > b.getSamples(SampleType.represented)); 179 | 180 | // Visit children 181 | foreach (i, child; children) 182 | visit(child, childIndent, i + 1 == children.length); 183 | } 184 | 185 | // Print header in expert mode 186 | if (expert) 187 | { 188 | stdout.writeln(" Represented Distributed Exclusive Shared Path"); 189 | } 190 | 191 | visit(&browserRoot, "", true); 192 | } 193 | -------------------------------------------------------------------------------- /source/btdu/proto.d: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2020, 2021, 2022, 2023, 2024 Vladimir Panteleev 3 | * 4 | * This program is free software; you can redistribute it and/or 5 | * modify it under the terms of the GNU General Public 6 | * License v2 as published by the Free Software Foundation. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 | * General Public License for more details. 
12 | * 13 | * You should have received a copy of the GNU General Public 14 | * License along with this program; if not, write to the 15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 | * Boston, MA 021110-1307, USA. 17 | */ 18 | 19 | /// Main process / subprocess communication protocol 20 | module btdu.proto; 21 | 22 | import core.sys.posix.unistd; 23 | 24 | import std.exception; 25 | import std.meta; 26 | import std.traits; 27 | 28 | import ae.utils.array; 29 | 30 | import btrfs.c.ioctl : btrfs_ioctl_dev_info_args; 31 | import btrfs.c.kerncompat : u64, __u64; 32 | 33 | import btdu.alloc : StaticAppender; 34 | 35 | struct Error 36 | { 37 | const(char)[] msg; 38 | int errno; 39 | const(char)[] path; 40 | } 41 | 42 | struct StartMessage 43 | { 44 | ulong totalSize; 45 | btrfs_ioctl_dev_info_args[] devices; 46 | } 47 | 48 | struct NewRootMessage 49 | { 50 | __u64 rootID, parentRootID; 51 | const(char)[] name; 52 | } 53 | 54 | struct Offset 55 | { 56 | ulong logical = -1; 57 | ulong devID = -1, physical = -1; 58 | } 59 | 60 | /// Used for Offset.logical to represent unallocated space in physical mode. 61 | enum u64 logicalOffsetHole = -1; 62 | 63 | /// Used for Offset.logical to represent device slack. 64 | enum u64 logicalOffsetSlack = -2; 65 | 66 | struct ResultStartMessage 67 | { 68 | ulong chunkFlags; 69 | Offset offset; 70 | } 71 | 72 | // Retrying with BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET 73 | struct ResultIgnoringOffsetMessage 74 | { 75 | } 76 | 77 | struct ResultInodeStartMessage 78 | { 79 | u64 rootID; 80 | } 81 | 82 | struct ResultInodeErrorMessage 83 | { 84 | Error error; 85 | } 86 | 87 | struct ResultInodeEndMessage 88 | { 89 | } 90 | 91 | struct ResultMessage 92 | { 93 | const(char)[] path; 94 | } 95 | 96 | struct ResultErrorMessage 97 | { 98 | Error error; 99 | } 100 | 101 | struct ResultEndMessage 102 | { 103 | ulong duration; 104 | } 105 | 106 | struct FatalErrorMessage 107 | { 108 | const(char)[] msg; 109 | } 110 | 111 | alias AllMessages = AliasSeq!( 112 | StartMessage, 113 | NewRootMessage, 114 | ResultStartMessage, 115 | ResultIgnoringOffsetMessage, 116 | ResultInodeStartMessage, 117 | ResultInodeErrorMessage, 118 | ResultInodeEndMessage, 119 | ResultMessage, 120 | ResultErrorMessage, 121 | ResultEndMessage, 122 | FatalErrorMessage, 123 | ); 124 | 125 | struct Header 126 | { 127 | /// Includes Header. 
128 | /// Even when the length is redundant (fixed-size messages), 129 | /// putting it up front allows simplifying deserialization and 130 | /// process entire messages in one go 131 | size_t length; 132 | /// Index into AllMessages 133 | size_t type; 134 | } 135 | 136 | StaticAppender!ubyte sendBuf; 137 | 138 | private void serialize(T)(ref T value) 139 | { 140 | static if (!hasIndirections!T) 141 | sendBuf.put(value.asBytes); 142 | else 143 | static if (is(T U : U[])) 144 | { 145 | size_t length = value.length; 146 | serialize(length); 147 | static if (!hasIndirections!U) 148 | sendBuf.put(value.asBytes); 149 | else 150 | foreach (ref e; value) 151 | serialize(e); 152 | } 153 | else 154 | static if (is(T == struct)) 155 | { 156 | foreach (ref f; value.tupleof) 157 | serialize(f); 158 | } 159 | else 160 | static assert(false, "Can't serialize " ~ T.stringof); 161 | } 162 | 163 | private void sendRaw(const(void)[] data) 164 | { 165 | auto written = write(STDOUT_FILENO, data.ptr, data.length); 166 | errnoEnforce(written > 0, "write"); 167 | data.shift(written); 168 | if (!data.length) 169 | return; 170 | sendRaw(data); 171 | } 172 | 173 | /// Send a message from a subprocess to the main process. 174 | void send(T)(auto ref T message) 175 | if (staticIndexOf!(T, AllMessages) >= 0) 176 | { 177 | Header header; 178 | header.type = staticIndexOf!(T, AllMessages); 179 | sendBuf.clear(); 180 | serialize(message); 181 | header.length = Header.sizeof + sendBuf.length; 182 | sendRaw(header.asBytes); 183 | sendRaw(sendBuf.peek()); 184 | } 185 | 186 | private T deserialize(T)(ref ubyte[] buf) 187 | { 188 | static if (!hasIndirections!T) 189 | return (cast(T[])buf.shift(T.sizeof))[0]; 190 | else 191 | static if (is(T U : U[])) 192 | { 193 | size_t length = deserialize!size_t(buf); 194 | static if (!hasIndirections!U) 195 | return cast(U[])buf.shift(U.sizeof * length); 196 | else 197 | static assert(false, "Can't deserialize arrays of types with indirections without allocating"); 198 | } 199 | else 200 | static if (is(T == struct)) 201 | { 202 | T value; 203 | foreach (ref f; value.tupleof) 204 | f = deserialize!(typeof(f))(buf); 205 | return value; 206 | } 207 | else 208 | static assert(false, "Can't deserialize " ~ T.stringof); 209 | } 210 | 211 | /// Decode received data. 212 | /// Returns how many bytes should be read before calling this function again. 213 | /// H should implement handleMessage(M) overloads for every M in AllMessages. 
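/// Complete messages are consumed from the front of `buf`.
///
/// A hypothetical caller, for illustration only (btdu's real handling lives in the
/// main process, see subproc.d):
///
///     struct Handler
///     {
///         void handleMessage(M)(M message) { /* react to each message type */ }
///     }
///     Handler handler;
///     ubyte[] buf;  // bytes accumulated from a subprocess's standard output
///     auto needed = parse(buf, handler);
///     // Read at least `needed` more bytes into `buf`, then call `parse` again.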
214 | size_t parse(H)(ref ubyte[] buf, ref H handler) 215 | { 216 | while (true) 217 | { 218 | if (buf.length < Header.sizeof) 219 | return Header.sizeof - buf.length; 220 | 221 | auto header = (cast(Header*)buf.ptr); 222 | if (buf.length < header.length) 223 | return header.length - buf.length; 224 | 225 | auto initialBufLength = buf.length; 226 | buf.shift(Header.sizeof); 227 | 228 | typeSwitch: 229 | switch (header.type) 230 | { 231 | foreach (i, Message; AllMessages) 232 | { 233 | case i: 234 | handler.handleMessage(deserialize!Message(buf)); 235 | break typeSwitch; 236 | } 237 | default: 238 | assert(false, "Unknown message"); 239 | } 240 | 241 | auto consumed = initialBufLength - buf.length; 242 | import std.format : format; 243 | assert(consumed == header.length, 244 | "Deserialization consumed size / header size mismatch (%d / %d)" 245 | .format(consumed, header.length)); 246 | } 247 | assert(false, "Unreachable"); 248 | } 249 | -------------------------------------------------------------------------------- /ARCHITECTURE.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | btdu collects data using sub-processes. 4 | Each subprocess is started with its own random sub-seed, which then prints results to its standard output. 5 | The main process collects data from subprocesses, and arranges it into a tree. 6 | 7 | ## Source layout 8 | 9 | - `alloc.d` - Memory allocation utility code. 10 | - `browser.d` - The ncurses-based user interface. 11 | - `common.d` - Small module with common definitions. 12 | - `impexp.d` - Import/export code. 13 | - `main.d` - Entry point and event loop. 14 | - `paths.d` - Implements a trie for efficiently storing paths and associated hierarchical data. 15 | - `proto.d` - Describes the protocol used between the main process and subprocesses. 16 | - `sample.d` - The main loop for subprocesses. Performs sample acquisition. 17 | - `state.d` - Global variables. 18 | - `subproc.d` - Code to manage subprocesses from the main process. 19 | 20 | ## Design decisions 21 | 22 | Here are a few potential questions about decisions made when writing btdu, which have possibly non-obvious answers. 23 | 24 | ### Why copy ncdu's UI? 25 | 26 | Many users who would like to use btdu have probably already used ncdu. Mimicking a familiar user interface allows the users to be immediately productive in the new software. Since btdu aims to be a tool which solves problems, as opposed to creating them, removing the obstacle of learning a new user interface was seen as a non-negligible benefit. 27 | 28 | ### Why sub-processes and not threads? 29 | 30 | To collect data, btdu issues ioctls to the kernel. These ioctls can take a very long time to execute, and are non-interruptible. The main reason why this becomes an issue is that D's garbage collector, which operates according to a stop-the-world model, needs to suspend all threads for it to scan reachable objects. Threads executing ioctls can only be "suspended" once the ioctl finishes - even though no userspace code is executed during the ioctl, the signal is still processed only after the ioctl returns. This means that a GC cycle will last for as long as the longest ioctl, which can take up to a few seconds. As the UI is not redrawn or can process keyboard input during this time, this would make it unpleasantly unresponsive. 
31 | 32 | There are some other, similar but less obvious reasons, such as allowing the user to instantly exit back to the shell even though an ioctl would otherwise need to take a few more seconds to finish. 33 | 34 | ### Why random samples and not some even / predictable order? 35 | 36 | Given the use of multiple subprocesses which collect data in parallel, there's two general approaches that this could be done: 37 | 38 | 1. Divide the volume into some predefined sections, one per subprocess, and dedicate each section to a subprocess. The subprocess then autonomously picks samples within its section using whatever deterministic algorithm. 39 | 40 | 2. Have the main process decide on the order of samples to query, and dispatch these samples in order to subprocesses as they report that they are idle and ready to do more work. 41 | 42 | The problem with the first approach is that the time it takes to resolve a sample varies. Consider the simple hypothetical scenario where the first half of the disk is twice as fast as the second half of the disk. Then, with two subprocesses, the subprocess responsible for the first half of the disk could collect 200 samples in the time that the other subprocess would collect only 100. If the samples were treated equally, this would thus create the false impression that the first half of the disk contains two thirds of the data, instead of just half. 43 | 44 | Though btdu could do this and compensate by scaling the weight of the samples by the time it took to resolve them, this introduces two orders of inaccuracy and causes the results to be further skewed by temporary effects such as disk cache. 45 | 46 | The second approach listed above does avoid this problem. However, its implementation requires more elaborate communication with subprocesses - currently, subprocesses do not read any data from the parent process at all. 47 | 48 | ### How does using a random uniform distribution avoid the problem caused by variable sample resolution duration above? 49 | 50 | Let's consider a hypothetical extreme scenario where samples in the first half of the disk take 1 millisecond to resolve, but 10 milliseconds in the second half. For illustration, we'll use 1000 worker processes. 51 | 52 | What will happen is that, even though the total number of samples collected in the first half will initially be higher, every time as workers assigned to the first half quickly finish their work, half of them will get assigned to the second half. Thus, all workers will quickly get "clammed up" and assigned to the slow second half. This process repeats as the workers in the second half finish resolving. Most importantly, the difference in the total number of samples remains the same, even as the total number of samples grows: 53 | 54 | | Elapsed ms | Busy workers,
1st half | Busy workers,
2nd half | Total samples,
1st half | Total samples,
2nd half | 55 | | ---------- | ------------------------- | ------------------------- | -------------------------- | -------------------------- | 56 | | 0 | 500 | 500 | 0 | 0 | 57 | | 1 | 250 | 750 | 500 | 0 | 58 | | 2 | 125 | 875 | 750 | 0 | 59 | | 3 | 63 | 938 | 875 | 0 | 60 | | 4 | 31 | 969 | 938 | 0 | 61 | | 5 | 16 | 984 | 969 | 0 | 62 | | 6 | 8 | 992 | 984 | 0 | 63 | | 7 | 4 | 996 | 992 | 0 | 64 | | 8 | 2 | 998 | 996 | 0 | 65 | | 9 | 1 | 999 | 998 | 0 | 66 | | 10 | 250 | 750 | 999 | 500 | 67 | | 11 | 250 | 750 | 1250 | 750 | 68 | | 12 | 188 | 812 | 1500 | 875 | 69 | | 13 | 125 | 875 | 1687 | 938 | 70 | | 14 | 78 | 922 | 1812 | 969 | 71 | | 15 | 47 | 953 | 1891 | 984 | 72 | | 16 | 27 | 973 | 1937 | 992 | 73 | | 17 | 16 | 984 | 1965 | 996 | 74 | | 18 | 9 | 991 | 1980 | 998 | 75 | | 19 | 5 | 995 | 1989 | 999 | 76 | | 20 | 128 | 872 | 1994 | 1250 | 77 | | 21 | 189 | 811 | 2122 | 1500 | 78 | | 22 | 188 | 812 | 2311 | 1687 | 79 | | 23 | 157 | 843 | 2499 | 1812 | 80 | | 24 | 117 | 883 | 2656 | 1891 | 81 | | 25 | 82 | 918 | 2773 | 1937 | 82 | | ... | ... | ... | ... | ... | 83 | | 100 | 94 | 906 | 9482 | 8671 | 84 | | ... | ... | ... | ... | ... | 85 | | 500 | 91 | 909 | 45826 | 45008 | 86 | | ... | ... | ... | ... | ... | 87 | | 1000 | 91 | 909 | 91281 | 90463 | 88 | 89 | As you can see, the ratio for the total number of samples still converges towards 50%. 90 | 91 | ### Why use the same random seed by default? 92 | 93 | Using a fixed seed instead of an unique (unpredictable) seed enables the following workflow: 94 | 95 | - Run btdu to collect initial information about disk usage. 96 | - Quit btdu, and delete the biggest space hog (e.g. a batch of old snapshots). 97 | - Re-run btdu to acquire fresh results. 98 | 99 | Because the metadata that btdu accessed will now be in the operating system's cache, the second invocation is likely to be much faster, and it will quickly "fast-forward" to the point where the first invocation stopped. 100 | -------------------------------------------------------------------------------- /ci/tests/manual-test-script.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Manual testing script for btdu 3 | # Creates an isolated btrfs filesystem with various test scenarios 4 | 5 | set -euo pipefail 6 | 7 | # Colors for output 8 | GREEN='\033[0;32m' 9 | BLUE='\033[0;34m' 10 | NC='\033[0m' # No Color 11 | 12 | set -eEuo pipefail 13 | 14 | if [[ ! -v BTDU_IN_UNSHARE ]] ; then 15 | 16 | echo -e "${BLUE}Building btdu...${NC}" 17 | dub build 18 | 19 | echo -e "${BLUE}Entering mount namespace...${NC}" 20 | 21 | # Use unshare to create a private mount namespace 22 | # This allows us to create mounts that will be automatically cleaned up when btdu exits 23 | sudo env BTDU_IN_UNSHARE=1 unshare --mount "$0" "$@" 24 | fi 25 | 26 | echo -e "${BLUE}Creating isolated filesystem environment...${NC}" 27 | 28 | # Create a working directory for our test filesystem 29 | mkdir -p /tmp/btdu-test 30 | mount -t tmpfs -o size=2G tmpfs /tmp/btdu-test 31 | 32 | echo "Creating 1.5GB filesystem image..." 33 | dd if=/dev/zero of=/tmp/btdu-test/fs.img bs=1M count=1536 status=progress 34 | 35 | echo "Setting up loopback device..." 36 | LOOP_DEV=$(losetup -f --show /tmp/btdu-test/fs.img) 37 | 38 | # Ensure cleanup on script exit 39 | cleanup() { 40 | echo "Cleaning up..." 
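    # Best-effort teardown: flush pending writes, unmount the test filesystem,
    # and detach the loop device. Each step tolerates failure ("|| true",
    # discarded stderr) so cleanup proceeds even if an earlier step never ran.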
41 | sync || true 42 | umount /mnt/btdu-test 2>/dev/null || true 43 | losetup -d "$LOOP_DEV" 2>/dev/null || true 44 | } 45 | trap cleanup EXIT 46 | 47 | echo "Formatting as btrfs..." 48 | mkfs.btrfs -f "$LOOP_DEV" >/dev/null 49 | 50 | echo "Mounting filesystem..." 51 | mkdir -p /mnt/btdu-test 52 | mount "$LOOP_DEV" /mnt/btdu-test 53 | 54 | echo "Creating test files and scenarios..." 55 | 56 | # 1. Basic directory structure with various file sizes 57 | echo " - Creating basic files..." 58 | mkdir -p /mnt/btdu-test/documents 59 | mkdir -p /mnt/btdu-test/media/photos 60 | mkdir -p /mnt/btdu-test/code/src 61 | 62 | dd if=/dev/urandom of=/mnt/btdu-test/documents/report.pdf bs=1M count=5 2>/dev/null 63 | dd if=/dev/urandom of=/mnt/btdu-test/documents/spreadsheet.xlsx bs=1M count=2 2>/dev/null 64 | dd if=/dev/urandom of=/mnt/btdu-test/media/photos/vacation.jpg bs=1M count=3 2>/dev/null 65 | dd if=/dev/urandom of=/mnt/btdu-test/code/src/main.cpp bs=512K count=1 2>/dev/null 66 | 67 | # 2. Reflinked (CoW cloned) files - demonstrates sharing 68 | echo " - Creating reflinked files (shared extents)..." 69 | dd if=/dev/urandom of=/mnt/btdu-test/original.bin bs=1M count=10 2>/dev/null 70 | cp --reflink=always --sparse=auto /mnt/btdu-test/original.bin /mnt/btdu-test/clone1.bin 71 | cp --reflink=always --sparse=auto /mnt/btdu-test/original.bin /mnt/btdu-test/clone2.bin 72 | cp --reflink=always --sparse=auto /mnt/btdu-test/original.bin /mnt/btdu-test/media/clone3.bin 73 | 74 | # 3. Enable compression and create compressible data 75 | echo " - Enabling compression and creating compressible files..." 76 | mount -o remount,compress=zstd /mnt/btdu-test 77 | dd if=/dev/zero of=/mnt/btdu-test/zeros.dat bs=1M count=20 2>/dev/null 78 | dd if=/dev/urandom of=/mnt/btdu-test/random.dat bs=1M count=20 2>/dev/null 79 | 80 | # 4. Subvolumes 81 | echo " - Creating subvolumes..." 82 | btrfs subvolume create /mnt/btdu-test/home >/dev/null 83 | btrfs subvolume create /mnt/btdu-test/backups >/dev/null 84 | 85 | dd if=/dev/urandom of=/mnt/btdu-test/home/user_data.db bs=1M count=15 2>/dev/null 86 | dd if=/dev/urandom of=/mnt/btdu-test/backups/backup_2024.tar bs=1M count=25 2>/dev/null 87 | 88 | # 5. Snapshots - demonstrate snapshot sharing 89 | echo " - Creating snapshots..." 90 | btrfs subvolume snapshot /mnt/btdu-test/home /mnt/btdu-test/home-snapshot-2024-01 >/dev/null 91 | btrfs subvolume snapshot /mnt/btdu-test/home /mnt/btdu-test/home-snapshot-2024-02 >/dev/null 92 | 93 | # Modify the original after snapshot to create some unique data 94 | dd if=/dev/urandom of=/mnt/btdu-test/home/new_file.dat bs=1M count=5 2>/dev/null 95 | 96 | # 6. Sparse files 97 | echo " - Creating sparse files..." 98 | dd if=/dev/urandom of=/mnt/btdu-test/sparse.dat bs=1M count=1 2>/dev/null 99 | dd if=/dev/zero of=/mnt/btdu-test/sparse.dat bs=1M count=0 seek=1000 conv=notrunc 2>/dev/null 100 | 101 | # 7. Fragmented file with many extents 102 | echo " - Creating fragmented file..." 103 | fallocate -l 50M /mnt/btdu-test/fragmented.dat 104 | for i in {0..99}; do 105 | dd if=/dev/urandom of=/mnt/btdu-test/fragmented.dat bs=4K count=1 seek=$((i*125)) conv=notrunc 2>/dev/null 106 | done 107 | 108 | # 8. Small files (stored inline with metadata) 109 | echo " - Creating many small files..." 110 | mkdir -p /mnt/btdu-test/config 111 | for i in {1..50}; do 112 | dd if=/dev/urandom of=/mnt/btdu-test/config/setting$i.conf bs=100 count=1 2>/dev/null 113 | done 114 | 115 | # 9. 
Files with spaces and special characters 116 | echo " - Creating files with special names..." 117 | mkdir -p "/mnt/btdu-test/My Documents/Project Files" 118 | dd if=/dev/urandom of="/mnt/btdu-test/My Documents/Important Report.pdf" bs=1M count=3 2>/dev/null 119 | dd if=/dev/urandom of="/mnt/btdu-test/My Documents/Project Files/Code Review.txt" bs=512K count=1 2>/dev/null 120 | 121 | # 10. Unicode filenames 122 | echo " - Creating files with Unicode names..." 123 | dd if=/dev/urandom of="/mnt/btdu-test/café_français.dat" bs=1M count=2 2>/dev/null 124 | dd if=/dev/urandom of="/mnt/btdu-test/文件_chinese.dat" bs=1M count=2 2>/dev/null 125 | dd if=/dev/urandom of="/mnt/btdu-test/test_🎉_emoji.dat" bs=1M count=2 2>/dev/null 126 | 127 | # 11. Create and delete a file to demonstrate UNUSED space 128 | echo " - Creating unused space..." 129 | dd if=/dev/urandom of=/mnt/btdu-test/to_be_deleted.dat bs=1M count=30 2>/dev/null 130 | sync -f /mnt/btdu-test 131 | rm /mnt/btdu-test/to_be_deleted.dat 132 | 133 | # 12. Partial extent sharing (overwrite part of a reflinked file) 134 | echo " - Creating partial extent sharing..." 135 | dd if=/dev/urandom of=/mnt/btdu-test/base_file.dat bs=1M count=10 2>/dev/null 136 | cp --reflink=always --sparse=auto /mnt/btdu-test/base_file.dat /mnt/btdu-test/partial_clone.dat 137 | # Overwrite the middle of the clone, creating a mix of shared and unique extents 138 | dd if=/dev/urandom of=/mnt/btdu-test/partial_clone.dat bs=1M count=3 seek=3 conv=notrunc 2>/dev/null 139 | 140 | # Final sync 141 | sync -f /mnt/btdu-test 142 | 143 | echo -e "\n${GREEN}Test filesystem ready!${NC}" 144 | echo "Location: /mnt/btdu-test" 145 | echo "Device: $LOOP_DEV" 146 | echo "" 147 | echo "Test scenarios created:" 148 | echo " • Basic files in various directories" 149 | echo " • Reflinked files (4 copies of original.bin sharing data)" 150 | echo " • Compressed files (zeros.dat compresses well, random.dat does not)" 151 | echo " • Subvolumes (home, backups)" 152 | echo " • Snapshots (2 snapshots of home subvolume)" 153 | echo " • Sparse file (1GB logical, ~1MB physical)" 154 | echo " • Fragmented file (~100 extents)" 155 | echo " • Many small files (50 tiny config files)" 156 | echo " • Files with spaces and special characters" 157 | echo " • Unicode filenames (emoji, Chinese, French)" 158 | echo " • Deleted file space (UNUSED)" 159 | echo " • Partial extent sharing (partial_clone.dat)" 160 | echo "" 161 | echo -e "${BLUE}Starting btdu...${NC}" 162 | echo "" 163 | 164 | # Execute btdu with sudo, passing through all script arguments 165 | # Using exec ensures the namespace dies when btdu exits, cleaning up everything 166 | exec ./btdu "$@" /mnt/btdu-test 167 | -------------------------------------------------------------------------------- /source/btdu/ui/deletion.d: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023, 2024 Vladimir Panteleev 3 | * 4 | * This program is free software; you can redistribute it and/or 5 | * modify it under the terms of the GNU General Public 6 | * License v2 as published by the Free Software Foundation. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 | * General Public License for more details. 
12 | * 13 | * You should have received a copy of the GNU General Public 14 | * License along with this program; if not, write to the 15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 | * Boston, MA 021110-1307, USA. 17 | */ 18 | 19 | /// Interactive deletion logic 20 | module btdu.ui.deletion; 21 | 22 | import std.algorithm.comparison; 23 | import std.algorithm.searching; 24 | import std.conv : to; 25 | import std.exception; 26 | import std.path; 27 | import std.typecons; 28 | 29 | import core.sync.event; 30 | import core.sys.posix.fcntl : O_RDONLY; 31 | import core.thread : Thread; 32 | 33 | import ae.sys.file : listDir, getMounts; 34 | 35 | import btrfs : getSubvolumeID, deleteSubvolume; 36 | 37 | import btdu.paths : BrowserPath, Mark; 38 | import btdu.state : toFilesystemPath; 39 | 40 | struct Deleter 41 | { 42 | enum Status 43 | { 44 | none, 45 | ready, // confirmation 46 | progress, 47 | success, 48 | error, 49 | subvolumeConfirm, 50 | subvolumeProgress, 51 | } 52 | 53 | struct State 54 | { 55 | Status status; 56 | string current, error; 57 | bool stopping; 58 | } 59 | private State state; 60 | 61 | Thread thread; 62 | Event subvolumeResume; 63 | 64 | State getState() 65 | { 66 | if (thread) 67 | synchronized (this.thread) 68 | return state; 69 | else 70 | return state; 71 | } 72 | 73 | @property bool needRefresh() 74 | { 75 | return this.state.status == Deleter.Status.progress; 76 | } 77 | 78 | /// One item to delete. 79 | struct Item 80 | { 81 | /// The `BrowserPath` of the item to delete. 82 | BrowserPath* browserPath; 83 | /// If set, deletion will stop if the corresponding node has a negative mark. 84 | bool obeyMarks; 85 | } 86 | Item[] items; 87 | 88 | void prepare(Item[] items) 89 | { 90 | assert(this.state.status == Status.none); 91 | 92 | this.items = items; 93 | this.state.current = items.length ? items[0].browserPath.toFilesystemPath.to!string : null; 94 | this.state.status = Status.ready; 95 | } 96 | 97 | void cancel() 98 | { 99 | assert(this.state.status == Status.ready); 100 | this.state.status = Status.none; 101 | } 102 | 103 | void start() 104 | { 105 | assert(this.state.status == Status.ready); 106 | this.state.stopping = false; 107 | this.state.status = Status.progress; 108 | this.subvolumeResume.initialize(false, false); 109 | this.thread = new Thread(&threadFunc); 110 | this.thread.start(); 111 | } 112 | 113 | private void threadFunc() 114 | { 115 | foreach (item; items) 116 | { 117 | auto fsPath = item.browserPath.toFilesystemPath.to!string; 118 | assert(fsPath && fsPath.isAbsolute); 119 | 120 | ulong initialDeviceID; 121 | listDir!(( 122 | // The directory entry 123 | e, 124 | // Only true when `e` is the root (`item.fsPath`) 125 | bool root, 126 | // The corresponding parent `BrowserPath`, if we are to obey marks 127 | BrowserPath* parentBrowserPath, 128 | // We will set this to false if we don't fully clear out this directory 129 | bool* unlinkOK, 130 | ) { 131 | auto entryBrowserPath = 132 | root ? parentBrowserPath : 133 | parentBrowserPath ? 
parentBrowserPath.appendName!true(e.baseNameFS) 134 | : null; 135 | if (entryBrowserPath && entryBrowserPath.mark == Mark.unmarked) 136 | { 137 | if (unlinkOK) *unlinkOK = false; 138 | return; 139 | } 140 | 141 | synchronized(this.thread) 142 | this.state.current = e.fullName; 143 | 144 | if (this.state.stopping) 145 | { 146 | // e.stop(); 147 | // return; 148 | throw new Exception("User abort"); 149 | } 150 | 151 | if (!initialDeviceID) 152 | initialDeviceID = e.needStat!(e.StatTarget.dirEntry)().st_dev; 153 | 154 | bool entryUnlinkOK = true; 155 | if (e.entryIsDir) 156 | { 157 | auto stat = e.needStat!(e.StatTarget.dirEntry)(); 158 | 159 | // A subvolume root, or a different btrfs filesystem is mounted here 160 | auto isTreeRoot = stat.st_ino.among(2, 256); 161 | 162 | if (stat.st_dev != initialDeviceID || isTreeRoot) 163 | { 164 | if (getMounts().canFind!(mount => mount.file == e.fullNameFS)) 165 | throw new Exception("Path resides in another filesystem, stopping"); 166 | enforce(isTreeRoot, "Unexpected st_dev change"); 167 | // Can only be a subvolume going forward. 168 | 169 | bool haveNegativeMarks = false; 170 | if (entryBrowserPath) 171 | entryBrowserPath.enumerateMarks((_, bool isMarked) { if (!isMarked) haveNegativeMarks = true; }); 172 | if (!haveNegativeMarks) // Can't delete subvolume if the user excluded some items inside it. 173 | { 174 | this.state.status = Status.subvolumeConfirm; 175 | this.subvolumeResume.wait(); 176 | if (this.state.stopping) 177 | throw new Exception("User abort"); 178 | 179 | auto fd = openat(e.dirFD, e.baseNameFSPtr, O_RDONLY); 180 | errnoEnforce(fd >= 0, "openat"); 181 | auto subvolumeID = getSubvolumeID(fd); 182 | deleteSubvolume(fd, subvolumeID); 183 | 184 | this.state.status = Status.progress; 185 | return; // The ioctl will also unlink the directory entry 186 | } 187 | } 188 | 189 | e.recurse(false, entryBrowserPath, &entryUnlinkOK); 190 | } 191 | 192 | if (entryUnlinkOK) 193 | { 194 | int ret = unlinkat(e.dirFD, e.baseNameFSPtr, 195 | e.entryIsDir ? AT_REMOVEDIR : 0); 196 | errnoEnforce(ret == 0, "unlinkat failed"); 197 | } 198 | if (unlinkOK) *unlinkOK &= entryUnlinkOK; 199 | }, Yes.includeRoot)( 200 | fsPath, 201 | true, 202 | item.obeyMarks ? item.browserPath : null, 203 | null, 204 | ); 205 | } 206 | } 207 | 208 | void confirm(Flag!"proceed" proceed) 209 | { 210 | assert(this.state.status == Status.subvolumeConfirm); 211 | if (proceed) 212 | this.state.status = Status.subvolumeProgress; 213 | else 214 | { 215 | this.state.stopping = true; 216 | this.state.status = Status.progress; 217 | } 218 | this.subvolumeResume.set(); 219 | } 220 | 221 | void stop() 222 | { 223 | this.state.stopping = true; 224 | } 225 | 226 | void finish() 227 | { 228 | assert(this.state.status.among(Status.success, Status.error)); 229 | this.state.status = Status.none; 230 | } 231 | 232 | void update() 233 | { 234 | if (this.state.status == Status.progress && !this.thread.isRunning()) 235 | { 236 | try 237 | { 238 | this.thread.join(); 239 | 240 | // Success: 241 | this.state.status = Status.success; 242 | } 243 | catch (Exception e) 244 | { 245 | // Failure: 246 | this.state.error = e.msg; 247 | this.state.status = Status.error; 248 | } 249 | this.thread = null; 250 | this.subvolumeResume.terminate(); 251 | } 252 | } 253 | } 254 | 255 | private: 256 | 257 | // TODO: upstream into Druntime 258 | extern (C) int openat(int fd, const char *path, int oflag, ...) 
nothrow @nogc; 259 | extern (C) int unlinkat(int fd, const(char)* pathname, int flags); 260 | enum AT_REMOVEDIR = 0x200; 261 | -------------------------------------------------------------------------------- /CONCEPTS.md: -------------------------------------------------------------------------------- 1 | Concepts 2 | ======== 3 | 4 | Sample size 5 | ----------- 6 | 7 | One sample is one logical offset chosen at random by btdu. Because we know the total size of the filesystem, we can divide this size by the total number of samples to obtain the approximate size of how much data one sample represents. (This size is also shown at the bottom as "Resolution".) 8 | 9 | Confidence 10 | ---------- 11 | 12 | For the represented and exclusive size, btdu displays a confidence range, e.g.: 13 | 14 | - Represented size: ~763.0 GiB (6006 samples), ±16.9 GiB 15 | 16 | This should be interpreted as: given the data btdu collected so far, it is [confident with 95% certainty](https://en.wikipedia.org/wiki/Confidence_interval) that the object size is within 16.9 GiB of 763.0 GiB. 17 | 18 | In the file browser, the confidence range is visually represented in the size graph using question marks. 19 | 20 | Logical vs. physical space 21 | -------------------------- 22 | 23 | Quoting [On-disk format](https://btrfs.wiki.kernel.org/index.php/On-disk_Format): 24 | 25 | > Btrfs makes a distinction between logical and physical addresses. Logical addresses are used in the filesystem structures, while physical addresses are simply byte offsets on a disk. One logical address may correspond to physical addresses on any number of disks, depending on RAID settings. 26 | 27 | In this regard, btdu has two modes of operation: 28 | 29 | - In logical space mode, btdu samples the logical offset space. As such, a 1GB file (containing unique uncompressed unshared data) will show up with a size of 1GB, regardless of whether it is stored in a SINGLE, DUP, or RAID1 profile block group. 30 | - In physical space mode, btdu samples offsets from the underlying block devices, translating each to a logical offset first. The file in the example above will thus show up with a size of 2GB if it is stored on a block group using the RAID1 or DUP profiles. 31 | 32 | In physical space mode, btdu will also show unallocated space (represented as an `` node in the hierarchy root) and any device slack (represented as a `` node). 33 | 34 | Logical space mode is the default. To use physical space mode, run btdu with `--physical` (`-p`). 35 | 36 | Representative location 37 | ----------------------- 38 | 39 | After picking a logical offset to sample, btdu asks btrfs what is located at that offset. btrfs replies with zero or more locations. 40 | Out of these locations, btdu picks one location where it should place the sample within its tree, to *represent* the space occupied by this data. We call this location the *representative* location. 41 | 42 | The way in which btdu selects the representative location aims to prefer better visualization of what the data is used for, i.e., the simplest explanation for what is using this disk space. 43 | For instance, if one location's filesystem path is longer than the other, then the shorter is chosen, as the longer is more likely to point at a snapshot or other redundant clone of the shorter one. 44 | 45 | Examples: 46 | 47 | - For data which is used exactly once, the representative location will be the path to the file which references that data. 
48 | - For data which is used in `/@root/file.txt` and `/@root-20210203/file.txt`, the representative location will be `/@root/file.txt`, because it is shorter. 49 | - For data which is used in `/@root/file1.txt` and `/@root/file2.txt`, the representative location will be `/@root/file1.txt`, because it is lexicographically smaller. 50 | 51 | The "shorter / lexicographically smaller path wins" rule can be overridden by selecting a node and pressing ⇧ ShiftP to prefer this node when selecting a representative location, or ⇧ ShiftI to avoid it. On the command line, you can use the `--prefer` and `--ignore` options, which accept absolute filesystem paths with shell-like pattern syntax (understanding `?`, `*`, `**`, `[a-z]`, `{this,that}`). 52 | 53 | Size metrics 54 | ------------ 55 | 56 | In `--expert` mode, btdu shows four size metrics for tree nodes: 57 | 58 | - **Represented** size 59 | - The represented size of a node is the amount of disk space that this path is *representing*. 60 | - For every logical offset, btdu picks one [representative location](#representative-location) out of all locations that reference that logical offset, and assigns the sample's respective disk space usage to that location. 61 | - This location is thus chosen to *represent* this disk space. So, if a directory's represented size is 1MiB, we can say that this directory is the simplest explanation for what is using that 1MiB of space. 62 | - This metric is most useful in understanding what is using up disk space on a btrfs filesystem, and is what's shown in the btdu directory listings. 63 | - The represented size of a directory is the sum of represented sizes of its children. 64 | - Adding up the represented size for all filesystem objects (btdu tree leaves) adds up to the total size of the filesystem. 65 | 66 | - **Distributed** size 67 | - To calculate the distributed size, btdu evenly *distributes* a sample's respective disk space usage across all locations which reference data from that logical offset. 68 | - Thus, two 1MiB files which share the same 1MiB of data will each have a distributed size of 512KiB. 69 | - The distributed size of a directory is the sum of distributed sizes of its children. 70 | - Adding up the distributed size for all filesystem objects (btdu tree leaves) also adds up to the total size of the filesystem. 71 | 72 | - **Exclusive** size 73 | - The exclusive size represents the samples which are used *only* by this file or directory. 74 | - Specifically, btdu awards exclusive size to the *common prefix* of all paths which reference data from a given logical offset. 75 | - Two files which are perfect clones of each other will thus both have an exclusive size of zero. The same applies to two identical snapshots. 76 | - However, if the two clones are in the same directory, and the data is not used anywhere else, then that data will be represented in the directory's exclusive size. 77 | - The exclusive size can also be described as the amount of space which would be freed if the corresponding object were to be deleted. 78 | - Unlike other size metrics, adding up the exclusive size of all items in a directory may not necessarily add up to the exclusive size of the directory. 79 | 80 | - **Shared** size 81 | - The shared size is the total size including all references of a single logical offset at this location. 82 | - This size generally correlates with the "visible" size, i.e. the size reported by classic space usage analysis tools, such as `du`. 
(However, if compression is used, the shown size will still be after compression.) 83 | - The shared size of a directory is the sum of shared sizes of its children. 84 | - The total shared size will likely exceed the total size of the filesystem, if snapshots or reflinking is used. 85 | 86 | As an illustration, consider a file consisting of unique data (`dd if=/dev/urandom of=a bs=1M count=1`): 87 | 88 | ![](https://raw.githubusercontent.com/gist/CyberShadow/6b6ecfde854ec7d991f8774bc35bbce5/raw/2246dafb074b466c89f9cf3f7a62cd88a44b74e4/single.svg) 89 | 90 | Here is what happens if we clone the file (`cp --reflink=always a b`): 91 | 92 | ![](https://raw.githubusercontent.com/gist/CyberShadow/6b6ecfde854ec7d991f8774bc35bbce5/raw/2246dafb074b466c89f9cf3f7a62cd88a44b74e4/clone.svg) 93 | 94 | Finally, here is what the sizes would look like for two 2M files which share 1M. Note how the represented size adds up to 3M, the total size of the underlying data. 95 | 96 | ![](https://raw.githubusercontent.com/gist/CyberShadow/6b6ecfde854ec7d991f8774bc35bbce5/raw/2246dafb074b466c89f9cf3f7a62cd88a44b74e4/overlap.svg) 97 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | btdu - sampling disk usage profiler for btrfs 2 | ============================================= 3 | 4 |
7 | 8 | Some [btrfs](https://btrfs.wiki.kernel.org/) features may make it difficult to estimate what disk space is being used for: 9 | 10 | - **Subvolumes** allow cheap copy-on-write snapshots of entire filesystem trees, with unmodified data being shared among snapshots 11 | - **File and extent cloning** allow creating cheap copies of files or parts thereof, with extents being stored only once 12 | - **Compression** transparently allows further reducing disk usage 13 | 14 | For these reasons, classic disk usage analyzers such as [ncdu](https://dev.yorhel.nl/ncdu) cannot provide an accurate depiction of actual disk usage. (btrfs compression in particular is challenging to classic analyzers, and [special tools](https://github.com/kilobyte/compsize) must be used to query compressed usage.) 15 | 16 | **btdu** is a sampling disk usage profiler for btrfs. It works according to the following algorithm: 17 | 18 | 1. Pick a random point on the disk in use 19 | 2. Find what is located at that point 20 | 3. Add the path to the results 21 | 4. Repeat the above steps indefinitely 22 | 23 | Though it works by taking random samples, it is "eventually" accurate. 24 | 25 | It differs from classic analyzers through the following properties: 26 | 27 | - btdu starts showing results instantly. Though wildly inaccurate at first, they become progressively more accurate the longer btdu is allowed to run. 28 | - btdu analyzes entire filesystems only. There is no way to analyze only a particular subdirectory or subvolume. 29 | - btdu counts extents used by multiple files only once. (The shortest path is used when placing the sample in the tree for visualization.) 30 | - By nature of its algorithm, btdu works correctly with compression and other btrfs filesystem features. 31 | - Because it queries raw filesystem metadata, btdu requires root privileges to run. 32 | 33 | 34 | Use cases 35 | --------- 36 | 37 | - **Quickly summarize space usage** 38 | 39 | btdu needs to collect only 100 samples to achieve a ~1% resolution, which means it can identify space hogs very quickly. This is useful if the disk is full, and some space must be freed urgently to restore normal operation. 40 | 41 | - **Estimate snapshot size** 42 | 43 | When an extent is in use by multiple files or snapshots, to decide where to place it in the browsable tree, btdu picks the path with the shortest length, or the lexicographically smaller path if the length is the same. An emergent effect of this property is that it can be used to estimate snapshot size, if your snapshots use a fixed-length lexicographically-ordered naming scheme (such as e.g. YYYY-MM-DD-HH-MM-SS): the size of snapshots displayed in btdu will thus indicate data that occurs in that snapshot or any later one, i.e. the amount of "new" data in that snapshot. 44 | 45 | - **Estimate compressed data size** 46 | 47 | If you use btrfs data compression (whether to save space, improve performance, or conserve flash writes), btdu can be used to estimate how much real disk space compressed data uses. 48 | 49 | - **Estimate unreachable extent size** 50 | 51 | A feature unique to btdu is the ability to estimate the amount of space used by unreachable parts of extents, i.e. data in extents containing older versions of file content which has since been overwritten. This btrfs "dark matter" can be an easily overlooked space hog, which can be eliminated by rewriting or defragmenting affected files. 
52 | 53 | - **Understand btrfs space usage** 54 | 55 | btdu shows explanations for hierarchy objects and common errors, which can help understand how btrfs uses disk space. The `--expert` mode enables the collection and display of [additional size metrics](CONCEPTS.md#size-metrics), providing more insight into the allocation of objects with non-trivial sharing. [Logical and physical sampling modes](CONCEPTS.md#logical-vs-physical-space) can help understand RAID space usage, especially when using multiple profiles. 56 | 57 | 58 | Installation 59 | ------------ 60 | 61 | Packaging status 62 | 63 | btdu can be installed in one of the following ways: 64 | 65 | - Via package manager, if it is packaged by your distribution (see on the right). 66 | - Download a static binary from [the releases page](https://github.com/CyberShadow/btdu/releases) 67 | or [the latest CI run](https://github.com/CyberShadow/btdu/actions?query=branch%3Amaster). 68 | - Clone this repository and build from source (see below). 69 | 70 | 71 | Building 72 | -------- 73 | 74 | 1. Install [a D compiler](https://dlang.org/download.html). 75 | Note that you will need a compiler supporting D v2.097 or newer - the compiler in your distribution's repositories might be too old. 76 | 2. Install [Dub](https://github.com/dlang/dub), if it wasn't included with your D compiler. 77 | 3. Install `libncursesw5-dev`, or your distribution's equivalent package. 78 | 4. Run `dub build -b release` 79 | 80 | 81 | Usage 82 | ----- 83 | 84 | Run btdu with root privileges as follows: 85 | 86 | # btdu /path/to/filesystem/root 87 | 88 | Note: The indicated path must be to the top-level subvolume (otherwise btdu will be unable to open other subvolumes for inode resolution). If in doubt, mount the filesystem to a new mountpoint with `-o subvol=/,subvolid=5`. 89 | 90 | Run `btdu --help` for more usage information. 91 | 92 | ### Interactive mode 93 | 94 | By default, btdu launches a terminal user interface where you can browse the results interactively. btdu will keep collecting samples to improve accuracy until it is stopped by quitting or pausing (which you can do by pressing p). 95 | 96 | ### Headless mode 97 | 98 | With the `--headless` switch, btdu runs without the interactive UI and prints a tree of the largest items to stdout: 99 | 100 | # btdu --headless --max-time=30s /mnt/btrfs 101 | /mnt/btrfs (~97.56 GiB) 102 | ├── @home (~45.23 GiB) 103 | │ ├── user (~30.12 GiB) 104 | │ └── .cache (~12.34 GiB) 105 | └── @root (~35.78 GiB) 106 | 107 | This is useful for quickly getting an overview without launching the full UI. Only nodes representing more than 1% of total space are shown. 108 | 109 | In `--expert` mode, four size columns are displayed (represented, distributed, exclusive, shared): 110 | 111 | # btdu --headless --expert --max-time=30s /mnt/btrfs 112 | 113 | For automated invocations or scripting, combine with `--export` to save results to a file that can later be viewed in the UI: 114 | 115 | # btdu --headless --export=results.json --max-time=10m /mnt/btrfs 116 | $ btdu --import results.json 117 | 118 | ### Sampling options 119 | 120 | `--expert` collects additional metrics; `--physical` switches the addressing mode from logical to physical, causing btdu to measure physical disk space. 121 | 122 | Use `--prefer=PATTERN` and `--ignore=PATTERN` to control which path represents shared extents. Patterns are absolute filesystem paths (supporting glob syntax: `*`, `**`, etc.) and must be under the sampled filesystem root. 
Example: `--prefer=/mnt/btrfs/data/.snapshots`. Interactively, you can do this by pressing ⇧ ShiftP and ⇧ ShiftI respectively. 123 | 124 | `--max-samples`, `--max-time`, and `--min-resolution` control when btdu stops sampling (and, in headless mode, exits). 125 | 126 | See [CONCEPTS.md](CONCEPTS.md) for more information about btdu / btrfs concepts, such as represented / exclusive / shared size. 127 | 128 | ### Deleting 129 | 130 | You can delete the selected file or directory from the filesystem by pressing d then ⇧ ShiftY. This will recursively delete the file or directory shown as "Full path". 131 | 132 | Deleting files during a btdu run (whether via btdu or externally) skews the results. When deleting files from btdu, it will make a best-effort attempt to adjust the results to match. Statistics such as exclusive size may be inaccurate. Re-run btdu to obtain fresh results. 133 | 134 | ### Marking 135 | 136 | You can mark or unmark items under the cursor by pressing the space bar. 137 | 138 | Press ⇧ ShiftM to view all marks, and ⇧ ShiftD then ⇧ ShiftY to delete all marked items. 139 | 140 | Press * to invert marks on the current screen. 141 | 142 | In [`--expert` mode](CONCEPTS.md#size-metrics), btdu will show the total exclusive size of (i.e. how much would be freed by deleting) the marked items it the top status bar. 143 | 144 | Marks are saved in exported `.json` files; a boolean field named `"mark"` will be present on marked nodes. 145 | 146 | ### Import and export 147 | 148 | Press ⇧ ShiftO to save an export file during an interactive session; alternatively, pass `--export=FILENAME` to do so automatically on exit. 149 | 150 | Exports can be loaded with `--import`. Then, specify a file name instead of the filesystem path to sample. 151 | 152 | For a more portable (but less detailed) export, use `--du`; the resulting file should then be loadable by any disk usage analyzer which supports loading `du` output. 153 | 154 | License 155 | ------- 156 | 157 | `btdu` is available under the [GNU GPL v2](https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html). (The license is inherited from btrfs-progs.) 158 | 159 | 160 | See Also 161 | -------- 162 | 163 | * [btsdu](https://github.com/rkapl/btsdu), the Btrfs Snapshot Disk Usage Analyzer 164 | -------------------------------------------------------------------------------- /source/btdu/common.d: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2020, 2021, 2022, 2023, 2025 Vladimir Panteleev 3 | * 4 | * This program is free software; you can redistribute it and/or 5 | * modify it under the terms of the GNU General Public 6 | * License v2 as published by the Free Software Foundation. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 | * General Public License for more details. 12 | * 13 | * You should have received a copy of the GNU General Public 14 | * License along with this program; if not, write to the 15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 | * Boston, MA 021110-1307, USA. 
17 | */ 18 | 19 | /// Common definitions 20 | module btdu.common; 21 | 22 | import ae.utils.text.functor : stringifiable; 23 | 24 | import std.format : format, formattedWrite; 25 | import std.random : Random; 26 | import std.traits; 27 | 28 | enum btduVersion = "0.6.1"; 29 | 30 | alias Seed = typeof(Random.defaultSeed); 31 | 32 | // C error messages 33 | 34 | const(char)[] errorString(int errno) 35 | { 36 | import core.stdc.string : strlen, strerror_r; 37 | import std.traits : ReturnType; 38 | 39 | char[1024] buf = void; 40 | const(char)* s; 41 | static if (is(ReturnType!strerror_r == int)) 42 | { 43 | import std.exception : errnoEnforce; 44 | errnoEnforce(strerror_r(errno, buf.ptr, buf.length) == 0, "strerror_r"); 45 | s = buf.ptr; 46 | } 47 | else // GNU 48 | s = strerror_r(errno, buf.ptr, buf.length); 49 | return s[0 .. s.strlen]; 50 | } 51 | 52 | struct Errno 53 | { 54 | string name, description; 55 | } 56 | int[string] errnoLookup; 57 | ref Errno getErrno(int errno) 58 | { 59 | static Errno[int] cache; 60 | return cache.require(errno, { 61 | Errno m; 62 | 63 | // TODO: use strerrorname_np, once glibc 2.32 is generally available everywhere 64 | switch (errno) 65 | { 66 | case 1: m.name = "EPERM"; break; 67 | case 2: m.name = "ENOENT"; break; 68 | case 3: m.name = "ESRCH"; break; 69 | case 4: m.name = "EINTR"; break; 70 | case 5: m.name = "EIO"; break; 71 | case 6: m.name = "ENXIO"; break; 72 | case 7: m.name = "E2BIG"; break; 73 | case 8: m.name = "ENOEXEC"; break; 74 | case 9: m.name = "EBADF"; break; 75 | case 10: m.name = "ECHILD"; break; 76 | case 11: m.name = "EAGAIN"; break; 77 | case 12: m.name = "ENOMEM"; break; 78 | case 13: m.name = "EACCES"; break; 79 | case 14: m.name = "EFAULT"; break; 80 | case 15: m.name = "ENOTBLK"; break; 81 | case 16: m.name = "EBUSY"; break; 82 | case 17: m.name = "EEXIST"; break; 83 | case 18: m.name = "EXDEV"; break; 84 | case 19: m.name = "ENODEV"; break; 85 | case 20: m.name = "ENOTDIR"; break; 86 | case 21: m.name = "EISDIR"; break; 87 | case 22: m.name = "EINVAL"; break; 88 | case 23: m.name = "ENFILE"; break; 89 | case 24: m.name = "EMFILE"; break; 90 | case 25: m.name = "ENOTTY"; break; 91 | case 26: m.name = "ETXTBSY"; break; 92 | case 27: m.name = "EFBIG"; break; 93 | case 28: m.name = "ENOSPC"; break; 94 | case 29: m.name = "ESPIPE"; break; 95 | case 30: m.name = "EROFS"; break; 96 | case 31: m.name = "EMLINK"; break; 97 | case 32: m.name = "EPIPE"; break; 98 | case 33: m.name = "EDOM"; break; 99 | case 34: m.name = "ERANGE"; break; 100 | case 35: m.name = "EDEADLK"; break; 101 | case 36: m.name = "ENAMETOOLONG"; break; 102 | case 37: m.name = "ENOLCK"; break; 103 | case 38: m.name = "ENOSYS"; break; 104 | case 39: m.name = "ENOTEMPTY"; break; 105 | case 40: m.name = "ELOOP"; break; 106 | case 42: m.name = "ENOMSG"; break; 107 | case 43: m.name = "EIDRM"; break; 108 | case 44: m.name = "ECHRNG"; break; 109 | case 45: m.name = "EL2NSYNC"; break; 110 | case 46: m.name = "EL3HLT"; break; 111 | case 47: m.name = "EL3RST"; break; 112 | case 48: m.name = "ELNRNG"; break; 113 | case 49: m.name = "EUNATCH"; break; 114 | case 50: m.name = "ENOCSI"; break; 115 | case 51: m.name = "EL2HLT"; break; 116 | case 52: m.name = "EBADE"; break; 117 | case 53: m.name = "EBADR"; break; 118 | case 54: m.name = "EXFULL"; break; 119 | case 55: m.name = "ENOANO"; break; 120 | case 56: m.name = "EBADRQC"; break; 121 | case 57: m.name = "EBADSLT"; break; 122 | case 59: m.name = "EBFONT"; break; 123 | case 60: m.name = "ENOSTR"; break; 124 | case 61: m.name = 
"ENODATA"; break; 125 | case 62: m.name = "ETIME"; break; 126 | case 63: m.name = "ENOSR"; break; 127 | case 64: m.name = "ENONET"; break; 128 | case 65: m.name = "ENOPKG"; break; 129 | case 66: m.name = "EREMOTE"; break; 130 | case 67: m.name = "ENOLINK"; break; 131 | case 68: m.name = "EADV"; break; 132 | case 69: m.name = "ESRMNT"; break; 133 | case 70: m.name = "ECOMM"; break; 134 | case 71: m.name = "EPROTO"; break; 135 | case 72: m.name = "EMULTIHOP"; break; 136 | case 73: m.name = "EDOTDOT"; break; 137 | case 74: m.name = "EBADMSG"; break; 138 | case 75: m.name = "EOVERFLOW"; break; 139 | case 76: m.name = "ENOTUNIQ"; break; 140 | case 77: m.name = "EBADFD"; break; 141 | case 78: m.name = "EREMCHG"; break; 142 | case 79: m.name = "ELIBACC"; break; 143 | case 80: m.name = "ELIBBAD"; break; 144 | case 81: m.name = "ELIBSCN"; break; 145 | case 82: m.name = "ELIBMAX"; break; 146 | case 83: m.name = "ELIBEXEC"; break; 147 | case 84: m.name = "EILSEQ"; break; 148 | case 85: m.name = "ERESTART"; break; 149 | case 86: m.name = "ESTRPIPE"; break; 150 | case 87: m.name = "EUSERS"; break; 151 | case 88: m.name = "ENOTSOCK"; break; 152 | case 89: m.name = "EDESTADDRREQ"; break; 153 | case 90: m.name = "EMSGSIZE"; break; 154 | case 91: m.name = "EPROTOTYPE"; break; 155 | case 92: m.name = "ENOPROTOOPT"; break; 156 | case 93: m.name = "EPROTONOSUPPORT"; break; 157 | case 94: m.name = "ESOCKTNOSUPPORT"; break; 158 | case 95: m.name = "EOPNOTSUPP"; break; 159 | case 96: m.name = "EPFNOSUPPORT"; break; 160 | case 97: m.name = "EAFNOSUPPORT"; break; 161 | case 98: m.name = "EADDRINUSE"; break; 162 | case 99: m.name = "EADDRNOTAVAIL"; break; 163 | case 100: m.name = "ENETDOWN"; break; 164 | case 101: m.name = "ENETUNREACH"; break; 165 | case 102: m.name = "ENETRESET"; break; 166 | case 103: m.name = "ECONNABORTED"; break; 167 | case 104: m.name = "ECONNRESET"; break; 168 | case 105: m.name = "ENOBUFS"; break; 169 | case 106: m.name = "EISCONN"; break; 170 | case 107: m.name = "ENOTCONN"; break; 171 | case 108: m.name = "ESHUTDOWN"; break; 172 | case 109: m.name = "ETOOMANYREFS"; break; 173 | case 110: m.name = "ETIMEDOUT"; break; 174 | case 111: m.name = "ECONNREFUSED"; break; 175 | case 112: m.name = "EHOSTDOWN"; break; 176 | case 113: m.name = "EHOSTUNREACH"; break; 177 | case 114: m.name = "EALREADY"; break; 178 | case 115: m.name = "EINPROGRESS"; break; 179 | case 116: m.name = "ESTALE"; break; 180 | case 117: m.name = "EUCLEAN"; break; 181 | case 118: m.name = "ENOTNAM"; break; 182 | case 119: m.name = "ENAVAIL"; break; 183 | case 120: m.name = "EISNAM"; break; 184 | case 121: m.name = "EREMOTEIO"; break; 185 | case 122: m.name = "EDQUOT"; break; 186 | case 123: m.name = "ENOMEDIUM"; break; 187 | case 124: m.name = "EMEDIUMTYPE"; break; 188 | case 125: m.name = "ECANCELED"; break; 189 | case 126: m.name = "ENOKEY"; break; 190 | case 127: m.name = "EKEYEXPIRED"; break; 191 | case 128: m.name = "EKEYREVOKED"; break; 192 | case 129: m.name = "EKEYREJECTED"; break; 193 | case 130: m.name = "EOWNERDEAD"; break; 194 | case 131: m.name = "ENOTRECOVERABLE"; break; 195 | case 132: m.name = "ERFKILL"; break; 196 | case 133: m.name = "EHWPOISON"; break; 197 | default: m.name = format!"%d"(errno); 198 | } 199 | errnoLookup.require(m.name, errno); 200 | 201 | m.description = errorString(errno).idup; 202 | 203 | return m; 204 | }()); 205 | } 206 | 207 | // Conversion 208 | 209 | alias humanSize = stringifiable!( 210 | (size, aligned, sink) 211 | { 212 | if (size == 0 && !aligned) 213 | return sink("0"); 214 | 
static immutable prefixChars = " KMGTPEZY"; 215 | size_t power = 0; 216 | while (size > 1000 && power + 1 < prefixChars.length) 217 | { 218 | size /= 1024; 219 | power++; 220 | } 221 | auto digits = 222 | size == 0 ? 1 : 223 | size < 10 ? 3 : 224 | size < 100 ? 2 : 225 | 1; 226 | if (aligned) 227 | sink.formattedWrite!"%5.*f %s%sB"(digits, size, prefixChars[power ], prefixChars[power] == ' ' ? ' ' : 'i'); 228 | else 229 | sink.formattedWrite!"%.*f %s%sB"(digits, size, prefixChars[power] == ' ' ? "" : prefixChars[power .. power + 1], prefixChars[power] == ' ' ? "" : "i"); 230 | }, real, bool); 231 | auto humanSize(real size) { return humanSize(size, false); } 232 | 233 | unittest 234 | { 235 | import std.conv : text; 236 | assert(humanSize(8192).text == "8.000 KiB"); 237 | } 238 | 239 | real parseSize(string s) 240 | { 241 | import std.ascii : isAlpha; 242 | import std.string : strip, endsWith, toUpper, indexOf; 243 | import std.exception : enforce; 244 | import std.conv : to; 245 | 246 | static immutable prefixChars = " KMGTPEZY"; 247 | s = s.strip().toUpper(); 248 | if (s.endsWith("IB")) 249 | s = s[0 .. $-2]; 250 | else 251 | if (s.endsWith("B")) 252 | s = s[0 .. $-1]; 253 | sizediff_t magnitude = 0; 254 | if (s.length && isAlpha(s[$-1])) 255 | { 256 | magnitude = prefixChars.indexOf(s[$-1]); 257 | enforce(magnitude > 0, "Unrecognized size suffix: " ~ s); 258 | s = s[0 .. $-1]; 259 | } 260 | 261 | return s.to!real * (1024.0 ^^ magnitude); 262 | } 263 | 264 | unittest 265 | { 266 | assert(parseSize("0") == 0); 267 | assert(parseSize("1") == 1); 268 | assert(parseSize("1b") == 1); 269 | assert(parseSize("1k") == 1024); 270 | assert(parseSize("1.5k") == 1024 + 512); 271 | assert(parseSize("1.5kb") == 1024 + 512); 272 | assert(parseSize("1.5kib") == 1024 + 512); 273 | } 274 | 275 | alias humanDuration = stringifiable!( 276 | (hnsecs, sink) 277 | { 278 | if (hnsecs == 0) 279 | return sink("0"); 280 | auto d = hnsecs * 100; // nanoseconds 281 | 282 | static immutable units = ["ns", "µs", "ms", "s", "m", "h", "d", "w"]; 283 | static immutable unitSize = [1000, 1000, 1000, 60, 60, 24, 7]; 284 | size_t unitIndex = 0; 285 | while (unitIndex < unitSize.length && d > unitSize[unitIndex]) 286 | { 287 | d /= unitSize[unitIndex]; 288 | unitIndex++; 289 | } 290 | auto digits = d < 1 ? 3 : d < 10 ? 2 : 1; 291 | sink.formattedWrite!"%4.*f%s"(digits, d, units[unitIndex]); 292 | }, real); 293 | 294 | unittest 295 | { 296 | import std.conv : text; 297 | assert(humanDuration(5.5 * 10 * 1000 * 1000).text == "5.50s"); 298 | } 299 | 300 | /// Helper type for formatting pointers without passing their contents by-value. 301 | /// Helps preserve the SubPath invariant (which would be broken by copying). 302 | struct PointerWriter(T) 303 | { 304 | T* ptr; 305 | void toString(scope void delegate(const(char)[]) sink) const 306 | { 307 | ptr.toString(sink); 308 | } 309 | } 310 | PointerWriter!T pointerWriter(T)(T* ptr) { return PointerWriter!T(ptr); } 311 | -------------------------------------------------------------------------------- /source/btdu/subproc.d: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2020, 2021, 2022, 2023, 2025 Vladimir Panteleev 3 | * 4 | * This program is free software; you can redistribute it and/or 5 | * modify it under the terms of the GNU General Public 6 | * License v2 as published by the Free Software Foundation. 
7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 | * General Public License for more details. 12 | * 13 | * You should have received a copy of the GNU General Public 14 | * License along with this program; if not, write to the 15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 | * Boston, MA 021110-1307, USA. 17 | */ 18 | 19 | /// Subprocess management 20 | module btdu.subproc; 21 | 22 | import core.sys.posix.signal; 23 | import core.sys.posix.unistd; 24 | 25 | import std.algorithm.iteration; 26 | import std.algorithm.mutation; 27 | import std.algorithm.searching; 28 | import std.conv; 29 | import std.exception; 30 | import std.file; 31 | import std.process; 32 | import std.random; 33 | import std.socket; 34 | import std.stdio : stdin; 35 | import std.string; 36 | 37 | import ae.utils.array; 38 | 39 | import btrfs.c.kernel_shared.ctree; 40 | 41 | import btdu.alloc; 42 | import btdu.common; 43 | import btdu.paths; 44 | import btdu.proto; 45 | import btdu.state; 46 | 47 | /// Represents one managed subprocess 48 | struct Subprocess 49 | { 50 | Pipe pipe; 51 | Socket socket; 52 | Pid pid; 53 | 54 | void start() 55 | { 56 | pipe = .pipe(); 57 | socket = new Socket(cast(socket_t)pipe.readEnd.fileno.dup, AddressFamily.UNSPEC); 58 | socket.blocking = false; 59 | 60 | pid = spawnProcess( 61 | [ 62 | thisExePath, 63 | "--subprocess", 64 | "--seed", rndGen.uniform!Seed.text, 65 | "--physical=" ~ physical.text, 66 | "--", 67 | fsPath, 68 | ], 69 | stdin, 70 | pipe.writeEnd, 71 | ); 72 | } 73 | 74 | void pause(bool doPause) 75 | { 76 | pid.kill(doPause ? SIGSTOP : SIGCONT); 77 | } 78 | 79 | /// Receive buffer 80 | private ubyte[] buf; 81 | /// Section of buffer containing received and unparsed data 82 | private size_t bufStart, bufEnd; 83 | 84 | /// Called when select() identifies that the process wrote something. 85 | /// Reads one datum; returns `true` if there is more to read. 86 | bool handleInput() 87 | { 88 | auto data = buf[bufStart .. bufEnd]; 89 | auto bytesNeeded = parse(data, this); 90 | bufStart = bufEnd - data.length; 91 | if (bufStart == bufEnd) 92 | bufStart = bufEnd = 0; 93 | if (buf.length < bufEnd + bytesNeeded) 94 | { 95 | // Moving remaining data to the start of the buffer 96 | // may allow us to avoid an allocation. 97 | if (bufStart > 0) 98 | { 99 | copy(buf[bufStart .. bufEnd], buf[0 .. 
bufEnd - bufStart]); 100 | bufEnd -= bufStart; 101 | bufStart -= bufStart; 102 | } 103 | if (buf.length < bufEnd + bytesNeeded) 104 | { 105 | buf.length = bufEnd + bytesNeeded; 106 | buf.length = buf.capacity; 107 | } 108 | } 109 | auto received = read(pipe.readEnd.fileno, buf.ptr + bufEnd, buf.length - bufEnd); 110 | enforce(received != 0, "Unexpected subprocess termination"); 111 | if (received == Socket.ERROR) 112 | { 113 | errnoEnforce(wouldHaveBlocked, "Subprocess read error"); 114 | return false; // Done 115 | } 116 | bufEnd += received; 117 | return true; // Not done 118 | } 119 | 120 | void handleMessage(StartMessage m) 121 | { 122 | if (!totalSize) 123 | { 124 | totalSize = m.totalSize; 125 | devices = m.devices; 126 | } 127 | } 128 | 129 | void handleMessage(NewRootMessage m) 130 | { 131 | if (m.rootID in globalRoots) 132 | return; 133 | 134 | GlobalPath* path; 135 | if (m.parentRootID || m.name.length) 136 | path = new GlobalPath( 137 | *(m.parentRootID in globalRoots).enforce("Unknown parent root"), 138 | subPathRoot.appendPath(m.name), 139 | ); 140 | else 141 | if (m.rootID == BTRFS_FS_TREE_OBJECTID) 142 | path = new GlobalPath(null, &subPathRoot); 143 | else 144 | if (m.rootID == BTRFS_ROOT_TREE_OBJECTID) 145 | path = new GlobalPath(null, subPathRoot.appendName("\0ROOT_TREE")); 146 | else 147 | path = new GlobalPath(null, subPathRoot.appendName(format!"\0TREE_%d"(m.rootID))); 148 | 149 | globalRoots[m.rootID] = path; 150 | } 151 | 152 | private struct Result 153 | { 154 | Offset offset; 155 | BrowserPath* browserPath; 156 | GlobalPath* inodeRoot; 157 | bool haveInode, havePath; 158 | bool ignoringOffset; 159 | } 160 | private Result result; 161 | private FastAppender!GlobalPath allPaths; 162 | 163 | void handleMessage(ResultStartMessage m) 164 | { 165 | result.offset = m.offset; 166 | result.browserPath = &browserRoot; 167 | static immutable flagNames = [ 168 | "DATA", 169 | "SYSTEM", 170 | "METADATA", 171 | "RAID0", 172 | "RAID1", 173 | "DUP", 174 | "RAID10", 175 | "RAID5", 176 | "RAID6", 177 | "RAID1C3", 178 | "RAID1C4", 179 | ].amap!(s => "\0" ~ s); 180 | if (result.offset.logical == logicalOffsetHole) 181 | result.browserPath = result.browserPath.appendName("\0UNALLOCATED"); 182 | else if (result.offset.logical == logicalOffsetSlack) 183 | result.browserPath = result.browserPath.appendName("\0SLACK"); 184 | else if ((m.chunkFlags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0) 185 | result.browserPath = result.browserPath.appendName("\0SINGLE"); 186 | foreach_reverse (b; 0 .. 
flagNames.length) 187 | if (m.chunkFlags & (1UL << b)) 188 | result.browserPath = result.browserPath.appendName(flagNames[b]); 189 | if ((m.chunkFlags & BTRFS_BLOCK_GROUP_DATA) == 0) 190 | result.haveInode = true; // Sampler won't even try 191 | } 192 | 193 | void handleMessage(ResultIgnoringOffsetMessage m) 194 | { 195 | cast(void) m; // empty message 196 | result.ignoringOffset = true; 197 | } 198 | 199 | void handleMessage(ResultInodeStartMessage m) 200 | { 201 | result.haveInode = true; 202 | result.havePath = false; 203 | result.inodeRoot = *(m.rootID in globalRoots).enforce("Unknown inode root"); 204 | } 205 | 206 | void handleMessage(ResultInodeErrorMessage m) 207 | { 208 | allPaths ~= GlobalPath(result.inodeRoot, subPathRoot.appendError(m.error)); 209 | } 210 | 211 | void handleMessage(ResultMessage m) 212 | { 213 | result.havePath = true; 214 | allPaths ~= GlobalPath(result.inodeRoot, subPathRoot.appendPath(m.path)); 215 | } 216 | 217 | void handleMessage(ResultInodeEndMessage m) 218 | { 219 | cast(void) m; // empty message 220 | if (!result.havePath) 221 | allPaths ~= GlobalPath(result.inodeRoot, subPathRoot.appendPath("\0NO_PATH")); 222 | } 223 | 224 | void handleMessage(ResultErrorMessage m) 225 | { 226 | allPaths ~= GlobalPath(null, subPathRoot.appendError(m.error)); 227 | result.haveInode = true; 228 | } 229 | 230 | /// Get or create a sharing group for the given paths 231 | private static SharingGroup* saveSharingGroup(BrowserPath* root, GlobalPath[] paths, Offset offset, ulong duration, out bool isNew) 232 | { 233 | import std.experimental.allocator : makeArray, make; 234 | 235 | // Create a temporary group for lookup 236 | SharingGroup lookupGroup; 237 | lookupGroup.root = root; 238 | lookupGroup.paths = paths; 239 | auto groupKey = SharingGroup.Paths(&lookupGroup); 240 | 241 | auto existingGroupPtr = groupKey in sharingGroups; 242 | SharingGroup* group; 243 | 244 | if (existingGroupPtr) 245 | { 246 | // Reuse existing group 247 | group = existingGroupPtr.group; 248 | if (group.data.samples == 1) 249 | numSingleSampleGroups--; 250 | isNew = false; 251 | } 252 | else 253 | { 254 | // New set of paths - allocate and create new group 255 | auto persistentPaths = growAllocator.makeArray!GlobalPath(paths.length); 256 | persistentPaths[] = paths[]; 257 | auto pathData = growAllocator.makeArray!(SharingGroup.PathData)(paths.length); 258 | pathData[] = SharingGroup.PathData.init; 259 | 260 | // Find the representative index 261 | size_t representativeIndex = size_t.max; 262 | if (persistentPaths.length > 0) 263 | { 264 | auto representativePath = selectRepresentativePath(persistentPaths); 265 | representativeIndex = persistentPaths.countUntil!(p => p is representativePath); 266 | } 267 | 268 | // Create the sharing group 269 | SharingGroup newGroupData; 270 | newGroupData.root = root; 271 | newGroupData.paths = persistentPaths; 272 | newGroupData.pathData = pathData.ptr; 273 | newGroupData.representativeIndex = representativeIndex; 274 | group = sharingGroupAllocator.allocate(); 275 | *group = newGroupData; 276 | 277 | // Add to HashSet for future deduplication 278 | sharingGroups.insert(SharingGroup.Paths(group)); 279 | 280 | numSharingGroups++; 281 | numSingleSampleGroups++; 282 | 283 | isNew = true; 284 | } 285 | 286 | group.data.add(1, (&offset)[0..1], duration); 287 | 288 | // Track when this extent was last seen (shift existing values, add new at end) 289 | auto currentCounter = browserRoot.getSamples(SampleType.represented); 290 | foreach (i; 0 .. 
group.lastSeen.length) 291 | group.lastSeen[i] = i + 1 == group.lastSeen.length 292 | ? currentCounter 293 | : group.lastSeen[i + 1]; 294 | 295 | return group; 296 | } 297 | 298 | void handleMessage(ResultEndMessage m) 299 | { 300 | if (result.ignoringOffset) 301 | { 302 | if (!result.haveInode) 303 | {} // Same with or without BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET 304 | else 305 | result.browserPath = result.browserPath.appendName("\0UNREACHABLE"); 306 | } 307 | 308 | if (!result.haveInode) 309 | result.browserPath = result.browserPath.appendName("\0NO_INODE"); 310 | 311 | auto pathsSlice = allPaths.peek(); 312 | 313 | // Sort paths for consistent hashing/deduplication 314 | { 315 | import std.algorithm.sorting : sort; 316 | import std.typecons : tuple; 317 | pathsSlice.sort!((ref a, ref b) => 318 | tuple(a.parent, a.subPath) < tuple(b.parent, b.subPath) 319 | ); 320 | } 321 | 322 | // Get or create sharing group (even for empty paths - root-only case) 323 | bool isNewGroup; 324 | auto group = saveSharingGroup(result.browserPath, pathsSlice, result.offset, m.duration, isNewGroup); 325 | 326 | // Populate BrowserPath tree from sharing group 327 | populateBrowserPathsFromSharingGroup( 328 | group, 329 | isNewGroup, 330 | 1, // Adding 1 sample 331 | (&result.offset)[0..1], 332 | m.duration 333 | ); 334 | 335 | result = Result.init; 336 | allPaths.clear(); 337 | } 338 | 339 | void handleMessage(FatalErrorMessage m) 340 | { 341 | throw new Exception("Subprocess encountered a fatal error:\n" ~ cast(string)m.msg); 342 | } 343 | } 344 | 345 | private SubPath* appendError(ref SubPath path, ref btdu.proto.Error error) 346 | { 347 | auto result = &path; 348 | 349 | import core.stdc.errno : ENOENT; 350 | if (&path == &subPathRoot && error.errno == ENOENT && error.msg == "logical ino") 351 | return result.appendName("\0UNUSED"); 352 | 353 | result = result.appendName("\0ERROR"); 354 | result = result.appendName(error.msg); 355 | if (error.errno || error.path.length) 356 | { 357 | result = result.appendName(getErrno(error.errno).name); 358 | if (error.path.length) 359 | { 360 | auto errorPath = error.path; 361 | if (!errorPath.skipOver("/")) 362 | debug assert(false, "Non-absolute path: " ~ errorPath); 363 | result = result.appendPath(errorPath); 364 | } 365 | } 366 | return result; 367 | } 368 | -------------------------------------------------------------------------------- /source/btdu/state.d: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2020, 2021, 2022, 2023, 2025 Vladimir Panteleev 3 | * 4 | * This program is free software; you can redistribute it and/or 5 | * modify it under the terms of the GNU General Public 6 | * License v2 as published by the Free Software Foundation. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 | * General Public License for more details. 12 | * 13 | * You should have received a copy of the GNU General Public 14 | * License along with this program; if not, write to the 15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 | * Boston, MA 021110-1307, USA. 
17 | */ 18 | 19 | /// Global state definitions 20 | module btdu.state; 21 | 22 | import std.format : format; 23 | import std.traits : EnumMembers; 24 | 25 | import ae.utils.appender : FastAppender; 26 | import ae.utils.meta : enumLength; 27 | 28 | import btrfs.c.ioctl : btrfs_ioctl_dev_info_args; 29 | import btrfs.c.kerncompat : u64; 30 | 31 | import containers.hashset; 32 | import containers.internal.hash : generateHash; 33 | 34 | import btdu.alloc; 35 | import btdu.paths; 36 | import btdu.subproc : Subprocess; 37 | 38 | // Global variables 39 | 40 | __gshared: // btdu is single-threaded 41 | 42 | bool imported; 43 | bool expert; 44 | bool physical; 45 | bool exportSeenAs; 46 | string fsPath; 47 | ulong totalSize; 48 | btrfs_ioctl_dev_info_args[] devices; 49 | 50 | SubPath subPathRoot; 51 | GlobalPath*[u64] globalRoots; 52 | BrowserPath browserRoot; 53 | 54 | /// Deduplicates sharing groups - multiple samples with the same set of paths 55 | /// will reference the same SharingGroup and just increment its sample count. 56 | HashSet!(SharingGroup.Paths, CasualAllocator, SharingGroup.Paths.hashOf, false, true) sharingGroups; 57 | 58 | /// Slab allocator instance for SharingGroups - enables efficient iteration over all groups. 59 | SlabAllocator!SharingGroup sharingGroupAllocator; 60 | 61 | /// Total number of created sharing groups 62 | size_t numSharingGroups; 63 | /// Number of sharing groups with exactly 1 sample 64 | size_t numSingleSampleGroups; 65 | 66 | BrowserPath marked; /// A fake `BrowserPath` used to represent all marked nodes. 67 | ulong markTotalSamples; /// Number of seen samples since the mark was invalidated. 68 | 69 | /// Called when something is marked or unmarked. 70 | void invalidateMark() 71 | { 72 | markTotalSamples = 0; 73 | if (expert) 74 | marked.resetNodeSamples(SampleType.exclusive); 75 | } 76 | 77 | /// Update stats in `marked` for a redisplay. 78 | void updateMark() 79 | { 80 | static foreach (sampleType; EnumMembers!SampleType) 81 | if (sampleType != SampleType.exclusive) 82 | marked.resetNodeSamples(sampleType); 83 | marked.resetDistributedSamples(); 84 | 85 | browserRoot.enumerateMarks( 86 | (const BrowserPath* path, bool isMarked) 87 | { 88 | if (isMarked) 89 | { 90 | static foreach (sampleType; EnumMembers!SampleType) 91 | if (sampleType != SampleType.exclusive) 92 | marked.addSamples(sampleType, path.getSamples(sampleType), path.getOffsets(sampleType)[], path.getDuration(sampleType)); 93 | marked.addDistributedSample(path.getDistributedSamples(), path.getDistributedDuration()); 94 | } 95 | else 96 | { 97 | static foreach (sampleType; EnumMembers!SampleType) 98 | if (sampleType != SampleType.exclusive) 99 | marked.removeSamples(sampleType, path.getSamples(sampleType), path.getOffsets(sampleType)[], path.getDuration(sampleType)); 100 | marked.removeDistributedSample(path.getDistributedSamples(), path.getDistributedDuration()); 101 | } 102 | } 103 | ); 104 | } 105 | 106 | /// Returns the total number of unique samples collected by btdu since `p` was born. 107 | /// Comparing the returned number with the number of samples recorded in `p` itself 108 | /// can give us a proportion of how much disk space `p` is using. 109 | ulong getTotalUniqueSamplesFor(BrowserPath* p) 110 | { 111 | if (p is &marked) 112 | { 113 | // The `marked` node is special in that, unlike every real `BrowserPath`, 114 | // it exists for some fraction of the time since when btdu started running 115 | // (since the point it was last "invalidated"). 
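		// markTotalSamples is reset together with the mark, so it counts only
		// the samples observed during that window.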
116 | return markTotalSamples; 117 | } 118 | else 119 | { 120 | // We assume that all seen paths equally existed since btdu was started. 121 | 122 | // We use `browserRoot` because we add samples to all nodes going up in the hierarchy, 123 | // so we will always include `browserRoot`. 124 | auto reference = &browserRoot; 125 | 126 | // We use `SampleType.represented` because 127 | // 1. It is always going to be collected 128 | // (it's the only sample type collected in non-expert mode); 129 | // 2. At the root level, it will exactly correspond to the total number 130 | // of samples collected. 131 | enum type = SampleType.represented; 132 | 133 | return reference.getSamples(type); 134 | } 135 | } 136 | 137 | Subprocess[] subprocesses; 138 | bool paused; 139 | debug bool importing; 140 | 141 | bool toFilesystemPath(BrowserPath* path, void delegate(const(char)[]) sink) 142 | { 143 | sink(fsPath); 144 | bool recurse(BrowserPath *path) 145 | { 146 | string name = path.name[]; 147 | if (name.skipOverNul()) 148 | switch (name) 149 | { 150 | case "DATA": 151 | case "UNREACHABLE": 152 | return true; 153 | default: 154 | return false; 155 | } 156 | if (path.parent) 157 | { 158 | if (!recurse(path.parent)) 159 | return false; 160 | } 161 | else 162 | { 163 | if (path is &marked) 164 | return false; 165 | } 166 | sink("/"); 167 | sink(name); 168 | return true; 169 | } 170 | return recurse(path); 171 | } 172 | 173 | auto toFilesystemPath(BrowserPath* path) 174 | { 175 | import ae.utils.functor.primitives : functor; 176 | import ae.utils.text.functor : stringifiable; 177 | return path 178 | .functor!((path, writer) => path.toFilesystemPath(writer)) 179 | .stringifiable; 180 | } 181 | 182 | /// Populate BrowserPath tree from a sharing group. 183 | /// Params: 184 | /// group = The sharing group to process 185 | /// needsLinking = Whether to link the group to BrowserPaths' firstSharingGroup lists 186 | /// (true for new groups and for rebuild after reset) 187 | /// samples = Number of samples to add 188 | /// offsets = Sample offsets to record 189 | /// duration = Total duration for these samples 190 | void populateBrowserPathsFromSharingGroup( 191 | SharingGroup* group, 192 | bool needsLinking, 193 | ulong samples, 194 | const(Offset)[] offsets, 195 | ulong duration 196 | ) 197 | { 198 | bool allMarked = true; 199 | auto root = group.root; 200 | auto paths = group.paths; 201 | 202 | // Handle empty paths case (root-only, no sharing) 203 | if (paths.length == 0) 204 | { 205 | root.addSamples(SampleType.represented, samples, offsets, duration); 206 | if (expert) 207 | { 208 | root.addSamples(SampleType.shared_, samples, offsets, duration); 209 | root.addSamples(SampleType.exclusive, samples, offsets, duration); 210 | root.addDistributedSample(samples, duration); 211 | } 212 | allMarked = root.getEffectiveMark(); 213 | // Update global marked state 214 | markTotalSamples += samples; 215 | if (allMarked && expert) 216 | marked.addSamples(SampleType.exclusive, samples, offsets, duration); 217 | return; 218 | } 219 | 220 | // Link sharing groups to BrowserPaths' firstSharingGroup list 221 | auto representativeIndex = group.representativeIndex; 222 | if (needsLinking) 223 | { 224 | // In expert mode, link this group to all BrowserPaths 225 | // In non-expert mode, only link to the representative 226 | if (expert) 227 | { 228 | // Link to all BrowserPaths 229 | foreach (i, ref path; paths) 230 | { 231 | auto pathBrowserPath = root.appendPath(&path); 232 | group.pathData[i].path = pathBrowserPath; 233 | 
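					// Prepend this group onto the BrowserPath's intrusive
					// singly-linked list of sharing groups.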
group.pathData[i].next = pathBrowserPath.firstSharingGroup; 234 | pathBrowserPath.firstSharingGroup = group; 235 | } 236 | } 237 | else 238 | { 239 | // Only link to representative path 240 | auto representativeBrowserPath = root.appendPath(&paths[representativeIndex]); 241 | group.pathData[representativeIndex].path = representativeBrowserPath; 242 | group.pathData[representativeIndex].next = representativeBrowserPath.firstSharingGroup; 243 | representativeBrowserPath.firstSharingGroup = group; 244 | } 245 | } 246 | 247 | // Add represented samples to the representative path (using cached path) 248 | group.pathData[representativeIndex].path.addSamples(SampleType.represented, samples, offsets, duration); 249 | 250 | if (expert) 251 | { 252 | auto distributedSamples = double(samples) / paths.length; 253 | auto distributedDuration = double(duration) / paths.length; 254 | 255 | static FastAppender!(BrowserPath*) browserPaths; 256 | browserPaths.clear(); 257 | foreach (i, ref path; paths) 258 | { 259 | auto browserPath = group.pathData[i].path; 260 | browserPaths.put(browserPath); 261 | 262 | browserPath.addSamples(SampleType.shared_, samples, offsets, duration); 263 | browserPath.addDistributedSample(distributedSamples, distributedDuration); 264 | } 265 | 266 | auto exclusiveBrowserPath = BrowserPath.commonPrefix(browserPaths.peek()); 267 | exclusiveBrowserPath.addSamples(SampleType.exclusive, samples, offsets, duration); 268 | 269 | foreach (ref path; browserPaths.get()) 270 | if (!path.getEffectiveMark()) 271 | { 272 | allMarked = false; 273 | break; 274 | } 275 | } 276 | else 277 | { 278 | if (false) // `allMarked` result will not be used in non-expert mode anyway... 279 | foreach (ref path; paths) 280 | { 281 | auto browserPath = root.appendPath!true(&path); 282 | if (browserPath && !browserPath.getEffectiveMark()) 283 | { 284 | allMarked = false; 285 | break; 286 | } 287 | } 288 | } 289 | 290 | // Update global marked state 291 | markTotalSamples += samples; 292 | if (allMarked && expert) 293 | marked.addSamples(SampleType.exclusive, samples, offsets, duration); 294 | } 295 | 296 | /// Rebuild the BrowserPath tree from all SharingGroups. 297 | /// Call this after changing pathRules to recompute representative paths. 
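/// The tree is repopulated by replaying the samples already recorded in the
/// sharing groups, so no new sampling is needed.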
298 | /// Params: 299 | /// progress = Optional callback called with progress message string 300 | void rebuildFromSharingGroups(scope void delegate(const(char)[]) progress = null) 301 | { 302 | void report(const(char)[] msg) { if (progress) progress(msg); } 303 | 304 | // Reset all BrowserPath sample data and sharing group links 305 | report("Resetting tree..."); 306 | browserRoot.reset(); 307 | markTotalSamples = 0; 308 | 309 | auto total = sharingGroupAllocator.length; 310 | if (total == 0) 311 | return; 312 | 313 | auto step = total / 100; 314 | if (step == 0) 315 | step = 1; 316 | 317 | size_t processed; 318 | int lastPercent = -1; 319 | 320 | // Rebuild from all sharing groups 321 | foreach (ref group; sharingGroupAllocator) 322 | { 323 | // Recalculate which path is the representative under current rules 324 | group.representativeIndex = selectRepresentativeIndex(group.paths); 325 | 326 | // Repopulate BrowserPath tree from this group's stored data 327 | populateBrowserPathsFromSharingGroup( 328 | &group, 329 | true, // needsLinking - always true for rebuild 330 | group.data.samples, 331 | group.data.offsets[], 332 | group.data.duration 333 | ); 334 | 335 | processed++; 336 | if (progress && processed % step == 0) 337 | { 338 | auto percent = cast(int)(processed * 100 / total); 339 | if (percent != lastPercent) 340 | { 341 | lastPercent = percent; 342 | report(format!"Rebuilding... %d%%"(percent)); 343 | } 344 | } 345 | } 346 | 347 | report("Done."); 348 | } 349 | -------------------------------------------------------------------------------- /source/btdu/sample.d: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2020, 2021, 2022, 2023 Vladimir Panteleev 3 | * 4 | * This program is free software; you can redistribute it and/or 5 | * modify it under the terms of the GNU General Public 6 | * License v2 as published by the Free Software Foundation. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 | * General Public License for more details. 12 | * 13 | * You should have received a copy of the GNU General Public 14 | * License along with this program; if not, write to the 15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 | * Boston, MA 021110-1307, USA. 
17 | */ 18 | 19 | /// Sampling subprocess implementation 20 | module btdu.sample; 21 | 22 | import core.stdc.errno; 23 | import core.sys.posix.fcntl; 24 | import core.sys.posix.sys.ioctl : ioctl, _IOR; 25 | import core.sys.posix.unistd; 26 | 27 | import std.algorithm.comparison : among; 28 | import std.algorithm.iteration; 29 | import std.algorithm.searching : countUntil; 30 | import std.bigint; 31 | import std.conv : to; 32 | import std.datetime.stopwatch; 33 | import std.exception; 34 | import std.random; 35 | import std.string; 36 | 37 | import ae.sys.shutdown; 38 | import ae.utils.aa : addNew; 39 | import ae.utils.appender; 40 | import ae.utils.meta : I; 41 | import ae.utils.time : stdTime; 42 | 43 | import btrfs; 44 | import btrfs.c.ioctl : btrfs_ioctl_dev_info_args; 45 | import btrfs.c.kerncompat; 46 | import btrfs.c.kernel_shared.ctree; 47 | 48 | import btdu.alloc : StaticAppender; 49 | import btdu.common : errorString; 50 | import btdu.proto; 51 | 52 | void subprocessMain(string fsPath, bool physical) 53 | { 54 | try 55 | { 56 | // Ignore SIGINT/SIGTERM, because the main process will handle it for us. 57 | // We want the main process to receive and process the signal before any child 58 | // processes do, otherwise the main process doesn't know if the child exited due to an 59 | // abrupt failure or simply because it received and processed the signal before it did. 60 | addShutdownHandler((reason) {}); 61 | 62 | // stderr.writeln("Opening filesystem..."); 63 | int fd = open(fsPath.toStringz, O_RDONLY); 64 | errnoEnforce(fd >= 0, "open"); 65 | 66 | // stderr.writeln("Reading chunks..."); 67 | 68 | /// Represents one continuous sampling zone, 69 | /// in physical or logical space (depending on the mode). 70 | /// Represents one physical extent or one logical chunk. 71 | static struct ChunkInfo 72 | { 73 | u64 type; 74 | u64 logicalOffset, logicalLength; 75 | u64 devID; 76 | u64 physicalOffset, physicalLength; 77 | u64 numStripes, stripeIndex, stripeLength; 78 | } 79 | @property u64 length(ChunkInfo c) { return physical ? c.physicalLength : c.logicalLength; } 80 | 81 | ChunkInfo[] chunks; 82 | btrfs_ioctl_dev_info_args[] devices; 83 | 84 | if (!physical) // logical mode 85 | { 86 | enumerateChunks(fd, (u64 offset, const ref btrfs_chunk chunk) { 87 | chunks ~= ChunkInfo( 88 | chunk.type, 89 | offset, chunk.length, 90 | -1, 91 | -1, 0, 92 | ); 93 | }); 94 | } 95 | else // physical mode 96 | { 97 | btrfs_chunk[u64] chunkLookup; 98 | btrfs_stripe[][u64] stripeLookup; 99 | enumerateChunks(fd, (u64 offset, const ref btrfs_chunk chunk) { 100 | chunkLookup.addNew(offset, cast()chunk).enforce("Chunk with duplicate offset"); 101 | stripeLookup.addNew(offset, chunk.stripe.ptr[0 .. 
chunk.num_stripes].dup).enforce("Chunk with duplicate offset"); 102 | }); 103 | 104 | debug u64 totalSlack; 105 | 106 | devices = getDevices(fd); 107 | 108 | foreach (ref device; devices) 109 | { 110 | u64 lastOffset = 0; 111 | void flushHole(u64 dataStart, u64 dataEnd, u64 holeType = logicalOffsetHole) 112 | { 113 | if (dataStart != lastOffset) 114 | { 115 | enforce(lastOffset < dataStart, "Unordered extents"); 116 | chunks ~= ChunkInfo( 117 | 0, 118 | holeType, 0, 119 | device.devid, 120 | lastOffset, dataStart - lastOffset, 121 | ); 122 | } 123 | lastOffset = dataEnd; 124 | } 125 | enumerateDevExtents(fd, (u64 devid, u64 offset, const ref btrfs_dev_extent extent) { 126 | flushHole(offset, offset + extent.length); 127 | auto chunk = (extent.chunk_offset in chunkLookup).enforce("Chunk for extent not found"); 128 | auto stripes = stripeLookup[extent.chunk_offset]; 129 | auto stripeIndex = stripes.countUntil!((ref stripe) => stripe.devid == devid && stripe.offset == offset); 130 | enforce(stripeIndex >= 0, "Stripe for extent not found in chunk"); 131 | 132 | auto logicalOffset = extent.chunk_offset; 133 | auto logicalLength = chunk.length; 134 | auto physicalOffset = offset; 135 | auto physicalLength = extent.length; 136 | chunks ~= ChunkInfo( 137 | chunk.type, 138 | logicalOffset, logicalLength, 139 | devid, 140 | physicalOffset, physicalLength, 141 | chunk.num_stripes, stripeIndex, chunk.stripe_len, 142 | ); 143 | }, [device.devid, device.devid]); 144 | flushHole(device.total_bytes, device.total_bytes); 145 | 146 | u64 deviceSize = { 147 | int devFd = open(cast(char*)device.path.ptr, O_RDONLY); 148 | if (devFd < 0) 149 | return 0; 150 | scope(exit) close(devFd); 151 | 152 | stat_t st; 153 | int ret = fstat(devFd, &st); 154 | if (ret < 0) 155 | return 0; 156 | if (S_ISREG(st.st_mode)) 157 | return st.st_size; 158 | if (!S_ISBLK(st.st_mode)) 159 | return 0; 160 | 161 | u64 size; 162 | if (ioctl(devFd, BLKGETSIZE64, &size) < 0) 163 | return 0; 164 | return size; 165 | }(); 166 | 167 | if (deviceSize > device.total_bytes) 168 | { 169 | flushHole(deviceSize, deviceSize, logicalOffsetSlack); 170 | debug totalSlack += deviceSize - device.total_bytes; 171 | } 172 | } 173 | 174 | // The sum of sizes of all chunks should be equal to the sum of sizes of all devices plus the total slack. 175 | debug assert(chunks.map!((ref chunk) => chunk.I!length).sum == devices.map!((ref device) => device.total_bytes).sum + totalSlack); 176 | } 177 | 178 | u64 totalSize = chunks.map!((ref chunk) => chunk.I!length).sum; 179 | // stderr.writefln("Found %d chunks with a total size of %d.", chunks.length, totalSize); 180 | send(StartMessage(totalSize, devices)); 181 | 182 | while (true) 183 | { 184 | auto targetPos = uniform(0, totalSize); 185 | u64 pos = 0; 186 | foreach (ref chunk; chunks) 187 | { 188 | auto end = pos + chunk.I!length; 189 | if (end > targetPos) 190 | { 191 | auto sw = StopWatch(AutoStart.yes); 192 | 193 | u64 logicalOffset, physicalOffset; 194 | if (!physical) 195 | logicalOffset = chunk.logicalOffset + (targetPos - pos); 196 | else 197 | { 198 | u64 physicalOffsetInExtent = (targetPos - pos); 199 | physicalOffset = chunk.physicalOffset + physicalOffsetInExtent; 200 | 201 | if (chunk.logicalOffset.among(logicalOffsetHole, logicalOffsetSlack)) 202 | { 203 | logicalOffset = chunk.logicalOffset; 204 | } 205 | else 206 | { 207 | // This is an approximation. 208 | // The exact algorithm is rather complicated, see btrfs_map_block or btrfs_map_physical.c. 
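						// Here, the physical stripe index is simply scaled by
						// logicalLength / physicalLength to get an approximate logical stripe index.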
209 | // Because data is distributed uniformly anyway, the only reason why we would 210 | // want to use the full algorithm would be to provide accurate offsets. 211 | // For RAID5/6 the calculation would need to be partially meaningless anyway, 212 | // as the parity blocks don't correspond to any particular single logical offset. 213 | auto physicalStripeIndex = physicalOffsetInExtent / chunk.stripeLength; 214 | auto offsetInStripe = physicalOffsetInExtent % chunk.stripeLength; 215 | auto logicalStripeIndex = (BigInt(physicalStripeIndex) * chunk.logicalLength / chunk.physicalLength).to!ulong; 216 | logicalOffset = chunk.logicalOffset + logicalStripeIndex * chunk.stripeLength + offsetInStripe; 217 | } 218 | } 219 | 220 | send(ResultStartMessage(chunk.type, Offset(logicalOffset, chunk.devID, physicalOffset))); 221 | 222 | if (chunk.type & BTRFS_BLOCK_GROUP_DATA) 223 | { 224 | foreach (ignoringOffset; [false, true]) 225 | { 226 | try 227 | { 228 | bool called; 229 | logicalIno(fd, logicalOffset, 230 | (u64 inode, u64 offset, u64 rootID) 231 | { 232 | called = true; 233 | 234 | // writeln("- ", inode, " ", offset, " ", root); 235 | cast(void) offset; // unused 236 | 237 | // Send new roots before the inode start 238 | cast(void)getRoot(fd, rootID); 239 | 240 | send(ResultInodeStartMessage(rootID)); 241 | 242 | try 243 | { 244 | static StaticAppender!char pathBuf; 245 | pathBuf.clear(); 246 | pathBuf.put(fsPath); 247 | 248 | void putRoot(u64 rootID) 249 | { 250 | auto root = getRoot(fd, rootID); 251 | if (root is Root.init) 252 | enforce(rootID == BTRFS_FS_TREE_OBJECTID, "Unresolvable root"); 253 | else 254 | putRoot(root.parent); 255 | if (root.path) 256 | { 257 | pathBuf.put('/'); 258 | pathBuf.put(root.path); 259 | } 260 | } 261 | putRoot(rootID); 262 | pathBuf.put('\0'); 263 | 264 | int rootFD = open(pathBuf.peek().ptr, O_RDONLY); 265 | if (rootFD < 0) 266 | { 267 | send(ResultInodeErrorMessage(btdu.proto.Error("open", errno, pathBuf.peek()[0 .. $-1]))); 268 | return; 269 | } 270 | scope(exit) close(rootFD); 271 | 272 | inoPaths(rootFD, inode, (char[] fn) { 273 | send(ResultMessage(fn)); 274 | }); 275 | send(ResultInodeEndMessage()); 276 | } 277 | catch (Exception e) 278 | send(ResultInodeErrorMessage(e.toError)); 279 | }, 280 | ignoringOffset, 281 | ); 282 | if (!called && !ignoringOffset) 283 | { 284 | // Retry with BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET 285 | send(ResultIgnoringOffsetMessage()); 286 | continue; 287 | } 288 | } 289 | catch (Exception e) 290 | send(ResultErrorMessage(e.toError)); 291 | break; 292 | } 293 | } 294 | send(ResultEndMessage(sw.peek.stdTime)); 295 | break; 296 | } 297 | pos = end; 298 | } 299 | } 300 | } 301 | catch (Throwable e) 302 | { 303 | debug 304 | send(FatalErrorMessage(e.toString())); 305 | else 306 | send(FatalErrorMessage(e.msg)); 307 | } 308 | } 309 | 310 | private: 311 | 312 | enum BLKGETSIZE64 = _IOR!size_t(0x12, 114); 313 | 314 | struct Root 315 | { 316 | u64 parent; 317 | string path; 318 | } 319 | Root[u64] roots; 320 | 321 | /// Performs memoized resolution of the path for a btrfs root object. 
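/// Results are cached in `roots`; each newly resolved root is announced to the
/// main process via `NewRootMessage`, parents before children.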
322 | Root getRoot(int fd, __u64 rootID) 323 | { 324 | if (auto existing = rootID in roots) 325 | return *existing; 326 | 327 | Root result; 328 | findRootBackRef( 329 | fd, 330 | rootID, 331 | ( 332 | __u64 parentRootID, 333 | __u64 dirID, 334 | __u64 sequence, 335 | char[] name, 336 | ) { 337 | cast(void) sequence; // unused 338 | 339 | inoLookup( 340 | fd, 341 | parentRootID, 342 | dirID, 343 | (char[] dirPath) 344 | { 345 | if (result !is Root.init) 346 | throw new Exception("Multiple root locations"); 347 | result.path = cast(string)(dirPath ~ name); 348 | result.parent = parentRootID; 349 | } 350 | ); 351 | } 352 | ); 353 | 354 | // Ensure parents are written first 355 | if (result !is Root.init) 356 | cast(void)getRoot(fd, result.parent); 357 | 358 | send(NewRootMessage(rootID, result.parent, result.path)); 359 | 360 | roots[rootID] = result; 361 | return result; 362 | } 363 | 364 | btdu.proto.Error toError(Exception e) 365 | { 366 | btdu.proto.Error error; 367 | error.msg = e.msg; 368 | if (auto ex = cast(ErrnoException) e) 369 | { 370 | // Convert to errno + string 371 | import std.range : chain; 372 | auto suffix = chain(" (".representation, errorString(ex.errno).representation, ")".representation); 373 | if (error.msg.endsWith(suffix)) 374 | { 375 | error.msg = error.msg[0 .. $ - suffix.length]; 376 | error.errno = ex.errno; 377 | } 378 | else 379 | debug assert(false, "Unexpected ErrnoException message: " ~ error.msg); 380 | } 381 | return error; 382 | } 383 | -------------------------------------------------------------------------------- /source/btdu/main.d: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2020, 2021, 2022, 2023, 2025 Vladimir Panteleev 3 | * 4 | * This program is free software; you can redistribute it and/or 5 | * modify it under the terms of the GNU General Public 6 | * License v2 as published by the Free Software Foundation. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 | * General Public License for more details. 12 | * 13 | * You should have received a copy of the GNU General Public 14 | * License along with this program; if not, write to the 15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 | * Boston, MA 021110-1307, USA. 
17 | */ 18 | 19 | /// btdu entry point 20 | module btdu.main; 21 | 22 | import core.runtime : Runtime; 23 | import core.time; 24 | 25 | import std.algorithm.iteration; 26 | import std.algorithm.searching; 27 | import std.array; 28 | import std.conv : to; 29 | import std.exception; 30 | import std.parallelism : totalCPUs; 31 | import std.path; 32 | import std.random; 33 | import std.socket; 34 | import std.stdio : stdin, stdout, stderr; 35 | import std.string; 36 | import std.typecons; 37 | 38 | import ae.sys.file : getMounts, getPathMountInfo, MountInfo; 39 | import ae.sys.shutdown; 40 | import ae.utils.funopt; 41 | import ae.utils.main; 42 | import ae.utils.time.parsedur; 43 | import ae.utils.typecons : require; 44 | 45 | import btdu.ui.browser; 46 | import btdu.common; 47 | import btdu.impexp : importData, exportData, exportDu, exportHuman; 48 | import btdu.paths; 49 | import btdu.sample; 50 | import btdu.subproc; 51 | import btdu.state; 52 | 53 | @(`Sampling disk usage profiler for btrfs.`) 54 | void program( 55 | Parameter!(string, "Path to the root of the filesystem to analyze") path, 56 | Option!(uint, "Number of sampling subprocesses\n (default is number of logical CPUs for this system)", "N", 'j') procs = 0, 57 | Option!(Seed, "Random seed used to choose samples") seed = 0, 58 | Switch!hiddenOption subprocess = false, 59 | Switch!("Measure physical space (instead of logical).", 'p') physical = false, 60 | Switch!("Expert mode: collect and show additional metrics.\nUses more memory.", 'x') expert = false, 61 | Switch!hiddenOption man = false, 62 | Option!(string, "Set UI refresh interval.\nSpecify 0 to refresh as fast as possible.", "DURATION", 'i', "interval") refreshIntervalStr = null, 63 | Switch!("Run without launching the result browser UI.") headless = false, 64 | Option!(ulong, "Stop after collecting N samples.", "N", 'n') maxSamples = 0, 65 | Option!(string, "Stop after running for this duration.", "DURATION") maxTime = null, 66 | Option!(string, `Stop after achieving this resolution (e.g. "1MB" or "1%").`, "SIZE") minResolution = null, 67 | Switch!hiddenOption exitOnLimit = false, 68 | Option!(string, "On exit, export the collected results to the given file.", "PATH", 'o', "export") exportPath = null, 69 | Switch!("When exporting, include 'seenAs' data showing shared paths.") exportSeenAs = false, 70 | Option!(string[], "Prioritize allocating representative samples in the given path.", "PATTERN") prefer = null, 71 | Option!(string[], "Deprioritize allocating representative samples in the given path.", "PATTERN") ignore = null, 72 | Switch!("On exit, export represented size estimates in 'du' format to standard output.") du = false, 73 | Switch!("Instead of analyzing a btrfs filesystem, read previously collected results saved with --export from PATH.", 'f', "import") doImport = false, 74 | ) 75 | { 76 | if (man) 77 | { 78 | stdout.write(generateManPage!program( 79 | "btdu", 80 | ".B btdu 81 | is a sampling disk usage profiler for btrfs. 
82 | 83 | For a detailed description, please see the full documentation: 84 | 85 | .I https://github.com/CyberShadow/btdu#readme", 86 | null, 87 | `.SH BUGS 88 | Please report defects and enhancement requests to the GitHub issue tracker: 89 | 90 | .I https://github.com/CyberShadow/btdu/issues 91 | 92 | .SH AUTHORS 93 | 94 | \fBbtdu\fR is written by Vladimir Panteleev and contributors: 95 | 96 | .I https://github.com/CyberShadow/btdu/graphs/contributors 97 | `, 98 | )); 99 | return; 100 | } 101 | 102 | if (doImport) 103 | { 104 | if (procs || seed || subprocess || expert || physical || maxSamples || maxTime || minResolution || exportPath || prefer || ignore) 105 | throw new Exception("Conflicting command-line options"); 106 | 107 | stderr.writeln("Loading results from file..."); 108 | importData(path); 109 | } 110 | else 111 | { 112 | rndGen = Random(seed); 113 | fsPath = path.buildNormalizedPath; 114 | 115 | .expert = expert; 116 | .physical = physical; 117 | .exportSeenAs = exportSeenAs; 118 | 119 | // TODO: respect CLI order (needs std.getopt and ae.utils.funopt changes) 120 | PathRule[] rules; 121 | rules ~= prefer.map!(p => PathRule(PathRule.Type.prefer, parsePathPattern(p, fsPath))).array; 122 | rules ~= ignore.map!(p => PathRule(PathRule.Type.ignore, parsePathPattern(p, fsPath))).array; 123 | .pathRules = rules; 124 | 125 | if (subprocess) 126 | return subprocessMain(path, physical); 127 | 128 | checkBtrfs(fsPath); 129 | 130 | if (procs == 0) 131 | procs = totalCPUs; 132 | 133 | subprocesses = new Subprocess[procs]; 134 | foreach (ref subproc; subprocesses) 135 | subproc.start(); 136 | } 137 | 138 | Duration parsedMaxTime; 139 | if (maxTime) 140 | parsedMaxTime = parseDuration(maxTime); 141 | 142 | @property real parsedMinResolution() 143 | { 144 | static Nullable!real value; 145 | assert(minResolution && totalSize); 146 | return value.require({ 147 | if (minResolution.value.endsWith("%")) 148 | return minResolution[0 .. 
$-1].to!real / 100 * totalSize; 149 | return parseSize(minResolution); 150 | }()); 151 | } 152 | 153 | Browser browser; 154 | if (!headless) 155 | { 156 | browser.start(); 157 | browser.update(); 158 | } 159 | 160 | auto startTime = MonoTime.currTime(); 161 | auto refreshInterval = 500.msecs; 162 | if (refreshIntervalStr) 163 | refreshInterval = parseDuration(refreshIntervalStr); 164 | auto nextRefresh = startTime; 165 | 166 | enum totalMaxDuration = 1.seconds / 60; // 60 FPS 167 | 168 | auto readSet = new SocketSet; 169 | auto exceptSet = new SocketSet; 170 | 171 | bool run = true; 172 | if (headless) 173 | { 174 | // In non-headless mode, ncurses takes care of this 175 | addShutdownHandler((reason) { 176 | run = false; 177 | }); 178 | 179 | if (doImport) 180 | run = false; 181 | } 182 | 183 | // Main event loop 184 | while (run) 185 | { 186 | readSet.reset(); 187 | exceptSet.reset(); 188 | if (browser.curses.stdinSocket) 189 | { 190 | readSet.add(browser.curses.stdinSocket); 191 | exceptSet.add(browser.curses.stdinSocket); 192 | } 193 | if (!paused) 194 | foreach (ref subproc; subprocesses) 195 | readSet.add(subproc.socket); 196 | 197 | if (!headless && browser.needRefresh()) 198 | Socket.select(readSet, null, exceptSet, refreshInterval); 199 | else 200 | Socket.select(readSet, null, exceptSet); 201 | auto now = MonoTime.currTime(); 202 | 203 | if (browser.curses.stdinSocket && browser.handleInput()) 204 | { 205 | do {} while (browser.handleInput()); // Process all input 206 | if (browser.done) 207 | break; 208 | browser.update(); 209 | nextRefresh = now + refreshInterval; 210 | } 211 | if (!paused) 212 | { 213 | auto deadline = now + totalMaxDuration; 214 | size_t numReadable; 215 | foreach (i, ref subproc; subprocesses) 216 | if (readSet.isSet(subproc.socket)) 217 | numReadable++; 218 | foreach (i, ref subproc; subprocesses) 219 | if (readSet.isSet(subproc.socket)) 220 | { 221 | auto subprocDeadline = now + (deadline - now) / numReadable; 222 | while (now < subprocDeadline && subproc.handleInput()) 223 | now = MonoTime.currTime(); 224 | numReadable--; 225 | } 226 | } 227 | if (!headless && now > nextRefresh) 228 | { 229 | browser.update(); 230 | nextRefresh = now + refreshInterval; 231 | } 232 | 233 | if ((maxSamples 234 | && browserRoot.getSamples(SampleType.represented) >= maxSamples) || 235 | (maxTime 236 | && now > startTime + parsedMaxTime) || 237 | (minResolution 238 | && browserRoot.getSamples(SampleType.represented) 239 | && totalSize 240 | && (totalSize / browserRoot.getSamples(SampleType.represented)) <= parsedMinResolution)) 241 | { 242 | if (headless || exitOnLimit) 243 | break; 244 | else 245 | { 246 | if (!paused) 247 | { 248 | browser.togglePause(); 249 | browser.curses.beep(); 250 | browser.update(); 251 | } 252 | // Only pause once 253 | maxSamples = 0; 254 | maxTime = minResolution = null; 255 | } 256 | } 257 | } 258 | 259 | if (headless) 260 | { 261 | auto totalSamples = browserRoot.getSamples(SampleType.represented); 262 | stderr.writefln( 263 | "Collected %s samples (achieving a resolution of ~%s) in %s.", 264 | totalSamples, 265 | totalSamples ? 
(totalSize / totalSamples).humanSize().to!string : "-", 266 | MonoTime.currTime() - startTime, 267 | ); 268 | 269 | // Print CLI tree output unless --du mode is used 270 | if (!du) 271 | exportHuman(); 272 | } 273 | 274 | if (exportPath) 275 | { 276 | stderr.writeln("Exporting results..."); 277 | exportData(exportPath); 278 | stderr.writeln("Exported results to: ", exportPath); 279 | } 280 | 281 | if (du) 282 | exportDu(); 283 | } 284 | 285 | void checkBtrfs(string fsPath) 286 | { 287 | import core.sys.posix.fcntl : open, O_RDONLY; 288 | import std.file : exists; 289 | import std.string : toStringz; 290 | import std.algorithm.searching : canFind; 291 | import btrfs : isBTRFS, isSubvolume, getSubvolumeID; 292 | import btrfs.c.kernel_shared.ctree : BTRFS_FS_TREE_OBJECTID; 293 | 294 | int fd = open(fsPath.toStringz, O_RDONLY); 295 | errnoEnforce(fd >= 0, "open"); 296 | 297 | enforce(fd.isBTRFS, 298 | fsPath ~ " is not a btrfs filesystem"); 299 | 300 | MountInfo[] mounts; 301 | try 302 | mounts = getMounts().array; 303 | catch (Exception e) {} 304 | enforce(fd.isSubvolume, { 305 | auto rootPath = mounts.getPathMountInfo(fsPath).file; 306 | if (!rootPath) 307 | rootPath = "/"; 308 | return format( 309 | "%s is not the root of a btrfs subvolume - " ~ 310 | "please specify the path to the subvolume root" ~ 311 | "\n" ~ 312 | "E.g.: %s", 313 | fsPath, 314 | [Runtime.args[0], rootPath].escapeShellCommand, 315 | ); 316 | }()); 317 | 318 | enforce(fd.getSubvolumeID() == BTRFS_FS_TREE_OBJECTID, { 319 | string msg = format( 320 | "The mount point you specified, \"%s\", " ~ 321 | "is not the top-level btrfs subvolume (\"subvolid=%d,subvol=/\").\n", 322 | fsPath, BTRFS_FS_TREE_OBJECTID); 323 | 324 | auto mountInfo = mounts.getPathMountInfo(fsPath); 325 | auto options = mountInfo.mntops 326 | .split(",") 327 | .map!(o => o.findSplit("=")) 328 | .map!(p => tuple(p[0], p[2])) 329 | .assocArray; 330 | if ("subvol" in options && "subvolid" in options) 331 | msg ~= format( 332 | "It is the btrfs subvolume \"subvolid=%s,subvol=%s\".\n", 333 | options["subvolid"], options["subvol"], 334 | ); 335 | 336 | auto device = mountInfo.spec; 337 | if (!device) 338 | device = "/dev/sda1"; // placeholder 339 | auto mountRoot = 340 | "/mnt".exists && !mounts.canFind!(m => m.file == "/mnt") ? "/mnt" : 341 | "/media".exists ? "/media" : 342 | "..." 343 | ; 344 | auto tmpName = mountRoot ~ "/" ~ device.baseName; 345 | msg ~= format( 346 | "Please specify the path to a mountpoint mounted with subvol=/ or subvolid=%d." 
~ 347 | "\n" ~ 348 | "E.g.: %s && %s && %s", 349 | BTRFS_FS_TREE_OBJECTID, 350 | ["mkdir", tmpName].escapeShellCommand, 351 | ["mount", "-o", "subvol=/", device, tmpName].escapeShellCommand, 352 | [Runtime.args[0], tmpName].escapeShellCommand, 353 | ); 354 | if (fsPath == "/") 355 | msg ~= format( 356 | "\n\nNote that the top-level btrfs subvolume (\"subvolid=%d,subvol=/\") " ~ 357 | "is not the same as the root of the filesystem (\"/\").", 358 | BTRFS_FS_TREE_OBJECTID); 359 | return msg; 360 | }()); 361 | } 362 | 363 | private string escapeShellCommand(string[] args) 364 | { 365 | import std.process : escapeShellFileName; 366 | import std.algorithm.searching : all; 367 | import ae.utils.array : isOneOf; 368 | 369 | foreach (ref arg; args) 370 | if (!arg.representation.all!(c => c.isOneOf("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_/.=:@%"))) 371 | arg = arg.escapeShellFileName; 372 | return args.join(" "); 373 | } 374 | 375 | void usageFun(string usage) 376 | { 377 | stderr.writeln("btdu v" ~ btduVersion); 378 | stderr.writeln(usage); 379 | } 380 | 381 | mixin main!(funopt!(program, FunOptConfig.init, usageFun)); 382 | -------------------------------------------------------------------------------- /COPYING: -------------------------------------------------------------------------------- 1 | 2 | GNU GENERAL PUBLIC LICENSE 3 | Version 2, June 1991 4 | 5 | Copyright (C) 1989, 1991 Free Software Foundation, Inc. 6 | 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 7 | Everyone is permitted to copy and distribute verbatim copies 8 | of this license document, but changing it is not allowed. 9 | 10 | Preamble 11 | 12 | The licenses for most software are designed to take away your 13 | freedom to share and change it. By contrast, the GNU General Public 14 | License is intended to guarantee your freedom to share and change free 15 | software--to make sure the software is free for all its users. This 16 | General Public License applies to most of the Free Software 17 | Foundation's software and to any other program whose authors commit to 18 | using it. (Some other Free Software Foundation software is covered by 19 | the GNU Library General Public License instead.) You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | this service if you wish), that you receive source code or can get it 26 | if you want it, that you can change the software or use pieces of it 27 | in new free programs; and that you know you can do these things. 28 | 29 | To protect your rights, we need to make restrictions that forbid 30 | anyone to deny you these rights or to ask you to surrender the rights. 31 | These restrictions translate to certain responsibilities for you if you 32 | distribute copies of the software, or if you modify it. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must give the recipients all the rights that 36 | you have. You must make sure that they, too, receive or can get the 37 | source code. And you must show them these terms so they know their 38 | rights. 39 | 40 | We protect your rights with two steps: (1) copyright the software, and 41 | (2) offer you this license which gives you legal permission to copy, 42 | distribute and/or modify the software. 
43 | 44 | Also, for each author's protection and ours, we want to make certain 45 | that everyone understands that there is no warranty for this free 46 | software. If the software is modified by someone else and passed on, we 47 | want its recipients to know that what they have is not the original, so 48 | that any problems introduced by others will not reflect on the original 49 | authors' reputations. 50 | 51 | Finally, any free program is threatened constantly by software 52 | patents. We wish to avoid the danger that redistributors of a free 53 | program will individually obtain patent licenses, in effect making the 54 | program proprietary. To prevent this, we have made it clear that any 55 | patent must be licensed for everyone's free use or not licensed at all. 56 | 57 | The precise terms and conditions for copying, distribution and 58 | modification follow. 59 | 60 | GNU GENERAL PUBLIC LICENSE 61 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 62 | 63 | 0. This License applies to any program or other work which contains 64 | a notice placed by the copyright holder saying it may be distributed 65 | under the terms of this General Public License. The "Program", below, 66 | refers to any such program or work, and a "work based on the Program" 67 | means either the Program or any derivative work under copyright law: 68 | that is to say, a work containing the Program or a portion of it, 69 | either verbatim or with modifications and/or translated into another 70 | language. (Hereinafter, translation is included without limitation in 71 | the term "modification".) Each licensee is addressed as "you". 72 | 73 | Activities other than copying, distribution and modification are not 74 | covered by this License; they are outside its scope. The act of 75 | running the Program is not restricted, and the output from the Program 76 | is covered only if its contents constitute a work based on the 77 | Program (independent of having been made by running the Program). 78 | Whether that is true depends on what the Program does. 79 | 80 | 1. You may copy and distribute verbatim copies of the Program's 81 | source code as you receive it, in any medium, provided that you 82 | conspicuously and appropriately publish on each copy an appropriate 83 | copyright notice and disclaimer of warranty; keep intact all the 84 | notices that refer to this License and to the absence of any warranty; 85 | and give any other recipients of the Program a copy of this License 86 | along with the Program. 87 | 88 | You may charge a fee for the physical act of transferring a copy, and 89 | you may at your option offer warranty protection in exchange for a fee. 90 | 91 | 2. You may modify your copy or copies of the Program or any portion 92 | of it, thus forming a work based on the Program, and copy and 93 | distribute such modifications or work under the terms of Section 1 94 | above, provided that you also meet all of these conditions: 95 | 96 | a) You must cause the modified files to carry prominent notices 97 | stating that you changed the files and the date of any change. 98 | 99 | b) You must cause any work that you distribute or publish, that in 100 | whole or in part contains or is derived from the Program or any 101 | part thereof, to be licensed as a whole at no charge to all third 102 | parties under the terms of this License. 
103 | 104 | c) If the modified program normally reads commands interactively 105 | when run, you must cause it, when started running for such 106 | interactive use in the most ordinary way, to print or display an 107 | announcement including an appropriate copyright notice and a 108 | notice that there is no warranty (or else, saying that you provide 109 | a warranty) and that users may redistribute the program under 110 | these conditions, and telling the user how to view a copy of this 111 | License. (Exception: if the Program itself is interactive but 112 | does not normally print such an announcement, your work based on 113 | the Program is not required to print an announcement.) 114 | 115 | These requirements apply to the modified work as a whole. If 116 | identifiable sections of that work are not derived from the Program, 117 | and can be reasonably considered independent and separate works in 118 | themselves, then this License, and its terms, do not apply to those 119 | sections when you distribute them as separate works. But when you 120 | distribute the same sections as part of a whole which is a work based 121 | on the Program, the distribution of the whole must be on the terms of 122 | this License, whose permissions for other licensees extend to the 123 | entire whole, and thus to each and every part regardless of who wrote it. 124 | 125 | Thus, it is not the intent of this section to claim rights or contest 126 | your rights to work written entirely by you; rather, the intent is to 127 | exercise the right to control the distribution of derivative or 128 | collective works based on the Program. 129 | 130 | In addition, mere aggregation of another work not based on the Program 131 | with the Program (or with a work based on the Program) on a volume of 132 | a storage or distribution medium does not bring the other work under 133 | the scope of this License. 134 | 135 | 3. You may copy and distribute the Program (or a work based on it, 136 | under Section 2) in object code or executable form under the terms of 137 | Sections 1 and 2 above provided that you also do one of the following: 138 | 139 | a) Accompany it with the complete corresponding machine-readable 140 | source code, which must be distributed under the terms of Sections 141 | 1 and 2 above on a medium customarily used for software interchange; or, 142 | 143 | b) Accompany it with a written offer, valid for at least three 144 | years, to give any third party, for a charge no more than your 145 | cost of physically performing source distribution, a complete 146 | machine-readable copy of the corresponding source code, to be 147 | distributed under the terms of Sections 1 and 2 above on a medium 148 | customarily used for software interchange; or, 149 | 150 | c) Accompany it with the information you received as to the offer 151 | to distribute corresponding source code. (This alternative is 152 | allowed only for noncommercial distribution and only if you 153 | received the program in object code or executable form with such 154 | an offer, in accord with Subsection b above.) 155 | 156 | The source code for a work means the preferred form of the work for 157 | making modifications to it. For an executable work, complete source 158 | code means all the source code for all modules it contains, plus any 159 | associated interface definition files, plus the scripts used to 160 | control compilation and installation of the executable. 
However, as a 161 | special exception, the source code distributed need not include 162 | anything that is normally distributed (in either source or binary 163 | form) with the major components (compiler, kernel, and so on) of the 164 | operating system on which the executable runs, unless that component 165 | itself accompanies the executable. 166 | 167 | If distribution of executable or object code is made by offering 168 | access to copy from a designated place, then offering equivalent 169 | access to copy the source code from the same place counts as 170 | distribution of the source code, even though third parties are not 171 | compelled to copy the source along with the object code. 172 | 173 | 4. You may not copy, modify, sublicense, or distribute the Program 174 | except as expressly provided under this License. Any attempt 175 | otherwise to copy, modify, sublicense or distribute the Program is 176 | void, and will automatically terminate your rights under this License. 177 | However, parties who have received copies, or rights, from you under 178 | this License will not have their licenses terminated so long as such 179 | parties remain in full compliance. 180 | 181 | 5. You are not required to accept this License, since you have not 182 | signed it. However, nothing else grants you permission to modify or 183 | distribute the Program or its derivative works. These actions are 184 | prohibited by law if you do not accept this License. Therefore, by 185 | modifying or distributing the Program (or any work based on the 186 | Program), you indicate your acceptance of this License to do so, and 187 | all its terms and conditions for copying, distributing or modifying 188 | the Program or works based on it. 189 | 190 | 6. Each time you redistribute the Program (or any work based on the 191 | Program), the recipient automatically receives a license from the 192 | original licensor to copy, distribute or modify the Program subject to 193 | these terms and conditions. You may not impose any further 194 | restrictions on the recipients' exercise of the rights granted herein. 195 | You are not responsible for enforcing compliance by third parties to 196 | this License. 197 | 198 | 7. If, as a consequence of a court judgment or allegation of patent 199 | infringement or for any other reason (not limited to patent issues), 200 | conditions are imposed on you (whether by court order, agreement or 201 | otherwise) that contradict the conditions of this License, they do not 202 | excuse you from the conditions of this License. If you cannot 203 | distribute so as to satisfy simultaneously your obligations under this 204 | License and any other pertinent obligations, then as a consequence you 205 | may not distribute the Program at all. For example, if a patent 206 | license would not permit royalty-free redistribution of the Program by 207 | all those who receive copies directly or indirectly through you, then 208 | the only way you could satisfy both it and this License would be to 209 | refrain entirely from distribution of the Program. 210 | 211 | If any portion of this section is held invalid or unenforceable under 212 | any particular circumstance, the balance of the section is intended to 213 | apply and the section as a whole is intended to apply in other 214 | circumstances. 
215 | 216 | It is not the purpose of this section to induce you to infringe any 217 | patents or other property right claims or to contest validity of any 218 | such claims; this section has the sole purpose of protecting the 219 | integrity of the free software distribution system, which is 220 | implemented by public license practices. Many people have made 221 | generous contributions to the wide range of software distributed 222 | through that system in reliance on consistent application of that 223 | system; it is up to the author/donor to decide if he or she is willing 224 | to distribute software through any other system and a licensee cannot 225 | impose that choice. 226 | 227 | This section is intended to make thoroughly clear what is believed to 228 | be a consequence of the rest of this License. 229 | 230 | 8. If the distribution and/or use of the Program is restricted in 231 | certain countries either by patents or by copyrighted interfaces, the 232 | original copyright holder who places the Program under this License 233 | may add an explicit geographical distribution limitation excluding 234 | those countries, so that distribution is permitted only in or among 235 | countries not thus excluded. In such case, this License incorporates 236 | the limitation as if written in the body of this License. 237 | 238 | 9. The Free Software Foundation may publish revised and/or new versions 239 | of the General Public License from time to time. Such new versions will 240 | be similar in spirit to the present version, but may differ in detail to 241 | address new problems or concerns. 242 | 243 | Each version is given a distinguishing version number. If the Program 244 | specifies a version number of this License which applies to it and "any 245 | later version", you have the option of following the terms and conditions 246 | either of that version or of any later version published by the Free 247 | Software Foundation. If the Program does not specify a version number of 248 | this License, you may choose any version ever published by the Free Software 249 | Foundation. 250 | 251 | 10. If you wish to incorporate parts of the Program into other free 252 | programs whose distribution conditions are different, write to the author 253 | to ask for permission. For software which is copyrighted by the Free 254 | Software Foundation, write to the Free Software Foundation; we sometimes 255 | make exceptions for this. Our decision will be guided by the two goals 256 | of preserving the free status of all derivatives of our free software and 257 | of promoting the sharing and reuse of software generally. 258 | 259 | NO WARRANTY 260 | 261 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 262 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 263 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 264 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 265 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 266 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 267 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 268 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 269 | REPAIR OR CORRECTION. 270 | 271 | 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 272 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 273 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 274 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 275 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 276 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 277 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 278 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 279 | POSSIBILITY OF SUCH DAMAGES. 280 | 281 | END OF TERMS AND CONDITIONS 282 | 283 | How to Apply These Terms to Your New Programs 284 | 285 | If you develop a new program, and you want it to be of the greatest 286 | possible use to the public, the best way to achieve this is to make it 287 | free software which everyone can redistribute and change under these terms. 288 | 289 | To do so, attach the following notices to the program. It is safest 290 | to attach them to the start of each source file to most effectively 291 | convey the exclusion of warranty; and each file should have at least 292 | the "copyright" line and a pointer to where the full notice is found. 293 | 294 | 295 | Copyright (C) 296 | 297 | This program is free software; you can redistribute it and/or modify 298 | it under the terms of the GNU General Public License as published by 299 | the Free Software Foundation; either version 2 of the License, or 300 | (at your option) any later version. 301 | 302 | This program is distributed in the hope that it will be useful, 303 | but WITHOUT ANY WARRANTY; without even the implied warranty of 304 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 305 | GNU General Public License for more details. 306 | 307 | You should have received a copy of the GNU General Public License 308 | along with this program; if not, write to the Free Software 309 | Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 310 | 311 | 312 | Also add information on how to contact you by electronic and paper mail. 313 | 314 | If the program is interactive, make it output a short notice like this 315 | when it starts in an interactive mode: 316 | 317 | Gnomovision version 69, Copyright (C) year name of author 318 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 319 | This is free software, and you are welcome to redistribute it 320 | under certain conditions; type `show c' for details. 321 | 322 | The hypothetical commands `show w' and `show c' should show the appropriate 323 | parts of the General Public License. Of course, the commands you use may 324 | be called something other than `show w' and `show c'; they could even be 325 | mouse-clicks or menu items--whatever suits your program. 326 | 327 | You should also get your employer (if you work as a programmer) or your 328 | school, if any, to sign a "copyright disclaimer" for the program, if 329 | necessary. Here is a sample; alter the names: 330 | 331 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 332 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 333 | 334 | , 1 April 1989 335 | Ty Coon, President of Vice 336 | 337 | This General Public License does not permit incorporating your program into 338 | proprietary programs. 
If your program is a subroutine library, you may 339 | consider it more useful to permit linking proprietary applications with the 340 | library. If this is what you want to do, use the GNU Library General 341 | Public License instead of this License. 342 | -------------------------------------------------------------------------------- /source/btdu/ui/curses.d: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2023, 2024, 2025 Vladimir Panteleev 3 | * 4 | * This program is free software; you can redistribute it and/or 5 | * modify it under the terms of the GNU General Public 6 | * License v2 as published by the Free Software Foundation. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 | * General Public License for more details. 12 | * 13 | * You should have received a copy of the GNU General Public 14 | * License along with this program; if not, write to the 15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 | * Boston, MA 021110-1307, USA. 17 | */ 18 | 19 | /// ncurses wrapper 20 | module btdu.ui.curses; 21 | 22 | import core.lifetime : forward; 23 | import core.stdc.stddef : wchar_t; 24 | import core.sys.posix.locale; 25 | import core.sys.posix.stdio : FILE; 26 | 27 | import std.algorithm.comparison : min, max; 28 | import std.conv; 29 | import std.exception; 30 | import std.meta; 31 | import std.socket; 32 | import std.stdio : File; 33 | import std.typecons; 34 | 35 | import ae.utils.text.functor : stringifiable, fmtSeq; 36 | import ae.utils.functor.primitives : functor; 37 | import ae.utils.typecons : require; 38 | 39 | import deimos.ncurses; 40 | 41 | struct Curses 42 | { 43 | @disable this(this); 44 | 45 | Socket stdinSocket; 46 | 47 | void start() 48 | { 49 | setlocale(LC_CTYPE, ""); 50 | 51 | // Smarter alternative to initscr() 52 | { 53 | import core.stdc.stdlib : getenv; 54 | import core.sys.posix.unistd : isatty, STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO; 55 | import core.sys.posix.fcntl : open, O_RDWR, O_NOCTTY; 56 | import core.sys.posix.stdio : fdopen; 57 | 58 | int inputFD = { 59 | if (isatty(STDIN_FILENO)) 60 | return STDIN_FILENO; 61 | ttyFD = open("/dev/tty", O_RDWR); 62 | if (ttyFD >= 0 && isatty(ttyFD)) 63 | return ttyFD; 64 | throw new Exception("Could not detect a TTY to read interactive input from."); 65 | }(); 66 | int outputFD = { 67 | if (isatty(STDOUT_FILENO)) 68 | return STDOUT_FILENO; 69 | if (isatty(STDERR_FILENO)) 70 | return STDERR_FILENO; 71 | if (ttyFD < 0) 72 | ttyFD = open("/dev/tty", O_RDWR); 73 | if (ttyFD >= 0 && isatty(ttyFD)) 74 | return ttyFD; 75 | throw new Exception("Could not detect a TTY to display interactive UI on."); 76 | }(); 77 | 78 | inputFile.fdopen(inputFD, "rb"); 79 | outputFile.fdopen(outputFD, "wb"); 80 | auto screen = newterm(getenv("TERM"), outputFile.getFP(), inputFile.getFP()); 81 | enforce(screen, 82 | "Could not create the ncurses screen object. " ~ 83 | "Are terminfo files installed for your terminal? " ~ 84 | "Perhaps try running with \"TERM=xterm\"." 
85 | ); 86 | 87 | stdinSocket = new Socket(cast(socket_t)inputFD, AddressFamily.UNSPEC); 88 | stdinSocket.blocking = false; 89 | } 90 | 91 | timeout(0); // Use non-blocking read 92 | cbreak(); // Disable line buffering 93 | noecho(); // Disable keyboard echo 94 | keypad(stdscr, true); // Enable arrow keys 95 | curs_set(0); // Hide cursor 96 | leaveok(stdscr, true); // Don't bother moving the physical cursor 97 | } 98 | 99 | ~this() 100 | { 101 | endwin(); 102 | 103 | { 104 | import core.stdc.stdio : fclose; 105 | import core.sys.posix.unistd : close; 106 | 107 | if (stdinSocket) 108 | stdinSocket.blocking = true; 109 | inputFile.close(); 110 | outputFile.close(); 111 | if (ttyFD >= 0) 112 | close(ttyFD); 113 | } 114 | } 115 | 116 | /// A tool used to fling curses at unsuspecting terminal emulators. 117 | struct Wand 118 | { 119 | private: 120 | // --- State 121 | 122 | alias xy_t = int; // Type for cursor coordinates 123 | 124 | /// What to do when the cursor goes beyond the window width. 125 | enum XOverflow 126 | { 127 | /// Text will never exceed the window width - the caller ensures it. 128 | /// If it does happen, raise an assertion and truncate. 129 | never, 130 | 131 | /// Silently truncate. 132 | hidden, 133 | 134 | /// Wrap characters (dumb wrapping). 135 | chars, 136 | 137 | /// Wrap words. 138 | words, 139 | 140 | /// Wrap paths (like words, but break on /); hyphenate. 141 | path, 142 | 143 | /// Do not wrap, but draw ellipses if truncation occurred. 144 | ellipsis, 145 | } 146 | XOverflow xOverflow = XOverflow.never; /// ditto 147 | 148 | /// What to do when the cursor goes beyond the window height. 149 | enum YOverflow 150 | { 151 | /// Text will never exceed the window height - the caller ensures it. 152 | /// If it does happen, raise an assertion and truncate. 153 | never, 154 | 155 | /// Silently truncate. 156 | hidden, 157 | } 158 | YOverflow yOverflow = YOverflow.never; /// ditto 159 | 160 | /// Current attributes. 161 | // Though we could use the ones in WINDOW (via wattr_get, wattr_set, wattr_on etc.), 162 | // we never use any ncurses APIs which use them, and this way is simpler. 163 | attr_t attr; 164 | NCURSES_PAIRS_T color; 165 | 166 | /// Set `value` to `newValue`, then run `fn`, and restore the old value on exit. 167 | void withState(T)(ref T value, T newValue, scope void delegate() fn) 168 | { 169 | auto oldValue = value; 170 | scope(exit) value = oldValue; 171 | value = newValue; 172 | fn(); 173 | } 174 | 175 | /// Run `fn`, restoring the cursor position on exit. 176 | void saveCursor(scope void delegate() fn) 177 | { 178 | auto x = this.x; 179 | auto y = this.y; 180 | scope(exit) 181 | { 182 | this.x = x; 183 | this.y = y; 184 | } 185 | fn(); 186 | } 187 | 188 | // --- ncurses primitives 189 | 190 | /// Absolute coordinates of current window top-left and bottom-right. 191 | /// These control the logical geometry, e.g. at which word wrapping happens. 192 | xy_t x0, y0, x1, y1; 193 | 194 | /// Rectangle of where we may actually draw now. 195 | /// Subset of current window (relative to x0/y0). 196 | xy_t maskX0, maskY0, maskX1, maskY1; 197 | 198 | void withNCWindow(scope void delegate(WINDOW*) fn) 199 | { 200 | auto win = derwin(stdscr, height, width, y0, x0); 201 | scope(exit) delwin(win); 202 | fn(win); 203 | } 204 | 205 | /// Returns `true` if `x` and `y` are within the bounds of the current window. 
206 | bool inBounds(xy_t x, xy_t y) { return x >= 0 && x < width && y >= 0 && y < height; } 207 | 208 | /// Returns `true` if `x` and `y` are within the drawable rectangle. 209 | bool inMask(xy_t x, xy_t y) { x += x0; y += y0; return x >= maskX0 && x < maskX1 && y >= maskY0 && y < maskY1; } 210 | 211 | /// Low-level write primitive 212 | void poke(xy_t x, xy_t y, cchar_t c) 213 | { 214 | assert(inMask(x, y)); 215 | x += x0; y += y0; 216 | mvwadd_wchnstr(stdscr, y.to!int, x.to!int, &c, 1).ncenforce("mvwadd_wchnstr"); 217 | } 218 | 219 | /// Low-level read primitive 220 | cchar_t peek(xy_t x, xy_t y) 221 | { 222 | assert(inMask(x, y)); 223 | cchar_t wch; 224 | x += x0; y += y0; 225 | mvwin_wch(stdscr, y.to!int, x.to!int, &wch).ncenforce("mvwin_wch"); 226 | return wch; 227 | } 228 | 229 | // --- Output implementation 230 | 231 | void wordWrap() 232 | { 233 | // A "clever" way to do word-wrap without requiring 234 | // dynamic memory allocation is to blit strings 235 | // immediately directly to the screen, then whenever we 236 | // find that we are running out of horizontal space, move 237 | // any half-written word by reading what we wrote back 238 | // from screen. 239 | 240 | // Move the cursor to the next line. This happens regardless. 241 | auto origX = x; 242 | auto origY = y; 243 | newLine(); 244 | 245 | auto space = " "d.ptr.toCChar(attr, color); 246 | if (lastSpaceY == origY) 247 | { 248 | assert(lastSpaceX < origX); 249 | // There is a space at X coordinate `lastSpaceX`. 250 | // Move everything after it to a new line. 251 | foreach (j; lastSpaceX + 1 .. origX) 252 | { 253 | // auto ok = prePut(); 254 | // put(ok ? peek(j, origY) : space); 255 | if (inMask(j, origY)) 256 | { 257 | put(peek(j, origY), 1); 258 | poke(j, origY, space); 259 | } 260 | else 261 | { 262 | // The word that needs to be wrapped is off-screen, so we lost those characters, 263 | // and therefore can't copy them to the potentially-now-visible row. 264 | // But that's OK, because btdu draws overflow markers on the top line of 265 | // scrollable views anyway. 266 | put("•"); 267 | } 268 | } 269 | // The cursor is now after the last character, and we are ready to write more. 270 | return; 271 | } 272 | 273 | // We did not find a blank, so just put a hyphen if we can. 274 | if (origX >= 2 && inMask(origX - 1, origY)) 275 | { 276 | put(peek(origX - 1, origY), 1); 277 | poke(origX - 1, origY, "‐"d.ptr.toCChar(attr, color)); 278 | } 279 | } 280 | 281 | xy_t lastSpaceX = xy_t.min, lastSpaceY = xy_t.min; 282 | 283 | /// We are about to write a single character. 284 | /// Perform any upkeep on the current state to ensure 285 | /// that the next write will go to the resulting (x, y). 286 | /// Return true if the write at the resulting (x, y) will be valid; 287 | /// otherwise, the caller should just advance the cursor and give up. 
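/// Depending on the current X/Y overflow mode, this may first wrap to a new line,
/// word-wrap, or draw a trailing ellipsis before deciding.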
288 | bool prePut() 289 | out (result; result == inBounds(x, y)) 290 | { 291 | if (x < 0) 292 | return false; 293 | 294 | if (x >= width) 295 | final switch (xOverflow) 296 | { 297 | case XOverflow.never: 298 | assert(x < width, "X overflow"); 299 | return false; 300 | case XOverflow.hidden: 301 | return false; 302 | case XOverflow.chars: 303 | newLine(); 304 | return prePut(); // retry 305 | case XOverflow.words: 306 | case XOverflow.path: 307 | wordWrap(); 308 | return prePut(); // retry 309 | case XOverflow.ellipsis: 310 | if (x == width) // only print the ellipsis this once 311 | { 312 | saveCursor({ 313 | x = width - 1; 314 | put("…"); 315 | }); 316 | } 317 | return false; 318 | } 319 | 320 | if (y < 0 || y >= height) 321 | final switch (yOverflow) 322 | { 323 | case YOverflow.never: 324 | assert(y >= 0 && y < height, "Y overflow"); 325 | return false; 326 | 327 | case YOverflow.hidden: 328 | return false; 329 | } 330 | 331 | return true; 332 | } 333 | 334 | /// Put a raw `cchar_t`, obeying overflow and advancing the cursor. 335 | void put(cchar_t c, int width) 336 | { 337 | bool ok = prePut(); 338 | 339 | auto breakChar = xOverflow == XOverflow.path ? '/' : ' '; 340 | if (c.chars[0] == breakChar && c.chars[1] == 0) 341 | { 342 | lastSpaceX = x; 343 | lastSpaceY = y; 344 | } 345 | 346 | if (inMask(x, y)) 347 | poke(x, y, c); 348 | x += width; 349 | maxX = max(maxX, x); 350 | } 351 | 352 | // --- Text output (low-level) 353 | 354 | alias Sink = typeof(&put); 355 | 356 | void put(const(char)[] str) 357 | { 358 | toCChars(str, &put, attr, color); 359 | } 360 | 361 | @property void delegate(const(char)[] str) sink() return { return &put; } 362 | 363 | void newLine(dchar filler = ' ') 364 | { 365 | // Fill with current background color / attributes 366 | auto fillerCChar = filler.toCChar(attr, color); 367 | auto oldMaxX = maxX; 368 | while (x + x0 < maskX0 || inMask(x, y)) 369 | put(fillerCChar, 1); 370 | maxX = oldMaxX; 371 | x = xMargin; // CR 372 | y++; // LF 373 | } 374 | 375 | public: 376 | 377 | // --- Lifetime 378 | 379 | @disable this(); 380 | @disable this(this); 381 | 382 | this(ref Curses curses) 383 | { 384 | erase(); 385 | x0 = y0 = maskX0 = maskY0 = x = y = xMargin = 0; 386 | x1 = maskX1 = getmaxx(stdscr).to!xy_t; 387 | y1 = maskY1 = getmaxy(stdscr).to!xy_t; 388 | } 389 | 390 | ~this() 391 | { 392 | refresh(); 393 | } 394 | 395 | // --- Geometry 396 | 397 | @property xy_t width() { return x1 - x0; } 398 | @property xy_t height() { return y1 - y0; } 399 | 400 | /// Cursor coordinates used by `put` et al. 401 | /// Relative to `x0` / `y0`. 402 | // ncurses does not allow the cursor to go beyond the window 403 | // geometry, but we need that to detect and handle overflow. 404 | // This is why we maintain our own cursor coordinates, and 405 | // only use ncurses' window cursor coordinates for ncurses 406 | // read/write operations. 407 | xy_t x, y; 408 | 409 | /// `x` will be reset to this value when going to a new line. 410 | // TODO: this is probably redundant with simply opening a window at a negative x. 411 | xy_t xMargin; 412 | 413 | /// High water mark for highest seen X; used by `measure`. 414 | /// Like `x`, relative to `x0`. 
415 | xy_t maxX; 416 | 417 | void withWindow(xy_t x0, xy_t y0, xy_t width, xy_t height, scope void delegate() fn) 418 | { 419 | alias vars = AliasSeq!( 420 | this.x, this.y, 421 | this.x0, this.y0, 422 | this.x1, this.y1, 423 | this.xMargin, this.maxX, 424 | this.maskX0, maskY0, 425 | this.maskX1, maskY1, 426 | ); 427 | auto newX0 = this.x0 + x0; 428 | auto newY0 = this.y0 + y0; 429 | auto newX1 = newX0 + width; 430 | auto newY1 = newY0 + height; 431 | auto newMaskX0 = max(this.maskX0, newX0); 432 | auto newMaskY0 = max(this.maskY0, newY0); 433 | auto newMaskX1 = min(this.maskX1, newX0 + width); 434 | auto newMaskY1 = min(this.maskY1, newY0 + height); 435 | alias newVars = AliasSeq!( 436 | 0, 0, 437 | newX0, newY0, 438 | newX1, newY1, 439 | 0, 0, 440 | newMaskX0, newMaskY0, 441 | newMaskX1, newMaskY1, 442 | ); 443 | auto oldVars = vars; 444 | scope(exit) 445 | vars = oldVars; 446 | vars = newVars; 447 | this.lastSpaceX = this.lastSpaceY = xy_t.min; 448 | fn(); 449 | } 450 | 451 | void eraseWindow() { withNCWindow(w => .werase(w).ncenforce()); } 452 | 453 | // --- State 454 | 455 | void at(xy_t x, xy_t y, scope void delegate() fn) 456 | { 457 | saveCursor({ 458 | this.x = x; 459 | this.y = y; 460 | fn(); 461 | }); 462 | } 463 | 464 | void xOverflowHidden (scope void delegate() fn) { withState(xOverflow, XOverflow.hidden , fn); } 465 | void xOverflowChars (scope void delegate() fn) { withState(xOverflow, XOverflow.chars , fn); } 466 | void xOverflowWords (scope void delegate() fn) { withState(xOverflow, XOverflow.words , fn); } 467 | void xOverflowPath (scope void delegate() fn) { withState(xOverflow, XOverflow.path , fn); } 468 | void xOverflowEllipsis(scope void delegate() fn) { withState(xOverflow, XOverflow.ellipsis, fn); } 469 | void yOverflowHidden (scope void delegate() fn) { withState(yOverflow, YOverflow.hidden , fn); } 470 | 471 | enum Attribute : attr_t 472 | { 473 | reverse = A_REVERSE, 474 | bold = A_BOLD, 475 | } 476 | 477 | void attrSet(Attribute attribute, bool set, scope void delegate() fn) 478 | { 479 | withState(attr, set ? attr | attribute : attr & ~attribute, fn); 480 | } 481 | 482 | void attrOn (Attribute attribute, scope void delegate() fn) { attrSet(attribute, true , fn); } 483 | void attrOff(Attribute attribute, scope void delegate() fn) { attrSet(attribute, false, fn); } 484 | void reverse(scope void delegate() fn) { attrOn(Attribute.reverse, fn); } 485 | 486 | // --- Text output (high-level) 487 | 488 | /// Write some stringifiable objects. 489 | void write(Args...)(auto ref Args args) 490 | { 491 | import std.format : formattedWrite; 492 | foreach (ref arg; args) 493 | formattedWrite!"%s"(sink, arg); 494 | } 495 | 496 | /// Special stringifiable object. `write` this to end the current line. 497 | auto endl(dchar filler = ' ') { return functor!((self, filler, sink) { self.newLine(filler); })(&this, filler).stringifiable; } 498 | 499 | /// Special stringifiable objects which temporarily change attributes. 
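/// (Illustrative use, with a placeholder message: `write(bold("Error: "), msg)`.)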
500 | auto withAttr(Args...)(Attribute attribute, bool set, auto ref Args args) 501 | { 502 | auto content = fmtSeq(args); 503 | return functor!((self, attribute, set, ref content, ref sink) { 504 | self.attrSet(attribute, set, { 505 | content.toString(sink); 506 | }); 507 | })(&this, attribute, set, content).stringifiable; 508 | } 509 | auto bold (Args...)(auto ref Args args) { return withAttr(Attribute.bold , true, forward!args); } 510 | auto reversed(Args...)(auto ref Args args) { return withAttr(Attribute.reverse, true, forward!args); } 511 | 512 | /// Get the width (in coordinates) of the given stringifiables. 513 | size_t getTextWidth(Args...)(auto ref Args args) 514 | { 515 | struct Sink 516 | { 517 | size_t count; 518 | void charSink(cchar_t, int width) { count += width; } 519 | void put(const(char)[] str) 520 | { 521 | toCChars(str, &charSink, 0, 0); 522 | } 523 | } 524 | Sink sink; 525 | import std.format : formattedWrite; 526 | foreach (ref arg; args) 527 | formattedWrite!"%s"(&sink, arg); 528 | return sink.count; 529 | } 530 | 531 | /// Measure how much space writes done by `fn` will take, 532 | /// assuming current window size and wrapping mode. 533 | xy_t[2] measure(scope void delegate() fn) 534 | { 535 | xy_t[2] result; 536 | // Start at `height` so that the text is guaranteed to be off-screen 537 | // and thus invisible for the sake of this measurement. 538 | auto oldMaxX = maxX; 539 | scope(success) maxX = oldMaxX; 540 | at(xMargin, height, { 541 | maxX = xMargin; 542 | yOverflowHidden({ 543 | fn(); 544 | }); 545 | assert(y >= height); 546 | if (x != xMargin) y++; 547 | auto localMaxX = maxX - xMargin; 548 | auto localMaxY = y - height; 549 | result = [localMaxX, localMaxY]; 550 | }); 551 | return result; 552 | } 553 | 554 | enum Alignment : byte { left = -1, center = 0, right = 1 } 555 | 556 | /// Tables! 557 | void writeTable( 558 | int columns, int rows, 559 | scope void delegate(int, int) writeCell, 560 | scope Alignment delegate(int, int) getAlignment, 561 | ) 562 | { 563 | auto columnX = this.x; 564 | auto rowY(int row) { return y + row + (row >= 1 ? 1 : 0); } 565 | foreach (column; 0 .. columns) 566 | { 567 | if (column > 0) 568 | { 569 | foreach (row; 0 .. rows) 570 | withWindow(columnX, rowY(row), 3, 1, { write(" │ "); }); 571 | withWindow(columnX, y + 1, 3, 1, { write("─┼─"); }); 572 | columnX += 3; 573 | } 574 | int maxWidth = 0; 575 | foreach (row; 0 .. rows) 576 | withWindow(columnX, rowY(row), xy_t.max / 2, 1, { 577 | auto cellSize = measure({ writeCell(column, row); }); 578 | assert(cellSize[1] <= 1, "Multi-line table cells not supported"); 579 | maxWidth = max(maxWidth, cellSize[0]); 580 | }); 581 | foreach (row; 0 .. 
rows) 582 | withWindow(columnX, rowY(row), maxWidth, 1, { 583 | final switch (getAlignment(column, row)) 584 | { 585 | case Alignment.left: 586 | writeCell(column, row); 587 | break; 588 | case Alignment.center: 589 | auto cellSize = measure({ writeCell(column, row); }); 590 | x += (width - cellSize[0]) / 2; 591 | writeCell(column, row); 592 | break; 593 | case Alignment.right: 594 | auto cellSize = measure({ writeCell(column, row); }); 595 | x += width - cellSize[0]; 596 | writeCell(column, row); 597 | assert(x == width); 598 | break; 599 | } 600 | }); 601 | withWindow(columnX, y + 1, maxWidth, 1, { write(endl('─')); }); 602 | columnX += maxWidth; 603 | } 604 | maxX = max(maxX, columnX); 605 | y += rows + 1; 606 | } 607 | 608 | void middleTruncate(scope void delegate() fn) 609 | { 610 | // This should be used in a fresh window, and will span the entire width 611 | assert(x == 0 && maxX == 0); 612 | xOverflowHidden({ 613 | fn(); 614 | assert(maxX >= x); 615 | auto totalWidth = maxX; 616 | if (totalWidth > width) 617 | { 618 | auto ellipsis = /*width >= 9 ? "..."d : */"…"d; 619 | auto ellipsisWidth = ellipsis.length.to!xy_t; 620 | auto leftWidth = (width - ellipsisWidth) / 2; 621 | auto rightWidth = width - ellipsisWidth - leftWidth; 622 | at(leftWidth, 0, { 623 | write(ellipsis); 624 | }); 625 | withWindow(leftWidth + ellipsisWidth, 0, rightWidth, 1, { 626 | x -= totalWidth - rightWidth; 627 | fn(); 628 | }); 629 | } 630 | }); 631 | } 632 | } 633 | 634 | Wand getWand() { return Wand(this); } 635 | 636 | static struct Key 637 | { 638 | enum : int 639 | { 640 | none = ERR , /// No key - try again 641 | 642 | down = KEY_DOWN , /// down-arrow key 643 | up = KEY_UP , /// up-arrow key 644 | left = KEY_LEFT , /// left-arrow key 645 | right = KEY_RIGHT , /// right-arrow key 646 | home = KEY_HOME , /// home key 647 | // backspace = KEY_BACKSPACE , /// backspace key 648 | // f0 = KEY_F0 , /// Function keys. 
Space for 64 649 | f1 = KEY_F(1) , 650 | // f2 = KEY_F(2) , 651 | // f3 = KEY_F(3) , 652 | // f4 = KEY_F(4) , 653 | // f5 = KEY_F(5) , 654 | // f6 = KEY_F(6) , 655 | // f7 = KEY_F(7) , 656 | // f8 = KEY_F(8) , 657 | // f9 = KEY_F(9) , 658 | // f10 = KEY_F(10) , 659 | // f11 = KEY_F(11) , 660 | // f12 = KEY_F(12) , 661 | // dl = KEY_DL , /// delete-line key 662 | // il = KEY_IL , /// insert-line key 663 | // dc = KEY_DC , /// delete-character key 664 | // ic = KEY_IC , /// insert-character key 665 | // eic = KEY_EIC , /// sent by rmir or smir in insert mode 666 | // clear = KEY_CLEAR , /// clear-screen or erase key 667 | // eos = KEY_EOS , /// clear-to-end-of-screen key 668 | // eol = KEY_EOL , /// clear-to-end-of-line key 669 | // sf = KEY_SF , /// scroll-forward key 670 | // sr = KEY_SR , /// scroll-backward key 671 | pageDown = KEY_NPAGE , /// next-page key 672 | pageUp = KEY_PPAGE , /// previous-page key 673 | // stab = KEY_STAB , /// set-tab key 674 | // ctab = KEY_CTAB , /// clear-tab key 675 | // catab = KEY_CATAB , /// clear-all-tabs key 676 | // enter = KEY_ENTER , /// enter/send key 677 | // print = KEY_PRINT , /// print key 678 | // ll = KEY_LL , /// lower-left key (home down) 679 | // a1 = KEY_A1 , /// upper left of keypad 680 | // a3 = KEY_A3 , /// upper right of keypad 681 | // b2 = KEY_B2 , /// center of keypad 682 | // c1 = KEY_C1 , /// lower left of keypad 683 | // c3 = KEY_C3 , /// lower right of keypad 684 | // btab = KEY_BTAB , /// back-tab key 685 | // beg = KEY_BEG , /// begin key 686 | // cancel = KEY_CANCEL , /// cancel key 687 | // close = KEY_CLOSE , /// close key 688 | // command = KEY_COMMAND , /// command key 689 | // copy = KEY_COPY , /// copy key 690 | // create = KEY_CREATE , /// create key 691 | end = KEY_END , /// end key 692 | // exit = KEY_EXIT , /// exit key 693 | // find = KEY_FIND , /// find key 694 | // help = KEY_HELP , /// help key 695 | // mark = KEY_MARK , /// mark key 696 | // message = KEY_MESSAGE , /// message key 697 | // move = KEY_MOVE , /// move key 698 | // next = KEY_NEXT , /// next key 699 | // open = KEY_OPEN , /// open key 700 | // options = KEY_OPTIONS , /// options key 701 | // previous = KEY_PREVIOUS , /// previous key 702 | // redo = KEY_REDO , /// redo key 703 | // reference = KEY_REFERENCE , /// reference key 704 | // refresh = KEY_REFRESH , /// refresh key 705 | // replace = KEY_REPLACE , /// replace key 706 | // restart = KEY_RESTART , /// restart key 707 | // resume = KEY_RESUME , /// resume key 708 | // save = KEY_SAVE , /// save key 709 | // sbeg = KEY_SBEG , /// shifted begin key 710 | // scancel = KEY_SCANCEL , /// shifted cancel key 711 | // scommand = KEY_SCOMMAND , /// shifted command key 712 | // scopy = KEY_SCOPY , /// shifted copy key 713 | // screate = KEY_SCREATE , /// shifted create key 714 | // sdc = KEY_SDC , /// shifted delete-character key 715 | // sdl = KEY_SDL , /// shifted delete-line key 716 | // select = KEY_SELECT , /// select key 717 | // send = KEY_SEND , /// shifted end key 718 | // seol = KEY_SEOL , /// shifted clear-to-end-of-line key 719 | // sexit = KEY_SEXIT , /// shifted exit key 720 | // sfind = KEY_SFIND , /// shifted find key 721 | // shelp = KEY_SHELP , /// shifted help key 722 | // shome = KEY_SHOME , /// shifted home key 723 | // sic = KEY_SIC , /// shifted insert-character key 724 | // sleft = KEY_SLEFT , /// shifted left-arrow key 725 | // smessage = KEY_SMESSAGE , /// shifted message key 726 | // smove = KEY_SMOVE , /// shifted move key 727 | // snext = KEY_SNEXT , /// shifted next key 
728 | // soptions = KEY_SOPTIONS , /// shifted options key 729 | // sprevious = KEY_SPREVIOUS , /// shifted previous key 730 | // sprint = KEY_SPRINT , /// shifted print key 731 | // sredo = KEY_SREDO , /// shifted redo key 732 | // sreplace = KEY_SREPLACE , /// shifted replace key 733 | // sright = KEY_SRIGHT , /// shifted right-arrow key 734 | // srsume = KEY_SRSUME , /// shifted resume key 735 | // ssave = KEY_SSAVE , /// shifted save key 736 | // ssuspend = KEY_SSUSPEND , /// shifted suspend key 737 | // sundo = KEY_SUNDO , /// shifted undo key 738 | // suspend = KEY_SUSPEND , /// suspend key 739 | // undo = KEY_UNDO , /// undo key 740 | // mouse = KEY_MOUSE , /// Mouse event has occurred 741 | } 742 | 743 | typeof(none) key; alias key this; 744 | this(typeof(none) key) { this.key = key; } 745 | } 746 | 747 | Key readKey() { return Key(getch()); } 748 | 749 | void suspend(scope void delegate(File input, File output) fn) 750 | { 751 | def_prog_mode(); 752 | endwin(); 753 | stdinSocket.blocking = true; 754 | scope (exit) 755 | { 756 | stdinSocket.blocking = false; 757 | reset_prog_mode(); 758 | refresh(); 759 | } 760 | fn(inputFile, outputFile); 761 | } 762 | 763 | void beep() 764 | { 765 | .beep(); 766 | } 767 | 768 | private: 769 | int ttyFD = -1; 770 | File inputFile, outputFile; 771 | } 772 | 773 | private: 774 | 775 | // TODO: upstream into Druntime 776 | extern (C) int wcwidth(wchar_t c); 777 | 778 | void ncenforce(int value, string message = "ncurses call failed") 779 | { 780 | enforce(value == OK, message); 781 | } 782 | 783 | /// Convert nul-terminated string of wchar_t `c` to `cchar_t`, using `setcchar`. 784 | cchar_t toCChar(const(wchar_t)* c, uint attr, NCURSES_PAIRS_T color = 0) 785 | { 786 | import std.utf : replacementDchar; 787 | static immutable wchar_t[2] fallback = [replacementDchar, 0]; 788 | cchar_t cchar; 789 | if (setcchar(&cchar, c, attr, color, null) != OK) 790 | enforce(setcchar(&cchar, fallback.ptr, attr, color, null) == OK, "Can't encode replacement character"); 791 | return cchar; 792 | } 793 | 794 | /// Convert a single (spacing) character to `cchar_t`. 795 | cchar_t toCChar(dchar c, uint attr, NCURSES_PAIRS_T color = 0) 796 | { 797 | wchar_t[2] wchars = [c, 0]; 798 | return toCChar(wchars.ptr, attr, color); 799 | } 800 | 801 | /// Decode UTF-8 string `str`, passing resulting `cchar_t`s to the provided sink. 802 | void toCChars(const(char)[] str, scope void delegate(cchar_t, int) sink, uint attr, NCURSES_PAIRS_T color = 0) 803 | { 804 | import std.utf : byDchar; 805 | auto dchars = str.byDchar(); // This will also replace bad UTF-8 with replacementDchar. 806 | while (!dchars.empty) 807 | { 808 | // Discard leading nonspacing characters. ncurses cannot accept them anyway. 
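		// E.g. for the input "he\u0301y" (an 'e' followed by the combining
		// acute accent U+0301), the sink below receives one cchar_t per
		// spacing character - ('h', 1), ('e'+U+0301, 1), ('y', 1) - with
		// each combining mark folded into the preceding spacing cell.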
809 | while (!dchars.empty && wcwidth(dchars.front) == 0) 810 | dchars.popFront(); 811 | // Copy one spacing and up to CCHARW_MAX-1 nonspacing characters 812 | if (dchars.empty) 813 | break; 814 | auto width = wcwidth(dchars.front); 815 | assert(width > 0); 816 | wchar_t[CCHARW_MAX + /*nul-terminator*/ 1] wchars; 817 | size_t i = 0; 818 | wchars[i++] = dchars.front; 819 | dchars.popFront(); 820 | while (i < CCHARW_MAX && !dchars.empty && wcwidth(dchars.front) == 0) 821 | { 822 | wchars[i++] = dchars.front; 823 | dchars.popFront(); 824 | } 825 | wchars[i] = 0; 826 | sink(toCChar(wchars.ptr, attr, color), width); 827 | } 828 | } 829 | -------------------------------------------------------------------------------- /source/btdu/paths.d: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2020, 2021, 2022, 2023, 2024, 2025 Vladimir Panteleev 3 | * 4 | * This program is free software; you can redistribute it and/or 5 | * modify it under the terms of the GNU General Public 6 | * License v2 as published by the Free Software Foundation. 7 | * 8 | * This program is distributed in the hope that it will be useful, 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 | * General Public License for more details. 12 | * 13 | * You should have received a copy of the GNU General Public 14 | * License along with this program; if not, write to the 15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 | * Boston, MA 021110-1307, USA. 17 | */ 18 | 19 | /// Path manipulation and storage 20 | module btdu.paths; 21 | 22 | import std.algorithm.comparison; 23 | import std.algorithm.iteration; 24 | import std.algorithm.mutation; 25 | import std.algorithm.searching; 26 | import std.array : array; 27 | import std.bitmanip; 28 | import std.exception : enforce; 29 | import std.experimental.allocator : makeArray, make; 30 | import std.range; 31 | import std.range.primitives; 32 | import std.string; 33 | import std.traits : Unqual, EnumMembers; 34 | import std.typecons : Nullable, nullable; 35 | 36 | import containers.hashmap; 37 | import containers.internal.hash : generateHash; 38 | 39 | import ae.utils.appender; 40 | import ae.utils.json : JSONName, JSONOptional, JSONFragment; 41 | import ae.utils.meta; 42 | import ae.utils.path.glob; 43 | 44 | import btdu.alloc; 45 | 46 | public import btdu.proto : Offset; 47 | 48 | alias PathPattern = CompiledGlob!char[]; 49 | 50 | struct PathRule 51 | { 52 | enum Type 53 | { 54 | prefer, 55 | ignore, 56 | } 57 | 58 | Type type; 59 | PathPattern pattern; 60 | } 61 | 62 | private static doubleGlob = compileGlob("**"); 63 | 64 | /// Check if a PathPattern is literal (no wildcards except the trailing **) 65 | bool isLiteral(PathPattern pattern) @nogc 66 | { 67 | // PathPattern has doubleGlob at the beginning (after reverse), so check all but the first 68 | if (pattern.length <= 1) 69 | return true; 70 | foreach (glob; pattern[1 .. 
$]) 71 | if (!glob.isLiteral()) 72 | return false; 73 | return true; 74 | } 75 | 76 | PathPattern parsePathPattern(string p, string fsPath) 77 | { 78 | import std.path : buildNormalizedPath, absolutePath, pathSplitter; 79 | 80 | // Normalize both paths for comparison 81 | auto normalizedPattern = p.absolutePath.buildNormalizedPath; 82 | auto normalizedFsPath = fsPath.absolutePath.buildNormalizedPath; 83 | 84 | // Split paths into segments for proper comparison 85 | string[] patternSegments = normalizedPattern.pathSplitter.array; 86 | string[] fsPathSegments = normalizedFsPath.pathSplitter.array; 87 | 88 | enforce(patternSegments.length, "Path pattern cannot be empty"); 89 | enforce(patternSegments.startsWith("/"), "Path pattern must be an absolute path"); 90 | 91 | auto relativePattern = { 92 | bool startsWithFsPath = equal(fsPathSegments, patternSegments.take(fsPathSegments.length)); 93 | 94 | if (startsWithFsPath) 95 | return patternSegments[fsPathSegments.length .. $]; 96 | else 97 | { 98 | import std.stdio : stderr; 99 | stderr.writefln("Warning: --prefer/--ignore path '%s' does not start with '%s', assuming you meant '%s%s'", 100 | normalizedPattern, normalizedFsPath, normalizedFsPath, normalizedPattern); 101 | return patternSegments[1 .. $]; // already relative to fsPath 102 | } 103 | }(); 104 | 105 | auto parts = relativePattern.map!compileGlob.array; 106 | parts ~= doubleGlob; // Implied prefix match 107 | parts.reverse(); // Path nodes are stored as a tree and traversed leaf-to-root 108 | return parts; 109 | } 110 | 111 | /// Ordered prefer/ignore rules. First match wins. 112 | __gshared PathRule[] pathRules; 113 | 114 | /// Represents a group of paths that share the same extent 115 | struct SharingGroup 116 | { 117 | BrowserPath* root; /// The root BrowserPath for all filesystem paths 118 | GlobalPath[] paths; /// All filesystem paths that share this extent 119 | SampleData data; /// Sampling statistics for this extent 120 | ulong[historySize] lastSeen; /// Counter snapshots of the last 3 times we've seen this extent 121 | 122 | /// Additional per-path data - one item per GlobalPath 123 | struct PathData 124 | { 125 | /// Direct pointer to the corresponding BrowserPath 126 | BrowserPath* path; 127 | /// Link to next SharingGroup for a specific BrowserPath 128 | SharingGroup* next; 129 | } 130 | PathData* pathData; /// ditto 131 | size_t representativeIndex; /// Index of the representative path in paths array 132 | 133 | /// Find the index of a path matching the given element range 134 | /// Returns size_t.max if not found 135 | size_t findIndex(R)(R elementRange) const 136 | { 137 | import std.algorithm.comparison : equal; 138 | foreach (i, ref path; this.paths) 139 | { 140 | auto sp = const SamplePath(root, path); 141 | if (equal(elementRange, sp.elementRange)) 142 | return i; 143 | } 144 | return size_t.max; 145 | } 146 | 147 | /// Find the next group pointer for a given element range 148 | /// Returns null if the element range doesn't match any path in this group 149 | inout(SharingGroup)* getNext(R)(R elementRange) inout 150 | { 151 | auto index = findIndex(elementRange); 152 | return index != size_t.max ? 
this.pathData[index].next : null; 153 | } 154 | 155 | /// Wrapper type for hashing/equality based on root and paths 156 | /// Used as key in HashSet for deduplication 157 | static struct Paths 158 | { 159 | SharingGroup* group; 160 | 161 | bool opEquals(const ref Paths other) const 162 | { 163 | import std.algorithm.comparison : equal; 164 | return group.root is other.group.root 165 | && equal(group.paths, other.group.paths); 166 | } 167 | 168 | static size_t hashOf(const ref Paths key) 169 | { 170 | import containers.internal.hash : generateHash; 171 | // Combine root pointer and paths array hashes 172 | size_t h = cast(size_t)key.group.root; 173 | h ^= generateHash(key.group.paths); 174 | return h; 175 | } 176 | } 177 | } 178 | 179 | /// Common definitions for a deduplicated trie for paths. 180 | mixin template SimplePath() 181 | { 182 | // Size selected empirically 183 | alias NameString = InlineString!23; 184 | 185 | /// Parent directory 186 | typeof(this)* parent; 187 | /// Directory items, if any 188 | typeof(this)* firstChild; 189 | /// Next item in the parent directory, if any 190 | typeof(this)* nextSibling; 191 | /// Base name 192 | /// Names prefixed with a NUL character indicate "special" nodes, 193 | /// which do not correspond to a filesystem path. 194 | immutable NameString name; 195 | 196 | /*private*/ this(typeof(this)* parent, NameString name) 197 | { 198 | this.parent = parent; 199 | this.name = name; 200 | } 201 | 202 | // Returns pointer to pointer to child, or pointer to where it should be added. 203 | private inout(typeof(this)*)* find(in char[] name) inout 204 | { 205 | inout(typeof(this)*)* child; 206 | for (child = &firstChild; *child; child = &(*child).nextSibling) 207 | if ((*child).name[] == name) 208 | break; 209 | return child; 210 | } 211 | 212 | inout(typeof(this)*) opBinaryRight(string op : "in")(in char[] name) inout { return *find(name); } 213 | ref inout(typeof(this)) opIndex(in char[] name) inout { return *(name in this); } 214 | 215 | debug invariant 216 | { 217 | import btdu.state : importing; 218 | if (importing) 219 | return; 220 | if (name) 221 | { 222 | assert(parent !is null, "Named node without parent"); 223 | // assert((*parent)[name.toString()] is &this, "Child/parent mismatch"); 224 | } 225 | else // root 226 | assert(!parent, "Unnamed node with parent"); 227 | } 228 | 229 | /// Append a single path segment to this one. 230 | typeof(this)* appendName(bool existingOnly = false)(in char[] name) 231 | { 232 | assert(name.length, "Empty path segment"); 233 | assert(name.indexOf('/') < 0, "Path segment contains /: " ~ name); 234 | auto ppnext = find(name); 235 | if (auto pnext = *ppnext) 236 | return pnext; 237 | else 238 | static if (existingOnly) 239 | return null; 240 | else 241 | return *ppnext = growAllocator.make!(typeof(this))(&this, NameString(name)); 242 | } 243 | 244 | /// ditto 245 | private typeof(this)* appendName(bool existingOnly = false)(NameString name) 246 | { 247 | auto ppnext = find(name[]); 248 | if (auto pnext = *ppnext) 249 | return pnext; 250 | else 251 | static if (existingOnly) 252 | return null; 253 | else 254 | return *ppnext = growAllocator.make!(typeof(this))(&this, name); 255 | } 256 | 257 | /// Append a normalized relative string path to this one. 258 | typeof(this)* appendPath(bool existingOnly = false)(in char[] path) 259 | { 260 | auto p = path.indexOf('/'); 261 | auto nextName = p < 0 ? path : path[0 .. 
p]; 262 | auto next = appendName!existingOnly(nextName); 263 | if (p < 0) 264 | return next; 265 | else 266 | return next.appendPath!existingOnly(path[p + 1 .. $]); 267 | } 268 | 269 | /// ditto 270 | typeof(this)* appendPath(bool existingOnly = false)(in SubPath* path) 271 | { 272 | typeof(this)* recurse(typeof(this)* base, in SubPath* path) 273 | { 274 | if (!path.parent) // root 275 | return base; 276 | base = recurse(base, path.parent); 277 | return base.appendName!existingOnly(path.name); 278 | } 279 | 280 | return recurse(&this, path); 281 | } 282 | 283 | /// ditto 284 | typeof(this)* appendPath(bool existingOnly = false)(in GlobalPath* path) 285 | { 286 | typeof(this)* recurse(typeof(this)* base, in GlobalPath* path) 287 | { 288 | if (path.parent) 289 | base = recurse(base, path.parent); 290 | return base.appendPath!existingOnly(path.subPath); 291 | } 292 | 293 | return recurse(&this, path); 294 | } 295 | 296 | /// Perform the reverse operation, returning a parent path, 297 | /// or `null` if `path` is not a suffix of `this`. 298 | typeof(this)* unappendPath(in SubPath* path) 299 | { 300 | typeof(this)* recurse(typeof(this)* base, in SubPath* path) 301 | { 302 | if (!path.parent) // root 303 | return base; 304 | if (!base.parent) 305 | return null; 306 | if (path.name[] != base.name[]) 307 | return null; 308 | return recurse(base.parent, path.parent); 309 | } 310 | 311 | return recurse(&this, path); 312 | } 313 | 314 | /// ditto 315 | typeof(this)* unappendPath(in GlobalPath* path) 316 | { 317 | typeof(this)* recurse(typeof(this)* base, in GlobalPath* path) 318 | { 319 | if (!path) // root 320 | return base; 321 | base = base.unappendPath(path.subPath); 322 | if (!base) 323 | return null; 324 | return recurse(base, path.parent); 325 | } 326 | 327 | return recurse(&this, path); 328 | } 329 | 330 | /// Return an iterator for path fragments. 331 | /// Iterates from inner-most to top level. 332 | auto range() const 333 | { 334 | alias This = typeof(this)*; 335 | static struct Range 336 | { 337 | @nogc: 338 | This p; 339 | bool empty() const { return !p; } 340 | string front() { return p.name[]; } 341 | void popFront() { p = p.parent; } 342 | } 343 | return Range(&this); 344 | } 345 | 346 | /// Return an iterator for path element strings. 347 | /// For SimplePath types, this is the same as range(). 348 | auto elementRange() const { return this.range; } 349 | 350 | void toString(scope void delegate(const(char)[]) sink) const 351 | { 352 | if (parent) 353 | { 354 | parent.toString(sink); 355 | sink("/"); 356 | } 357 | humanName.toString(sink); 358 | } 359 | 360 | auto humanName() const 361 | { 362 | struct HumanName 363 | { 364 | string name; 365 | void toString(scope void delegate(const(char)[]) sink) const 366 | { 367 | if (name.startsWith("\0")) 368 | { 369 | sink("<"); 370 | sink(name[1 .. $]); 371 | sink(">"); 372 | } 373 | else 374 | sink(name); 375 | } 376 | } 377 | return HumanName(name[]); 378 | } 379 | } 380 | 381 | /// Common operations for linked-list-like path structures 382 | mixin template PathCommon() 383 | { 384 | /// Returns the total length of this path chain, 385 | /// including this instance. 386 | private size_t chainLength() const 387 | { 388 | return 1 + (parent ? parent.chainLength() : 0); 389 | } 390 | 391 | /// Returns the common prefix of `paths`. 392 | /// Assumes that if two pointers are different, they point at different paths. 393 | /// Destructively mutates `paths` as scratch space. 
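	///
	/// Illustrative sketch (hypothetical node variables), e.g. for `SubPath`:
	/// ---
	/// // Nodes for "/usr/lib/libfoo.so" and "/usr/share/doc", built
	/// // from the same root via appendPath:
	/// SubPath*[] tips = [libfooNode, docNode];
	/// auto prefix = SubPath.commonPrefix(tips); // yields the "/usr" node
	/// ---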
394 | static typeof(this)* commonPrefix(typeof(this)*[] paths) 395 | { 396 | // First, calculate the lengths 397 | static StaticAppender!size_t lengths; 398 | lengths.clear(); 399 | foreach (ref path; paths) 400 | lengths.put(path.chainLength); 401 | 402 | // Rewind all paths to the minimal path's length 403 | auto minLength = lengths.peek().reduce!min; 404 | foreach (i, ref path; paths) 405 | while (lengths.peek()[i] > minLength) 406 | { 407 | lengths.peek()[i]--; 408 | path = path.parent; 409 | } 410 | 411 | // Rewind all paths until the tip points at the same thing 412 | while (paths.any!(path => path !is paths[0])) 413 | foreach (ref path; paths) 414 | path = path.parent; 415 | 416 | // All paths now point at the same thing. 417 | return paths[0]; 418 | } 419 | 420 | /// Get the path length in characters 421 | size_t length() const 422 | { 423 | size_t len = 0; 424 | toString((const(char)[] s) { len += s.length; }); 425 | return len; 426 | } 427 | 428 | /// Check if this path matches a pattern (for preferred/ignored paths) 429 | bool matches(PathPattern pattern) const @nogc 430 | { 431 | return pathMatches(this.elementRange, pattern); 432 | } 433 | 434 | /// Check if a path exactly matches a pattern (not just a prefix match via **). 435 | /// Patterns always start with ** followed by path segment compiled globs. 436 | /// Slicing off the ** and matching checks for exact path match. 437 | bool matchesExactly(PathPattern pattern) const @nogc 438 | { 439 | assert(pattern.length > 0 && pattern[0] is doubleGlob); 440 | return pattern.isLiteral() && this.matches(pattern[1 .. $]); 441 | } 442 | 443 | /// Check if this path has resolved roots (no TREE_ markers) 444 | private bool isResolved() const 445 | { 446 | import std.algorithm.searching : canFind, startsWith; 447 | return !this.elementRange.canFind!(n => n.startsWith("\0TREE_")); 448 | } 449 | } 450 | 451 | /// Implements comparison for linked-list-like path structures. 452 | /// Requires `PathCommon` and a `compareContents` definition. 453 | mixin template PathCmp() 454 | { 455 | int opCmp(const ref typeof(this) b) const 456 | { 457 | if (this is b) 458 | return 0; 459 | 460 | // Because the lengths may be uneven, query them first 461 | auto aLength = this.chainLength(); 462 | auto bLength = b .chainLength(); 463 | auto maxLength = max(aLength, bLength); 464 | 465 | // We are starting from the tail end of two 466 | // linked lists with possibly different length 467 | int recurse( 468 | // The tail so far 469 | in typeof(this)*[2] paths, 470 | // How many nodes this side is "shorter" by 471 | size_t[2] rem, 472 | ) 473 | { 474 | if (paths[0] is paths[1]) 475 | return 0; // Also covers the [null, null] case which stops recursion 476 | 477 | // What we will recurse with 478 | const(typeof(this))*[2] recPaths; 479 | size_t[2] recRem; 480 | // What we will compare in this step (if recursion returns 0) 481 | const(typeof(this))*[2] thisPaths; 482 | 483 | foreach (n; 0 .. 2) 484 | { 485 | if (rem[n]) 486 | { 487 | thisPaths[n] = null; 488 | recPaths[n] = paths[n]; 489 | recRem[n] = rem[n] - 1; 490 | } 491 | else 492 | { 493 | thisPaths[n] = paths[n]; 494 | recPaths[n] = paths[n].parent; 495 | recRem[n] = 0; 496 | } 497 | } 498 | 499 | int res = recurse(recPaths, recRem); 500 | if (res) 501 | return res; 502 | 503 | if ((thisPaths[0] is null) != (thisPaths[1] is null)) 504 | return thisPaths[0] is null ? 
-1 : 1; 505 | return thisPaths[0].compareContents(*thisPaths[1]); 506 | } 507 | return recurse([&this, &b], [ 508 | maxLength - aLength, 509 | maxLength - bLength, 510 | ]); 511 | } 512 | } 513 | 514 | /// Path within a tree (subvolume) 515 | struct SubPath 516 | { 517 | mixin SimplePath; 518 | mixin PathCommon; 519 | mixin PathCmp; 520 | 521 | /// PathCmp implementation 522 | private int compareContents(const ref typeof(this) b) const 523 | { 524 | return cmp(name[], b.name[]); 525 | } 526 | } 527 | 528 | /// Global path (spanning multiple trees) 529 | /// This is to allow efficiently representing paths where the prefix 530 | /// (subvolume path) varies, e.g.: 531 | /// - /@root/usr/lib/libfoo.so.1.0.0 532 | /// - /backups/@root-20200101000000/usr/lib/libfoo.so.1.0.0 533 | /// - /backups/@root-20200102000000/usr/lib/libfoo.so.1.0.0 534 | /// etc. 535 | /// Here we can store /backups/@root-20200102000000 etc. as one 536 | /// SubPath and /usr/lib/libfoo.so.1.0.0 as another, with the 537 | /// GlobalPath representing a concatenation of the two. 538 | struct GlobalPath 539 | { 540 | GlobalPath* parent; /// Parent tree (or null if none) 541 | SubPath* subPath; /// Path within this filesystem 542 | 543 | void toString(scope void delegate(const(char)[]) sink) const 544 | { 545 | if (parent) 546 | parent.toString(sink); 547 | subPath.toString(sink); 548 | } 549 | 550 | size_t length() const 551 | { 552 | size_t length = 0; 553 | toString((const(char)[] s) { length += s.length; }); 554 | return length; 555 | } 556 | 557 | /// PathCmp implementation 558 | private int compareContents(const ref typeof(this) b) const 559 | { 560 | return subPath.opCmp(*b.subPath); 561 | } 562 | 563 | /// Return an iterator for subpaths. 564 | /// Iterates from inner-most to top level. 565 | auto range() const 566 | { 567 | static struct Range 568 | { 569 | @nogc: 570 | const(GlobalPath)* p; 571 | bool empty() const { return !p; } 572 | const(SubPath)* front() { return p.subPath; } 573 | void popFront() { p = p.parent; } 574 | } 575 | return Range(&this); 576 | } 577 | 578 | /// Return an iterator for path element strings (flattened). 579 | /// Iterates from inner-most to top level. 580 | auto elementRange() const 581 | { 582 | import std.algorithm.iteration : map, joiner; 583 | return this.range 584 | .map!(g => g 585 | .range 586 | .filter!(s => s.length) 587 | ) 588 | .joiner; 589 | } 590 | 591 | mixin PathCommon; 592 | mixin PathCmp; 593 | } 594 | 595 | /// Sample path (BrowserPath root + GlobalPath) 596 | /// Combines a BrowserPath prefix (containing special flags like \0DATA) 597 | /// with a GlobalPath (filesystem path) to provide the same path semantics 598 | /// as BrowserPath, but as a non-materialized rvalue. 599 | struct SamplePath 600 | { 601 | BrowserPath* root; /// Root BrowserPath containing special flags 602 | GlobalPath globalPath; /// Filesystem path 603 | 604 | void toString(scope void delegate(const(char)[]) sink) const 605 | { 606 | if (root) 607 | root.toString(sink); 608 | globalPath.toString(sink); 609 | } 610 | 611 | size_t length() const 612 | { 613 | size_t length = 0; 614 | toString((const(char)[] s) { length += s.length; }); 615 | return length; 616 | } 617 | 618 | /// Return an iterator for path element strings (flattened). 619 | /// Matches BrowserPath.elementRange semantics by including special flags from root. 
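	///
	/// For example, with `root` pointing at the <DATA> special node and a
	/// GlobalPath for "/@root/usr/lib/libfoo.so", the elements are produced
	/// leaf-first: "libfoo.so", "lib", "usr", "@root", followed by the
	/// special elements of `root` itself.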
620 | auto elementRange() const 621 | { 622 | return chain(globalPath.elementRange, root.elementRange); 623 | } 624 | 625 | /// Comparison operator 626 | int opCmp(const ref typeof(this) b) const 627 | { 628 | // First compare roots 629 | if (root !is b.root) 630 | { 631 | if (!root) return -1; 632 | if (!b.root) return 1; 633 | auto rootCmp = root.opCmp(*b.root); 634 | if (rootCmp != 0) return rootCmp; 635 | } 636 | // Then compare GlobalPaths 637 | return globalPath.opCmp(b.globalPath); 638 | } 639 | 640 | /// Check if this path matches a pattern (for preferred/ignored paths) 641 | bool matches(PathPattern pattern) const @nogc 642 | { 643 | return pathMatches(this.elementRange, pattern); 644 | } 645 | 646 | /// Check if this path has resolved roots (no TREE_ markers) 647 | private bool isResolved() const 648 | { 649 | import std.algorithm.searching : canFind, startsWith; 650 | return !this.elementRange.canFind!(n => n.startsWith("\0TREE_")); 651 | } 652 | } 653 | 654 | enum SampleType 655 | { 656 | represented, 657 | exclusive, 658 | shared_, 659 | } 660 | 661 | enum Mark : ubyte 662 | { 663 | parent, /// Default state - see parent 664 | marked, /// Positive mark 665 | unmarked, /// Negative mark (cancels out a positive mark in an ancestor) 666 | } 667 | 668 | /// How many of the most recent samples we track 669 | enum historySize = 3; 670 | 671 | /// Aggregated sampling statistics for an extent or path 672 | struct SampleData 673 | { 674 | ulong samples; /// Number of samples 675 | ulong duration; /// Total hnsecs 676 | Offset[historySize] offsets; /// Examples (the last 3 seen) of sample offsets 677 | 678 | /// Add samples to this data 679 | void add(ulong samples, const(Offset)[] offsets, ulong duration) 680 | { 681 | this.samples += samples; 682 | this.duration += duration; 683 | foreach (offset; offsets) 684 | if (offset != Offset.init) 685 | // Add new offsets at the end, pushing existing ones towards 0 686 | foreach (i; 0 .. this.offsets.length) 687 | this.offsets[i] = i + 1 == SampleData.offsets.length 688 | ? offset 689 | : this.offsets[i + 1]; 690 | } 691 | 692 | /// Remove samples from this data 693 | void remove(ulong samples, const(Offset)[] offsets, ulong duration) 694 | { 695 | import std.algorithm.searching : canFind; 696 | assert(samples <= this.samples && duration <= this.duration); 697 | this.samples -= samples; 698 | this.duration -= duration; 699 | foreach (i; 0 .. this.offsets.length) 700 | if (this.offsets[i] != Offset.init && offsets.canFind(this.offsets[i])) 701 | // Delete matching offsets, pushing existing ones from the start towards the end 702 | foreach_reverse (j; 0 .. i + 1) 703 | this.offsets[j] = j == 0 704 | ? Offset.init 705 | : this.offsets[j - 1]; 706 | } 707 | } 708 | 709 | /// Aggregate sampling data for a BrowserPath 710 | struct AggregateData 711 | { 712 | SampleData[enumLength!SampleType] data; 713 | double distributedSamples = 0; 714 | double distributedDuration = 0; 715 | } 716 | 717 | /// Browser path (GUI hierarchy) 718 | struct BrowserPath 719 | { 720 | mixin SimplePath; 721 | mixin PathCommon; 722 | 723 | mixin PathCmp; 724 | 725 | /// PathCmp implementation 726 | private int compareContents(const ref typeof(this) b) const 727 | { 728 | return cmp(name[], b.name[]); 729 | } 730 | 731 | private AggregateData* aggregateData; 732 | private bool deleting; 733 | 734 | /// Ensure aggregateData is allocated, allocating if needed. 735 | /// When first allocated, migrates dynamically-computed values into it. 
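	/// This supports keeping leaf and single-child nodes allocation-free:
	/// their statistics are computed on demand from sharing groups or
	/// delegated to the single child (see `getSamples` and friends), so
	/// `addSamples` only allocates an `AggregateData` for nodes with
	/// multiple children.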
736 | private AggregateData* ensureAggregateData() 737 | { 738 | if (!aggregateData) 739 | { 740 | // Capture current dynamically-computed values before allocation 741 | SampleData[enumLength!SampleType] currentData; 742 | static foreach (type; EnumMembers!SampleType) 743 | { 744 | currentData[type].samples = getSamples(type); 745 | currentData[type].duration = getDuration(type); 746 | currentData[type].offsets = getOffsets(type); 747 | } 748 | auto currentDistributedSamples = getDistributedSamples(); 749 | auto currentDistributedDuration = getDistributedDuration(); 750 | 751 | // Allocate and initialize with captured values 752 | aggregateData = growAllocator.make!AggregateData(); 753 | aggregateData.data = currentData; 754 | aggregateData.distributedSamples = currentDistributedSamples; 755 | aggregateData.distributedDuration = currentDistributedDuration; 756 | } 757 | return aggregateData; 758 | } 759 | 760 | /// Check if a sharing group is relevant for a given sample type 761 | private bool groupIsRelevant(const(SharingGroup)* group, SampleType type) const 762 | { 763 | final switch (type) 764 | { 765 | case SampleType.shared_: 766 | // All samples that touch this path 767 | return true; 768 | case SampleType.represented: 769 | // Samples where this path is the representative 770 | auto ourIndex = group.findIndex(this.elementRange); 771 | return ourIndex == group.representativeIndex; 772 | case SampleType.exclusive: 773 | // Samples exclusive to this path (only path in group) 774 | return group.paths.length == 1; 775 | } 776 | } 777 | 778 | /// Get the number of samples for a given sample type 779 | ulong getSamples(SampleType type) const 780 | { 781 | if (aggregateData) 782 | return aggregateData.data[type].samples; 783 | 784 | // Fallback: delegate to single child 785 | if (firstChild && !firstChild.nextSibling) 786 | return firstChild.getSamples(type); 787 | 788 | // Fallback: compute from sharing groups for leaf nodes 789 | if (!firstSharingGroup) 790 | return 0; 791 | 792 | ulong sum = 0; 793 | for (const(SharingGroup)* group = firstSharingGroup; group !is null; group = group.getNext(this.elementRange)) 794 | if (groupIsRelevant(group, type)) 795 | sum += group.data.samples; 796 | return sum; 797 | } 798 | 799 | /// Get the duration for a given sample type 800 | ulong getDuration(SampleType type) const 801 | { 802 | if (aggregateData) 803 | return aggregateData.data[type].duration; 804 | 805 | // Fallback: delegate to single child 806 | if (firstChild && !firstChild.nextSibling) 807 | return firstChild.getDuration(type); 808 | 809 | // Fallback: compute from sharing groups for leaf nodes 810 | if (!firstSharingGroup) 811 | return 0; 812 | 813 | ulong sum = 0; 814 | for (const(SharingGroup)* group = firstSharingGroup; group !is null; group = group.getNext(this.elementRange)) 815 | if (groupIsRelevant(group, type)) 816 | sum += group.data.duration; 817 | return sum; 818 | } 819 | 820 | /// Get the offsets for a given sample type 821 | const(Offset[historySize]) getOffsets(SampleType type) const 822 | { 823 | if (aggregateData) 824 | return aggregateData.data[type].offsets; 825 | 826 | // Fallback: delegate to single child 827 | if (firstChild && !firstChild.nextSibling) 828 | return firstChild.getOffsets(type); 829 | 830 | // Fallback: collect most recent offsets from sharing groups for leaf nodes 831 | static immutable Offset[historySize] emptyOffsets; 832 | if (!firstSharingGroup) 833 | return emptyOffsets; 834 | 835 | // Keep track of the most recent offsets (sorted by lastSeen 
ascending) 836 | Offset[historySize] result; 837 | ulong[historySize] resultLastSeen; 838 | 839 | for (const(SharingGroup)* group = firstSharingGroup; group !is null; group = group.getNext(this.elementRange)) 840 | { 841 | if (groupIsRelevant(group, type)) 842 | { 843 | foreach (i; 0 .. historySize) 844 | { 845 | if (group.data.offsets[i] == Offset.init) 846 | continue; 847 | 848 | auto lastSeen = group.lastSeen[i]; 849 | // Check if this is more recent than our oldest (index 0) 850 | if (lastSeen > resultLastSeen[0]) 851 | { 852 | // Find insertion point (keep sorted ascending by lastSeen) 853 | size_t insertAt = 0; 854 | foreach (j; 1 .. historySize) 855 | if (lastSeen > resultLastSeen[j]) 856 | insertAt = j; 857 | 858 | // Shift older entries down 859 | foreach_reverse (j; 0 .. insertAt) 860 | { 861 | result[j] = result[j + 1]; 862 | resultLastSeen[j] = resultLastSeen[j + 1]; 863 | } 864 | 865 | // Insert new entry 866 | result[insertAt] = group.data.offsets[i]; 867 | resultLastSeen[insertAt] = lastSeen; 868 | } 869 | } 870 | } 871 | } 872 | 873 | return result; 874 | } 875 | 876 | /// Get the distributed samples 877 | double getDistributedSamples() const 878 | { 879 | if (aggregateData) 880 | return aggregateData.distributedSamples; 881 | 882 | // Fallback: delegate to single child 883 | if (firstChild && !firstChild.nextSibling) 884 | return firstChild.getDistributedSamples(); 885 | 886 | // Fallback: compute from sharing groups for leaf nodes 887 | if (!firstSharingGroup) 888 | return 0; 889 | 890 | double sum = 0; 891 | for (const(SharingGroup)* group = firstSharingGroup; group !is null; group = group.getNext(this.elementRange)) 892 | sum += cast(double) group.data.samples / group.paths.length; 893 | return sum; 894 | } 895 | 896 | /// Get the distributed duration 897 | double getDistributedDuration() const 898 | { 899 | if (aggregateData) 900 | return aggregateData.distributedDuration; 901 | 902 | // Fallback: delegate to single child 903 | if (firstChild && !firstChild.nextSibling) 904 | return firstChild.getDistributedDuration(); 905 | 906 | // Fallback: compute from sharing groups for leaf nodes 907 | if (!firstSharingGroup) 908 | return 0; 909 | 910 | double sum = 0; 911 | for (const(SharingGroup)* group = firstSharingGroup; group !is null; group = group.getNext(this.elementRange)) 912 | sum += cast(double) group.data.duration / group.paths.length; 913 | return sum; 914 | } 915 | 916 | /// Reset distributed samples and duration 917 | void resetDistributedSamples() 918 | { 919 | if (aggregateData) 920 | { 921 | aggregateData.distributedSamples = 0; 922 | aggregateData.distributedDuration = 0; 923 | } 924 | } 925 | 926 | void addSamples(SampleType type, ulong samples, const(Offset)[] offsets, ulong duration) 927 | { 928 | // Only allocate aggregateData for nodes with multiple children; 929 | // leaves compute from sharing groups, single-child nodes delegate 930 | if (!firstSharingGroup && firstChild && firstChild.nextSibling) 931 | ensureAggregateData().data[type].add(samples, offsets, duration); 932 | if (parent) 933 | parent.addSamples(type, samples, offsets, duration); 934 | } 935 | 936 | void removeSamples(SampleType type, ulong samples, const(Offset)[] offsets, ulong duration) 937 | { 938 | if (aggregateData) 939 | aggregateData.data[type].remove(samples, offsets, duration); 940 | if (parent) 941 | parent.removeSamples(type, samples, offsets, duration); 942 | } 943 | 944 | void addDistributedSample(double sampleShare, double durationShare) 945 | { 946 | // Only allocate 
aggregateData for nodes with multiple children; 947 | // leaves compute from sharing groups, single-child nodes delegate 948 | if (!firstSharingGroup && firstChild && firstChild.nextSibling) 949 | { 950 | auto data = ensureAggregateData(); 951 | data.distributedSamples += sampleShare; 952 | data.distributedDuration += durationShare; 953 | } 954 | if (parent) 955 | parent.addDistributedSample(sampleShare, durationShare); 956 | } 957 | 958 | void removeDistributedSample(double sampleShare, double durationShare) 959 | { 960 | addDistributedSample(-sampleShare, -durationShare); 961 | } 962 | 963 | /// Linked list head pointing to the first sharing group containing this path 964 | /// Each group represents one extent/sample where multiple paths share data 965 | SharingGroup* firstSharingGroup; 966 | 967 | struct SeenAs 968 | { 969 | size_t[GlobalPath] paths; 970 | size_t total; 971 | } 972 | 973 | /// Collect seenAs data from all sharing groups 974 | /// Returns a map of path string -> sample count 975 | SeenAs collectSeenAs() 976 | { 977 | import std.conv : to; 978 | SeenAs result; 979 | 980 | // Traverse the linked list of sharing groups 981 | // Each group represents one extent where multiple paths share data 982 | for (auto group = firstSharingGroup; group !is null; group = group.getNext(this.elementRange)) 983 | { 984 | // Add all paths in this group to the result 985 | foreach (ref path; group.paths) 986 | result.paths[path] += group.data.samples; 987 | result.total += group.data.samples; 988 | } 989 | 990 | return result; 991 | } 992 | 993 | /// Serialized representation 994 | struct SerializedForm 995 | { 996 | string name; 997 | 998 | struct SerializedData 999 | { 1000 | // Same order as SampleType 1001 | @JSONOptional SampleData represented; 1002 | @JSONOptional SampleData exclusive; 1003 | @JSONName("shared") 1004 | @JSONOptional SampleData shared_; 1005 | @JSONOptional JSONFragment distributedSamples = JSONFragment("0"); 1006 | @JSONOptional JSONFragment distributedDuration = JSONFragment("0"); 1007 | } 1008 | SerializedData data; 1009 | @JSONOptional Nullable!bool mark; 1010 | @JSONOptional ulong[string] seenAs; // Map: path -> sample count 1011 | 1012 | BrowserPath*[] children; 1013 | } 1014 | 1015 | SerializedForm toJSON() 1016 | { 1017 | import std.conv : to; 1018 | import btdu.state : exportSeenAs; 1019 | 1020 | SerializedForm s; 1021 | s.name = this.name[]; 1022 | for (auto p = firstChild; p; p = p.nextSibling) 1023 | s.children ~= p; 1024 | static foreach (sampleType; EnumMembers!SampleType) 1025 | { 1026 | s.data.tupleof[sampleType].samples = getSamples(sampleType); 1027 | s.data.tupleof[sampleType].duration = getDuration(sampleType); 1028 | s.data.tupleof[sampleType].offsets = getOffsets(sampleType); 1029 | } 1030 | if (getDistributedSamples() !is 0.) 1031 | s.data.distributedSamples.json = getDistributedSamples().format!"%17e"; 1032 | if (getDistributedDuration() !is 0.) 1033 | s.data.distributedDuration.json = getDistributedDuration().format!"%17e"; 1034 | s.mark = 1035 | this.mark == Mark.parent ? Nullable!bool.init : 1036 | this.mark == Mark.marked ? 
true.nullable : 1037 | false.nullable; 1038 | if (exportSeenAs) 1039 | foreach (path, samples; this.collectSeenAs().paths) 1040 | s.seenAs[path.to!string] = samples; 1041 | return s; 1042 | } 1043 | 1044 | static BrowserPath fromJSON(ref SerializedForm s) 1045 | { 1046 | import std.conv : to; 1047 | 1048 | auto p = BrowserPath(null, NameString(s.name)); 1049 | foreach_reverse (child; s.children) 1050 | { 1051 | child.nextSibling = p.firstChild; 1052 | p.firstChild = child; 1053 | } 1054 | auto aggData = p.ensureAggregateData(); 1055 | static foreach (sampleType; EnumMembers!SampleType) 1056 | aggData.data[sampleType] = s.data.tupleof[sampleType]; 1057 | aggData.distributedSamples = s.data.distributedSamples.json.strip.to!double; 1058 | aggData.distributedDuration = s.data.distributedDuration.json.strip.to!double; 1059 | p.mark = 1060 | s.mark.isNull() ? Mark.parent : 1061 | s.mark.get() ? Mark.marked : 1062 | Mark.unmarked; 1063 | return p; 1064 | } 1065 | 1066 | void resetParents() 1067 | { 1068 | for (auto p = firstChild; p; p = p.nextSibling) 1069 | { 1070 | p.parent = &this; 1071 | p.resetParents(); 1072 | } 1073 | } 1074 | 1075 | /// Approximate the effect of deleting the filesystem object represented by the path. 1076 | void remove(bool obeyMarks) 1077 | { 1078 | if (deleting) 1079 | return; // already deleted 1080 | 1081 | assert(parent); 1082 | 1083 | // Mark this subtree for deletion, to aid the rebalancing below. 1084 | markForDeletion(obeyMarks); 1085 | 1086 | // Delete the subtree recursively. 1087 | doDelete(); 1088 | } 1089 | 1090 | // Mark this subtree for deletion, to aid the rebalancing below. 1091 | private bool markForDeletion(bool obeyMarks) 1092 | { 1093 | if (obeyMarks && mark == Mark.unmarked) 1094 | return false; 1095 | deleting = true; 1096 | for (auto p = firstChild; p; p = p.nextSibling) 1097 | if (!p.markForDeletion(obeyMarks)) 1098 | deleting = false; 1099 | return deleting; 1100 | } 1101 | 1102 | private void doDelete() 1103 | { 1104 | // Evict children first 1105 | for (auto p = firstChild; p; p = p.nextSibling) 1106 | p.doDelete(); 1107 | 1108 | if (!deleting) 1109 | return; 1110 | 1111 | // Rebalance the hierarchy's statistics by updating and moving sample data as needed. 1112 | evict(); 1113 | 1114 | // Unlink this node, removing it from the tree. 1115 | { 1116 | auto pp = parent.find(this.name[]); 1117 | assert(*pp == &this); 1118 | *pp = this.nextSibling; 1119 | } 1120 | } 1121 | 1122 | /// Clear all samples or move them elsewhere. 1123 | private void evict() 1124 | { 1125 | assert(parent); 1126 | 1127 | // Save this node's remaining stats before we remove them. 1128 | auto aggData = aggregateData ? *aggregateData : AggregateData.init; 1129 | 1130 | // Remove sample data from this node and its parents. 1131 | // After recursion, for non-leaf nodes, most of these should now be at zero (as far as we can estimate). 1132 | static foreach (sampleType; EnumMembers!SampleType) 1133 | if (aggData.data[sampleType].samples) // avoid quadratic complexity 1134 | removeSamples(sampleType, aggData.data[sampleType].samples, aggData.data[sampleType].offsets[], aggData.data[sampleType].duration); 1135 | if (aggData.distributedSamples) // avoid quadratic complexity 1136 | removeDistributedSample(aggData.distributedSamples, aggData.distributedDuration); 1137 | 1138 | if (firstSharingGroup is null) 1139 | return; // Directory (non-leaf) node - nothing else to do here. 
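
		// Leaf node: walk this path's sharing groups and hand its estimated
		// share of each group over to the surviving members: represented
		// samples move to a new representative, this path's distributed share
		// is split among the survivors, and a group left with a single member
		// gains exclusive samples. For instance (hypothetical numbers), in a
		// group with 12 samples shared by 4 paths this path's distributed
		// share is 12/4 = 3 samples; if 3 of the other paths survive, each of
		// them receives 3/3 = 1 additional distributed sample below.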
1140 | 1141 | // Determine if we are the representative path (have represented samples) 1142 | // in at least one situation 1143 | bool isRepresentative = aggData.data[SampleType.represented].samples > 0; 1144 | 1145 | // Get the root BrowserPath from one of our sharing groups 1146 | // (This is always the same as `btdu.state.browserRoot`.) 1147 | BrowserPath* root = firstSharingGroup.root; 1148 | debug assert(root); 1149 | if (!root) 1150 | return; 1151 | 1152 | // Process each sharing group separately 1153 | for (auto group = firstSharingGroup; group !is null; ) 1154 | { 1155 | // Find our index in this group 1156 | auto ourIndex = group.findIndex(this.elementRange); 1157 | assert(ourIndex != size_t.max, "Could not find self in sharing group"); 1158 | if (ourIndex == size_t.max) 1159 | break; 1160 | 1161 | // Collect remaining (non-deleted) paths in this group 1162 | SamplePath[] remainingPathsInGroup; 1163 | foreach (i, ref path; group.paths) 1164 | { 1165 | if (i != ourIndex) 1166 | { 1167 | auto bp = root.appendPath!true(&path); 1168 | if (bp && !bp.deleting) 1169 | remainingPathsInGroup ~= SamplePath(group.root, path); 1170 | } 1171 | } 1172 | 1173 | // Check if we are the representative for this specific group 1174 | bool isRepresentativeForThisGroup = { 1175 | if (!isRepresentative) 1176 | return false; // We have never been representative. 1177 | 1178 | // Check if we would be selected as representative from this group's paths 1179 | auto groupRepresentative = selectRepresentativePath(group.paths); 1180 | import std.algorithm.comparison : equal; 1181 | return equal(this.elementRange, groupRepresentative.elementRange); 1182 | }(); 1183 | 1184 | // Handle all redistributions for this group 1185 | if (remainingPathsInGroup.length > 0) 1186 | { 1187 | // Select the most representative path from this group's remaining members 1188 | auto newRepresentative = selectRepresentativePath(remainingPathsInGroup); 1189 | auto newRepBrowserPath = root.appendPath(&newRepresentative.globalPath); 1190 | 1191 | // Represented samples: if we're representative for this group, transfer to new representative 1192 | if (isRepresentativeForThisGroup) 1193 | { 1194 | // Calculate this group's weighted share of duration from represented samples 1195 | auto groupDuration = (group.data.samples * aggData.data[SampleType.represented].duration) / aggData.data[SampleType.represented].samples; 1196 | 1197 | // Transfer represented samples (without per-group offsets) 1198 | newRepBrowserPath.addSamples( 1199 | SampleType.represented, 1200 | group.data.samples, 1201 | [], // Skip offsets - we don't have them per-group 1202 | groupDuration, 1203 | ); 1204 | } 1205 | 1206 | // Distributed samples: redistribute our share in this group 1207 | // Our share in this group is: group.data.samples / group.paths.length 1208 | // We distribute this among the remaining members 1209 | auto ourShareSamples = group.data.samples / group.paths.length; 1210 | auto perPathSamples = ourShareSamples / remainingPathsInGroup.length; 1211 | 1212 | // Calculate duration using shared samples as basis (sum of all group.data.samples = shared samples) 1213 | auto sharedSamples = aggData.data[SampleType.shared_].samples; 1214 | auto sharedDuration = aggData.data[SampleType.shared_].duration; 1215 | auto groupTotalDuration = sharedSamples > 0 1216 | ? 
(group.data.samples * sharedDuration) / sharedSamples 1217 | : 0; 1218 | auto ourShareDuration = groupTotalDuration / group.paths.length; 1219 | auto perPathDuration = ourShareDuration / remainingPathsInGroup.length; 1220 | 1221 | foreach (ref path; remainingPathsInGroup) 1222 | root.appendPath(&path.globalPath).addDistributedSample(perPathSamples, perPathDuration); 1223 | 1224 | // Exclusive samples: if group drops to 1 member, that member becomes exclusive 1225 | if (remainingPathsInGroup.length == 1) 1226 | { 1227 | // Calculate this group's weighted share of duration from shared samples 1228 | auto groupDuration = sharedSamples > 0 1229 | ? (group.data.samples * sharedDuration) / sharedSamples 1230 | : 0; 1231 | 1232 | // Add exclusive samples (without per-group offsets) 1233 | newRepBrowserPath.addSamples( 1234 | SampleType.exclusive, 1235 | group.data.samples, 1236 | [], // Skip offsets - we don't have them per-group 1237 | groupDuration, 1238 | ); 1239 | } 1240 | } 1241 | 1242 | // Shared samples: no action needed (correct!) 1243 | 1244 | // Move to next group following our chain 1245 | group = group.pathData[ourIndex].next; 1246 | } 1247 | } 1248 | 1249 | /// Reset samples for a specific sample type on this node only 1250 | void resetNodeSamples(SampleType type) 1251 | { 1252 | if (aggregateData) 1253 | aggregateData.data[type] = SampleData.init; 1254 | } 1255 | 1256 | /// Reset all sample data on this node only 1257 | void resetNodeSamples() 1258 | { 1259 | static foreach (sampleType; EnumMembers!SampleType) 1260 | resetNodeSamples(sampleType); 1261 | resetDistributedSamples(); 1262 | } 1263 | 1264 | /// Recursively reset all sample data for this path and its children 1265 | void resetTreeSamples() 1266 | { 1267 | // Recursively reset all children first (depth-first traversal) 1268 | for (auto child = firstChild; child; child = child.nextSibling) 1269 | child.resetTreeSamples(); 1270 | 1271 | // Reset this node 1272 | resetNodeSamples(); 1273 | } 1274 | 1275 | /// Recursively clear all sharing group links for this path and its children 1276 | private void clearSharingGroupLinks() 1277 | { 1278 | firstSharingGroup = null; 1279 | for (auto child = firstChild; child; child = child.nextSibling) 1280 | child.clearSharingGroupLinks(); 1281 | } 1282 | 1283 | /// Reset this path tree for rebuild: clears all sample data and sharing group links 1284 | void reset() 1285 | { 1286 | resetTreeSamples(); 1287 | clearSharingGroupLinks(); 1288 | } 1289 | 1290 | @property bool deleted() { return deleting; } 1291 | 1292 | // Marks 1293 | 1294 | mixin(bitfields!( 1295 | Mark , q{mark} , 2, 1296 | ubyte, null , 5, 1297 | bool , q{childrenHaveMark}, 1, 1298 | )); 1299 | 1300 | /// Returns the mark as it is inherited from the parent, if any. 1301 | private bool getParentMark() 1302 | { 1303 | return parent ? parent.getEffectiveMark() : false; 1304 | } 1305 | 1306 | /// Returns true for marked, false for unmarked. 
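	/// A node with `Mark.parent` inherits the nearest explicitly marked or
	/// unmarked ancestor's state; with no such ancestor the result is false.
	/// ---
	/// // Sketch with hypothetical nodes for /usr, /usr/lib and /usr/share:
	/// usrNode.setMark(true);
	/// shareNode.setMark(false);
	/// assert(usrNode.getEffectiveMark());    // explicitly marked
	/// assert(!shareNode.getEffectiveMark()); // explicitly unmarked
	/// assert(libNode.getEffectiveMark());    // inherits the mark from /usr
	/// ---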
1307 | bool getEffectiveMark() 1308 | { 1309 | final switch (mark) 1310 | { 1311 | case Mark.parent: 1312 | return getParentMark(); 1313 | case Mark.marked: 1314 | return true; 1315 | case Mark.unmarked: 1316 | return false; 1317 | } 1318 | } 1319 | 1320 | private void clearMark() 1321 | { 1322 | mark = Mark.parent; 1323 | if (childrenHaveMark) 1324 | { 1325 | for (auto p = firstChild; p; p = p.nextSibling) 1326 | p.clearMark(); 1327 | childrenHaveMark = false; 1328 | } 1329 | } 1330 | 1331 | private void setMarkWithoutChildren(bool marked) 1332 | { 1333 | if (getParentMark() == marked) 1334 | { 1335 | mark = Mark.parent; 1336 | return; 1337 | } 1338 | mark = marked ? Mark.marked : Mark.unmarked; 1339 | for (auto p = parent; p && !p.childrenHaveMark; p = p.parent) 1340 | p.childrenHaveMark = true; 1341 | } 1342 | 1343 | void setMark(bool marked) 1344 | { 1345 | clearMark(); 1346 | assert(mark == Mark.parent); 1347 | setMarkWithoutChildren(marked); 1348 | } 1349 | 1350 | /// Flips all marks, including this node. 1351 | void invertMarks() 1352 | { 1353 | setMarkWithoutChildren(!getEffectiveMark()); 1354 | invertChildMarks(); 1355 | } 1356 | 1357 | private void invertChildMarks() 1358 | { 1359 | for (auto p = firstChild; p; p = p.nextSibling) 1360 | { 1361 | if (p.mark) 1362 | p.mark = p.mark == Mark.marked ? Mark.unmarked : Mark.marked; 1363 | if (p.childrenHaveMark) 1364 | p.invertChildMarks(); 1365 | } 1366 | } 1367 | 1368 | void enumerateMarks(scope void delegate(BrowserPath*, bool marked, scope void delegate() recurse) callback) 1369 | { 1370 | void recurse() 1371 | { 1372 | if (childrenHaveMark) 1373 | for (auto p = firstChild; p; p = p.nextSibling) 1374 | p.enumerateMarks(callback); 1375 | } 1376 | 1377 | if (mark != Mark.parent) 1378 | callback(&this, mark == Mark.marked, &recurse); 1379 | else 1380 | recurse(); 1381 | } 1382 | 1383 | void enumerateMarks(scope void delegate(BrowserPath*, bool marked) callback) 1384 | { 1385 | enumerateMarks((BrowserPath* path, bool marked, scope void delegate() recurse) { callback(path, marked); recurse(); }); 1386 | } 1387 | } 1388 | 1389 | /// Core pattern matching logic for path ranges 1390 | /// Works with any range of strings representing path components 1391 | bool pathMatches(R)(R r, PathPattern pattern) @nogc 1392 | { 1393 | if (pattern.empty && r.empty) 1394 | return true; 1395 | if (r.empty) 1396 | return false; 1397 | if (r.front.length == 0 || r.front.startsWith("\0")) 1398 | return pathMatches(r.dropOne, pattern); // Skip special nodes 1399 | if (pattern.empty) 1400 | return false; 1401 | if (pattern.front == doubleGlob) 1402 | { 1403 | pattern.popFront(); 1404 | while (!r.empty) 1405 | { 1406 | if (pathMatches(r, pattern)) 1407 | return true; 1408 | r.popFront(); 1409 | } 1410 | return false; 1411 | } 1412 | if (pattern.front.match(r.front)) 1413 | return pathMatches(r.dropOne, pattern.dropOne); 1414 | return false; 1415 | } 1416 | 1417 | /// Returns true if path 'a' is more representative than path 'b' 1418 | /// This is the full comparison logic for representativeness ordering 1419 | /// Works with both GlobalPath and BrowserPath 1420 | bool isMoreRepresentative(A, B)(ref A a, ref B b) 1421 | { 1422 | // Process path rules sequentially in order 1423 | foreach (rule; pathRules) 1424 | { 1425 | bool aMatches = a.matches(rule.pattern); 1426 | bool bMatches = b.matches(rule.pattern); 1427 | 1428 | if (aMatches != bMatches) 1429 | { 1430 | // One matches, the other doesn't 1431 | if (rule.type == PathRule.Type.prefer) 1432 | return 
aMatches; // Prefer the matching path 1433 | else // rule.type == PathRule.Type.ignore 1434 | return bMatches; // Prefer the non-matching path (i.e., not ignored) 1435 | } 1436 | // Both match or neither matches - continue to next rule 1437 | } 1438 | // Prefer paths with resolved roots 1439 | auto aResolved = a.isResolved(); 1440 | auto bResolved = b.isResolved(); 1441 | if (aResolved != bResolved) 1442 | return aResolved; 1443 | // Shortest path always wins 1444 | auto aLength = a.length; 1445 | auto bLength = b.length; 1446 | if (aLength != bLength) 1447 | return aLength < bLength; 1448 | // If the length is the same, pick the lexicographically smallest one 1449 | return a < b; 1450 | } 1451 | 1452 | Path selectRepresentativePath(Path)(Path[] paths) 1453 | { 1454 | return paths.fold!((a, b) { 1455 | return isMoreRepresentative(a, b) ? a : b; 1456 | })(); 1457 | } 1458 | 1459 | /// Find the index of the most representative path in an array. 1460 | /// Returns size_t.max if the array is empty. 1461 | size_t selectRepresentativeIndex(Path)(Path[] paths) 1462 | { 1463 | if (paths.length == 0) 1464 | return size_t.max; 1465 | 1466 | size_t bestIndex = 0; 1467 | foreach (i; 1 .. paths.length) 1468 | if (isMoreRepresentative(paths[i], paths[bestIndex])) 1469 | bestIndex = i; 1470 | return bestIndex; 1471 | } 1472 | 1473 | // We prefix "special" names with one NUL character to 1474 | // distinguish them from filesystem entries. 1475 | bool skipOverNul(C)(ref C[] str) 1476 | { 1477 | // Workaround for https://issues.dlang.org/show_bug.cgi?id=22302 1478 | if (str.startsWith("\0")) 1479 | { 1480 | str = str[1 .. $]; 1481 | return true; 1482 | } 1483 | return false; 1484 | } 1485 | 1486 | /// Inline string type. 1487 | alias InlineString(size_t size) = InlineArr!(immutable(char), size); 1488 | 1489 | union InlineArr(T, size_t size) 1490 | { 1491 | private: 1492 | static assert(size * T.sizeof > T[].sizeof); 1493 | alias InlineSize = ubyte; 1494 | static assert(size < InlineSize.max); 1495 | 1496 | T[] str; 1497 | struct 1498 | { 1499 | T[size] inlineBuf; 1500 | InlineSize inlineLength; 1501 | } 1502 | 1503 | public: 1504 | this(in Unqual!T[] s) 1505 | { 1506 | if (s.length > size) 1507 | str = growAllocator.makeArray!T(s[]); 1508 | else 1509 | { 1510 | inlineBuf[0 .. s.length] = s; 1511 | inlineLength = cast(InlineSize)s.length; 1512 | } 1513 | } 1514 | 1515 | inout(T)[] opSlice() inout 1516 | { 1517 | if (inlineLength) 1518 | return inlineBuf[0 .. inlineLength]; 1519 | else 1520 | return str; 1521 | } 1522 | 1523 | bool opCast(T : bool)() const { return this !is typeof(this).init; } 1524 | 1525 | bool opEquals(ref const InlineArr other) const 1526 | { 1527 | return this[] == other[]; 1528 | } 1529 | } 1530 | --------------------------------------------------------------------------------