├── backup-juicefs-metadata.sh ├── bulk-delete-buckets-r2.sh ├── readme.md └── sharded-buckets-info-r2.sh /backup-juicefs-metadata.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Update PATH to include /usr/local/bin 4 | export PATH=$PATH:/usr/local/bin 5 | 6 | # Variables 7 | DAYS_TO_KEEP=30 8 | BACKUP_DIR="/home/juicefs_mount/juicefs_metadata_backups" 9 | BACKUP_FILE_NAME="meta-dump" 10 | COMPRESSION="pigz" # change to "zstd" for zstd compression 11 | PIGZ_COMP_LEVEL='-4' 12 | ZSTD_COMP_LEVEL='-1' 13 | 14 | # Check if metadata source argument is passed 15 | if [ $# -eq 0 ] 16 | then 17 | echo "No arguments supplied. Please provide the metadata source as an argument." 18 | echo 19 | echo "Examples:" 20 | echo 21 | echo "$0 sqlite3:///home/juicefs/myjuicefs.db" 22 | echo "$0 redis://:password@localhost:6479/1" 23 | exit 1 24 | fi 25 | 26 | METADATA_SOURCE=$1 27 | 28 | # Create the backup directory if it does not exist 29 | mkdir -p $BACKUP_DIR 30 | 31 | # Timestamp 32 | TIMESTAMP=$(date +%Y%m%d%H%M%S) 33 | 34 | # Backup 35 | BACKUP_FILE=$BACKUP_DIR/$BACKUP_FILE_NAME-$TIMESTAMP.json 36 | juicefs dump $METADATA_SOURCE $BACKUP_FILE 37 | 38 | # Check if the backup was successful 39 | if [ $? -eq 0 ]; then 40 | echo "Backup successful!" 41 | 42 | # Compress the backup file 43 | case $COMPRESSION in 44 | "pigz") 45 | pigz $PIGZ_COMP_LEVEL $BACKUP_FILE 46 | BACKUP_FILE=$BACKUP_FILE.gz 47 | ;; 48 | "zstd") 49 | zstd $ZSTD_COMP_LEVEL $BACKUP_FILE 50 | BACKUP_FILE=$BACKUP_FILE.zst 51 | ;; 52 | *) 53 | echo "Invalid compression method. Please set COMPRESSION to either 'pigz' or 'zstd'." 54 | exit 1 55 | ;; 56 | esac 57 | 58 | # Delete files older than DAYS_TO_KEEP days 59 | find $BACKUP_DIR -type f -name "$BACKUP_FILE_NAME-*.json.*" -mtime +$DAYS_TO_KEEP -exec rm {} \; 60 | echo "Deleted backups older than $DAYS_TO_KEEP days." 61 | echo "Backup metadata file: $BACKUP_FILE" 62 | 63 | else 64 | echo "Backup failed!" 65 | fi 66 | -------------------------------------------------------------------------------- /bulk-delete-buckets-r2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | CPUS=$(nproc) 3 | MAX_CONCURRENT=$(($CPUS*2)) 4 | 5 | # Update PATH to include /usr/local/bin 6 | export PATH=$PATH:/usr/local/bin 7 | 8 | # Check if required arguments are passed 9 | if [ $# -eq 0 ] 10 | then 11 | echo "No arguments supplied. Please provide the following:" 12 | echo 13 | echo "aws cli profile name i.e. r2" 14 | echo "s3 sharded bucket prefix" 15 | echo "shard count" 16 | echo "r2 endpoint-url i.e. https://your_cf_account_id.r2.cloudflarestorage.com/" 17 | echo 18 | echo "Example: if your JuiceFS sharded bucket name prefix is:" 19 | echo "juicefs-shard-% for juicefs-shard-0, juicefs-shard-1 ... 
juicefs-shard-60 etc" 20 | echo 21 | echo "$0 r2 juicefs-shard- 60 https://your_cf_acount_id.r2.cloudflarestorage.com/" 22 | exit 1 23 | fi 24 | 25 | AWS_PROFILE=$1 26 | BUCKET_PREFIX=$2 27 | SHARD_COUNT=$(($3-1)) 28 | ENDPOINT=$4 29 | 30 | AWS_DEFAULT_CONCURRENT_REQUESTS=$(aws configure get s3.max_concurrent_requests --profile $AWS_PROFILE) 31 | aws configure set s3.max_concurrent_requests $MAX_CONCURRENT --profile $AWS_PROFILE 32 | AWS_OPTIMAL_CONCURRENT_REQUESTS=$(aws configure get s3.max_concurrent_requests --profile $AWS_PROFILE) 33 | 34 | echo "Default s3.max_concurrent_requests: $AWS_DEFAULT_CONCURRENT_REQUESTS" 35 | echo "Optimally set s3.max_concurrent_requests: $AWS_OPTIMAL_CONCURRENT_REQUESTS" 36 | 37 | i=0 38 | while [ $i -le $SHARD_COUNT ] 39 | do 40 | # Check if the bucket exists 41 | if aws s3api head-bucket --bucket ${BUCKET_PREFIX}$i --profile "$AWS_PROFILE" --endpoint-url=${ENDPOINT} > /dev/null 2>&1 42 | then 43 | aws s3 rm s3://${BUCKET_PREFIX}$i/myjuicefs --recursive --profile "$AWS_PROFILE" --endpoint-url=${ENDPOINT} 44 | else 45 | echo "Bucket s3://${BUCKET_PREFIX}$i does not exist or you do not have permission to access it." 46 | fi 47 | ((i++)) 48 | done 49 | 50 | aws configure set s3.max_concurrent_requests $AWS_DEFAULT_CONCURRENT_REQUESTS --profile $AWS_PROFILE 51 | echo "Reset to default s3.max_concurrent_requests: $AWS_DEFAULT_CONCURRENT_REQUESTS" 52 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # JuiceFS Setup 2 | 3 | Installing [JuiceFS](https://juicefs.com/docs/community/introduction/) high performanced POSIX compatible shared file system on Centmin Mod LEMP stack using [JuiceFS caching](https://juicefs.com/docs/community/cache_management) with [Cloudflare R2](https://blog.cloudflare.com/r2-open-beta/) - S3 compatible object storage and local sqlite3 Metadata Engine. Check out JuiceFS Github discussion forum https://github.com/juicedata/juicefs/discussions if you have questions. 4 | 5 | JuiceFS implements an architecture that seperates "data" and "metadata" storage. When using JuiceFS to store data, the data itself is persisted in [object storage](https://juicefs.com/docs/community/how_to_setup_object_storage/) (e.g., Amazon S3, OpenStack Swift, Ceph, Azure Blob or MinIO), and the corresponding metadata can be persisted in various databases ([Metadata Engines](https://juicefs.com/docs/community/databases_for_metadata/)) such as Redis, Amazon MemoryDB, MariaDB, MySQL, TiKV, etcd, SQLite, KeyDB, PostgreSQL, BadgerDB, or FoundationDB. 6 | 7 | From https://juicefs.com/en/blog/usage-tips/juicefs-24-qas-for-beginners 8 | 9 | **How is the performance of JuiceFS?** 10 | 11 | JuiceFS is a distributed file system. The latency of metadata is determined by 1 to 2 network round trip(s) between the mount point and metadata service (generally 1-3 ms), and the latency of data depends on the object storage latency (generally 20-100ms). The throughput of sequential read and write can reachup to 2800 MiB/s (see Benchmark with fio), depending on the network bandwidth and whether the data can be easily compressed. 12 | 13 | JuiceFS has a built-in multi-level cache (invalidated automatically). Once the cache is warmed up, latency and throughput can be very close to a local file system (although the use of FUSE may bring a small amount of overhead). 
14 | 15 | # Table Of Contents 16 | 17 | * [Install JuiceFS binary](#install-juicefs-binary) 18 | * [Upgrade JuiceFS binary](#upgrade-juicefs-binary) 19 | * [Setup JuiceFS logrotation](#setup-juicefs-logrotation) 20 | * [Format Cloudflare R2 S3 Storage](#format-cloudflare-r2-s3-storage) 21 | * [Mount the JuiceFS Formatted R2 S3 Storage](#mount-the-juicefs-formatted-r2-s3-storage) 22 | * [Manual Mount](#manual-mount) 23 | * [systemd service Mount](#systemd-service-mount) 24 | * [Setup JuiceFS S3 Gateway](#setup-juicefs-s3-gateway) 25 | * [Manually Starting JuiceFS S3 Gateway](#manually-starting-juicefs-s3-gateway) 26 | * [systemd service Starting JuiceFS S3 Gateway](#systemd-service-starting-juicefs-s3-gateway) 27 | * [Working With Cloudflare R2 S3 Mounted Directory and JuiceFS S3 Gateway](#working-with-cloudflare-r2-s3-mounted-directory-and-juicefs-s3-gateway) 28 | * [Mount Info](#mount-info) 29 | * [Inspecting JuiceFS metadata engine status](#inspecting-juicefs-metadata-engine-status) 30 | * [Warmup Local Cache](#warmup-local-cache) 31 | * [Check Disk Size](#check-disk-size) 32 | * [JuiceFS Benchmarks](#juicefs-benchmarks) 33 | * [Redis Metadata Cache + Sharded R2 Mount On Intel Xeon E-2276G 6C/12T, 32GB memory and 2x 960GB NVMe raid 1](#redis-metadata-cache--sharded-r2-mount-on-intel-xeon-e-2276g-6c12t-32gb-memory-and-2x-960gb-nvme-raid-1) 34 | * [JuiceFS Benchmarks 100x R2 Sharded Mount + Redis Metadata Caching](#juicefs-benchmarks-100x-r2-sharded-mount--redis-metadata-caching) 35 | * [JuiceFS Benchmarks 61x R2 Sharded Mount + Redis Metadata Caching](#juicefs-benchmarks-61x-r2-sharded-mount--redis-metadata-caching) 36 | * [JuiceFS Benchmarks 21x R2 Sharded Mount + Redis Metadata Caching](#juicefs-benchmarks-21x-r2-sharded-mount--redis-metadata-caching) 37 | * [JuiceFS Benchmarks 10x R2 Sharded Mount + Redis Metadata Caching](#juicefs-benchmarks-10x-r2-sharded-mount--redis-metadata-caching) 38 | * [Sharded R2 Mount On Intel Xeon E-2276G 6C/12T, 32GB memory and 2x 960GB NVMe raid 1](#sharded-r2-mount-on-intel-xeon-e-2276g-6c12t-32gb-memory-and-2x-960gb-nvme-raid-1) 39 | * [10x Cloudflare R2 sharded JuiceFS mount](#10x-r2-sharded-juicefs-mount) 40 | * [5x Cloudflare R2 sharded JuiceFS mount](#5x-r2-sharded-juicefs-mount) 41 | * [On Intel Xeon E-2276G 6C/12T, 32GB memory and 2x 960GB NVMe raid 1](#on-intel-xeon-e-2276g-6c12t-32gb-memory-and-2x-960gb-nvme-raid-1) 42 | * [with R2 bucket created with location hint North American East](#with-r2-bucket-created-with-location-hint-north-american-east) 43 | * [with R2 bucket created with location hint North American West](#with-r2-bucket-created-with-location-hint-north-american-west) 44 | * [with R2 bucket created on server](#with-r2-bucket-created-on-server) 45 | * [File copy tests](#file-copy-tests) 46 | * [fio test for E-2276G server](#fio-test-for-e-2276g-server) 47 | * [On Intel Core i7 4790K 4C/8T, 32GB memory and 2x 240GB SSD raid 1](#on-intel-core-i7-4790k-4c8t-32gb-memory-and-2x-240gb-ssd-raid-1) 48 | * [fio tests](#fio-test) 49 | * [Destroying JuiceFS Filesystem](#destroying-juicefs-filesystem) 50 | * [Backup JuiceFS Metadata Script](#backup-juicefs-metadata-script) 51 | * [JuiceFS Backup Metadata Cronjob](#juicefs-backup-metadata-cronjob) 52 | 53 | # Install JuiceFS binary 54 | 55 | ``` 56 | cd /svr-setup 57 | 58 | JFS_LATEST_TAG=$(curl -s https://api.github.com/repos/juicedata/juicefs/releases/latest | grep 'tag_name' | cut -d '"' -f 4 | tr -d 'v') 59 | 60 | wget 
"https://github.com/juicedata/juicefs/releases/download/v${JFS_LATEST_TAG}/juicefs-${JFS_LATEST_TAG}-linux-amd64.tar.gz" -O juicefs-${JFS_LATEST_TAG}-linux-amd64.tar.gz 61 | 62 | tar -zxf "juicefs-${JFS_LATEST_TAG}-linux-amd64.tar.gz" 63 | 64 | install juicefs /usr/local/bin 65 | \cp -af /usr/local/bin/juicefs /sbin/mount.juicefs 66 | ``` 67 | ``` 68 | juicefs -V 69 | juicefs version 1.0.0-beta3+2022-05-05.0fb9155 70 | ``` 71 | ``` 72 | juicefs --help 73 | NAME: 74 | juicefs - A POSIX file system built on Redis and object storage. 75 | 76 | USAGE: 77 | juicefs [global options] command [command options] [arguments...] 78 | 79 | VERSION: 80 | 1.0.0-beta3+2022-05-05.0fb9155 81 | 82 | COMMANDS: 83 | ADMIN: 84 | format Format a volume 85 | config Change configuration of a volume 86 | destroy Destroy an existing volume 87 | gc Garbage collector of objects in data storage 88 | fsck Check consistency of a volume 89 | dump Dump metadata into a JSON file 90 | load Load metadata from a previously dumped JSON file 91 | INSPECTOR: 92 | status Show status of a volume 93 | stats Show real time performance statistics of JuiceFS 94 | profile Show profiling of operations completed in JuiceFS 95 | info Show internal information of a path or inode 96 | SERVICE: 97 | mount Mount a volume 98 | umount Unmount a volume 99 | gateway Start an S3-compatible gateway 100 | webdav Start a WebDAV server 101 | TOOL: 102 | bench Run benchmark on a path 103 | warmup Build cache for target directories/files 104 | rmr Remove directories recursively 105 | sync Sync between two storages 106 | 107 | GLOBAL OPTIONS: 108 | --verbose, --debug, -v enable debug log (default: false) 109 | --quiet, -q only warning and errors (default: false) 110 | --trace enable trace log (default: false) 111 | --no-agent disable pprof (:6060) and gops (:6070) agent (default: false) 112 | --no-color disable colors (default: false) 113 | --help, -h show help (default: false) 114 | --version, -V print only the version (default: false) 115 | 116 | COPYRIGHT: 117 | Apache License 2.0 118 | ``` 119 | 120 | # Upgrade JuiceFS Binary 121 | 122 | Following instructions for upgrading JuiceFS client [here](https://github.com/juicedata/juicefs/blob/main/docs/en/faq.md#how-to-upgrade-juicefs-client) involves: 123 | 124 | 1. Unmounting the JuiceFS mount. If you setup using [systemd JuiceFS service file](#mount-the-juicefs-formatted-r2-s3-storage), then it's just a service stop for it and the [JuiceFS S3 Gateway service](#systemd-service-starting-juicefs-s3-gateway). 125 | 126 | Upgrading to [JuiceFS v1.0.0](https://github.com/juicedata/juicefs/releases/tag/v1.0.0): 127 | 128 | ``` 129 | systemctl stop juicefs.service juicefs-gateway.service 130 | ``` 131 | 132 | 2. Updating JuiceFS binary 133 | 134 | ``` 135 | cd /svr-setup 136 | 137 | JFS_LATEST_TAG=$(curl -s https://api.github.com/repos/juicedata/juicefs/releases/latest | grep 'tag_name' | cut -d '"' -f 4 | tr -d 'v') 138 | 139 | wget "https://github.com/juicedata/juicefs/releases/download/v${JFS_LATEST_TAG}/juicefs-${JFS_LATEST_TAG}-linux-amd64.tar.gz" -O juicefs-${JFS_LATEST_TAG}-linux-amd64.tar.gz 140 | 141 | tar -zxf "juicefs-${JFS_LATEST_TAG}-linux-amd64.tar.gz" 142 | 143 | install juicefs /usr/local/bin 144 | \cp -af /usr/local/bin/juicefs /sbin/mount.juicefs 145 | ``` 146 | 147 | 3. Starting JuiceFS and JuiceFS S3 Gateway services 148 | 149 | ``` 150 | systemctl start juicefs.service juicefs-gateway.service 151 | systemctl status juicefs.service juicefs-gateway.service --no-pager 152 | ``` 153 | 154 | 4. 
Checking the updated JuiceFS binary and mount. 155 | 156 | ``` 157 | juicefs -V 158 | juicefs version 1.0.4+2023-04-06.f1c475d 159 | 160 | df -hT /home/juicefs_mount 161 | Filesystem Type Size Used Avail Use% Mounted on 162 | JuiceFS:myjuicefs fuse.juicefs 1.0P 0 1.0P 0% /home/juicefs_mount 163 | ``` 164 | 165 | ``` 166 | mkdir -p /home/juicefs 167 | cd /home/juicefs 168 | 169 | juicefs status sqlite3:///home/juicefs/myjuicefs.db 170 | 2022/06/21 13:54:45.570232 juicefs[28472] : Meta address: sqlite3:///home/juicefs/myjuicefs.db [interface.go:397] 171 | { 172 | "Setting": { 173 | "Name": "myjuicefs", 174 | "UUID": "2109366a-5f4f-4449-8723-dfec21f48e8f", 175 | "Storage": "s3", 176 | "Bucket": "https://juicefs.cfaccountid.r2.cloudflarestorage.com", 177 | "AccessKey": "cfaccesskey", 178 | "SecretKey": "removed", 179 | "BlockSize": 4096, 180 | "Compression": "none", 181 | "Shards": 0, 182 | "HashPrefix": false, 183 | "Capacity": 0, 184 | "Inodes": 0, 185 | "KeyEncrypted": true, 186 | "TrashDays": 0, 187 | "MetaVersion": 1, 188 | "MinClientVersion": "", 189 | "MaxClientVersion": "" 190 | }, 191 | "Sessions": [ 192 | { 193 | "Sid": 19, 194 | "Expire": "2022-08-12T12:58:32Z", 195 | "Version": "1.0.4+2023-04-06.f1c475d", 196 | "HostName": "host.domain.com", 197 | "MountPoint": "/home/juicefs_mount", 198 | "ProcessID": 28376 199 | }, 200 | { 201 | "Sid": 20, 202 | "Expire": "2022-08-12T12:58:32Z", 203 | "Version": "1.0.4+2023-04-06.f1c475d", 204 | "HostName": "host.domain.com", 205 | "MountPoint": "s3gateway", 206 | "ProcessID": 28387 207 | } 208 | ] 209 | } 210 | ``` 211 | 212 | ## Setup JuiceFS logrotation 213 | 214 | ``` 215 | cat > "/etc/logrotate.d/juicefs" <<EOF 216 | /var/log/juicefs.log { 217 |         daily 218 |         rotate 30 219 |         maxsize 500M 220 |         notifempty 221 | } 222 | EOF 223 | ``` 224 | 225 | Verify the logrotate configuration with a dry run: 226 | 227 | ``` 228 | logrotate -d /etc/logrotate.d/juicefs 229 | 230 | reading config file juicefs 231 | Allocating hash table for state file, size 15360 B 232 | 233 | Handling 1 logs 234 | 235 | rotating pattern: /var/log/juicefs.log after 1 days (30 rotations) 236 | empty log files are not rotated, only log files >= 524288000 are rotated earlier, old logs are removed 237 | considering log /var/log/juicefs.log 238 | log does not need rotating (log has been rotated at 2022-5-25 3:0, that is not day ago yet) 239 | ``` 240 | 241 | # Format Cloudflare R2 S3 Storage 242 | 243 | Fill in the variables for your Cloudflare account id, R2 bucket access key and secret key and the R2 bucket name - create the R2 bucket beforehand. The sqlite3 database will be saved at `/home/juicefs/myjuicefs.db`. 244 | 245 | * JuiceFS supports compression algorithms, enabled via the `--compress` option, which has 3 available settings - lz4, zstd or none (default). 246 | * `--trash-days` - number of days after which removed files will be permanently deleted. Default = 1. 247 | * `--block-size` - size of block in KiB 248 | * Other various format options are listed at https://juicefs.com/docs/community/command_reference#options. 249 | 250 | ``` 251 | cfaccountid='CF_ACCOUNT_ID' 252 | cfaccesskey='' 253 | cfsecretkey='' 254 | cfbucketname='juicefs' 255 | 256 | mkdir -p /home/juicefs 257 | cd /home/juicefs 258 | 259 | juicefs format --storage s3 \ 260 | --bucket https://${cfbucketname}.${cfaccountid}.r2.cloudflarestorage.com \ 261 | --access-key $cfaccesskey \ 262 | --secret-key $cfsecretkey \ 263 | --compress none \ 264 | --trash-days 0 \ 265 | --block-size 4096 \ 266 | sqlite3:///home/juicefs/myjuicefs.db myjuicefs 267 | ``` 268 | 269 | # Mount the JuiceFS Formatted R2 S3 Storage 270 | 271 | Create the mount directory and cache directories. 272 | 273 | ``` 274 | mkdir -p /home/juicefs_mount /home/juicefs_cache 275 | ``` 276 | 277 | ## Manual Mount 278 | 279 | There are additional JuiceFS mounting options outlined at https://juicefs.com/docs/community/command_reference#options-1 280 | 281 | Manually mount the R2 S3 storage at `/home/juicefs_mount`. 
Note for Cloudflare R2, you need to [disable automatic metadata backups](https://juicefs.com/docs/community/how_to_setup_object_storage/#r2) with `--backup-meta 0`. Otherwise, for non-R2 S3 providers, you can set `--backup-meta 1h`. 282 | 283 | ``` 284 | juicefs mount sqlite3:///home/juicefs/myjuicefs.db /home/juicefs_mount \ 285 | --cache-dir /home/juicefs_cache \ 286 | --cache-size 102400 \ 287 | --buffer-size 2048 \ 288 | --open-cache 0 \ 289 | --attr-cache 1 \ 290 | --entry-cache 1 \ 291 | --dir-entry-cache 1 \ 292 | --cache-partial-only false \ 293 | --free-space-ratio 0.1 \ 294 | --writeback \ 295 | --no-usage-report \ 296 | --max-uploads 20 \ 297 | --max-deletes 10 \ 298 | --backup-meta 0 \ 299 | --log /var/log/juicefs.log \ 300 | --get-timeout 300 \ 301 | --put-timeout 900 \ 302 | --io-retries 90 \ 303 | --prefetch 1 -d 304 | ``` 305 | 306 | Note: As `--backup-meta 0` is set for Cloudflare R2 to disable automatic metadata backups, you can manually run the backup command to back up to a file i.e. `meta-dump.json`: 307 | 308 | ``` 309 | juicefs dump sqlite3:///home/juicefs/myjuicefs.db meta-dump.json 310 | ``` 311 | 312 | ## systemd service Mount 313 | 314 | Or instead of manually mounting, set up a systemd service file to manage mounting and unmounting the directory. 315 | 316 | `/usr/lib/systemd/system/juicefs.service` 317 | 318 | ``` 319 | [Unit] 320 | Description=JuiceFS 321 | AssertPathIsDirectory=/home/juicefs_mount 322 | After=network-online.target 323 | 324 | [Service] 325 | Type=simple 326 | WorkingDirectory=/home/juicefs 327 | ExecStart=/usr/local/bin/juicefs mount \ 328 | "sqlite3:///home/juicefs/myjuicefs.db" \ 329 | /home/juicefs_mount \ 330 | --no-usage-report \ 331 | --writeback \ 332 | --cache-size 102400 \ 333 | --cache-dir /home/juicefs_cache \ 334 | --buffer-size 2048 \ 335 | --open-cache 0 \ 336 | --attr-cache 1 \ 337 | --entry-cache 1 \ 338 | --dir-entry-cache 1 \ 339 | --cache-partial-only false \ 340 | --free-space-ratio 0.1 \ 341 | --max-uploads 20 \ 342 | --max-deletes 10 \ 343 | --backup-meta 0 \ 344 | --log /var/log/juicefs.log \ 345 | --get-timeout 300 \ 346 | --put-timeout 900 \ 347 | --io-retries 90 \ 348 | --prefetch 1 349 | 350 | ExecStop=/usr/local/bin/juicefs umount /home/juicefs_mount 351 | Restart=always 352 | RestartSec=5 353 | 354 | [Install] 355 | WantedBy=default.target 356 | ``` 357 | ``` 358 | mkdir -p /etc/systemd/system/juicefs.service.d 359 | 360 | cat > "/etc/systemd/system/juicefs.service.d/openfileslimit.conf" <<EOF 361 | [Service] 362 | LimitNOFILE=524288 363 | EOF 364 | ``` 365 | 366 | Reload systemd, then enable and start the JuiceFS mount service: 367 | 368 | ``` 369 | systemctl daemon-reload 370 | systemctl enable juicefs 371 | systemctl start juicefs 372 | systemctl status juicefs --no-pager 373 | ``` 374 | 375 | Confirm the raised open files limit took effect for the service: 376 | 377 | ``` 378 | systemctl show juicefs --property=LimitNOFILE 379 | LimitNOFILE=524288 380 | ``` 381 | 382 | Check the JuiceFS mount service log via journalctl: 383 | 384 | ``` 385 | journalctl -u juicefs --no-pager 386 | 387 | -- Logs begin at Wed 2022-05-25 04:26:32 UTC, end at Wed 2022-05-25 04:32:20 UTC. -- 388 | May 25 04:26:32 hostname systemd[1]: Started JuiceFS. 389 | May 25 04:26:33 hostname juicefs[26947]: 2022/05/25 04:26:33.126212 juicefs[26947] : Meta address: sqlite3:///home/juicefs/myjuicefs.db [interface.go:385] 390 | May 25 04:26:33 hostname juicefs[26947]: 2022/05/25 04:26:33.126772 juicefs[26947] : Data use s3://juicefs/myjuicefs/ [mount.go:289] 391 | May 25 04:26:33 hostname juicefs[26947]: 2022/05/25 04:26:33.127088 juicefs[26947] : Disk cache (/home/juicefs_cache/3c874e07-a62c-42a9-ae67-5865491dd4a8/): capacity (102400 MB), free ratio (10%), max pending pages (51) [disk_cache.go:90] 392 | May 25 04:26:33 hostname juicefs[26947]: 2022/05/25 04:26:33.138212 juicefs[26947] : create session 1 OK [base.go:185] 393 | May 25 04:26:33 hostname juicefs[26947]: 2022/05/25 04:26:33.138802 juicefs[26947] : Prometheus metrics listening on 127.0.0.1:9567 [mount.go:157] 394 | May 25 04:26:33 hostname juicefs[26947]: 2022/05/25 04:26:33.138890 juicefs[26947] : Mounting volume myjuicefs at /home/juicefs_mount ... 
[mount_unix.go:177] 395 | May 25 04:26:33 hostname juicefs[26947]: 2022/05/25 04:26:33.628570 juicefs[26947] : OK, myjuicefs is ready at /home/juicefs_mount [mount_unix.go:45] 396 | May 25 04:32:19 hostname juicefs[26947]: 2022/05/25 04:32:19.284310 juicefs[26947] : Secret key is removed for the sake of safety [sql.go:2770] 397 | May 25 04:32:20 hostname juicefs[26947]: 2022/05/25 04:32:20.804652 juicefs[26947] : backup metadata succeed, used 1.527736137s [backup.go:69] 398 | ``` 399 | 400 | Using the AWS CLI profile for the r2 user to check the underlying JuiceFS metadata: 401 | 402 | ``` 403 | cfbucketname='juicefs' 404 | url=https://${cfaccountid}.r2.cloudflarestorage.com 405 | aws s3 ls --recursive --profile r2 --endpoint-url=$url s3://$cfbucketname/ 406 | 2022-05-25 04:26:25 36 myjuicefs/juicefs_uuid 407 | 2022-05-25 04:32:20 598 myjuicefs/meta/dump-2022-05-25-043219.json.gz 408 | ``` 409 | 410 | # Setup JuiceFS S3 Gateway 411 | 412 | Set up the [JuiceFS S3 Gateway](https://juicefs.com/docs/community/s3_gateway#use-the-aws-cli) and set up the AWS CLI profile `juicefs` using my [awscli-get.sh](https://awscli-get.centminmod.com/) script to configure the profile. 413 | 414 | Install `awscli-get.sh`: 415 | 416 | ``` 417 | curl -4s https://awscli-get.centminmod.com/awscli-get.sh -o awscli-get.sh 418 | chmod +x awscli-get.sh 419 | ``` 420 | 421 | Change the `MINIO_ROOT_USER` and `MINIO_ROOT_PASSWORD` variable values to your desired S3 Gateway access and secret keys. Make sure they're different from your Cloudflare R2 access and secret key credentials. 422 | 423 | Setup AWS CLI profile using `awscli-get.sh`: 424 | 425 | ``` 426 | export MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE 427 | export MINIO_ROOT_PASSWORD=12345678 428 | 429 | # https://awscli-get.centminmod.com/ 430 | export AWS_ACCESS_KEY_ID=$MINIO_ROOT_USER 431 | export AWS_SECRET_ACCESS_KEY=$MINIO_ROOT_PASSWORD 432 | export AWS_DEFAULT_REGION=auto 433 | export AWS_DEFAULT_OUTPUT=text 434 | ./awscli-get.sh install juicefs 435 | ``` 436 | 437 | Example output from the [awscli-get.sh](https://awscli-get.centminmod.com/) script installing the AWS CLI profile named `juicefs`: 438 | 439 | ``` 440 | ./awscli-get.sh install juicefs 441 | 442 | existing config file detected: /root/.aws/config 443 | existing credential file detected: /root/.aws/credentials 444 | 445 | configure aws-cli profile: juicefs 446 | configure aws cli for Cloudflare R2 447 | aws configure set s3.max_concurrent_requests 2 --profile juicefs 448 | aws configure set s3.multipart_threshold 50MB --profile juicefs 449 | aws configure set s3.multipart_chunksize 50MB --profile juicefs 450 | aws configure set s3.addressing_style path --profile juicefs 451 | 452 | aws-cli profile: juicefs set: 453 | 454 | aws_access_key_id: AKIAIOSFODNN7EXAMPLE 455 | aws_secret_access_key: 12345678 456 | default.region: auto 457 | default output format: text 458 | 459 | list aws-cli profiles: 460 | 461 | default 462 | r2 463 | juicefs 464 | ``` 465 | 466 | ## Manually Starting JuiceFS S3 Gateway 467 | 468 | Manually starting the created JuiceFS S3 Gateway. Note for Cloudflare R2, you need to [disable automatic metadata backups](https://juicefs.com/docs/community/how_to_setup_object_storage/#r2) with `--backup-meta 0`. Otherwise, for non-R2 S3 providers, you can set `--backup-meta 1h`. 
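Since `--backup-meta 0` is also used for the gateway below, the repo's bundled `backup-juicefs-metadata.sh` script can be scheduled via cron to handle metadata backups instead - a minimal sketch, assuming you copied the script to `/usr/local/bin` (the install path and schedule are your choice):

```
# run the repo's metadata backup script daily at 3:15 AM (assumed script location)
cat > "/etc/cron.d/juicefs-metadata-backup" <<EOF
15 3 * * * root /usr/local/bin/backup-juicefs-metadata.sh "sqlite3:///home/juicefs/myjuicefs.db" > /dev/null 2>&1
EOF
```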
469 | 470 | Private local access only: 471 | 472 | ``` 473 | # local private access 474 | juicefs gateway \ 475 | --cache-dir /home/juicefs_cache \ 476 | --cache-size 102400 \ 477 | --attr-cache 1 \ 478 | --entry-cache 0 \ 479 | --dir-entry-cache 1 \ 480 | --prefetch 1 \ 481 | --free-space-ratio 0.1 \ 482 | --writeback \ 483 | --backup-meta 0 \ 484 | --no-usage-report \ 485 | --buffer-size 2048 sqlite3:///home/juicefs/myjuicefs.db localhost:3777 486 | ``` 487 | 488 | Public net accessible mode: 489 | 490 | ``` 491 | # public access 492 | juicefs gateway \ 493 | --cache-dir /home/juicefs_cache \ 494 | --cache-size 102400 \ 495 | --attr-cache 1 \ 496 | --entry-cache 0 \ 497 | --dir-entry-cache 1 \ 498 | --prefetch 1 \ 499 | --free-space-ratio 0.1 \ 500 | --writeback \ 501 | --backup-meta 0 \ 502 | --no-usage-report \ 503 | --buffer-size 2048 sqlite3:///home/juicefs/myjuicefs.db 0.0.0.0:3777 504 | ``` 505 | 506 | ## systemd service Starting JuiceFS S3 Gateway 507 | 508 | Or instead of manually starting the JuiceFS S3 Gateway, use a systemd service file. 509 | 510 | Below is using private local access only. 511 | 512 | `/usr/lib/systemd/system/juicefs-gateway.service` 513 | 514 | ``` 515 | [Unit] 516 | Description=JuiceFS Gateway 517 | After=network-online.target 518 | 519 | [Service] 520 | Environment='MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE' 521 | Environment='MINIO_ROOT_PASSWORD=12345678' 522 | Type=simple 523 | WorkingDirectory=/home/juicefs 524 | ExecStart=/usr/local/bin/juicefs gateway \ 525 | --no-usage-report \ 526 | --writeback \ 527 | --cache-size 102400 \ 528 | --cache-dir /home/juicefs_cache \ 529 | --attr-cache 1 \ 530 | --entry-cache 0 \ 531 | --dir-entry-cache 1 \ 532 | --prefetch 1 \ 533 | --free-space-ratio 0.1 \ 534 | --max-uploads 20 \ 535 | --max-deletes 10 \ 536 | --backup-meta 0 \ 537 | --get-timeout 300 \ 538 | --put-timeout 900 \ 539 | --io-retries 90 \ 540 | --buffer-size 2048 \ 541 | "sqlite3:///home/juicefs/myjuicefs.db" \ 542 | localhost:3777 543 | 544 | Restart=always 545 | RestartSec=5 546 | 547 | [Install] 548 | WantedBy=default.target 549 | ``` 550 | ``` 551 | mkdir -p /etc/systemd/system/juicefs-gateway.service.d 552 | 553 | cat > "/etc/systemd/system/juicefs-gateway.service.d/openfileslimit.conf" <<EOF 554 | [Service] 555 | LimitNOFILE=524288 556 | EOF 557 | ``` 558 | 559 | Reload systemd, then enable and start the JuiceFS S3 Gateway service: 560 | 561 | ``` 562 | systemctl daemon-reload 563 | systemctl enable juicefs-gateway 564 | systemctl start juicefs-gateway 565 | systemctl status juicefs-gateway --no-pager 566 | ``` 567 | 568 | Check the JuiceFS S3 Gateway service log via journalctl: 569 | 570 | ``` 571 | journalctl -u juicefs-gateway --no-pager 572 | 573 | -- Logs begin at Wed 2022-05-25 04:26:32 UTC, end at Wed 2022-05-25 04:26:33 UTC. -- 574 | May 25 04:26:32 hostname systemd[1]: Started JuiceFS Gateway. 575 | May 25 04:26:33 hostname juicefs[26957]: 2022/05/25 04:26:33.136042 juicefs[26957] : Meta address: sqlite3:///home/juicefs/myjuicefs.db [interface.go:385] 576 | May 25 04:26:33 hostname juicefs[26957]: 2022/05/25 04:26:33.137720 juicefs[26957] : Disk cache (/home/juicefs_cache/3c874e07-a62c-42a9-ae67-5865491dd4a8/): capacity (102400 MB), free ratio (10%), max pending pages (51) [disk_cache.go:90] 577 | May 25 04:26:33 hostname juicefs[26957]: 2022/05/25 04:26:33.148871 juicefs[26957] : create session 2 OK [base.go:185] 578 | May 25 04:26:33 hostname juicefs[26957]: 2022/05/25 04:26:33.149653 juicefs[26957] : Prometheus metrics listening on 127.0.0.1:10037 [mount.go:157] 579 | May 25 04:26:33 hostname juicefs[26957]: Endpoint: http://localhost:3777 580 | May 25 04:26:33 hostname juicefs[26957]: Browser Access: 581 | May 25 04:26:33 hostname juicefs[26957]: http://localhost:3777 582 | May 25 04:26:33 hostname juicefs[26957]: Object API (Amazon S3 compatible): 583 | May 25 04:26:33 hostname juicefs[26957]: Go: https://docs.min.io/docs/golang-client-quickstart-guide 584 | May 25 04:26:33 hostname juicefs[26957]: Java: https://docs.min.io/docs/java-client-quickstart-guide 585 | May 25 04:26:33 hostname juicefs[26957]: Python: https://docs.min.io/docs/python-client-quickstart-guide 586 | May 25 04:26:33 hostname juicefs[26957]: JavaScript: https://docs.min.io/docs/javascript-client-quickstart-guide 587 | May 25 04:26:33 hostname juicefs[26957]: .NET: https://docs.min.io/docs/dotnet-client-quickstart-guide 588 | ``` 589 | 590 | # Working With Cloudflare R2 S3 Mounted Directory and JuiceFS S3 Gateway 591 | 592 | Using the AWS CLI `r2` profile to inspect the underlying JuiceFS metadata engine data. 
593 | 594 | ``` 595 | url=https://${cfaccountid}.r2.cloudflarestorage.com 596 | 597 | echo 1 > /home/juicefs_mount/file1.txt 598 | 599 | aws s3 ls --recursive --profile r2 --endpoint-url=$url s3://$cfbucketname/ 600 | 601 | 2022-05-25 04:48:46 2 myjuicefs/chunks/0/0/1_0_2 602 | 2022-05-25 04:26:25 36 myjuicefs/juicefs_uuid 603 | 2022-05-25 04:32:20 598 myjuicefs/meta/dump-2022-05-25-043219.json.gz 604 | ``` 605 | 606 | Using the AWS CLI `juicefs` profile to inspect the JuiceFS S3 Gateway. 607 | 608 | ``` 609 | aws --endpoint-url http://localhost:3777 s3 ls --recursive myjuicefs 610 | 2022-05-25 04:48:45 2 file1.txt 611 | ``` 612 | 613 | ## Mount Info 614 | 615 | ``` 616 | juicefs info /home/juicefs_mount/ 617 | /home/juicefs_mount/ : 618 | inode: 1 619 | files: 1 620 | dirs: 1 621 | length: 2 622 | size: 8192 623 | ``` 624 | 625 | ## Inspecting JuiceFS metadata engine status 626 | 627 | ``` 628 | juicefs status sqlite3:///home/juicefs/myjuicefs.db 629 | 2022/05/25 04:50:06.356669 juicefs[33155] : Meta address: sqlite3:///home/juicefs/myjuicefs.db [interface.go:385] 630 | { 631 | "Setting": { 632 | "Name": "myjuicefs", 633 | "UUID": "3c874e07-a62c-42a9-ae67-5865491dd4a8", 634 | "Storage": "s3", 635 | "Bucket": "https://juicefs.cfaccountid.r2.cloudflarestorage.com", 636 | "AccessKey": "cfaccesskey", 637 | "SecretKey": "removed", 638 | "BlockSize": 4096, 639 | "Compression": "none", 640 | "Shards": 0, 641 | "HashPrefix": false, 642 | "Capacity": 0, 643 | "Inodes": 0, 644 | "KeyEncrypted": true, 645 | "TrashDays": 1, 646 | "MetaVersion": 1, 647 | "MinClientVersion": "", 648 | "MaxClientVersion": "" 649 | }, 650 | "Sessions": [ 651 | { 652 | "Sid": 1, 653 | "Expire": "2022-05-25T04:50:59Z", 654 | "Version": "1.0.0-beta3+2022-05-05.0fb9155", 655 | "HostName": "host.domain.com", 656 | "MountPoint": "/home/juicefs_mount", 657 | "ProcessID": 26947 658 | }, 659 | { 660 | "Sid": 2, 661 | "Expire": "2022-05-25T04:51:03Z", 662 | "Version": "1.0.0-beta3+2022-05-05.0fb9155", 663 | "HostName": "host.domain.com", 664 | "MountPoint": "s3gateway", 665 | "ProcessID": 26957 666 | } 667 | ] 668 | } 669 | ``` 670 | 671 | ## Warmup Local Cache 672 | 673 | ``` 674 | juicefs warmup -p 2 /home/juicefs_mount 675 | Warmed up paths count: 1 / 1 [==============================================================] done 676 | 2022/05/25 05:29:18.497915 juicefs[43684] : Successfully warmed up 1 paths [warmup.go:209] 677 | ``` 678 | 679 | ## Check Disk Size 680 | 681 | ``` 682 | df -hT /home/juicefs_mount 683 | Filesystem Type Size Used Avail Use% Mounted on 684 | JuiceFS:myjuicefs fuse.juicefs 1.0P 4.0K 1.0P 1% /home/juicefs_mount 685 | ``` 686 | 687 | ## Metrics 688 | 689 | ``` 690 | curl -s http://localhost:9567/metrics 691 | ``` 692 | 693 | checking blockcache metrics 694 | 695 | ``` 696 | curl -s http://localhost:9567/metrics | grep blockcache | egrep -v '\#|hist' 697 | 698 | juicefs_blockcache_blocks{mp="/home/juicefs_mount",vol_name="myjuicefs"} 0 699 | juicefs_blockcache_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 0 700 | juicefs_blockcache_drops{mp="/home/juicefs_mount",vol_name="myjuicefs"} 0 701 | juicefs_blockcache_evicts{mp="/home/juicefs_mount",vol_name="myjuicefs"} 0 702 | juicefs_blockcache_hit_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 2.62144e+07 703 | juicefs_blockcache_hits{mp="/home/juicefs_mount",vol_name="myjuicefs"} 200 704 | juicefs_blockcache_miss{mp="/home/juicefs_mount",vol_name="myjuicefs"} 647 705 | juicefs_blockcache_miss_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 2.680160256e+09 
706 | juicefs_blockcache_write_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 2.173698048e+09 707 | juicefs_blockcache_writes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 712 708 | ``` 709 | 710 | filtered metrics 711 | 712 | ``` 713 | curl -s http://localhost:9567/metrics | egrep -v '\#|hist|bucket' 714 | 715 | juicefs_blockcache_blocks{mp="/home/juicefs_mount",vol_name="myjuicefs"} 0 716 | juicefs_blockcache_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 0 717 | juicefs_blockcache_drops{mp="/home/juicefs_mount",vol_name="myjuicefs"} 0 718 | juicefs_blockcache_evicts{mp="/home/juicefs_mount",vol_name="myjuicefs"} 0 719 | juicefs_blockcache_hit_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 2.62144e+07 720 | juicefs_blockcache_hits{mp="/home/juicefs_mount",vol_name="myjuicefs"} 200 721 | juicefs_blockcache_miss{mp="/home/juicefs_mount",vol_name="myjuicefs"} 647 722 | juicefs_blockcache_miss_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 2.680160256e+09 723 | juicefs_blockcache_write_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 2.173698048e+09 724 | juicefs_blockcache_writes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 712 725 | juicefs_cpu_usage{mp="/home/juicefs_mount",vol_name="myjuicefs"} 21.072261 726 | juicefs_fuse_open_handlers{mp="/home/juicefs_mount",vol_name="myjuicefs"} 0 727 | juicefs_fuse_read_size_bytes_sum{mp="/home/juicefs_mount",vol_name="myjuicefs"} 2.173698048e+09 728 | juicefs_fuse_read_size_bytes_count{mp="/home/juicefs_mount",vol_name="myjuicefs"} 16584 729 | juicefs_fuse_written_size_bytes_sum{mp="/home/juicefs_mount",vol_name="myjuicefs"} 2.173698048e+09 730 | juicefs_fuse_written_size_bytes_count{mp="/home/juicefs_mount",vol_name="myjuicefs"} 16584 731 | juicefs_go_build_info{checksum="",mp="/home/juicefs_mount",path="github.com/juicedata/juicefs",version="(devel)",vol_name="myjuicefs"} 1 732 | juicefs_go_gc_duration_seconds{mp="/home/juicefs_mount",vol_name="myjuicefs",quantile="0"} 2.4418e-05 733 | juicefs_go_gc_duration_seconds{mp="/home/juicefs_mount",vol_name="myjuicefs",quantile="0.25"} 4.3148e-05 734 | juicefs_go_gc_duration_seconds{mp="/home/juicefs_mount",vol_name="myjuicefs",quantile="0.5"} 5.6996e-05 735 | juicefs_go_gc_duration_seconds{mp="/home/juicefs_mount",vol_name="myjuicefs",quantile="0.75"} 0.000106379 736 | juicefs_go_gc_duration_seconds{mp="/home/juicefs_mount",vol_name="myjuicefs",quantile="1"} 0.000342952 737 | juicefs_go_gc_duration_seconds_sum{mp="/home/juicefs_mount",vol_name="myjuicefs"} 0.001999786 738 | juicefs_go_gc_duration_seconds_count{mp="/home/juicefs_mount",vol_name="myjuicefs"} 22 739 | juicefs_go_goroutines{mp="/home/juicefs_mount",vol_name="myjuicefs"} 62 740 | juicefs_go_info{mp="/home/juicefs_mount",version="go1.17.8",vol_name="myjuicefs"} 1 741 | juicefs_go_memstats_alloc_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 2.8662952e+07 742 | juicefs_go_memstats_alloc_bytes_total{mp="/home/juicefs_mount",vol_name="myjuicefs"} 1.377878736e+09 743 | juicefs_go_memstats_buck_hash_sys_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 1.537716e+06 744 | juicefs_go_memstats_frees_total{mp="/home/juicefs_mount",vol_name="myjuicefs"} 4.703242e+06 745 | juicefs_go_memstats_gc_cpu_fraction{mp="/home/juicefs_mount",vol_name="myjuicefs"} 1.1818653907586683e-05 746 | juicefs_go_memstats_gc_sys_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 4.8828976e+07 747 | juicefs_go_memstats_heap_alloc_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 2.8662952e+07 748 | 
juicefs_go_memstats_heap_idle_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 1.28196608e+09 749 | juicefs_go_memstats_heap_inuse_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 3.3079296e+07 750 | juicefs_go_memstats_heap_objects{mp="/home/juicefs_mount",vol_name="myjuicefs"} 53970 751 | juicefs_go_memstats_heap_released_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 1.278754816e+09 752 | juicefs_go_memstats_heap_sys_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 1.315045376e+09 753 | juicefs_go_memstats_last_gc_time_seconds{mp="/home/juicefs_mount",vol_name="myjuicefs"} 1.6535426808430629e+09 754 | juicefs_go_memstats_lookups_total{mp="/home/juicefs_mount",vol_name="myjuicefs"} 0 755 | juicefs_go_memstats_mallocs_total{mp="/home/juicefs_mount",vol_name="myjuicefs"} 4.757212e+06 756 | juicefs_go_memstats_mcache_inuse_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 9600 757 | juicefs_go_memstats_mcache_sys_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 16384 758 | juicefs_go_memstats_mspan_inuse_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 312256 759 | juicefs_go_memstats_mspan_sys_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 2.736128e+06 760 | juicefs_go_memstats_next_gc_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 5.738088e+07 761 | juicefs_go_memstats_other_sys_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 1.769556e+06 762 | juicefs_go_memstats_stack_inuse_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 1.0354688e+07 763 | juicefs_go_memstats_stack_sys_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 1.0354688e+07 764 | juicefs_go_memstats_sys_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 1.380288824e+09 765 | juicefs_go_threads{mp="/home/juicefs_mount",vol_name="myjuicefs"} 271 766 | juicefs_memory{mp="/home/juicefs_mount",vol_name="myjuicefs"} 9.64608e+07 767 | juicefs_object_request_data_bytes{method="GET",mp="/home/juicefs_mount",vol_name="myjuicefs"} 2.147483648e+09 768 | juicefs_object_request_data_bytes{method="PUT",mp="/home/juicefs_mount",vol_name="myjuicefs"} 2.205155328e+09 769 | juicefs_object_request_errors{mp="/home/juicefs_mount",vol_name="myjuicefs"} 337 770 | juicefs_process_cpu_seconds_total{mp="/home/juicefs_mount",vol_name="myjuicefs"} 21.06 771 | juicefs_process_max_fds{mp="/home/juicefs_mount",vol_name="myjuicefs"} 524288 772 | juicefs_process_open_fds{mp="/home/juicefs_mount",vol_name="myjuicefs"} 23 773 | juicefs_process_resident_memory_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 9.64608e+07 774 | juicefs_process_start_time_seconds{mp="/home/juicefs_mount",vol_name="myjuicefs"} 1.65354147984e+09 775 | juicefs_process_virtual_memory_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 2.159013888e+09 776 | juicefs_process_virtual_memory_max_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 1.8446744073709552e+19 777 | juicefs_staging_block_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 0 778 | juicefs_staging_blocks{mp="/home/juicefs_mount",vol_name="myjuicefs"} 0 779 | juicefs_store_cache_size_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 0 780 | juicefs_transaction_restart{mp="/home/juicefs_mount",vol_name="myjuicefs"} 368 781 | juicefs_uptime{mp="/home/juicefs_mount",vol_name="myjuicefs"} 1246.457965465 782 | juicefs_used_buffer_size_bytes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 7.471104e+06 783 | juicefs_used_inodes{mp="/home/juicefs_mount",vol_name="myjuicefs"} 0 784 | juicefs_used_space{mp="/home/juicefs_mount",vol_name="myjuicefs"} 0 785 | ``` 786 
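The metrics above are exposed in Prometheus exposition format (the mount client listens on 127.0.0.1:9567 and the S3 gateway on 127.0.0.1:10037, as shown in the journalctl output earlier), so they can be scraped by a local Prometheus instance. A minimal sketch, assuming Prometheus is installed with its config at `/etc/prometheus/prometheus.yml` and that the appended job lands under the existing `scrape_configs:` section:

```
# append a JuiceFS scrape job to an existing Prometheus config (paths/ports assumed)
cat >> "/etc/prometheus/prometheus.yml" <<EOF
  - job_name: 'juicefs'
    static_configs:
      - targets: ['127.0.0.1:9567', '127.0.0.1:10037']
EOF
```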
787 | # JuiceFS Benchmarks 788 | 789 | ## Sharded R2 Mount On Intel Xeon E-2276G 6C/12T, 32GB memory and 2x 960GB NVMe raid 1 790 | 791 | The server runs on 2x mismatched 960GB NVMe drives in raid 1, so factor that into the potential peak read and write performance of the resulting benchmarks: 792 | 793 | * Samsung SSD PM983 960GB 2.5 U.2 Gen 3.0 x4 PCIe NVMe 794 |   * Up to 3,000MB/s Read, 1,050MB/s Write 795 |   * 4K random read/write 400,000/40,000 IOPS 796 |   * 1366 TBW / 1.3 DWPD 797 |   * Power: 4 Watt (idle) 8.6 Watt (read) 8.1 Watt (write) 798 | * Kingston DC1500M U.2 Enterprise SSD Gen 3.0 x4 PCIe NVME 799 |   * Up to 3,100MB/s Read, 1,700MB/s Write 800 |   * Steady-state 4k read/write 440,000/150,000 IOPS 801 |   * 1681 TBW (1 DWPD/5yrs) (1.6 DWPD/3yrs) 802 |   * Power: Idle: 6.30W Average read: 6.21W Average write: 11.40W Max read: 6.60W Max write: 12.24W 803 | 804 | The tables below show a comparison between the [10x Cloudflare R2 sharded JuiceFS mount](#10x-r2-sharded-juicefs-mount) vs [5x Cloudflare R2 sharded JuiceFS mount](#5x-r2-sharded-juicefs-mount) vs [1x Cloudflare JuiceFS mount (default)](#on-intel-xeon-e-2276g-6c12t-32gb-memory-and-2x-960gb-nvme-raid-1). All R2 storage locations are with location hint North American East. 805 | 806 | For 1024MB big file size 807 | 808 | | ITEM | VALUE (10x R2 Sharded) | COST (10x R2 Sharded) | VALUE (5x R2 Sharded) | COST (5x R2 Sharded) | VALUE (1x R2 Default) | COST (1x R2 Default) | 809 | | --- | --- | --- | --- | --- | --- | --- | 810 | | Write big file | 906.04 MiB/s | 4.52 s/file | 960.47 MiB/s | 4.26 s/file | 1374.08 MiB/s | 2.98 s/file | 811 | | Read big file | 223.19 MiB/s | 18.35 s/file | 174.17 MiB/s | 23.52 s/file | 152.23 MiB/s | 26.91 s/file | 812 | | Write small file | 701.2 files/s | 5.70 ms/file | 777.4 files/s | 5.15 ms/file | 780.3 files/s | 5.13 ms/file | 813 | | Read small file | 6378.3 files/s | 0.63 ms/file | 7940.0 files/s | 0.50 ms/file | 8000.9 files/s | 0.50 ms/file | 814 | | Stat file | 21123.7 files/s | 0.19 ms/file | 29344.7 files/s | 0.14 ms/file | 27902.2 files/s | 0.14 ms/file | 815 | | FUSE operation | 71555 operations | 2.16 ms/op | 71597 operations | 2.67 ms/op | 71649 operations | 3.06 ms/op | 816 | | Update meta | 6271 operations | 9.01 ms/op | 6041 operations | 4.09 ms/op | 6057 operations | 2.50 ms/op | 817 | | Put object | 1152 operations | 403.23 ms/op | 1136 operations | 428.27 ms/op | 1106 operations | 547.32 ms/op | 818 | | Get object | 1034 operations | 278.61 ms/op | 1049 operations | 299.50 ms/op | 1030 operations | 301.80 ms/op | 819 | | Delete object | 316 operations | 124.32 ms/op | 60 operations | 120.73 ms/op | 29 operations | 234.02 ms/op | 820 | | Write into cache | 1424 operations | 24.92 ms/op | 1424 operations | 83.12 ms/op | 1424 operations | 12.91 ms/op | 821 | | Read from cache | 400 operations | 0.05 ms/op | 400 operations | 0.05 ms/op | 400 operations | 0.04 ms/op | 822 | 823 | For 1MB big file size 824 | 825 | | ITEM | VALUE (10x R2 Sharded) | COST (10x R2 Sharded) | VALUE (5x R2 Sharded) | COST (5x R2 Sharded) | VALUE (1x R2 Default) | COST (1x R2 Default) | 826 | | --- | --- | --- | --- | --- | --- | --- | 827 | | Write big file | 452.66 MiB/s | 0.01 s/file | 448.20 MiB/s | 0.01 s/file | 230.82 MiB/s | 0.02 s/file | 828 | | Read big file | 1545.95 MiB/s | 0.00 s/file | 1376.38 MiB/s | 0.00 s/file | 1276.38 MiB/s | 0.00 s/file | 829 | | Write small file | 682.8 files/s | 5.86 ms/file | 792.5 files/s | 5.05 ms/file | 675.7 files/s | 5.92 ms/file | 830 | | Read small file | 6299.4 
files/s | 0.63 ms/file | 7827.1 files/s | 0.51 ms/file | 7833.1 files/s | 0.51 ms/file | 831 | | Stat file | 21365.2 files/s | 0.19 ms/file | 24308.1 files/s | 0.16 ms/file | 28226.1 files/s | 0.14 ms/file | 832 | | FUSE operation | 5757 operations | 0.42 ms/op | 5750 operations | 0.38 ms/op | 5756 operations | 0.41 ms/op | 833 | | Update meta | 5814 operations | 0.72 ms/op | 5740 operations | 0.74 ms/op | 5770 operations | 0.70 ms/op | 834 | | Put object | 107 operations | 282.68 ms/op | 94 operations | 286.35 ms/op | 118 operations | 242.35 ms/op | 835 | | Get object | 0 operations | 0.00 ms/op | 0 operations | 0.00 ms/op | 0 operations | 0.00 ms/op | 836 | | Delete object | 133 operations | 116.84 ms/op | 59 operations | 117.93 ms/op | 95 operations | 83.94 ms/op | 837 | | Write into cache | 404 operations | 0.12 ms/op | 404 operations | 0.12 ms/op | 404 operations | 0.14 ms/op | 838 | | Read from cache | 408 operations | 0.06 ms/op | 408 operations | 0.05 ms/op | 408 operations | 0.06 ms/op | 839 | 840 | 841 | ### 10x R2 Sharded JuiceFS Mount 842 | 843 | Benchmark with the [`--shards`](https://juicefs.com/docs/community/how_to_setup_object_storage#enable-data-sharding) format option for a [sharded Cloudflare R2 mounted JuiceFS](https://juicefs.com/docs/community/how_to_setup_object_storage#enable-data-sharding) over 10x sharded R2 object storage locations - `juicefs-shard-0`, `juicefs-shard-1`, `juicefs-shard-2`, `juicefs-shard-3`, `juicefs-shard-4`, `juicefs-shard-5`, `juicefs-shard-6`, `juicefs-shard-7`, `juicefs-shard-8`, `juicefs-shard-9` - with location hint North American East. 844 | 845 | ``` 846 | cfaccountid='CF_ACCOUNT_ID' 847 | cfaccesskey='' 848 | cfsecretkey='' 849 | cfbucketname='juicefs-shard' 850 | 851 | mkdir -p /home/juicefs 852 | cd /home/juicefs 853 | 854 | juicefs format --storage s3 \ 855 | --shards 10 \ 856 | --bucket https://${cfbucketname}-%d.${cfaccountid}.r2.cloudflarestorage.com \ 857 | --access-key $cfaccesskey \ 858 | --secret-key $cfsecretkey \ 859 | --compress none \ 860 | --trash-days 0 \ 861 | --block-size 4096 \ 862 | sqlite3:///home/juicefs/myjuicefs.db myjuicefs 863 | ``` 864 | 865 | JuiceFS 10x sharded Cloudflare R2 benchmark with location hint North American East and 1024MB big file size. 866 | 867 | ``` 868 | juicefs bench -p 4 /home/juicefs_mount/ 869 | Write big blocks count: 4096 / 4096 [===========================================================] done 870 | Read big blocks count: 4096 / 4096 [===========================================================] done 871 | Write small blocks count: 400 / 400 [=============================================================] done 872 | Read small blocks count: 400 / 400 [=============================================================] done 873 | Stat small files count: 400 / 400 [=============================================================] done 874 | Benchmark finished! 
875 | BlockSize: 1 MiB, BigFileSize: 1024 MiB, SmallFileSize: 128 KiB, SmallFileCount: 100, NumThreads: 4 876 | Time used: 25.4 s, CPU: 127.8%, Memory: 1742.7 MiB 877 | +------------------+------------------+--------------+ 878 | | ITEM | VALUE | COST | 879 | +------------------+------------------+--------------+ 880 | | Write big file | 906.04 MiB/s | 4.52 s/file | 881 | | Read big file | 223.19 MiB/s | 18.35 s/file | 882 | | Write small file | 701.2 files/s | 5.70 ms/file | 883 | | Read small file | 6378.3 files/s | 0.63 ms/file | 884 | | Stat file | 21123.7 files/s | 0.19 ms/file | 885 | | FUSE operation | 71555 operations | 2.16 ms/op | 886 | | Update meta | 6271 operations | 9.01 ms/op | 887 | | Put object | 1152 operations | 403.23 ms/op | 888 | | Get object | 1034 operations | 278.61 ms/op | 889 | | Delete object | 316 operations | 124.32 ms/op | 890 | | Write into cache | 1424 operations | 24.92 ms/op | 891 | | Read from cache | 400 operations | 0.05 ms/op | 892 | +------------------+------------------+--------------+ 893 | ``` 894 | 895 | JuiceFS 10x sharded Cloudflare R2 benchmark with location hint North American East and 1MB big file size. 896 | 897 | ``` 898 | juicefs bench -p 4 /home/juicefs_mount/ --big-file-size 1 899 | Write big blocks count: 4 / 4 [==============================================================] done 900 | Read big blocks count: 4 / 4 [==============================================================] done 901 | Write small blocks count: 400 / 400 [=============================================================] done 902 | Read small blocks count: 400 / 400 [=============================================================] done 903 | Stat small files count: 400 / 400 [=============================================================] done 904 | Benchmark finished! 905 | BlockSize: 1 MiB, BigFileSize: 1 MiB, SmallFileSize: 128 KiB, SmallFileCount: 100, NumThreads: 4 906 | Time used: 1.7 s, CPU: 97.8%, Memory: 1946.3 MiB 907 | +------------------+-----------------+--------------+ 908 | | ITEM | VALUE | COST | 909 | +------------------+-----------------+--------------+ 910 | | Write big file | 452.66 MiB/s | 0.01 s/file | 911 | | Read big file | 1545.95 MiB/s | 0.00 s/file | 912 | | Write small file | 682.8 files/s | 5.86 ms/file | 913 | | Read small file | 6299.4 files/s | 0.63 ms/file | 914 | | Stat file | 21365.2 files/s | 0.19 ms/file | 915 | | FUSE operation | 5757 operations | 0.42 ms/op | 916 | | Update meta | 5814 operations | 0.72 ms/op | 917 | | Put object | 107 operations | 282.68 ms/op | 918 | | Get object | 0 operations | 0.00 ms/op | 919 | | Delete object | 133 operations | 116.84 ms/op | 920 | | Write into cache | 404 operations | 0.12 ms/op | 921 | | Read from cache | 408 operations | 0.06 ms/op | 922 | +------------------+-----------------+--------------+ 923 | ``` 924 | 925 | Inspecting actual JuiceFS mount 926 | 927 | ``` 928 | ls -lah /home/juicefs_mount 929 | total 9.5K 930 | drwxrwxrwx 2 root root 4.0K May 24 21:09 . 931 | drwxr-xr-x. 13 root root 4.0K May 21 18:58 .. 
932 | -r-------- 1 root root 0 May 24 21:04 .accesslog 933 | -r-------- 1 root root 1.3K May 24 21:04 .config 934 | -r--r--r-- 1 root root 0 May 24 21:04 .stats 935 | dr-xr-xr-x 2 root root 0 May 24 21:04 .trash 936 | 937 | ``` 938 | 939 | JuiceFS mount stats 940 | 941 | ``` 942 | cat /home/juicefs_mount/.stats 943 | juicefs_blockcache_blocks 0 944 | juicefs_blockcache_bytes 0 945 | juicefs_blockcache_drops 0 946 | juicefs_blockcache_evicts 0 947 | juicefs_blockcache_hit_bytes 436207616 948 | juicefs_blockcache_hits 3232 949 | juicefs_blockcache_miss 4241 950 | juicefs_blockcache_miss_bytes 17720934400 951 | juicefs_blockcache_read_hist_seconds_total 3232 952 | juicefs_blockcache_read_hist_seconds_sum 0.16809886599999965 953 | juicefs_blockcache_write_bytes 17616076800 954 | juicefs_blockcache_write_hist_seconds_total 7312 955 | juicefs_blockcache_write_hist_seconds_sum 391.8615498160001 956 | juicefs_blockcache_writes 7312 957 | juicefs_compact_size_histogram_bytes_total 0 958 | juicefs_compact_size_histogram_bytes_sum 0 959 | juicefs_cpu_usage 141.95404200000002 960 | juicefs_fuse_open_handlers 1 961 | juicefs_fuse_ops_durations_histogram_seconds_total 310446 962 | juicefs_fuse_ops_durations_histogram_seconds_sum 897.3332114739511 963 | juicefs_fuse_read_size_bytes_total 134400 964 | juicefs_fuse_read_size_bytes_sum 17616076800 965 | juicefs_fuse_written_size_bytes_total 134400 966 | juicefs_fuse_written_size_bytes_sum 17616076800 967 | juicefs_go_build_info__github.com/juicedata/juicefs_(devel) 1 968 | juicefs_go_goroutines 79 969 | juicefs_go_info_go1.19.2 1 970 | juicefs_go_memstats_alloc_bytes 32336136 971 | juicefs_go_memstats_alloc_bytes_total 2939952080 972 | juicefs_go_memstats_buck_hash_sys_bytes 1805266 973 | juicefs_go_memstats_frees_total 35324415 974 | juicefs_go_memstats_gc_cpu_fraction 0.000014170426577562373 975 | juicefs_go_memstats_gc_sys_bytes 84006704 976 | juicefs_go_memstats_heap_alloc_bytes 32336136 977 | juicefs_go_memstats_heap_idle_bytes 1917329408 978 | juicefs_go_memstats_heap_inuse_bytes 37642240 979 | juicefs_go_memstats_heap_objects 61603 980 | juicefs_go_memstats_heap_released_bytes 1916534784 981 | juicefs_go_memstats_heap_sys_bytes 1954971648 982 | juicefs_go_memstats_last_gc_time_seconds 1684981564.030393 983 | juicefs_go_memstats_lookups_total 0 984 | juicefs_go_memstats_mallocs_total 35386018 985 | juicefs_go_memstats_mcache_inuse_bytes 14400 986 | juicefs_go_memstats_mcache_sys_bytes 15600 987 | juicefs_go_memstats_mspan_inuse_bytes 357136 988 | juicefs_go_memstats_mspan_sys_bytes 10918080 989 | juicefs_go_memstats_next_gc_bytes 64394520 990 | juicefs_go_memstats_other_sys_bytes 2965030 991 | juicefs_go_memstats_stack_inuse_bytes 12156928 992 | juicefs_go_memstats_stack_sys_bytes 12156928 993 | juicefs_go_memstats_sys_bytes 2066839256 994 | juicefs_go_threads 278 995 | juicefs_memory 154210304 996 | juicefs_meta_ops_durations_histogram_seconds_total 33621 997 | juicefs_meta_ops_durations_histogram_seconds_sum 304.82784600500264 998 | juicefs_object_request_data_bytes_GET 17234395136 999 | juicefs_object_request_data_bytes_PUT 17573871616 1000 | juicefs_object_request_durations_histogram_seconds_DELETE_total 7312 1001 | juicefs_object_request_durations_histogram_seconds_DELETE_sum 894.7742219350012 1002 | juicefs_object_request_durations_histogram_seconds_GET_total 4225 1003 | juicefs_object_request_durations_histogram_seconds_GET_sum 1226.6492657670003 1004 | juicefs_object_request_durations_histogram_seconds_PUT_total 6866 1005 | 
juicefs_object_request_durations_histogram_seconds_PUT_sum 2722.8829050600116 1006 | juicefs_object_request_errors 499 1007 | juicefs_process_cpu_seconds_total 141.95 1008 | juicefs_process_max_fds 524288 1009 | juicefs_process_open_fds 15 1010 | juicefs_process_resident_memory_bytes 154210304 1011 | juicefs_process_start_time_seconds 1684980243.38 1012 | juicefs_process_virtual_memory_bytes 2880843776 1013 | juicefs_process_virtual_memory_max_bytes 18446744073709552000 1014 | juicefs_staging_block_bytes 0 1015 | juicefs_staging_blocks 0 1016 | juicefs_store_cache_size_bytes 0 1017 | juicefs_transaction_durations_histogram_seconds_total 51442 1018 | juicefs_transaction_durations_histogram_seconds_sum 140.4373622320012 1019 | juicefs_transaction_restart 0 1020 | juicefs_uptime 1416.787316108 1021 | juicefs_used_buffer_size_bytes 8388608 1022 | juicefs_used_inodes 0 1023 | juicefs_used_space 0 1024 | ``` 1025 | 1026 | ### 5x R2 Sharded JuiceFS Mount 1027 | 1028 | Benchmark with the [`--shards`](https://juicefs.com/docs/community/how_to_setup_object_storage#enable-data-sharding) format option for a [sharded Cloudflare R2 mounted JuiceFS](https://juicefs.com/docs/community/how_to_setup_object_storage#enable-data-sharding) over 5x sharded R2 object storage locations - `juicefs-shard-0`, `juicefs-shard-1`, `juicefs-shard-2`, `juicefs-shard-3`, and `juicefs-shard-4` - with location hint North American East. 1029 | 1030 | ``` 1031 | cfaccountid='CF_ACCOUNT_ID' 1032 | cfaccesskey='' 1033 | cfsecretkey='' 1034 | cfbucketname='juicefs-shard' 1035 | 1036 | mkdir -p /home/juicefs 1037 | cd /home/juicefs 1038 | 1039 | juicefs format --storage s3 \ 1040 | --shards 5 \ 1041 | --bucket https://${cfbucketname}-%d.${cfaccountid}.r2.cloudflarestorage.com \ 1042 | --access-key $cfaccesskey \ 1043 | --secret-key $cfsecretkey \ 1044 | --compress none \ 1045 | --trash-days 0 \ 1046 | --block-size 4096 \ 1047 | sqlite3:///home/juicefs/myjuicefs.db myjuicefs 1048 | ``` 1049 | 1050 | output 1051 | 1052 | ``` 1053 | 2023/05/24 17:45:14.116161 juicefs[3701895] : Meta address: sqlite3:///home/juicefs/myjuicefs.db [interface.go:401] 1054 | 2023/05/24 17:45:14.117248 juicefs[3701895] : Data use shard5://s3://juicefs-shard-0/myjuicefs/ [format.go:434] 1055 | 2023/05/24 17:45:18.423901 juicefs[3701895] : Can't list s3://juicefs-shard-0/: InvalidMaxKeys: MaxKeys params must be positive integer <= 1000. 1056 | status code: 400, request id: , host id: [sharding.go:85] 1057 | 2023/05/24 17:45:18.423955 juicefs[3701895] : List storage shard5://s3://juicefs-shard-0/myjuicefs/ failed: list s3://juicefs-shard-0/: InvalidMaxKeys: MaxKeys params must be positive integer <= 1000. 
1058 | status code: 400, request id: , host id: [format.go:452] 1059 | 2023/05/24 17:45:18.709793 juicefs[3701895] : Volume is formatted as { 1060 | "Name": "myjuicefs", 1061 | "UUID": "UUID-UUID-UUID-UUID", 1062 | "Storage": "s3", 1063 | "Bucket": "https://juicefs-shard-%d.CF_ACCOUNT_ID.r2.cloudflarestorage.com", 1064 | "AccessKey": "CF_ACCESS_KEY", 1065 | "SecretKey": "removed", 1066 | "BlockSize": 4096, 1067 | "Compression": "none", 1068 | "Shards": 5, 1069 | "KeyEncrypted": true, 1070 | "MetaVersion": 1 1071 | } [format.go:471] 1072 | ``` 1073 | 1074 | JuiceFS mount info 1075 | 1076 | ``` 1077 | juicefs info /home/juicefs_mount/ 1078 | /home/juicefs_mount/ : 1079 | inode: 1 1080 | files: 0 1081 | dirs: 1 1082 | length: 0 Bytes 1083 | size: 4.00 KiB (4096 Bytes) 1084 | path: / 1085 | ``` 1086 | 1087 | JuiceFS 5x sharded Cloudflare R2 benchmark with location hint North American East and 1024MB big file size. 1088 | 1089 | ``` 1090 | juicefs bench -p 4 /home/juicefs_mount/ 1091 | Write big blocks count: 4096 / 4096 [===========================================================] done 1092 | Read big blocks count: 4096 / 4096 [===========================================================] done 1093 | Write small blocks count: 400 / 400 [=============================================================] done 1094 | Read small blocks count: 400 / 400 [=============================================================] done 1095 | Stat small files count: 400 / 400 [=============================================================] done 1096 | Benchmark finished! 1097 | BlockSize: 1 MiB, BigFileSize: 1024 MiB, SmallFileSize: 128 KiB, SmallFileCount: 100, NumThreads: 4 1098 | Time used: 30.2 s, CPU: 103.5%, Memory: 1364.5 MiB 1099 | +------------------+------------------+--------------+ 1100 | | ITEM | VALUE | COST | 1101 | +------------------+------------------+--------------+ 1102 | | Write big file | 960.47 MiB/s | 4.26 s/file | 1103 | | Read big file | 174.17 MiB/s | 23.52 s/file | 1104 | | Write small file | 777.4 files/s | 5.15 ms/file | 1105 | | Read small file | 7940.0 files/s | 0.50 ms/file | 1106 | | Stat file | 29344.7 files/s | 0.14 ms/file | 1107 | | FUSE operation | 71597 operations | 2.67 ms/op | 1108 | | Update meta | 6041 operations | 4.09 ms/op | 1109 | | Put object | 1136 operations | 428.27 ms/op | 1110 | | Get object | 1049 operations | 299.50 ms/op | 1111 | | Delete object | 60 operations | 120.73 ms/op | 1112 | | Write into cache | 1424 operations | 83.12 ms/op | 1113 | | Read from cache | 400 operations | 0.05 ms/op | 1114 | +------------------+------------------+--------------+ 1115 | ``` 1116 | 1117 | JuiceFS sharded Cloudflare R2 benchmark with location hint North American East and 1MB big file size. 1118 | 1119 | ``` 1120 | juicefs bench -p 4 /home/juicefs_mount/ --big-file-size 1 1121 | Write big blocks count: 4 / 4 [==============================================================] done 1122 | Read big blocks count: 4 / 4 [==============================================================] done 1123 | Write small blocks count: 400 / 400 [=============================================================] done 1124 | Read small blocks count: 400 / 400 [=============================================================] done 1125 | Stat small files count: 400 / 400 [=============================================================] done 1126 | Benchmark finished! 
1127 | BlockSize: 1 MiB, BigFileSize: 1 MiB, SmallFileSize: 128 KiB, SmallFileCount: 100, NumThreads: 4 1128 | Time used: 1.6 s, CPU: 102.4%, Memory: 164.9 MiB 1129 | +------------------+-----------------+--------------+ 1130 | | ITEM | VALUE | COST | 1131 | +------------------+-----------------+--------------+ 1132 | | Write big file | 448.20 MiB/s | 0.01 s/file | 1133 | | Read big file | 1376.38 MiB/s | 0.00 s/file | 1134 | | Write small file | 792.5 files/s | 5.05 ms/file | 1135 | | Read small file | 7827.1 files/s | 0.51 ms/file | 1136 | | Stat file | 24308.1 files/s | 0.16 ms/file | 1137 | | FUSE operation | 5750 operations | 0.38 ms/op | 1138 | | Update meta | 5740 operations | 0.74 ms/op | 1139 | | Put object | 94 operations | 286.35 ms/op | 1140 | | Get object | 0 operations | 0.00 ms/op | 1141 | | Delete object | 59 operations | 117.93 ms/op | 1142 | | Write into cache | 404 operations | 0.12 ms/op | 1143 | | Read from cache | 408 operations | 0.05 ms/op | 1144 | +------------------+-----------------+--------------+ 1145 | ``` 1146 | 1147 | Inspecting Cloudflare R2 sharded storage buckets after JuiceFS benchmark run with location hint North American East 1148 | 1149 | ``` 1150 | aws s3 ls --recursive --profile r2 --endpoint-url=$url s3://juicefs-shard-0 1151 | 2023-05-24 18:46:01 131072 myjuicefs/chunks/0/0/980_0_131072 1152 | 2023-05-24 18:46:03 131072 myjuicefs/chunks/0/1/1146_0_131072 1153 | 2023-05-24 18:46:30 131072 myjuicefs/chunks/0/1/1540_0_131072 1154 | 1155 | aws s3 ls --recursive --profile r2 --endpoint-url=$url s3://juicefs-shard-1 1156 | 2023-05-24 18:46:03 131072 myjuicefs/chunks/0/1/1154_0_131072 1157 | 2023-05-24 18:46:29 131072 myjuicefs/chunks/0/1/1386_0_131072 1158 | 2023-05-24 18:46:31 131072 myjuicefs/chunks/0/1/1688_0_131072 1159 | 2023-05-24 17:45:18 36 myjuicefs/juicefs_uuid 1160 | 1161 | aws s3 ls --recursive --profile r2 --endpoint-url=$url s3://juicefs-shard-2 1162 | 2023-05-24 17:52:09 131072 myjuicefs/chunks/0/0/574_0_131072 1163 | 2023-05-24 18:46:01 131072 myjuicefs/chunks/0/1/1000_0_131072 1164 | 2023-05-24 18:46:03 131072 myjuicefs/chunks/0/1/1142_0_131072 1165 | 1166 | aws s3 ls --recursive --profile r2 --endpoint-url=$url s3://juicefs-shard-3 1167 | 2023-05-24 18:46:03 131072 myjuicefs/chunks/0/1/1130_0_131072 1168 | 2023-05-24 18:46:03 131072 myjuicefs/chunks/0/1/1150_0_131072 1169 | 2023-05-24 18:46:05 131072 myjuicefs/chunks/0/1/1226_0_131072 1170 | 2023-05-24 18:46:28 131072 myjuicefs/chunks/0/1/1382_0_131072 1171 | 2023-05-24 18:46:30 131072 myjuicefs/chunks/0/1/1532_0_131072 1172 | 2023-05-24 18:46:30 131072 myjuicefs/chunks/0/1/1552_0_131072 1173 | 2023-05-24 18:46:31 131072 myjuicefs/chunks/0/1/1560_0_131072 1174 | 2023-05-24 18:46:30 131072 myjuicefs/chunks/0/1/1564_0_131072 1175 | 2023-05-24 18:46:31 131072 myjuicefs/chunks/0/1/1568_0_131072 1176 | 2023-05-24 18:46:32 131072 myjuicefs/chunks/0/1/1728_0_131072 1177 | 2023-05-24 17:53:44 581 myjuicefs/meta/dump-2023-05-24-225343.json.gz 1178 | 1179 | aws s3 ls --recursive --profile r2 --endpoint-url=$url s3://juicefs-shard-4 1180 | 2023-05-24 18:46:01 131072 myjuicefs/chunks/0/0/988_0_131072 1181 | 2023-05-24 18:46:03 131072 myjuicefs/chunks/0/1/1134_0_131072 1182 | 2023-05-24 18:46:03 131072 myjuicefs/chunks/0/1/1138_0_131072 1183 | 2023-05-24 18:46:28 131072 myjuicefs/chunks/0/1/1390_0_131072 1184 | 2023-05-24 18:46:28 131072 myjuicefs/chunks/0/1/1394_0_131072 1185 | 2023-05-24 18:46:30 131072 myjuicefs/chunks/0/1/1556_0_131072 1186 | ``` 1187 | 1188 | ### fio sharded Cloudflare R2 test 
for E-2276G server with location hint North American East 1189 | 1190 | fio Sequential Write 1191 | ``` 1192 | mkdir -p /home/juicefs_mount/fio 1193 | 1194 | fio --name=sequential-write --directory=/home/juicefs_mount/fio --rw=write --refill_buffers --bs=4M --size=1G --end_fsync=1 1195 | sequential-write: (g=0): rw=write, bs=(R) 4096KiB-4096KiB, (W) 4096KiB-4096KiB, (T) 4096KiB-4096KiB, ioengine=psync, iodepth=1 1196 | fio-3.19 1197 | Starting 1 process 1198 | sequential-write: Laying out IO file (1 file / 1024MiB) 1199 | Jobs: 1 (f=1) 1200 | sequential-write: (groupid=0, jobs=1): err= 0: pid=3704701: Wed May 24 19:01:25 2023 1201 | write: IOPS=279, BW=1119MiB/s (1173MB/s)(1024MiB/915msec); 0 zone resets 1202 | clat (usec): min=2221, max=7356, avg=2961.60, stdev=807.86 1203 | lat (usec): min=2222, max=7357, avg=2962.43, stdev=808.05 1204 | clat percentiles (usec): 1205 | | 1.00th=[ 2245], 5.00th=[ 2311], 10.00th=[ 2376], 20.00th=[ 2442], 1206 | | 30.00th=[ 2540], 40.00th=[ 2638], 50.00th=[ 2704], 60.00th=[ 2802], 1207 | | 70.00th=[ 2966], 80.00th=[ 3163], 90.00th=[ 4424], 95.00th=[ 4948], 1208 | | 99.00th=[ 5735], 99.50th=[ 6718], 99.90th=[ 7373], 99.95th=[ 7373], 1209 | | 99.99th=[ 7373] 1210 | bw ( MiB/s): min= 1067, max= 1067, per=95.35%, avg=1067.08, stdev= 0.00, samples=1 1211 | iops : min= 266, max= 266, avg=266.00, stdev= 0.00, samples=1 1212 | lat (msec) : 4=89.84%, 10=10.16% 1213 | cpu : usr=16.19%, sys=38.95%, ctx=8195, majf=0, minf=9 1214 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 1215 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 1216 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 1217 | issued rwts: total=0,256,0,0 short=0,0,0,0 dropped=0,0,0,0 1218 | latency : target=0, window=0, percentile=100.00%, depth=1 1219 | 1220 | Run status group 0 (all jobs): 1221 | WRITE: bw=1119MiB/s (1173MB/s), 1119MiB/s-1119MiB/s (1173MB/s-1173MB/s), io=1024MiB (1074MB), run=915-915msec 1222 | ``` 1223 | ``` 1224 | ls -lah /home/juicefs_mount/fio 1225 | total 1.1G 1226 | drwxr-xr-x 2 root root 4.0K May 24 19:01 . 1227 | drwxrwxrwx 3 root root 4.0K May 24 19:01 .. 1228 | -rw-r--r-- 1 root root 1.0G May 24 19:01 sequential-write.0.0 1229 | ``` 1230 | ``` 1231 | juicefs warmup -p 4 /home/juicefs_mount/fio 1232 | Warming up count: 5 0.06/s 1233 | Warming up bytes: 5.00 GiB (5368709120 Bytes) 57.32 MiB/s 1234 | 2023/05/24 19:37:02.236625 juicefs[3705549] : Successfully warmed up 5 files (5368709120 bytes) [warmup.go:233] 1235 | ``` 1236 | 1237 | fio Sequential Read 1238 | 1239 | ``` 1240 | fio --name=sequential-read --directory=/home/juicefs_mount/fio --rw=read --refill_buffers --bs=4M --size=1G --numjobs=4 1241 | sequential-read: (g=0): rw=read, bs=(R) 4096KiB-4096KiB, (W) 4096KiB-4096KiB, (T) 4096KiB-4096KiB, ioengine=psync, iodepth=1 1242 | ... 
1243 | fio-3.19 1244 | Starting 4 processes 1245 | Jobs: 4 (f=4): [R(4)][-.-%][r=2270MiB/s][r=567 IOPS][eta 00m:00s] 1246 | sequential-read: (groupid=0, jobs=1): err= 0: pid=3705616: Wed May 24 19:37:25 2023 1247 | read: IOPS=132, BW=532MiB/s (557MB/s)(1024MiB/1926msec) 1248 | clat (usec): min=2368, max=15013, avg=7167.80, stdev=1697.61 1249 | lat (usec): min=2368, max=15013, avg=7169.52, stdev=1697.67 1250 | clat percentiles (usec): 1251 | | 1.00th=[ 2540], 5.00th=[ 5473], 10.00th=[ 5735], 20.00th=[ 6063], 1252 | | 30.00th=[ 6390], 40.00th=[ 6652], 50.00th=[ 6915], 60.00th=[ 7242], 1253 | | 70.00th=[ 7504], 80.00th=[ 7898], 90.00th=[ 9110], 95.00th=[10421], 1254 | | 99.00th=[13304], 99.50th=[13829], 99.90th=[15008], 99.95th=[15008], 1255 | | 99.99th=[15008] 1256 | bw ( KiB/s): min=457227, max=573440, per=24.57%, avg=534320.67, stdev=66767.53, samples=3 1257 | iops : min= 111, max= 140, avg=130.00, stdev=16.46, samples=3 1258 | lat (msec) : 4=2.34%, 10=92.19%, 20=5.47% 1259 | cpu : usr=0.52%, sys=62.55%, ctx=3056, majf=0, minf=1036 1260 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 1261 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 1262 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 1263 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0 1264 | latency : target=0, window=0, percentile=100.00%, depth=1 1265 | sequential-read: (groupid=0, jobs=1): err= 0: pid=3705617: Wed May 24 19:37:25 2023 1266 | read: IOPS=132, BW=531MiB/s (557MB/s)(1024MiB/1929msec) 1267 | clat (usec): min=1536, max=18497, avg=7181.80, stdev=1753.73 1268 | lat (usec): min=1536, max=18500, avg=7183.40, stdev=1753.80 1269 | clat percentiles (usec): 1270 | | 1.00th=[ 2343], 5.00th=[ 5211], 10.00th=[ 5669], 20.00th=[ 6063], 1271 | | 30.00th=[ 6456], 40.00th=[ 6718], 50.00th=[ 7046], 60.00th=[ 7373], 1272 | | 70.00th=[ 7701], 80.00th=[ 8225], 90.00th=[ 8979], 95.00th=[10552], 1273 | | 99.00th=[12518], 99.50th=[12649], 99.90th=[18482], 99.95th=[18482], 1274 | | 99.99th=[18482] 1275 | bw ( KiB/s): min=450877, max=572295, per=24.23%, avg=526742.67, stdev=66141.94, samples=3 1276 | iops : min= 110, max= 139, avg=128.33, stdev=15.95, samples=3 1277 | lat (msec) : 2=0.78%, 4=2.34%, 10=91.41%, 20=5.47% 1278 | cpu : usr=0.47%, sys=62.14%, ctx=3051, majf=0, minf=1037 1279 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 1280 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 1281 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 1282 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0 1283 | latency : target=0, window=0, percentile=100.00%, depth=1 1284 | sequential-read: (groupid=0, jobs=1): err= 0: pid=3705618: Wed May 24 19:37:25 2023 1285 | read: IOPS=133, BW=536MiB/s (562MB/s)(1024MiB/1911msec) 1286 | clat (usec): min=4751, max=13813, avg=7109.46, stdev=1330.79 1287 | lat (usec): min=4754, max=13815, avg=7111.26, stdev=1330.78 1288 | clat percentiles (usec): 1289 | | 1.00th=[ 5014], 5.00th=[ 5342], 10.00th=[ 5800], 20.00th=[ 6128], 1290 | | 30.00th=[ 6390], 40.00th=[ 6652], 50.00th=[ 6849], 60.00th=[ 7111], 1291 | | 70.00th=[ 7439], 80.00th=[ 7832], 90.00th=[ 8586], 95.00th=[ 9503], 1292 | | 99.00th=[12125], 99.50th=[12518], 99.90th=[13829], 99.95th=[13829], 1293 | | 99.99th=[13829] 1294 | bw ( KiB/s): min=476279, max=589824, per=25.24%, avg=548858.00, stdev=63028.99, samples=3 1295 | iops : min= 116, max= 144, avg=133.67, stdev=15.37, samples=3 1296 
| lat (msec) : 10=96.48%, 20=3.52% 1297 | cpu : usr=0.63%, sys=64.08%, ctx=3023, majf=0, minf=1036 1298 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 1299 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 1300 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 1301 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0 1302 | latency : target=0, window=0, percentile=100.00%, depth=1 1303 | sequential-read: (groupid=0, jobs=1): err= 0: pid=3705619: Wed May 24 19:37:25 2023 1304 | read: IOPS=134, BW=536MiB/s (562MB/s)(1024MiB/1910msec) 1305 | clat (usec): min=4812, max=13160, avg=7107.62, stdev=1252.07 1306 | lat (usec): min=4814, max=13163, avg=7109.17, stdev=1252.09 1307 | clat percentiles (usec): 1308 | | 1.00th=[ 4883], 5.00th=[ 5473], 10.00th=[ 5669], 20.00th=[ 6063], 1309 | | 30.00th=[ 6456], 40.00th=[ 6652], 50.00th=[ 6980], 60.00th=[ 7242], 1310 | | 70.00th=[ 7635], 80.00th=[ 7963], 90.00th=[ 8586], 95.00th=[ 9503], 1311 | | 99.00th=[11469], 99.50th=[11731], 99.90th=[13173], 99.95th=[13173], 1312 | | 99.99th=[13173] 1313 | bw ( KiB/s): min=476279, max=598016, per=25.24%, avg=548863.33, stdev=64161.96, samples=3 1314 | iops : min= 116, max= 146, avg=133.67, stdev=15.70, samples=3 1315 | lat (msec) : 10=96.88%, 20=3.12% 1316 | cpu : usr=0.31%, sys=63.75%, ctx=3115, majf=0, minf=1036 1317 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 1318 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 1319 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 1320 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0 1321 | latency : target=0, window=0, percentile=100.00%, depth=1 1322 | 1323 | Run status group 0 (all jobs): 1324 | READ: bw=2123MiB/s (2227MB/s), 531MiB/s-536MiB/s (557MB/s-562MB/s), io=4096MiB (4295MB), run=1910-1929msec 1325 | ``` 1326 | 1327 | directory fio test 1328 | 1329 | ``` 1330 | ls -lah /home/juicefs_mount/fio 1331 | total 5.1G 1332 | drwxr-xr-x 2 root root 4.0K May 24 19:08 . 1333 | drwxrwxrwx 3 root root 4.0K May 24 19:01 .. 1334 | -rw-r--r-- 1 root root 1.0G May 24 19:08 sequential-read.0.0 1335 | -rw-r--r-- 1 root root 1.0G May 24 19:08 sequential-read.1.0 1336 | -rw-r--r-- 1 root root 1.0G May 24 19:08 sequential-read.2.0 1337 | -rw-r--r-- 1 root root 1.0G May 24 19:08 sequential-read.3.0 1338 | -rw-r--r-- 1 root root 1.0G May 24 19:01 sequential-write.0.0 1339 | ``` 1340 | 1341 | Checking the S3 Gateway 1342 | 1343 | ``` 1344 | aws --endpoint-url http://localhost:3777 s3 ls --recursive myjuicefs 1345 | 1346 | 2023-05-24 19:08:04 1073741824 fio/sequential-read.0.0 1347 | 2023-05-24 19:08:09 1073741824 fio/sequential-read.1.0 1348 | 2023-05-24 19:08:07 1073741824 fio/sequential-read.2.0 1349 | 2023-05-24 19:08:05 1073741824 fio/sequential-read.3.0 1350 | 2023-05-24 19:01:24 1073741824 fio/sequential-write.0.0 1351 | ``` 1352 | 1353 | ## Redis Metadata Cache + Sharded R2 Mount On Intel Xeon E-2276G 6C/12T, 32GB memory and 2x 960GB NVMe raid 1 1354 | 1355 | Switched from local [sqlite3 metadata caching](https://juicefs.com/docs/community/databases_for_metadata#sqlite) to [Redis server metadata caching](https://juicefs.com/docs/community/databases_for_metadata#redis) with 10x sharded R2 object storage buckets and location hint North American East. 
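If existing metadata needs to move from sqlite3 over to Redis rather than formatting a fresh volume, JuiceFS supports dump/load migration between metadata engines. A minimal sketch, assuming a pre-existing sqlite3 volume at `/home/juicefs/myjuicefs.db` and the Redis instance that is set up below; it's safest to do this while the volume is unmounted so no writes land in the old engine mid-copy:

```
# export metadata from the old sqlite3 metadata engine
juicefs dump sqlite3:///home/juicefs/myjuicefs.db meta-migrate.json

# import it into the new Redis metadata engine (database 1 on port 6479)
juicefs load redis://:password@localhost:6479/1 meta-migrate.json
```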
On a Centmin Mod LEMP stack server, rather than using the default Redis server on port 6379, which may be used by other applications in a caching capacity, set up a separate Redis server on port 6479 using [`redis-generator.sh`](https://github.com/centminmod/centminmod-redis).

```
pushd /root/tools
git clone https://github.com/centminmod/centminmod-redis
cd centminmod-redis
```

`redis-generator.sh` options

```
./redis-generator.sh

* Usage: where X equal postive integer for number of redis
         servers to create with incrementing TCP redis ports
         starting at STARTPORT=6479.
* prep - standalone prep command installs redis-cluster-tool
* prepupdate - standalone prep update command updates redis-cluster-tool
* multi X - no. of standalone redis instances to create
* multi-cache X - no. of standalone redis instances + disable ondisk persistence
* clusterprep X - no. of cluster enabled config instances
* clustermake 6 - to enable cluster mode + create cluster
* clustermake 9 - flag to enable cluster mode + create cluster
* replication X - create redis replication
* replication X 6579 - create replication with custom start port 6579
* replication-cache X - create redis replication + disable ondisk persistence
* replication-cache X 6579 - create replication with custom start port 6579
* delete X - no. of redis instances to delete
* delete X 6579 - no. of redis instances to delete + custom start port 6579

./redis-generator.sh prep
./redis-generator.sh prepupdate
./redis-generator.sh multi X
./redis-generator.sh multi-cache X
./redis-generator.sh clusterprep X
./redis-generator.sh clustermake 6
./redis-generator.sh clustermake 9
./redis-generator.sh replication X
./redis-generator.sh replication X 6579
./redis-generator.sh replication-cache X
./redis-generator.sh replication-cache X 6579
./redis-generator.sh delete X
./redis-generator.sh delete X 6579
```

Create two Redis servers on ports 6479 and 6480 by first editing the script to set `DEBUG_REDISGEN='n'`, then running the multi command:

```
./redis-generator.sh multi 2
```

Running it outputs the following and creates:

* /etc/redis6479/redis6479.conf config file
* /etc/redis6480/redis6480.conf config file

```
./redis-generator.sh multi 2

Creating redis servers starting at TCP = 6479...
-------------------------------------------------------
creating redis server: redis6479.service [increment value: 0]
redis TCP port: 6479
create systemd redis6479.service
cp -a /usr/lib/systemd/system/redis.service /usr/lib/systemd/system/redis6479.service
create /etc/redis6479/redis6479.conf config file
mkdir -p /etc/redis6479
cp -a /etc/redis/redis.conf /etc/redis6479/redis6479.conf
-rw-r----- 1 redis root 92K Apr 18 00:42 /etc/redis6479/redis6479.conf
-rw-r--r-- 1 root root 474 May 25 02:32 /usr/lib/systemd/system/redis6479.service
Created symlink /etc/systemd/system/multi-user.target.wants/redis6479.service → /usr/lib/systemd/system/redis6479.service.
Note: Forwarding request to 'systemctl enable redis6479.service'.
1429 | ## Redis TCP 6479 Info ## 1430 | redis_version:6.2.12 1431 | redis_mode:standalone 1432 | process_id:3723969 1433 | tcp_port:6479 1434 | uptime_in_seconds:0 1435 | uptime_in_days:0 1436 | executable:/etc/redis6479/redis-server 1437 | config_file:/etc/redis6479/redis6479.conf 1438 | ------------------------------------------------------- 1439 | creating redis server: redis6480.service [increment value: 1] 1440 | redis TCP port: 6480 1441 | create systemd redis6480.service 1442 | cp -a /usr/lib/systemd/system/redis.service /usr/lib/systemd/system/redis6480.service 1443 | create /etc/redis6480/redis6480.conf config file 1444 | mkdir -p /etc/redis6480 1445 | cp -a /etc/redis/redis.conf /etc/redis6480/redis6480.conf 1446 | -rw-r----- 1 redis root 92K Apr 18 00:42 /etc/redis6480/redis6480.conf 1447 | -rw-r--r-- 1 root root 474 May 25 02:32 /usr/lib/systemd/system/redis6480.service 1448 | Created symlink /etc/systemd/system/multi-user.target.wants/redis6480.service → /usr/lib/systemd/system/redis6480.service. 1449 | Note: Forwarding request to 'systemctl enable redis6480.service'. 1450 | ## Redis TCP 6480 Info ## 1451 | redis_version:6.2.12 1452 | redis_mode:standalone 1453 | process_id:3724058 1454 | tcp_port:6480 1455 | uptime_in_seconds:0 1456 | uptime_in_days:0 1457 | executable:/etc/redis6480/redis-server 1458 | config_file:/etc/redis6480/redis6480.conf 1459 | ``` 1460 | 1461 | ``` 1462 | systemctl status redis6479 redis6480 1463 | ● redis6479.service - Redis persistent key-value database 1464 | Loaded: loaded (/usr/lib/systemd/system/redis6479.service; enabled; vendor preset: disabled) 1465 | Drop-In: /etc/systemd/system/redis6479.service.d 1466 | └─limit.conf, user.conf 1467 | Active: active (running) since Thu 2023-05-25 02:32:52 CDT; 2min 4s ago 1468 | Main PID: 3723969 (redis-server) 1469 | Status: "Ready to accept connections" 1470 | Tasks: 5 (limit: 203337) 1471 | Memory: 2.9M 1472 | CGroup: /system.slice/redis6479.service 1473 | └─3723969 /etc/redis6479/redis-server 127.0.0.1:6479 1474 | 1475 | May 25 02:32:52 hostname systemd[1]: Starting Redis persistent key-value database... 1476 | May 25 02:32:52 hostname systemd[1]: Started Redis persistent key-value database. 1477 | 1478 | ● redis6480.service - Redis persistent key-value database 1479 | Loaded: loaded (/usr/lib/systemd/system/redis6480.service; enabled; vendor preset: disabled) 1480 | Drop-In: /etc/systemd/system/redis6480.service.d 1481 | └─limit.conf, user.conf 1482 | Active: active (running) since Thu 2023-05-25 02:32:52 CDT; 2min 3s ago 1483 | Main PID: 3724058 (redis-server) 1484 | Status: "Ready to accept connections" 1485 | Tasks: 5 (limit: 203337) 1486 | Memory: 2.6M 1487 | CGroup: /system.slice/redis6480.service 1488 | └─3724058 /etc/redis6480/redis-server 127.0.0.1:6480 1489 | 1490 | May 25 02:32:52 hostname systemd[1]: Starting Redis persistent key-value database... 1491 | May 25 02:32:52 hostname systemd[1]: Started Redis persistent key-value database. 
```

For JuiceFS Redis metadata caching, the metadata URL referenced in the JuiceFS format and mount commands will be either:

* `redis://:password@localhost:6479/1` or, without a password, `redis://localhost:6479/1`
* `redis://:password@localhost:6480/1` or, without a password, `redis://localhost:6480/1`

In the `/etc/redis6479/redis6479.conf` and `/etc/redis6480/redis6480.conf` config files, set a Redis password, and set `maxmemory-policy noeviction` and `appendonly yes`:

```
requirepass password
maxmemory-policy noeviction
appendonly yes
```

Also apply the JuiceFS best practices for Redis metadata caching outlined at https://juicefs.com/docs/community/redis_best_practices, e.g. `appendfsync everysec`:

```
grep -C1 -i '^appendfsync' /etc/redis6479/redis6479.conf
# appendfsync always
appendfsync everysec
# appendfsync no
```

Restart the Redis servers:

```
systemctl restart redis6479 redis6480
```

Format JuiceFS sharded mount to use Redis metadata caching via `redis://:password@localhost:6479/1`

```
cfaccountid='CF_ACCOUNT_ID'
cfaccesskey=''
cfsecretkey=''
cfbucketname='juicefs-shard'

mkdir -p /home/juicefs
cd /home/juicefs

juicefs format --storage s3 \
--shards 10 \
--bucket https://${cfbucketname}-%d.${cfaccountid}.r2.cloudflarestorage.com \
--access-key $cfaccesskey \
--secret-key $cfsecretkey \
--compress none \
--trash-days 0 \
--block-size 4096 \
redis://:password@localhost:6479/1 myjuicefs
```

Outputs:

```
2023/05/25 03:12:14.146627 juicefs[3725353] <INFO>: Meta address: redis://:****@localhost:6479/1 [interface.go:401]
2023/05/25 03:12:14.147407 juicefs[3725353] <INFO>: Ping redis: 29.563µs [redis.go:2904]
2023/05/25 03:12:14.148376 juicefs[3725353] <INFO>: Data use shard10://s3://juicefs-shard-0/myjuicefs/ [format.go:434]
2023/05/25 03:12:14.872195 juicefs[3725353] <WARNING>: Can't list s3://juicefs-shard-0/: InvalidMaxKeys: MaxKeys params must be positive integer <= 1000.
status code: 400, request id: , host id: [sharding.go:85]
2023/05/25 03:12:14.872252 juicefs[3725353] <WARNING>: List storage shard10://s3://juicefs-shard-0/myjuicefs/ failed: list s3://juicefs-shard-0/: InvalidMaxKeys: MaxKeys params must be positive integer <= 1000.
status code: 400, request id: , host id: [format.go:452]
2023/05/25 03:12:15.367557 juicefs[3725353] <INFO>: Volume is formatted as {
  "Name": "myjuicefs",
  "UUID": "UUID-UUID-UUID-UUID-UUID",
  "Storage": "s3",
  "Bucket": "https://juicefs-shard-%d.CF_ACCOUNT_ID.r2.cloudflarestorage.com",
  "AccessKey": "cfaccesskey",
  "SecretKey": "removed",
  "BlockSize": 4096,
  "Compression": "none",
  "Shards": 10,
  "KeyEncrypted": true,
  "MetaVersion": 1
} [format.go:471]
```

Edit `/usr/lib/systemd/system/juicefs.service`. Note for Cloudflare R2, you need to [disable automatic metadata backups](https://juicefs.com/docs/community/how_to_setup_object_storage/#r2) with `--backup-meta 0`. Otherwise, for non-R2 S3 providers, you can set `--backup-meta 1h`.
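Since `--backup-meta 0` puts metadata backups in your hands, one option is a cron entry that runs the dump command on a schedule. A sketch only, with a hypothetical time and backup path (the directory must already exist, and `%` needs escaping in crontab):

```
# crontab -e: nightly metadata dump at 03:15 using the Redis metadata URL from this guide
# /home/juicefs_meta_backups is a hypothetical path -- adjust to your layout
15 3 * * * /usr/local/bin/juicefs dump redis://:password@localhost:6479/1 /home/juicefs_meta_backups/meta-$(date +\%F).json
```

The `/usr/lib/systemd/system/juicefs.service` unit file: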
```
[Unit]
Description=JuiceFS
AssertPathIsDirectory=/home/juicefs_mount
After=network-online.target

[Service]
Type=simple
WorkingDirectory=/home/juicefs
ExecStart=/usr/local/bin/juicefs mount \
"redis://:password@localhost:6479/1" \
/home/juicefs_mount \
--no-usage-report \
--writeback \
--cache-size 102400 \
--cache-dir /home/juicefs_cache \
--buffer-size 2048 \
--open-cache 0 \
--attr-cache 1 \
--entry-cache 1 \
--dir-entry-cache 1 \
--cache-partial-only false \
--free-space-ratio 0.1 \
--max-uploads 20 \
--max-deletes 10 \
--backup-meta 0 \
--log /var/log/juicefs.log \
--get-timeout 300 \
--put-timeout 900 \
--io-retries 90 \
--prefetch 1

ExecStop=/usr/local/bin/juicefs umount /home/juicefs_mount
Restart=always
RestartSec=5

[Install]
WantedBy=default.target
```

Edit `/usr/lib/systemd/system/juicefs-gateway.service`

```
[Unit]
Description=JuiceFS Gateway
After=network-online.target

[Service]
Environment='MINIO_ROOT_USER=AKIAIOSFODNN7EXAMPLE'
Environment='MINIO_ROOT_PASSWORD=12345678'
Type=simple
WorkingDirectory=/home/juicefs
ExecStart=/usr/local/bin/juicefs gateway \
--no-usage-report \
--writeback \
--cache-size 102400 \
--cache-dir /home/juicefs_cache \
--attr-cache 1 \
--entry-cache 0 \
--dir-entry-cache 1 \
--prefetch 1 \
--free-space-ratio 0.1 \
--max-uploads 20 \
--max-deletes 10 \
--backup-meta 0 \
--get-timeout 300 \
--put-timeout 900 \
--io-retries 90 \
--buffer-size 2048 \
"redis://:password@localhost:6479/1" \
localhost:3777

Restart=always
RestartSec=5

[Install]
WantedBy=default.target
```

Restart the JuiceFS mount and S3 gateway services

```
systemctl restart juicefs.service juicefs-gateway.service
```

Note: As `--backup-meta 0` is set for Cloudflare R2 to disable automatic metadata backups, you can manually run the dump command to back up the metadata to a file, e.g.
`meta-dump.json`:

```
juicefs dump redis://:password@localhost:6479/1 meta-dump.json
```

```
juicefs dump redis://:password@localhost:6479/1 meta-dump.json
2023/05/26 07:19:58.883775 juicefs[3791823] <INFO>: Meta address: redis://:****@localhost:6479/1 [interface.go:401]
2023/05/26 07:19:58.884482 juicefs[3791823] <INFO>: Ping redis: 19.157µs [redis.go:2904]
2023/05/26 07:19:58.884709 juicefs[3791823] <WARNING>: Secret key is removed for the sake of safety [redis.go:3236]
Dumped entries count: 5 / 5 [==============================================================] done
2023/05/26 07:19:58.885830 juicefs[3791823] <INFO>: Dump metadata into meta-dump.json succeed [dump.go:76]
```

### JuiceFS Benchmarks 100x R2 Sharded Mount + Redis Metadata Caching

Format JuiceFS sharded mount to use Redis metadata caching via `redis://:password@localhost:6479/1`

```
cfaccountid='CF_ACCOUNT_ID'
cfaccesskey=''
cfsecretkey=''
cfbucketname='juicefs-shard'

mkdir -p /home/juicefs
cd /home/juicefs

juicefs format --storage s3 \
--shards 100 \
--bucket https://${cfbucketname}-%d.${cfaccountid}.r2.cloudflarestorage.com \
--access-key $cfaccesskey \
--secret-key $cfsecretkey \
--compress none \
--trash-days 0 \
--block-size 4096 \
redis://:password@localhost:6479/1 myjuicefs
```

Edit `/usr/lib/systemd/system/juicefs.service`, raising to `--max-uploads 800` and `--max-deletes 100`. Note for Cloudflare R2, you need to [disable automatic metadata backups](https://juicefs.com/docs/community/how_to_setup_object_storage/#r2) with `--backup-meta 0`. Otherwise, for non-R2 S3 providers, you can set `--backup-meta 1h`.
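The full updated unit file follows below; once saved, reload systemd and restart the mount so the new concurrency settings take effect — standard systemd steps, sketched here:

```
systemctl daemon-reload
systemctl restart juicefs.service

# confirm the higher limits are live on the running mount process
ps aux | grep '[j]uicefs mount'
```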
1696 | 1697 | ``` 1698 | [Unit] 1699 | Description=JuiceFS 1700 | AssertPathIsDirectory=/home/juicefs_mount 1701 | After=network-online.target 1702 | 1703 | [Service] 1704 | Type=simple 1705 | WorkingDirectory=/home/juicefs 1706 | ExecStart=/usr/local/bin/juicefs mount \ 1707 | "redis://:password@localhost:6479/1" \ 1708 | /home/juicefs_mount \ 1709 | --no-usage-report \ 1710 | --writeback \ 1711 | --cache-size 102400 \ 1712 | --cache-dir /home/juicefs_cache \ 1713 | --buffer-size 3072 \ 1714 | --open-cache 0 \ 1715 | --attr-cache 1 \ 1716 | --entry-cache 1 \ 1717 | --dir-entry-cache 1 \ 1718 | --cache-partial-only false \ 1719 | --free-space-ratio 0.1 \ 1720 | --max-uploads 800 \ 1721 | --max-deletes 100 \ 1722 | --backup-meta 0 \ 1723 | --log /var/log/juicefs.log \ 1724 | --get-timeout 300 \ 1725 | --put-timeout 900 \ 1726 | --io-retries 90 \ 1727 | --prefetch 2 1728 | 1729 | ExecStop=/usr/local/bin/juicefs umount /home/juicefs_mount 1730 | Restart=always 1731 | RestartSec=5 1732 | 1733 | [Install] 1734 | WantedBy=default.target 1735 | ``` 1736 | 1737 | The table below shows comparison between [100x Cloudflare R2 sharded JuiceFS mount + Redis metadata caching](#juicefs-benchmarks-100x-r2-sharded-mount--redis-metadata-caching) vs [61x Cloudflare R2 sharded JuiceFS mount + Redis metadata caching](#juicefs-benchmarks-61x-r2-sharded-mount--redis-metadata-caching) vs [21x Cloudflare R2 sharded JuiceFS mount + Redis metadata caching](#juicefs-benchmarks-21x-r2-sharded-mount--redis-metadata-caching) vs [10x Cloudflare R2 sharded JuiceFS mount + Redis metadata caching](#juicefs-benchmarks-10x-r2-sharded-mount--redis-metadata-caching) vs [10x Cloudflare R2 sharded JuiceFS mount](#10x-r2-sharded-juicefs-mount) vs [1x Cloudflare JuiceFS mount (default)](#on-intel-xeon-e-2276g-6c12t-32gb-memory-and-2x-960gb-nvme-raid-1). All R2 storage locations are with location hint North American East. 1738 | 1739 | Default 1024MB big file. 
1740 | 1741 | | ITEM | VALUE (100x R2 Sharded + Redis) | COST (100x R2 Sharded + Redis) | VALUE (61x R2 Sharded + Redis) | COST (61x R2 Sharded + Redis) | VALUE (21x R2 Sharded + Redis) | COST (21x R2 Sharded + Redis) | VALUE (10x R2 Sharded + Redis) | COST (10x R2 Sharded + Redis) | VALUE (10x R2 Sharded) | COST (10x R2 Sharded) | VALUE (1x R2 Default) | COST (1x R2 Default) | 1742 | | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | 1743 | | Write big file | 1103.70 MiB/s | 7.42 s/file | 1778.16 MiB/s | 2.30 s/file | 1774.18 MiB/s | 2.31 s/file | 1904.61 MiB/s | 2.15 s/file | 906.04 MiB/s | 4.52 s/file | 1374.08 MiB/s | 2.98 s/file | 1744 | | Read big file | 342.66 MiB/s | 23.91 s/file | 231.92 MiB/s | 17.66 s/file | 162.36 MiB/s | 25.23 s/file | 201.00 MiB/s | 20.38 s/file | 223.19 MiB/s | 18.35 s/file | 152.23 MiB/s | 26.91 s/file | 1745 | | Write small file | 2505.3 files/s | 3.19 ms/file | 2449.2 files/s | 1.63 ms/file | 2333.5 files/s | 1.71 ms/file | 1319.8 files/s | 3.03 ms/file | 701.2 files/s | 5.70 ms/file | 780.3 files/s | 5.13 ms/file | 1746 | | Read small file | 13321.4 files/s | 0.60 ms/file | 5997.6 files/s | 0.67 ms/file | 10382.7 files/s | 0.39 ms/file | 10279.8 files/s | 0.39 ms/file | 6378.3 files/s | 0.63 ms/file | 8000.9 files/s | 0.50 ms/file | 1747 | | Stat file | 16229.5 files/s | 0.49 ms/file | 38302.2 files/s | 0.10 ms/file | 15955.7 files/s | 0.25 ms/file | 15890.1 files/s | 0.25 ms/file | 21123.7 files/s | 0.19 ms/file | 27902.2 files/s | 0.14 ms/file | 1748 | | FUSE operation | 142585 operations | 2.51 ms/op | 71292 operations | 1.80 ms/op | 71319 operations | 2.79 ms/op | 71338 operations | 2.23 ms/op | 71555 operations | 2.16 ms/op | 71649 operations | 3.06 ms/op | 1749 | | Update meta | 3473 operations | 1.98 ms/op | 1740 operations | 0.25 ms/op | 1739 operations | 0.25 ms/op | 1740 operations | 0.27 ms/op | 6271 operations | 9.01 ms/op | 6057 operations | 2.50 ms/op | 1750 | | Put object | 2847 operations | 470.88 ms/op | 1087 operations | 466.15 ms/op | 1055 operations | 514.85 ms/op | 1083 operations | 390.88 ms/op | 1152 operations | 403.23 ms/op | 1106 operations | 547.32 ms/op | 1751 | | Get object | 2048 operations | 402.89 ms/op | 1024 operations | 319.02 ms/op | 1027 operations | 346.44 ms/op | 1024 operations | 294.63 ms/op | 1034 operations | 278.61 ms/op | 1030 operations | 301.80 ms/op | 1752 | | Delete object | 440 operations | 174.48 ms/op | 215 operations | 201.12 ms/op | 736 operations | 195.40 ms/op | 754 operations | 125.28 ms/op | 316 operations | 124.32 ms/op | 29 operations | 234.02 ms/op | 1753 | | Write into cache | 2848 operations | 13.31 ms/op | 1424 operations | 5.36 ms/op | 1424 operations | 7.19 ms/op | 1424 operations | 4.85 ms/op | 1424 operations | 24 ms/op | 1424 operations | 7.19 ms/op | 1754 | | Read from cache | 800 operations | 0.09 ms/op | 400 operations | 0.07 ms/op | 400 operations | 0.05 ms/op | 400 operations | 0.05 ms/op | 400 operations | 0.05 ms/op | 400 operations | 0.04 ms/op | 1755 | 1756 | Default 1MB big file. 
1757 | 1758 | | ITEM | VALUE (100x R2 Sharded + Redis) | COST (100x R2 Sharded + Redis) | VALUE (61x R2 Sharded + Redis) | COST (61x R2 Sharded + Redis) | VALUE (21x R2 Sharded + Redis) | COST (21x R2 Sharded + Redis) | VALUE (10x R2 Sharded + Redis) | COST (10x R2 Sharded + Redis) | VALUE (1x R2 Default) | COST (1x R2 Default) | 1759 | | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | 1760 | | Write big file | 637.16 MiB/s | 0.01 s/file | 617.15 MiB/s | 0.01 s/file | 600.01 MiB/s | 0.01 s/file | 530.10 MiB/s | 0.01 s/file | 230.82 MiB/s | 0.02 s/file | 1761 | | Read big file | 1764.73 MiB/s | 0.00 s/file | 1600.85 MiB/s | 0.00 s/file | 1300.69 MiB/s | 0.00 s/file | 1914.40 MiB/s | 0.00 s/file | 1276.38 MiB/s | 0.00 s/file | 1762 | | Write small file | 2666.9 files/s | 3.00 ms/file | 2808.3 files/s | 1.42 ms/file | 2648.3 files/s | 1.51 ms/file | 2715.4 files/s | 1.47 ms/file | 675.7 files/s | 5.92 ms/file | 1763 | | Read small file | 10905.8 files/s | 0.73 ms/file | 10154.0 files/s | 0.39 ms/file | 10442.4 files/s | 0.38 ms/file | 10069.0 files/s | 0.40 ms/file | 7833.1 files/s | 0.51 ms/file | 1764 | | Stat file | 22475.0 files/s | 0.36 ms/file | 15935.2 files/s | 0.25 ms/file | 16277.5 files/s | 0.25 ms/file | 16545.3 files/s | 0.24 ms/file | 28226.1 files/s | 0.14 ms/file | 1765 | | FUSE operation | 11485 operations | 0.12 ms/op | 5761 operations | 0.09 ms/op | 5765 operations | 0.09 ms/op | 5767 operations | 0.09 ms/op | 5756 operations | 0.41 ms/op | 1766 | | Update meta | 3233 operations | 0.19 ms/op | 1617 operations | 0.19 ms/op | 1617 operations | 0.18 ms/op | 1617 operations | 0.19 ms/op | 5770 operations | 0.70 ms/op | 1767 | | Put object | 735 operations | 344.93 ms/op | 32 operations | 377.01 ms/op | 30 operations | 369.65 ms/op | 37 operations | 290.94 ms/op | 118 operations | 242.35 ms/op | 1768 | | Get object | 0 operations | 0.00 ms/op | 0 operations | 0.00 ms/op | 0 operations | 0.00 ms/op | 0 operations | 0.00 ms/op | 0 operations | 0.00 ms/op | 1769 | | Delete object | 117 operations | 123.92 ms/op | 76 operations | 189.67 ms/op | 22 operations | 268.03 ms/op | 48 operations | 103.83 ms/op | 95 operations | 83.94 ms/op | 1770 | | Write into cache | 808 operations | 0.13 ms/op | 404 operations | 0.11 ms/op | 404 operations | 0.11 ms/op | 404 operations | 0.11 ms/op | 404 operations | 0.14 ms/op | 1771 | | Read from cache | 816 operations | 0.13 ms/op | 408 operations | 0.07 ms/op | 408 operations | 0.08 ms/op | 408 operations | 0.06 ms/op | 408 operations | 0.06 ms/op | 1772 | 1773 | 100x R2 sharded JuiceFS mount with Redis metadata caching with location hint North American East 1774 | 1775 | Default 1024MB big file. 1776 | 1777 | ``` 1778 | juicefs bench -p 8 /home/juicefs_mount/ 1779 | Write big blocks count: 8192 / 8192 [===========================================================] done 1780 | Read big blocks count: 8192 / 8192 [===========================================================] done 1781 | Write small blocks count: 800 / 800 [=============================================================] done 1782 | Read small blocks count: 800 / 800 [=============================================================] done 1783 | Stat small files count: 800 / 800 [=============================================================] done 1784 | Benchmark finished! 
1785 | BlockSize: 1 MiB, BigFileSize: 1024 MiB, SmallFileSize: 128 KiB, SmallFileCount: 100, NumThreads: 8 1786 | Time used: 33.5 s, CPU: 238.9%, Memory: 1687.5 MiB 1787 | +------------------+-------------------+--------------+ 1788 | | ITEM | VALUE | COST | 1789 | +------------------+-------------------+--------------+ 1790 | | Write big file | 1103.70 MiB/s | 7.42 s/file | 1791 | | Read big file | 342.66 MiB/s | 23.91 s/file | 1792 | | Write small file | 2505.3 files/s | 3.19 ms/file | 1793 | | Read small file | 13321.4 files/s | 0.60 ms/file | 1794 | | Stat file | 16229.5 files/s | 0.49 ms/file | 1795 | | FUSE operation | 142585 operations | 2.51 ms/op | 1796 | | Update meta | 3473 operations | 1.98 ms/op | 1797 | | Put object | 2847 operations | 470.88 ms/op | 1798 | | Get object | 2048 operations | 402.89 ms/op | 1799 | | Delete object | 440 operations | 174.48 ms/op | 1800 | | Write into cache | 2848 operations | 13.31 ms/op | 1801 | | Read from cache | 800 operations | 0.09 ms/op | 1802 | +------------------+-------------------+--------------+ 1803 | ``` 1804 | 1805 | Default 1MB big file. 1806 | 1807 | ``` 1808 | juicefs bench -p 8 /home/juicefs_mount/ --big-file-size 1 1809 | Write big blocks count: 8 / 8 [==============================================================] done 1810 | Read big blocks count: 8 / 8 [==============================================================] done 1811 | Write small blocks count: 800 / 800 [=============================================================] done 1812 | Read small blocks count: 800 / 800 [=============================================================] done 1813 | Stat small files count: 800 / 800 [=============================================================] done 1814 | Benchmark finished! 1815 | BlockSize: 1 MiB, BigFileSize: 1 MiB, SmallFileSize: 128 KiB, SmallFileCount: 100, NumThreads: 8 1816 | Time used: 0.8 s, CPU: 210.4%, Memory: 605.2 MiB 1817 | +------------------+------------------+--------------+ 1818 | | ITEM | VALUE | COST | 1819 | +------------------+------------------+--------------+ 1820 | | Write big file | 637.16 MiB/s | 0.01 s/file | 1821 | | Read big file | 1764.73 MiB/s | 0.00 s/file | 1822 | | Write small file | 2666.9 files/s | 3.00 ms/file | 1823 | | Read small file | 10905.8 files/s | 0.73 ms/file | 1824 | | Stat file | 22475.0 files/s | 0.36 ms/file | 1825 | | FUSE operation | 11485 operations | 0.12 ms/op | 1826 | | Update meta | 3233 operations | 0.19 ms/op | 1827 | | Put object | 735 operations | 344.93 ms/op | 1828 | | Get object | 0 operations | 0.00 ms/op | 1829 | | Delete object | 117 operations | 123.92 ms/op | 1830 | | Write into cache | 808 operations | 0.13 ms/op | 1831 | | Read from cache | 816 operations | 0.13 ms/op | 1832 | +------------------+------------------+--------------+ 1833 | ``` 1834 | 1835 | 100x Cloudflare R2 buckets info using `sharded-buckets-info-r2.sh` passing AWS CLI profile `r2` with R2 bucket prefix `juicefs-shard-` for `100` sharded buckets and `https://cfaccountid.r2.cloudflarestorage.com` R2 endpoint. 
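The script prints raw byte totals; to eyeball sizes in MiB instead, its output can be piped through awk — a small sketch layered on top of the script's plain-text output, not part of the repo script itself:

```
./sharded-buckets-info-r2.sh r2 juicefs-shard- 100 https://cfaccountid.r2.cloudflarestorage.com | \
  awk -F'Total Size: ' 'NF==2 { printf "%sTotal Size: %.1f MiB\n", $1, $2/1048576 }'
```

The raw run: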
1836 | 1837 | ``` 1838 | ./sharded-buckets-info-r2.sh r2 juicefs-shard- 100 https://cfaccountid.r2.cloudflarestorage.com 1839 | Bucket: juicefs-shard-0, Total Files: 34, Total Size: 4456448 1840 | Bucket: juicefs-shard-1, Total Files: 22, Total Size: 2627642 1841 | Bucket: juicefs-shard-2, Total Files: 28, Total Size: 3670016 1842 | Bucket: juicefs-shard-3, Total Files: 31, Total Size: 3935716 1843 | Bucket: juicefs-shard-4, Total Files: 28, Total Size: 3670016 1844 | Bucket: juicefs-shard-5, Total Files: 39, Total Size: 5111808 1845 | Bucket: juicefs-shard-6, Total Files: 35, Total Size: 4459354 1846 | Bucket: juicefs-shard-7, Total Files: 36, Total Size: 5506235 1847 | Bucket: juicefs-shard-8, Total Files: 140, Total Size: 20185088 1848 | Bucket: juicefs-shard-9, Total Files: 59, Total Size: 7733248 1849 | Bucket: juicefs-shard-10, Total Files: 50, Total Size: 6553600 1850 | Bucket: juicefs-shard-11, Total Files: 40, Total Size: 5242880 1851 | Bucket: juicefs-shard-12, Total Files: 63, Total Size: 10092544 1852 | Bucket: juicefs-shard-13, Total Files: 28, Total Size: 3670016 1853 | Bucket: juicefs-shard-14, Total Files: 31, Total Size: 4063232 1854 | Bucket: juicefs-shard-15, Total Files: 72, Total Size: 9437184 1855 | Bucket: juicefs-shard-16, Total Files: 37, Total Size: 4849664 1856 | Bucket: juicefs-shard-17, Total Files: 41, Total Size: 5373952 1857 | Bucket: juicefs-shard-18, Total Files: 123, Total Size: 18874368 1858 | Bucket: juicefs-shard-19, Total Files: 39, Total Size: 6029312 1859 | Bucket: juicefs-shard-20, Total Files: 30, Total Size: 3932160 1860 | Bucket: juicefs-shard-21, Total Files: 30, Total Size: 3932160 1861 | Bucket: juicefs-shard-22, Total Files: 42, Total Size: 5505024 1862 | Bucket: juicefs-shard-23, Total Files: 173, Total Size: 23592960 1863 | Bucket: juicefs-shard-24, Total Files: 42, Total Size: 5505024 1864 | Bucket: juicefs-shard-25, Total Files: 41, Total Size: 5373952 1865 | Bucket: juicefs-shard-26, Total Files: 33, Total Size: 4198517 1866 | Bucket: juicefs-shard-27, Total Files: 35, Total Size: 6422528 1867 | Bucket: juicefs-shard-28, Total Files: 62, Total Size: 8126464 1868 | Bucket: juicefs-shard-29, Total Files: 34, Total Size: 4327957 1869 | Bucket: juicefs-shard-30, Total Files: 28, Total Size: 3670016 1870 | Bucket: juicefs-shard-31, Total Files: 40, Total Size: 5242880 1871 | Bucket: juicefs-shard-32, Total Files: 32, Total Size: 4194304 1872 | Bucket: juicefs-shard-33, Total Files: 76, Total Size: 9961472 1873 | Bucket: juicefs-shard-34, Total Files: 31, Total Size: 4063232 1874 | Bucket: juicefs-shard-35, Total Files: 45, Total Size: 6685553 1875 | Bucket: juicefs-shard-36, Total Files: 41, Total Size: 5242916 1876 | Bucket: juicefs-shard-37, Total Files: 29, Total Size: 3801088 1877 | Bucket: juicefs-shard-38, Total Files: 123, Total Size: 16911519 1878 | Bucket: juicefs-shard-39, Total Files: 47, Total Size: 6160384 1879 | Bucket: juicefs-shard-40, Total Files: 27, Total Size: 3538944 1880 | Bucket: juicefs-shard-41, Total Files: 46, Total Size: 5899407 1881 | Bucket: juicefs-shard-42, Total Files: 34, Total Size: 4326157 1882 | Bucket: juicefs-shard-43, Total Files: 34, Total Size: 4456448 1883 | Bucket: juicefs-shard-44, Total Files: 42, Total Size: 6422528 1884 | Bucket: juicefs-shard-45, Total Files: 159, Total Size: 21757952 1885 | Bucket: juicefs-shard-46, Total Files: 45, Total Size: 5898240 1886 | Bucket: juicefs-shard-47, Total Files: 56, Total Size: 8257536 1887 | Bucket: juicefs-shard-48, Total Files: 40, Total Size: 5242880 
1888 | Bucket: juicefs-shard-49, Total Files: 51, Total Size: 6684672 1889 | Bucket: juicefs-shard-50, Total Files: 29, Total Size: 3801088 1890 | Bucket: juicefs-shard-51, Total Files: 179, Total Size: 23461888 1891 | Bucket: juicefs-shard-52, Total Files: 40, Total Size: 5112935 1892 | Bucket: juicefs-shard-53, Total Files: 138, Total Size: 19795756 1893 | Bucket: juicefs-shard-54, Total Files: 62, Total Size: 8914826 1894 | Bucket: juicefs-shard-55, Total Files: 41, Total Size: 6291456 1895 | Bucket: juicefs-shard-56, Total Files: 72, Total Size: 9437184 1896 | Bucket: juicefs-shard-57, Total Files: 45, Total Size: 6815744 1897 | Bucket: juicefs-shard-58, Total Files: 32, Total Size: 4064159 1898 | Bucket: juicefs-shard-59, Total Files: 38, Total Size: 6685639 1899 | Bucket: juicefs-shard-60, Total Files: 34, Total Size: 4456448 1900 | Bucket: juicefs-shard-61, Total Files: 41, Total Size: 5373952 1901 | Bucket: juicefs-shard-62, Total Files: 34, Total Size: 4329917 1902 | Bucket: juicefs-shard-63, Total Files: 61, Total Size: 8912896 1903 | Bucket: juicefs-shard-64, Total Files: 50, Total Size: 7471104 1904 | Bucket: juicefs-shard-65, Total Files: 37, Total Size: 4849664 1905 | Bucket: juicefs-shard-66, Total Files: 38, Total Size: 4980736 1906 | Bucket: juicefs-shard-67, Total Files: 119, Total Size: 16515072 1907 | Bucket: juicefs-shard-68, Total Files: 115, Total Size: 15990784 1908 | Bucket: juicefs-shard-69, Total Files: 39, Total Size: 5111808 1909 | Bucket: juicefs-shard-70, Total Files: 63, Total Size: 9966336 1910 | Bucket: juicefs-shard-71, Total Files: 44, Total Size: 5767168 1911 | Bucket: juicefs-shard-72, Total Files: 31, Total Size: 4063232 1912 | Bucket: juicefs-shard-73, Total Files: 42, Total Size: 5244609 1913 | Bucket: juicefs-shard-74, Total Files: 25, Total Size: 3276800 1914 | Bucket: juicefs-shard-75, Total Files: 47, Total Size: 6030141 1915 | Bucket: juicefs-shard-76, Total Files: 69, Total Size: 9043968 1916 | Bucket: juicefs-shard-77, Total Files: 54, Total Size: 7995392 1917 | Bucket: juicefs-shard-78, Total Files: 128, Total Size: 16518412 1918 | Bucket: juicefs-shard-79, Total Files: 104, Total Size: 13631488 1919 | Bucket: juicefs-shard-80, Total Files: 77, Total Size: 10092544 1920 | Bucket: juicefs-shard-81, Total Files: 49, Total Size: 6422528 1921 | Bucket: juicefs-shard-82, Total Files: 54, Total Size: 7077888 1922 | Bucket: juicefs-shard-83, Total Files: 84, Total Size: 11010048 1923 | Bucket: juicefs-shard-84, Total Files: 96, Total Size: 12582912 1924 | Bucket: juicefs-shard-85, Total Files: 50, Total Size: 7471104 1925 | Bucket: juicefs-shard-86, Total Files: 32, Total Size: 4194304 1926 | Bucket: juicefs-shard-87, Total Files: 41, Total Size: 6291456 1927 | Bucket: juicefs-shard-88, Total Files: 81, Total Size: 10616832 1928 | Bucket: juicefs-shard-89, Total Files: 114, Total Size: 14942208 1929 | Bucket: juicefs-shard-90, Total Files: 37, Total Size: 5767168 1930 | Bucket: juicefs-shard-91, Total Files: 54, Total Size: 7077888 1931 | Bucket: juicefs-shard-92, Total Files: 62, Total Size: 8126464 1932 | Bucket: juicefs-shard-93, Total Files: 56, Total Size: 7340032 1933 | Bucket: juicefs-shard-94, Total Files: 35, Total Size: 4587520 1934 | Bucket: juicefs-shard-95, Total Files: 46, Total Size: 6946816 1935 | Bucket: juicefs-shard-96, Total Files: 41, Total Size: 6291456 1936 | Bucket: juicefs-shard-97, Total Files: 151, Total Size: 19791872 1937 | Bucket: juicefs-shard-98, Total Files: 92, Total Size: 12976128 1938 | Bucket: juicefs-shard-99, 
Total Files: 177, Total Size: 24117248
Total for all buckets, Total Files: 5804, Total Size: 792512215
```

### JuiceFS Benchmarks 61x R2 Sharded Mount + Redis Metadata Caching

Format JuiceFS sharded mount to use Redis metadata caching via `redis://:password@localhost:6479/1`

```
cfaccountid='CF_ACCOUNT_ID'
cfaccesskey=''
cfsecretkey=''
cfbucketname='juicefs-shard'

mkdir -p /home/juicefs
cd /home/juicefs

juicefs format --storage s3 \
--shards 61 \
--bucket https://${cfbucketname}-%d.${cfaccountid}.r2.cloudflarestorage.com \
--access-key $cfaccesskey \
--secret-key $cfsecretkey \
--compress none \
--trash-days 0 \
--block-size 4096 \
redis://:password@localhost:6479/1 myjuicefs
```

Edit `/usr/lib/systemd/system/juicefs.service`, raising `--max-uploads 20` and `--max-deletes 10` to `--max-uploads 30` and `--max-deletes 30`. Note for Cloudflare R2, you need to [disable automatic metadata backups](https://juicefs.com/docs/community/how_to_setup_object_storage/#r2) with `--backup-meta 0`. Otherwise, for non-R2 S3 providers, you can set `--backup-meta 1h`.

```
[Unit]
Description=JuiceFS
AssertPathIsDirectory=/home/juicefs_mount
After=network-online.target

[Service]
Type=simple
WorkingDirectory=/home/juicefs
ExecStart=/usr/local/bin/juicefs mount \
"redis://:password@localhost:6479/1" \
/home/juicefs_mount \
--no-usage-report \
--writeback \
--cache-size 102400 \
--cache-dir /home/juicefs_cache \
--buffer-size 2048 \
--open-cache 0 \
--attr-cache 1 \
--entry-cache 1 \
--dir-entry-cache 1 \
--cache-partial-only false \
--free-space-ratio 0.1 \
--max-uploads 30 \
--max-deletes 30 \
--backup-meta 0 \
--log /var/log/juicefs.log \
--get-timeout 300 \
--put-timeout 900 \
--io-retries 90 \
--prefetch 1

ExecStop=/usr/local/bin/juicefs umount /home/juicefs_mount
Restart=always
RestartSec=5

[Install]
WantedBy=default.target
```

The table below shows comparison between [61x Cloudflare R2 sharded JuiceFS mount + Redis metadata caching](#juicefs-benchmarks-61x-r2-sharded-mount--redis-metadata-caching) vs [21x Cloudflare R2 sharded JuiceFS mount + Redis metadata caching](#juicefs-benchmarks-21x-r2-sharded-mount--redis-metadata-caching) vs [10x Cloudflare R2 sharded JuiceFS mount + Redis metadata caching](#juicefs-benchmarks-10x-r2-sharded-mount--redis-metadata-caching) vs [10x Cloudflare R2 sharded JuiceFS mount](#10x-r2-sharded-juicefs-mount) vs [1x Cloudflare JuiceFS mount (default)](#on-intel-xeon-e-2276g-6c12t-32gb-memory-and-2x-960gb-nvme-raid-1). All R2 storage locations are with location hint North American East.

Default 1024MB big file.
2011 | 2012 | | ITEM | VALUE (61x R2 Sharded + Redis) | COST (61x R2 Sharded + Redis) | VALUE (21x R2 Sharded + Redis) | COST (21x R2 Sharded + Redis) | VALUE (10x R2 Sharded + Redis) | COST (10x R2 Sharded + Redis) | VALUE (10x R2 Sharded) | COST (10x R2 Sharded) | VALUE (1x R2 Default) | COST (1x R2 Default) | 2013 | | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | 2014 | | Write big file | 1778.16 MiB/s | 2.30 s/file | 1774.18 MiB/s | 2.31 s/file | 1904.61 MiB/s | 2.15 s/file | 906.04 MiB/s | 4.52 s/file | 1374.08 MiB/s | 2.98 s/file | 2015 | | Read big file | 231.92 MiB/s | 17.66 s/file | 162.36 MiB/s | 25.23 s/file | 201.00 MiB/s | 20.38 s/file | 223.19 MiB/s | 18.35 s/file | 152.23 MiB/s | 26.91 s/file | 2016 | | Write small file | 2449.2 files/s | 1.63 ms/file | 2333.5 files/s | 1.71 ms/file | 1319.8 files/s | 3.03 ms/file | 701.2 files/s | 5.70 ms/file | 780.3 files/s | 5.13 ms/file | 2017 | | Read small file | 5997.6 files/s | 0.67 ms/file | 10382.7 files/s | 0.39 ms/file | 10279.8 files/s | 0.39 ms/file | 6378.3 files/s | 0.63 ms/file | 8000.9 files/s | 0.50 ms/file | 2018 | | Stat file | 38302.2 files/s | 0.10 ms/file | 15955.7 files/s | 0.25 ms/file | 15890.1 files/s | 0.25 ms/file | 21123.7 files/s | 0.19 ms/file | 27902.2 files/s | 0.14 ms/file | 2019 | | FUSE operation | 71292 operations | 1.80 ms/op | 71319 operations | 2.79 ms/op | 71338 operations | 2.23 ms/op | 71555 operations | 2.16 ms/op | 71649 operations | 3.06 ms/op | 2020 | | Update meta | 1740 operations | 0.25 ms/op | 1739 operations | 0.25 ms/op | 1740 operations | 0.27 ms/op | 6271 operations | 9.01 ms/op | 6057 operations | 2.50 ms/op | 2021 | | Put object | 1087 operations | 466.15 ms/op | 1055 operations | 514.85 ms/op | 1083 operations | 390.88 ms/op | 1152 operations | 403.23 ms/op | 1106 operations | 547.32 ms/op | 2022 | | Get object | 1024 operations | 319.02 ms/op | 1027 operations | 346.44 ms/op | 1024 operations | 294.63 ms/op | 1034 operations | 278.61 ms/op | 1030 operations | 301.80 ms/op | 2023 | | Delete object | 215 operations | 201.12 ms/op | 736 operations | 195.40 ms/op | 754 operations | 125.28 ms/op | 316 operations | 124.32 ms/op | 29 operations | 234.02 ms/op | 2024 | | Write into cache | 1424 operations | 5.36 ms/op | 1424 operations | 7.19 ms/op | 1424 operations | 4.85 ms/op | 1424 operations | 24 ms/op | 1424 operations | 7.19 ms/op | 2025 | | Read from cache | 400 operations | 0.07 ms/op | 400 operations | 0.05 ms/op | 400 operations | 0.05 ms/op | 400 operations | 0.05 ms/op | 400 operations | 0.04 ms/op | 2026 | 2027 | Default 1MB big file. 
2028 | 2029 | | ITEM | VALUE (61x R2 Sharded + Redis) | COST (61x R2 Sharded + Redis) | VALUE (21x R2 Sharded + Redis) | COST (21x R2 Sharded + Redis) | VALUE (10x R2 Sharded + Redis) | COST (10x R2 Sharded + Redis) | VALUE (1x R2 Default) | COST (1x R2 Default) | 2030 | | --- | --- | --- | --- | --- | --- | --- | --- | --- | 2031 | | Write big file | 617.15 MiB/s | 0.01 s/file | 600.01 MiB/s | 0.01 s/file | 530.10 MiB/s | 0.01 s/file | 230.82 MiB/s | 0.02 s/file | 2032 | | Read big file | 1600.85 MiB/s | 0.00 s/file | 1300.69 MiB/s | 0.00 s/file | 1914.40 MiB/s | 0.00 s/file | 1276.38 MiB/s | 0.00 s/file | 2033 | | Write small file | 2808.3 files/s | 1.42 ms/file | 2648.3 files/s | 1.51 ms/file | 2715.4 files/s | 1.47 ms/file | 675.7 files/s | 5.92 ms/file | 2034 | | Read small file | 10154.0 files/s | 0.39 ms/file | 10442.4 files/s | 0.38 ms/file | 10069.0 files/s | 0.40 ms/file | 7833.1 files/s | 0.51 ms/file | 2035 | | Stat file | 15935.2 files/s | 0.25 ms/file | 16277.5 files/s | 0.25 ms/file | 16545.3 files/s | 0.24 ms/file | 28226.1 files/s | 0.14 ms/file | 2036 | | FUSE operation | 5761 operations | 0.09 ms/op | 5765 operations | 0.09 ms/op | 5767 operations | 0.09 ms/op | 5756 operations | 0.41 ms/op | 2037 | | Update meta | 1617 operations | 0.19 ms/op | 1617 operations | 0.18 ms/op | 1617 operations | 0.19 ms/op | 5770 operations | 0.70 ms/op | 2038 | | Put object | 32 operations | 377.01 ms/op | 30 operations | 369.65 ms/op | 37 operations | 290.94 ms/op | 118 operations | 242.35 ms/op | 2039 | | Get object | 0 operations | 0.00 ms/op | 0 operations | 0.00 ms/op | 0 operations | 0.00 ms/op | 0 operations | 0.00 ms/op | 2040 | | Delete object | 76 operations | 189.67 ms/op | 22 operations | 268.03 ms/op | 48 operations | 103.83 ms/op | 95 operations | 83.94 ms/op | 2041 | | Write into cache | 404 operations | 0.11 ms/op | 404 operations | 0.11 ms/op | 404 operations | 0.11 ms/op | 404 operations | 0.14 ms/op | 2042 | | Read from cache | 408 operations | 0.07 ms/op | 408 operations | 0.08 ms/op | 408 operations | 0.06 ms/op | 408 operations | 0.06 ms/op | 2043 | 2044 | 61x R2 sharded JuiceFS mount with Redis metadata caching with location hint North American East 2045 | 2046 | Default 1024MB big file. 2047 | 2048 | ``` 2049 | juicefs bench -p 4 /home/juicefs_mount/ 2050 | Write big blocks count: 4096 / 4096 [===========================================================] done 2051 | Read big blocks count: 4096 / 4096 [===========================================================] done 2052 | Write small blocks count: 400 / 400 [=============================================================] done 2053 | Read small blocks count: 400 / 400 [=============================================================] done 2054 | Stat small files count: 400 / 400 [=============================================================] done 2055 | Benchmark finished! 
2056 | BlockSize: 1 MiB, BigFileSize: 1024 MiB, SmallFileSize: 128 KiB, SmallFileCount: 100, NumThreads: 4 2057 | Time used: 21.4 s, CPU: 150.4%, Memory: 711.2 MiB 2058 | +------------------+------------------+--------------+ 2059 | | ITEM | VALUE | COST | 2060 | +------------------+------------------+--------------+ 2061 | | Write big file | 1778.16 MiB/s | 2.30 s/file | 2062 | | Read big file | 231.92 MiB/s | 17.66 s/file | 2063 | | Write small file | 2449.2 files/s | 1.63 ms/file | 2064 | | Read small file | 5997.6 files/s | 0.67 ms/file | 2065 | | Stat file | 38302.2 files/s | 0.10 ms/file | 2066 | | FUSE operation | 71292 operations | 1.80 ms/op | 2067 | | Update meta | 1740 operations | 0.25 ms/op | 2068 | | Put object | 1087 operations | 466.15 ms/op | 2069 | | Get object | 1024 operations | 319.02 ms/op | 2070 | | Delete object | 215 operations | 201.12 ms/op | 2071 | | Write into cache | 1424 operations | 5.36 ms/op | 2072 | | Read from cache | 400 operations | 0.07 ms/op | 2073 | +------------------+------------------+--------------+ 2074 | ``` 2075 | 2076 | Default 1MB big file. 2077 | 2078 | ``` 2079 | juicefs bench -p 4 /home/juicefs_mount/ --big-file-size 1 2080 | Write big blocks count: 4 / 4 [==============================================================] done 2081 | Read big blocks count: 4 / 4 [==============================================================] done 2082 | Write small blocks count: 400 / 400 [=============================================================] done 2083 | Read small blocks count: 400 / 400 [=============================================================] done 2084 | Stat small files count: 400 / 400 [=============================================================] done 2085 | Benchmark finished! 2086 | BlockSize: 1 MiB, BigFileSize: 1 MiB, SmallFileSize: 128 KiB, SmallFileCount: 100, NumThreads: 4 2087 | Time used: 0.5 s, CPU: 108.9%, Memory: 137.7 MiB 2088 | +------------------+-----------------+--------------+ 2089 | | ITEM | VALUE | COST | 2090 | +------------------+-----------------+--------------+ 2091 | | Write big file | 617.15 MiB/s | 0.01 s/file | 2092 | | Read big file | 1600.85 MiB/s | 0.00 s/file | 2093 | | Write small file | 2808.3 files/s | 1.42 ms/file | 2094 | | Read small file | 10154.0 files/s | 0.39 ms/file | 2095 | | Stat file | 15935.2 files/s | 0.25 ms/file | 2096 | | FUSE operation | 5761 operations | 0.09 ms/op | 2097 | | Update meta | 1617 operations | 0.19 ms/op | 2098 | | Put object | 32 operations | 377.01 ms/op | 2099 | | Get object | 0 operations | 0.00 ms/op | 2100 | | Delete object | 76 operations | 189.67 ms/op | 2101 | | Write into cache | 404 operations | 0.11 ms/op | 2102 | | Read from cache | 408 operations | 0.07 ms/op | 2103 | +------------------+-----------------+--------------+ 2104 | ``` 2105 | 2106 | ### JuiceFS Benchmarks 21x R2 Sharded Mount + Redis Metadata Caching 2107 | 2108 | 2109 | Format JuiceFS sharded mount to use Redis metadata caching via `redis://:password@localhost:6479/1` 2110 | 2111 | ``` 2112 | cfaccountid='CF_ACCOUNT_ID' 2113 | cfaccesskey='' 2114 | cfsecretkey='' 2115 | cfbucketname='juicefs-shard' 2116 | 2117 | mkdir -p /home/juicefs 2118 | cd /home/juicefs 2119 | 2120 | juicefs format --storage s3 \ 2121 | --shards 21 \ 2122 | --bucket https://${cfbucketname}-%d.${cfaccountid}.r2.cloudflarestorage.com \ 2123 | --access-key $cfaccesskey \ 2124 | --secret-key $cfsecretkey \ 2125 | --compress none \ 2126 | --trash-days 0 \ 2127 | --block-size 4096 \ 2128 | redis://:password@localhost:6479/1 
myjuicefs 2129 | ``` 2130 | 2131 | The table below shows comparison between [21x Cloudflare R2 sharded JuiceFS mount + Redis metadata caching](#juicefs-benchmarks-21x-r2-sharded-mount--redis-metadata-caching) vs [10x Cloudflare R2 sharded JuiceFS mount + Redis metadata caching](#juicefs-benchmarks-10x-r2-sharded-mount--redis-metadata-caching) vs [10x Cloudflare R2 sharded JuiceFS mount](#10x-r2-sharded-juicefs-mount) vs [1x Cloudflare JuiceFS mount (default)](#on-intel-xeon-e-2276g-6c12t-32gb-memory-and-2x-960gb-nvme-raid-1). All R2 storage locations are with location hint North American East. 2132 | 2133 | Default 1024MB big file. 2134 | 2135 | | ITEM | VALUE (21x R2 Sharded + Redis) | COST (21x R2 Sharded + Redis) | VALUE (10x R2 Sharded + Redis) | COST (10x R2 Sharded + Redis) | VALUE (10x R2 Sharded) | COST (10x R2 Sharded) | VALUE (1x R2 Default) | COST (1x R2 Default) | 2136 | | --- | --- | --- | --- | --- | --- | --- | --- | --- | 2137 | | Write big file | 1774.18 MiB/s | 2.31 s/file | 1904.61 MiB/s | 2.15 s/file | 906.04 MiB/s | 4.52 s/file | 1374.08 MiB/s | 2.98 s/file | 2138 | | Read big file | 162.36 MiB/s | 25.23 s/file | 201.00 MiB/s | 20.38 s/file | 223.19 MiB/s | 18.35 s/file | 152.23 MiB/s | 26.91 s/file | 2139 | | Write small file | 2333.5 files/s | 1.71 ms/file | 1319.8 files/s | 3.03 ms/file | 701.2 files/s | 5.70 ms/file | 780.3 files/s | 5.13 ms/file | 2140 | | Read small file | 10382.7 files/s | 0.39 ms/file | 10279.8 files/s | 0.39 ms/file | 6378.3 files/s | 0.63 ms/file | 8000.9 files/s | 0.50 ms/file | 2141 | | Stat file | 15955.7 files/s | 0.25 ms/file | 15890.1 files/s | 0.25 ms/file | 21123.7 files/s | 0.19 ms/file | 27902.2 files/s | 0.14 ms/file | 2142 | | FUSE operation | 71319 operations | 2.79 ms/op | 71338 operations | 2.23 ms/op | 71555 operations | 2.16 ms/op | 71649 operations | 3.06 ms/op | 2143 | | Update meta | 1739 operations | 0.25 ms/op | 1740 operations | 0.27 ms/op | 6271 operations | 9.01 ms/op | 6057 operations | 2.50 ms/op | 2144 | | Put object | 1055 operations | 514.85 ms/op | 1083 operations | 390.88 ms/op | 1152 operations | 403.23 ms/op | 1106 operations | 547.32 ms/op | 2145 | | Get object | 1027 operations | 346.44 ms/op | 1024 operations | 294.63 ms/op | 1034 operations | 278.61 ms/op | 1030 operations | 301.80 ms/op | 2146 | | Delete object | 736 operations | 195.40 ms/op | 754 operations | 125.28 ms/op | 316 operations | 124.32 ms/op | 29 operations | 234.02 ms/op | 2147 | | Write into cache | 1424 operations | 7.19 ms/op | 1424 operations | 4.85 ms/op | 1424 operations | 24 ms/op | 1424 operations | 7.19 ms/op | 2148 | | Read from cache | 400 operations | 0.05 ms/op | 400 operations | 0.05 ms/op | 400 operations | 0.05 ms/op | 400 operations | 0.04 ms/op | 2149 | 2150 | Default 1MB big file. 
2151 | 2152 | | ITEM | VALUE (21x R2 Sharded + Redis) | COST (21x R2 Sharded + Redis) | VALUE (10x R2 Sharded + Redis) | COST (10x R2 Sharded + Redis) | VALUE (10x R2 Sharded) | COST (10x R2 Sharded) | VALUE (1x R2 Default) | COST (1x R2 Default) | 2153 | | --- | --- | --- | --- | --- | --- | --- | --- | --- | 2154 | | Write big file | 600.01 MiB/s | 0.01 s/file | 530.10 MiB/s | 0.01 s/file | 452.66 MiB/s | 0.01 s/file | 230.82 MiB/s | 0.02 s/file | 2155 | | Read big file | 1300.69 MiB/s | 0.00 s/file | 1914.40 MiB/s | 0.00 s/file | 1545.95 MiB/s | 0.00 s/file | 1276.38 MiB/s | 0.00 s/file | 2156 | | Write small file | 2648.3 files/s | 1.51 ms/file | 2715.4 files/s | 1.47 ms/file | 682.8 files/s | 5.86 ms/file | 675.7 files/s | 5.92 ms/file | 2157 | | Read small file | 10442.4 files/s | 0.38 ms/file | 10069.0 files/s | 0.40 ms/file | 6299.4 files/s | 0.63 ms/file | 7833.1 files/s | 0.51 ms/file | 2158 | | Stat file | 16277.5 files/s | 0.25 ms/file | 16545.3 files/s | 0.24 ms/file | 21365.2 files/s | 0.19 ms/file | 28226.1 files/s | 0.14 ms/file | 2159 | | FUSE operation | 5765 operations | 0.09 ms/op | 5767 operations | 0.09 ms/op | 5757 operations | 0.42 ms/op | 5756 operations | 0.41 ms/op | 2160 | | Update meta | 1617 operations | 0.18 ms/op | 1617 operations | 0.19 ms/op | 5814 operations | 0.72 ms/op | 5770 operations | 0.70 ms/op | 2161 | | Put object | 30 operations | 369.65 ms/op | 37 operations | 290.94 ms/op | 107 operations | 282.68 ms/op | 118 operations | 242.35 ms/op | 2162 | | Get object | 0 operations | 0.00 ms/op | 0 operations | 0.00 ms/op | 0 operations | 0.00 ms/op | 0 operations | 0.00 ms/op | 2163 | | Delete object | 22 operations | 268.03 ms/op | 48 operations | 103.83 ms/op | 133 operations | 116.84 ms/op | 95 operations | 83.94 ms/op | 2164 | | Write into cache | 404 operations | 0.11 ms/op | 404 operations | 0.11 ms/op | 404 operations | 0.12 ms/op | 404 operations | 0.14 ms/op | 2165 | | Read from cache | 408 operations | 0.08 ms/op | 408 operations | 0.06 ms/op | 408 operations | 0.06 ms/op | 408 operations | 0.06 ms/op | 2166 | 2167 | 21x R2 sharded JuiceFS mount with Redis metadata caching with location hint North American East 2168 | 2169 | Default 1024MB big file. 2170 | 2171 | ``` 2172 | juicefs bench -p 4 /home/juicefs_mount/ 2173 | Write big blocks count: 4096 / 4096 [===========================================================] done 2174 | Read big blocks count: 4096 / 4096 [===========================================================] done 2175 | Write small blocks count: 400 / 400 [=============================================================] done 2176 | Read small blocks count: 400 / 400 [=============================================================] done 2177 | Stat small files count: 400 / 400 [=============================================================] done 2178 | Benchmark finished! 
2179 | BlockSize: 1 MiB, BigFileSize: 1024 MiB, SmallFileSize: 128 KiB, SmallFileCount: 100, NumThreads: 4 2180 | Time used: 29.0 s, CPU: 107.6%, Memory: 799.9 MiB 2181 | +------------------+------------------+--------------+ 2182 | | ITEM | VALUE | COST | 2183 | +------------------+------------------+--------------+ 2184 | | Write big file | 1774.18 MiB/s | 2.31 s/file | 2185 | | Read big file | 162.36 MiB/s | 25.23 s/file | 2186 | | Write small file | 2333.5 files/s | 1.71 ms/file | 2187 | | Read small file | 10382.7 files/s | 0.39 ms/file | 2188 | | Stat file | 15955.7 files/s | 0.25 ms/file | 2189 | | FUSE operation | 71319 operations | 2.79 ms/op | 2190 | | Update meta | 1739 operations | 0.25 ms/op | 2191 | | Put object | 1055 operations | 514.85 ms/op | 2192 | | Get object | 1027 operations | 346.44 ms/op | 2193 | | Delete object | 736 operations | 195.40 ms/op | 2194 | | Write into cache | 1424 operations | 7.19 ms/op | 2195 | | Read from cache | 400 operations | 0.05 ms/op | 2196 | +------------------+------------------+--------------+ 2197 | ``` 2198 | 2199 | Default 1MB big file. 2200 | 2201 | ``` 2202 | juicefs bench -p 4 /home/juicefs_mount/ --big-file-size 1 2203 | Write big blocks count: 4 / 4 [==============================================================] done 2204 | Read big blocks count: 4 / 4 [==============================================================] done 2205 | Write small blocks count: 400 / 400 [=============================================================] done 2206 | Read small blocks count: 400 / 400 [=============================================================] done 2207 | Stat small files count: 400 / 400 [=============================================================] done 2208 | Benchmark finished! 2209 | BlockSize: 1 MiB, BigFileSize: 1 MiB, SmallFileSize: 128 KiB, SmallFileCount: 100, NumThreads: 4 2210 | Time used: 0.6 s, CPU: 86.1%, Memory: 121.1 MiB 2211 | +------------------+-----------------+--------------+ 2212 | | ITEM | VALUE | COST | 2213 | +------------------+-----------------+--------------+ 2214 | | Write big file | 600.01 MiB/s | 0.01 s/file | 2215 | | Read big file | 1300.69 MiB/s | 0.00 s/file | 2216 | | Write small file | 2648.3 files/s | 1.51 ms/file | 2217 | | Read small file | 10442.4 files/s | 0.38 ms/file | 2218 | | Stat file | 16277.5 files/s | 0.25 ms/file | 2219 | | FUSE operation | 5765 operations | 0.09 ms/op | 2220 | | Update meta | 1617 operations | 0.18 ms/op | 2221 | | Put object | 30 operations | 369.65 ms/op | 2222 | | Get object | 0 operations | 0.00 ms/op | 2223 | | Delete object | 22 operations | 268.03 ms/op | 2224 | | Write into cache | 404 operations | 0.11 ms/op | 2225 | | Read from cache | 408 operations | 0.08 ms/op | 2226 | +------------------+-----------------+--------------+ 2227 | ``` 2228 | 2229 | ``` 2230 | fio --name=sequential-write --directory=/home/juicefs_mount/fio --rw=write --refill_buffers --bs=4M --size=1G --end_fsync=1 2231 | sequential-write: (g=0): rw=write, bs=(R) 4096KiB-4096KiB, (W) 4096KiB-4096KiB, (T) 4096KiB-4096KiB, ioengine=psync, iodepth=1 2232 | fio-3.19 2233 | Starting 1 process 2234 | sequential-write: Laying out IO file (1 file / 1024MiB) 2235 | Jobs: 1 (f=1) 2236 | sequential-write: (groupid=0, jobs=1): err= 0: pid=3773486: Thu May 25 19:17:55 2023 2237 | write: IOPS=285, BW=1143MiB/s (1198MB/s)(1024MiB/896msec); 0 zone resets 2238 | clat (usec): min=2314, max=7625, avg=2899.57, stdev=787.63 2239 | lat (usec): min=2315, max=7626, avg=2900.44, stdev=787.90 2240 | clat percentiles 
(usec): 2241 | | 1.00th=[ 2343], 5.00th=[ 2376], 10.00th=[ 2409], 20.00th=[ 2442], 2242 | | 30.00th=[ 2507], 40.00th=[ 2540], 50.00th=[ 2606], 60.00th=[ 2704], 2243 | | 70.00th=[ 2835], 80.00th=[ 3032], 90.00th=[ 3982], 95.00th=[ 4817], 2244 | | 99.00th=[ 6390], 99.50th=[ 7111], 99.90th=[ 7635], 99.95th=[ 7635], 2245 | | 99.99th=[ 7635] 2246 | bw ( MiB/s): min= 1115, max= 1115, per=97.59%, avg=1115.26, stdev= 0.00, samples=1 2247 | iops : min= 278, max= 278, avg=278.00, stdev= 0.00, samples=1 2248 | lat (msec) : 4=90.23%, 10=9.77% 2249 | cpu : usr=17.99%, sys=37.77%, ctx=8195, majf=0, minf=10 2250 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 2251 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 2252 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 2253 | issued rwts: total=0,256,0,0 short=0,0,0,0 dropped=0,0,0,0 2254 | latency : target=0, window=0, percentile=100.00%, depth=1 2255 | 2256 | Run status group 0 (all jobs): 2257 | WRITE: bw=1143MiB/s (1198MB/s), 1143MiB/s-1143MiB/s (1198MB/s-1198MB/s), io=1024MiB (1074MB), run=896-896msec 2258 | ``` 2259 | 2260 | ``` 2261 | fio --name=sequential-read --directory=/home/juicefs_mount/fio --rw=read --refill_buffers --bs=4M --size=1G --numjobs=4 2262 | sequential-read: (g=0): rw=read, bs=(R) 4096KiB-4096KiB, (W) 4096KiB-4096KiB, (T) 4096KiB-4096KiB, ioengine=psync, iodepth=1 2263 | ... 2264 | fio-3.19 2265 | Starting 4 processes 2266 | Jobs: 1 (f=1): [E(1),_(1),E(1),R(1)][-.-%][r=2294MiB/s][r=573 IOPS][eta 00m:00s] 2267 | sequential-read: (groupid=0, jobs=1): err= 0: pid=3773759: Thu May 25 19:24:59 2023 2268 | read: IOPS=135, BW=541MiB/s (567MB/s)(1024MiB/1893msec) 2269 | clat (usec): min=2841, max=16889, avg=7040.44, stdev=1440.49 2270 | lat (usec): min=2842, max=16890, avg=7042.04, stdev=1440.57 2271 | clat percentiles (usec): 2272 | | 1.00th=[ 3720], 5.00th=[ 5145], 10.00th=[ 5604], 20.00th=[ 6063], 2273 | | 30.00th=[ 6390], 40.00th=[ 6652], 50.00th=[ 6915], 60.00th=[ 7177], 2274 | | 70.00th=[ 7504], 80.00th=[ 7963], 90.00th=[ 8586], 95.00th=[ 9110], 2275 | | 99.00th=[11731], 99.50th=[12256], 99.90th=[16909], 99.95th=[16909], 2276 | | 99.99th=[16909] 2277 | bw ( KiB/s): min=501680, max=573440, per=24.78%, avg=546789.33, stdev=39279.97, samples=3 2278 | iops : min= 122, max= 140, avg=133.33, stdev= 9.87, samples=3 2279 | lat (msec) : 4=1.95%, 10=95.70%, 20=2.34% 2280 | cpu : usr=0.58%, sys=63.11%, ctx=3077, majf=0, minf=1039 2281 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 2282 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 2283 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 2284 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0 2285 | latency : target=0, window=0, percentile=100.00%, depth=1 2286 | sequential-read: (groupid=0, jobs=1): err= 0: pid=3773760: Thu May 25 19:24:59 2023 2287 | read: IOPS=136, BW=546MiB/s (572MB/s)(1024MiB/1876msec) 2288 | clat (usec): min=2704, max=12163, avg=6973.28, stdev=1305.34 2289 | lat (usec): min=2706, max=12165, avg=6974.97, stdev=1305.26 2290 | clat percentiles (usec): 2291 | | 1.00th=[ 4490], 5.00th=[ 5145], 10.00th=[ 5604], 20.00th=[ 6063], 2292 | | 30.00th=[ 6456], 40.00th=[ 6652], 50.00th=[ 6849], 60.00th=[ 7046], 2293 | | 70.00th=[ 7308], 80.00th=[ 7767], 90.00th=[ 8455], 95.00th=[ 9110], 2294 | | 99.00th=[11863], 99.50th=[11994], 99.90th=[12125], 99.95th=[12125], 2295 | | 99.99th=[12125] 2296 | bw ( KiB/s): min=508031, 
max=589824, per=25.37%, avg=559829.00, stdev=45045.00, samples=3
2297 | iops : min= 124, max= 144, avg=136.67, stdev=11.02, samples=3
2298 | lat (msec) : 4=0.78%, 10=96.09%, 20=3.12%
2299 | cpu : usr=0.75%, sys=63.89%, ctx=2980, majf=0, minf=1038
2300 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
2301 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
2302 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
2303 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0
2304 | latency : target=0, window=0, percentile=100.00%, depth=1
2305 | sequential-read: (groupid=0, jobs=1): err= 0: pid=3773761: Thu May 25 19:24:59 2023
2306 | read: IOPS=135, BW=540MiB/s (567MB/s)(1024MiB/1895msec)
2307 | clat (usec): min=2860, max=13822, avg=6935.78, stdev=1291.86
2308 | lat (usec): min=2861, max=13824, avg=6937.74, stdev=1292.03
2309 | clat percentiles (usec):
2310 | | 1.00th=[ 3916], 5.00th=[ 5080], 10.00th=[ 5669], 20.00th=[ 6128],
2311 | | 30.00th=[ 6390], 40.00th=[ 6587], 50.00th=[ 6915], 60.00th=[ 7111],
2312 | | 70.00th=[ 7373], 80.00th=[ 7701], 90.00th=[ 8094], 95.00th=[ 8848],
2313 | | 99.00th=[11600], 99.50th=[12387], 99.90th=[13829], 99.95th=[13829],
2314 | | 99.99th=[13829]
2315 | bw ( KiB/s): min=469928, max=581632, per=24.67%, avg=544397.33, stdev=64492.33, samples=3
2316 | iops : min= 114, max= 142, avg=132.67, stdev=16.17, samples=3
2317 | lat (msec) : 4=1.56%, 10=95.31%, 20=3.12%
2318 | cpu : usr=0.37%, sys=64.15%, ctx=3083, majf=0, minf=1039
2319 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
2320 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
2321 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
2322 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0
2323 | latency : target=0, window=0, percentile=100.00%, depth=1
2324 | sequential-read: (groupid=0, jobs=1): err= 0: pid=3773762: Thu May 25 19:24:59 2023
2325 | read: IOPS=134, BW=539MiB/s (565MB/s)(1024MiB/1901msec)
2326 | clat (usec): min=1458, max=12052, avg=6959.56, stdev=1420.77
2327 | lat (usec): min=1458, max=12055, avg=6961.18, stdev=1420.80
2328 | clat percentiles (usec):
2329 | | 1.00th=[ 1516], 5.00th=[ 5080], 10.00th=[ 5669], 20.00th=[ 6063],
2330 | | 30.00th=[ 6390], 40.00th=[ 6718], 50.00th=[ 6915], 60.00th=[ 7177],
2331 | | 70.00th=[ 7504], 80.00th=[ 7898], 90.00th=[ 8291], 95.00th=[ 8848],
2332 | | 99.00th=[11338], 99.50th=[11994], 99.90th=[11994], 99.95th=[11994],
2333 | | 99.99th=[11994]
2334 | bw ( KiB/s): min=457227, max=581632, per=24.36%, avg=537433.00, stdev=69581.10, samples=3
2335 | iops : min= 111, max= 142, avg=131.00, stdev=17.35, samples=3
2336 | lat (msec) : 2=1.56%, 4=1.17%, 10=94.92%, 20=2.34%
2337 | cpu : usr=0.47%, sys=63.26%, ctx=3125, majf=0, minf=1038
2338 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
2339 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
2340 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
2341 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0
2342 | latency : target=0, window=0, percentile=100.00%, depth=1
2343 | 
2344 | Run status group 0 (all jobs):
2345 | READ: bw=2155MiB/s (2259MB/s), 539MiB/s-546MiB/s (565MB/s-572MB/s), io=4096MiB (4295MB), run=1876-1901msec
2346 | ```
2347 | 
2348 | ### JuiceFS Benchmarks 10x R2 Sharded Mount + Redis Metadata Caching
2349 | 
2350 | The table below shows a comparison of [10x
Cloudflare R2 sharded JuiceFS mount + Redis metadata caching](#juicefs-benchmarks-10x-r2-sharded-mount--redis-metadata-caching) vs [10x Cloudflare R2 sharded JuiceFS mount](#10x-r2-sharded-juicefs-mount) vs [5x Cloudflare R2 sharded JuiceFS mount](#5x-r2-sharded-juicefs-mount) vs [1x Cloudflare JuiceFS mount (default)](#on-intel-xeon-e-2276g-6c12t-32gb-memory-and-2x-960gb-nvme-raid-1). All R2 storage locations are with location hint North American East. 2351 | 2352 | Default 1024MB big file. 2353 | 2354 | | ITEM | VALUE (10x R2 Sharded + Redis) | COST (10x R2 Sharded + Redis) | VALUE (10x R2 Sharded) | COST (10x R2 Sharded) | VALUE (5x R2 Sharded) | COST (5x R2 Sharded) | VALUE (1x R2 Default) | COST (1x R2 Default) | 2355 | | --- | --- | --- | --- | --- | --- | --- | --- | --- | 2356 | | Write big file | 1904.61 MiB/s | 2.15 s/file | 906.04 MiB/s | 4.52 s/file | 960.47 MiB/s | 4.26 s/file | 1374.08 MiB/s | 2.98 s/file | 2357 | | Read big file | 201.00 MiB/s | 20.38 s/file | 223.19 MiB/s | 18.35 s/file | 174.17 MiB/s | 23.52 s/file | 152.23 MiB/s | 26.91 s/file | 2358 | | Write small file | 1319.8 files/s | 3.03 ms/file | 701.2 files/s | 5.70 ms/file | 777.4 files/s | 5.15 ms/file | 780.3 files/s | 5.13 ms/file | 2359 | | Read small file | 10279.8 files/s | 0.39 ms/file | 6378.3 files/s | 0.63 ms/file | 7940.0 files/s | 0.50 ms/file | 8000.9 files/s | 0.50 ms/file | 2360 | | Stat file | 15890.1 files/s | 0.25 ms/file | 21123.7 files/s | 0.19 ms/file | 29344.7 files/s | 0.14 ms/file | 27902.2 files/s | 0.14 ms/file | 2361 | | FUSE operation | 71338 operations | 2.23 ms/op | 71555 operations | 2.16 ms/op | 71597 operations | 2.67 ms/op | 71649 operations | 3.06 ms/op | 2362 | | Update meta | 1740 operations | 0.27 ms/op | 6271 operations | 9.01 ms/op | 6041 operations | 4.09 ms/op | 6057 operations | 2.50 ms/op | 2363 | | Put object | 1083 operations | 390.88 ms/op | 1152 operations | 403.23 ms/op | 1136 operations | 428.27 ms/op | 1106 operations | 547.32 ms/op | 2364 | | Get object | 1024 operations | 294.63 ms/op | 1034 operations | 278.61 ms/op | 1049 operations | 299.50 ms/op | 1030 operations | 301.80 ms/op | 2365 | | Delete object | 754 operations | 125.28 ms/op | 316 operations | 124.32 ms/op | 60 operations | 120.73 ms/op | 29 operations | 234.02 ms/op | 2366 | | Write into cache | 1424 operations | 4.85 ms/op | 1424 operations | 24.92 ms/op | 1424 operations | 83.12 ms/op | 1424 operations | 12.91 ms/op | 2367 | | Read from cache | 400 operations | 0.05 ms/op | 400 operations | 0.05 ms/op | 400 operations | 0.05 ms/op | 400 operations | 0.04 ms/op | 2368 | 2369 | Default 1MB big file. 
2370 | 2371 | | ITEM | VALUE (10x R2 Sharded + Redis) | COST (10x R2 Sharded + Redis) | VALUE (10x R2 Sharded) | COST (10x R2 Sharded) | VALUE (5x R2 Sharded) | COST (5x R2 Sharded) | VALUE (1x R2 Default) | COST (1x R2 Default) | 2372 | | --- | --- | --- | --- | --- | --- | --- | --- | --- | 2373 | | Write big file | 530.10 MiB/s | 0.01 s/file | 452.66 MiB/s | 0.01 s/file | 448.20 MiB/s | 0.01 s/file | 230.82 MiB/s | 0.02 s/file | 2374 | | Read big file | 1914.40 MiB/s | 0.00 s/file | 1545.95 MiB/s | 0.00 s/file | 1376.38 MiB/s | 0.00 s/file | 1276.38 MiB/s | 0.00 s/file | 2375 | | Write small file | 2715.4 files/s | 1.47 ms/file | 682.8 files/s | 5.86 ms/file | 792.5 files/s | 5.05 ms/file | 675.7 files/s | 5.92 ms/file | 2376 | | Read small file | 10069.0 files/s | 0.40 ms/file | 6299.4 files/s | 0.63 ms/file | 7827.1 files/s | 0.51 ms/file | 7833.1 files/s | 0.51 ms/file | 2377 | | Stat file | 16545.3 files/s | 0.24 ms/file | 21365.2 files/s | 0.19 ms/file | 24308.1 files/s | 0.16 ms/file | 28226.1 files/s | 0.14 ms/file | 2378 | | FUSE operation | 5767 operations | 0.09 ms/op | 5757 operations | 0.42 ms/op | 5750 operations | 0.38 ms/op | 5756 operations | 0.41 ms/op | 2379 | | Update meta | 1617 operations | 0.19 ms/op | 5814 operations | 0.72 ms/op | 5740 operations | 0.74 ms/op | 5770 operations | 0.70 ms/op | 2380 | | Put object | 37 operations | 290.94 ms/op | 107 operations | 282.68 ms/op | 94 operations | 286.35 ms/op | 118 operations | 242.35 ms/op | 2381 | | Get object | 0 operations | 0.00 ms/op | 0 operations | 0.00 ms/op | 0 operations | 0.00 ms/op | 0 operations | 0.00 ms/op | 2382 | | Delete object | 48 operations | 103.83 ms/op | 133 operations | 116.84 ms/op | 59 operations | 117.93 ms/op | 95 operations | 83.94 ms/op | 2383 | | Write into cache | 404 operations | 0.11 ms/op | 404 operations | 0.12 ms/op | 404 operations | 0.12 ms/op | 404 operations | 0.14 ms/op | 2384 | | Read from cache | 408 operations | 0.06 ms/op | 408 operations | 0.06 ms/op | 408 operations | 0.05 ms/op | 408 operations | 0.06 ms/op | 2385 | 2386 | 2387 | 10x R2 sharded JuiceFS mount with Redis metadata caching with location hint North American East 2388 | 2389 | Default 1024MB big file. 2390 | 2391 | ``` 2392 | juicefs bench -p 4 /home/juicefs_mount/ 2393 | Write big blocks count: 4096 / 4096 [===========================================================] done 2394 | Read big blocks count: 4096 / 4096 [===========================================================] done 2395 | Write small blocks count: 400 / 400 [=============================================================] done 2396 | Read small blocks count: 400 / 400 [=============================================================] done 2397 | Stat small files count: 400 / 400 [=============================================================] done 2398 | Benchmark finished! 
2399 | BlockSize: 1 MiB, BigFileSize: 1024 MiB, SmallFileSize: 128 KiB, SmallFileCount: 100, NumThreads: 4 2400 | Time used: 24.4 s, CPU: 128.3%, Memory: 1426.6 MiB 2401 | +------------------+------------------+--------------+ 2402 | | ITEM | VALUE | COST | 2403 | +------------------+------------------+--------------+ 2404 | | Write big file | 1904.61 MiB/s | 2.15 s/file | 2405 | | Read big file | 201.00 MiB/s | 20.38 s/file | 2406 | | Write small file | 1319.8 files/s | 3.03 ms/file | 2407 | | Read small file | 10279.8 files/s | 0.39 ms/file | 2408 | | Stat file | 15890.1 files/s | 0.25 ms/file | 2409 | | FUSE operation | 71338 operations | 2.23 ms/op | 2410 | | Update meta | 1740 operations | 0.27 ms/op | 2411 | | Put object | 1083 operations | 390.88 ms/op | 2412 | | Get object | 1024 operations | 294.63 ms/op | 2413 | | Delete object | 754 operations | 125.28 ms/op | 2414 | | Write into cache | 1424 operations | 4.85 ms/op | 2415 | | Read from cache | 400 operations | 0.05 ms/op | 2416 | +------------------+------------------+--------------+ 2417 | ``` 2418 | 2419 | Default 1MB big file. 2420 | 2421 | ``` 2422 | juicefs bench -p 4 /home/juicefs_mount/ --big-file-size 1 2423 | Write big blocks count: 4 / 4 [==============================================================] done 2424 | Read big blocks count: 4 / 4 [==============================================================] done 2425 | Write small blocks count: 400 / 400 [=============================================================] done 2426 | Read small blocks count: 400 / 400 [=============================================================] done 2427 | Stat small files count: 400 / 400 [=============================================================] done 2428 | Benchmark finished! 2429 | BlockSize: 1 MiB, BigFileSize: 1 MiB, SmallFileSize: 128 KiB, SmallFileCount: 100, NumThreads: 4 2430 | Time used: 0.5 s, CPU: 106.4%, Memory: 139.4 MiB 2431 | +------------------+-----------------+--------------+ 2432 | | ITEM | VALUE | COST | 2433 | +------------------+-----------------+--------------+ 2434 | | Write big file | 530.10 MiB/s | 0.01 s/file | 2435 | | Read big file | 1914.40 MiB/s | 0.00 s/file | 2436 | | Write small file | 2715.4 files/s | 1.47 ms/file | 2437 | | Read small file | 10069.0 files/s | 0.40 ms/file | 2438 | | Stat file | 16545.3 files/s | 0.24 ms/file | 2439 | | FUSE operation | 5767 operations | 0.09 ms/op | 2440 | | Update meta | 1617 operations | 0.19 ms/op | 2441 | | Put object | 37 operations | 290.94 ms/op | 2442 | | Get object | 0 operations | 0.00 ms/op | 2443 | | Delete object | 48 operations | 103.83 ms/op | 2444 | | Write into cache | 404 operations | 0.11 ms/op | 2445 | | Read from cache | 408 operations | 0.06 ms/op | 2446 | +------------------+-----------------+--------------+ 2447 | ``` 2448 | 2449 | ``` 2450 | fio --name=sequential-write --directory=/home/juicefs_mount/fio --rw=write --refill_buffers --bs=4M --size=1G --end_fsync=1 2451 | sequential-write: (g=0): rw=write, bs=(R) 4096KiB-4096KiB, (W) 4096KiB-4096KiB, (T) 4096KiB-4096KiB, ioengine=psync, iodepth=1 2452 | fio-3.19 2453 | Starting 1 process 2454 | sequential-write: Laying out IO file (1 file / 1024MiB) 2455 | Jobs: 1 (f=1) 2456 | sequential-write: (groupid=0, jobs=1): err= 0: pid=3732500: Thu May 25 03:41:09 2023 2457 | write: IOPS=284, BW=1137MiB/s (1192MB/s)(1024MiB/901msec); 0 zone resets 2458 | clat (usec): min=2268, max=5195, avg=2898.39, stdev=411.09 2459 | lat (usec): min=2269, max=5197, avg=2899.15, stdev=411.31 2460 | clat percentiles 
(usec): 2461 | | 1.00th=[ 2278], 5.00th=[ 2442], 10.00th=[ 2507], 20.00th=[ 2606], 2462 | | 30.00th=[ 2671], 40.00th=[ 2737], 50.00th=[ 2802], 60.00th=[ 2900], 2463 | | 70.00th=[ 2999], 80.00th=[ 3163], 90.00th=[ 3392], 95.00th=[ 3654], 2464 | | 99.00th=[ 4752], 99.50th=[ 4752], 99.90th=[ 5211], 99.95th=[ 5211], 2465 | | 99.99th=[ 5211] 2466 | bw ( MiB/s): min= 1125, max= 1125, per=99.07%, avg=1125.97, stdev= 0.00, samples=1 2467 | iops : min= 281, max= 281, avg=281.00, stdev= 0.00, samples=1 2468 | lat (msec) : 4=98.05%, 10=1.95% 2469 | cpu : usr=17.67%, sys=39.11%, ctx=8195, majf=0, minf=12 2470 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 2471 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 2472 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 2473 | issued rwts: total=0,256,0,0 short=0,0,0,0 dropped=0,0,0,0 2474 | latency : target=0, window=0, percentile=100.00%, depth=1 2475 | 2476 | Run status group 0 (all jobs): 2477 | WRITE: bw=1137MiB/s (1192MB/s), 1137MiB/s-1137MiB/s (1192MB/s-1192MB/s), io=1024MiB (1074MB), run=901-901msec 2478 | ``` 2479 | 2480 | ``` 2481 | juicefs warmup -p 4 /home/juicefs_mount/fio 2482 | Warming up count: 5 0.06/s 2483 | Warming up bytes: 5.00 GiB (5368709120 Bytes) 58.31 MiB/s 2484 | 2023/05/25 04:14:18.402336 juicefs[3733267] : Successfully warmed up 5 files (5368709120 bytes) [warmup.go:233] 2485 | ``` 2486 | 2487 | ``` 2488 | fio --name=sequential-read --directory=/home/juicefs_mount/fio --rw=read --refill_buffers --bs=4M --size=1G --numjobs=4 2489 | sequential-read: (g=0): rw=read, bs=(R) 4096KiB-4096KiB, (W) 4096KiB-4096KiB, (T) 4096KiB-4096KiB, ioengine=psync, iodepth=1 2490 | ... 2491 | fio-3.19 2492 | Starting 4 processes 2493 | Jobs: 3 (f=3): [R(3),_(1)][-.-%][r=2278MiB/s][r=569 IOPS][eta 00m:00s] 2494 | sequential-read: (groupid=0, jobs=1): err= 0: pid=3733364: Thu May 25 04:14:53 2023 2495 | read: IOPS=134, BW=538MiB/s (565MB/s)(1024MiB/1902msec) 2496 | clat (usec): min=3113, max=17114, avg=7084.40, stdev=1537.84 2497 | lat (usec): min=3113, max=17117, avg=7086.01, stdev=1537.85 2498 | clat percentiles (usec): 2499 | | 1.00th=[ 3654], 5.00th=[ 5211], 10.00th=[ 5604], 20.00th=[ 5997], 2500 | | 30.00th=[ 6325], 40.00th=[ 6652], 50.00th=[ 6915], 60.00th=[ 7177], 2501 | | 70.00th=[ 7635], 80.00th=[ 7898], 90.00th=[ 8586], 95.00th=[ 9241], 2502 | | 99.00th=[12387], 99.50th=[14746], 99.90th=[17171], 99.95th=[17171], 2503 | | 99.99th=[17171] 2504 | bw ( KiB/s): min=488979, max=581632, per=24.85%, avg=548017.00, stdev=51292.21, samples=3 2505 | iops : min= 119, max= 142, avg=133.67, stdev=12.74, samples=3 2506 | lat (msec) : 4=1.56%, 10=94.14%, 20=4.30% 2507 | cpu : usr=0.53%, sys=63.18%, ctx=2943, majf=0, minf=1037 2508 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 2509 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 2510 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 2511 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0 2512 | latency : target=0, window=0, percentile=100.00%, depth=1 2513 | sequential-read: (groupid=0, jobs=1): err= 0: pid=3733365: Thu May 25 04:14:53 2023 2514 | read: IOPS=134, BW=539MiB/s (565MB/s)(1024MiB/1901msec) 2515 | clat (usec): min=3312, max=14490, avg=7078.54, stdev=1478.22 2516 | lat (usec): min=3313, max=14490, avg=7080.34, stdev=1478.20 2517 | clat percentiles (usec): 2518 | | 1.00th=[ 3687], 5.00th=[ 5211], 10.00th=[ 5604], 20.00th=[ 5997], 
2519 | | 30.00th=[ 6259], 40.00th=[ 6718], 50.00th=[ 6915], 60.00th=[ 7242], 2520 | | 70.00th=[ 7570], 80.00th=[ 7898], 90.00th=[ 8586], 95.00th=[ 9634], 2521 | | 99.00th=[13042], 99.50th=[13173], 99.90th=[14484], 99.95th=[14484], 2522 | | 99.99th=[14484] 2523 | bw ( KiB/s): min=482629, max=581632, per=24.63%, avg=543169.67, stdev=53065.88, samples=3 2524 | iops : min= 117, max= 142, avg=132.33, stdev=13.43, samples=3 2525 | lat (msec) : 4=1.17%, 10=95.70%, 20=3.12% 2526 | cpu : usr=0.42%, sys=63.32%, ctx=2996, majf=0, minf=1035 2527 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 2528 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 2529 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 2530 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0 2531 | latency : target=0, window=0, percentile=100.00%, depth=1 2532 | sequential-read: (groupid=0, jobs=1): err= 0: pid=3733366: Thu May 25 04:14:53 2023 2533 | read: IOPS=134, BW=538MiB/s (565MB/s)(1024MiB/1902msec) 2534 | clat (usec): min=3185, max=13789, avg=7082.52, stdev=1538.28 2535 | lat (usec): min=3186, max=13791, avg=7084.44, stdev=1538.30 2536 | clat percentiles (usec): 2537 | | 1.00th=[ 3359], 5.00th=[ 5080], 10.00th=[ 5604], 20.00th=[ 6063], 2538 | | 30.00th=[ 6259], 40.00th=[ 6587], 50.00th=[ 6915], 60.00th=[ 7177], 2539 | | 70.00th=[ 7570], 80.00th=[ 7832], 90.00th=[ 8848], 95.00th=[10421], 2540 | | 99.00th=[12387], 99.50th=[12649], 99.90th=[13829], 99.95th=[13829], 2541 | | 99.99th=[13829] 2542 | bw ( KiB/s): min=495330, max=581632, per=24.58%, avg=541942.00, stdev=43565.40, samples=3 2543 | iops : min= 120, max= 142, avg=132.00, stdev=11.14, samples=3 2544 | lat (msec) : 4=1.17%, 10=93.36%, 20=5.47% 2545 | cpu : usr=0.53%, sys=63.02%, ctx=2964, majf=0, minf=1037 2546 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 2547 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 2548 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 2549 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0 2550 | latency : target=0, window=0, percentile=100.00%, depth=1 2551 | sequential-read: (groupid=0, jobs=1): err= 0: pid=3733367: Thu May 25 04:14:53 2023 2552 | read: IOPS=135, BW=542MiB/s (569MB/s)(1024MiB/1888msec) 2553 | clat (usec): min=4575, max=12495, avg=7028.10, stdev=1379.32 2554 | lat (usec): min=4577, max=12496, avg=7030.03, stdev=1379.17 2555 | clat percentiles (usec): 2556 | | 1.00th=[ 4621], 5.00th=[ 5080], 10.00th=[ 5473], 20.00th=[ 5866], 2557 | | 30.00th=[ 6259], 40.00th=[ 6652], 50.00th=[ 6915], 60.00th=[ 7177], 2558 | | 70.00th=[ 7570], 80.00th=[ 7832], 90.00th=[ 8586], 95.00th=[ 9503], 2559 | | 99.00th=[12125], 99.50th=[12125], 99.90th=[12518], 99.95th=[12518], 2560 | | 99.99th=[12518] 2561 | bw ( KiB/s): min=482629, max=598016, per=25.00%, avg=551361.67, stdev=60779.35, samples=3 2562 | iops : min= 117, max= 146, avg=134.33, stdev=15.31, samples=3 2563 | lat (msec) : 10=95.70%, 20=4.30% 2564 | cpu : usr=0.58%, sys=63.65%, ctx=3025, majf=0, minf=1035 2565 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 2566 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 2567 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 2568 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0 2569 | latency : target=0, window=0, percentile=100.00%, depth=1 2570 | 2571 | Run status group 0 (all jobs): 2572 
| READ: bw=2154MiB/s (2258MB/s), 538MiB/s-542MiB/s (565MB/s-569MB/s), io=4096MiB (4295MB), run=1888-1902msec
2573 | ```
2574 | 
2575 | ## On Intel Xeon E-2276G 6C/12T, 32GB memory and 2x 960GB NVMe raid 1
2576 | 
2577 | Cloudflare R2 buckets are not yet geographically dispersed like Amazon AWS S3 and only operate in certain geographical regions, so the performance of Cloudflare R2, and thus of JuiceFS, can be impacted.
2578 | 
2579 | For example, the tables below contrast an R2 bucket created with location hint North American East against an R2 bucket created from a Dallas-located dedicated server, where Cloudflare automatically determines the region in which the R2 bucket gets created.
2580 | 
2581 | For JuiceFS mounted storage at `/home/juicefs_mount/`:
2582 | 
2583 | | ITEM | VALUE (North American East) | COST (North American East) | VALUE (Server Location) | COST (Server Location) |
2584 | | --- | --- | --- | --- | --- |
2585 | | Write big file | 1374.08 MiB/s | 2.98 s/file | 973.94 MiB/s | 4.21 s/file |
2586 | | Read big file | 152.23 MiB/s | 26.91 s/file | 66.39 MiB/s | 61.69 s/file |
2587 | | Write small file | 780.3 files/s | 5.13 ms/file | 783.3 files/s | 5.11 ms/file |
2588 | | Read small file | 8000.9 files/s | 0.50 ms/file | 5335.7 files/s | 0.75 ms/file |
2589 | | Stat file | 27902.2 files/s | 0.14 ms/file | 22921.0 files/s | 0.17 ms/file |
2590 | | FUSE operation | 71649 operations | 3.06 ms/op | 72092 operations | 6.83 ms/op |
2591 | | Update meta | 6057 operations | 2.50 ms/op | 6213 operations | 3.92 ms/op |
2592 | | Put object | 1106 operations | 547.32 ms/op | 1065 operations | 1207.74 ms/op |
2593 | | Get object | 1030 operations | 301.80 ms/op | 1077 operations | 785.13 ms/op |
2594 | | Delete object | 29 operations | 234.02 ms/op | 27 operations | 250.50 ms/op |
2595 | | Write into cache | 1424 operations | 12.91 ms/op | 1424 operations | 18.18 ms/op |
2596 | | Read from cache | 400 operations | 0.04 ms/op | 400 operations | 0.05 ms/op |
2597 | 
2598 | For direct R2 object storage benchmarks, without the cache acceleration of JuiceFS:
2599 | 
2600 | | ITEM | VALUE (North American East) | COST (North American East) | VALUE (Server Location) | COST (Server Location) |
2601 | | --- | --- | --- | --- | --- |
2602 | | Upload objects | 7.56 MiB/s | 528.88 ms/object | 3.26 MiB/s | 1228.16 ms/object |
2603 | | Download objects | 12.41 MiB/s | 322.35 ms/object | 4.22 MiB/s | 946.83 ms/object |
2604 | | Put small objects | 2.6 objects/s | 390.11 ms/object | 1.3 objects/s | 768.52 ms/object |
2605 | | Get small objects | 5.8 objects/s | 171.27 ms/object | 2.0 objects/s | 503.87 ms/object |
2606 | | List objects | 873.36 objects/s | 114.50 ms/op | 325.12 objects/s | 307.58 ms/op |
2607 | | Head objects | 13.4 objects/s | 74.84 ms/object | 4.3 objects/s | 231.59 ms/object |
2608 | | Delete objects | 4.3 objects/s | 230.17 ms/object | 3.5 objects/s | 283.57 ms/object |
2609 | | Change permissions | Not supported | Not supported | Not supported | Not supported |
2610 | | Change owner/group | Not supported | Not supported | Not supported | Not supported |
2611 | | Update mtime | Not supported | Not supported | Not supported | Not supported |
2612 | 
2613 | ### with R2 bucket created with location hint North American East
2614 | 
2615 | with R2 bucket created on Cloudflare dashboard with location hint North American East
2616 | 
2617 | ```
2618 | juicefs bench -p 4 /home/juicefs_mount/
2619 | Write big blocks count: 4096 / 4096 [===========================================================] done
2620 | Read big blocks count: 4096 / 4096
[===========================================================] done 2621 | Write small blocks count: 400 / 400 [=============================================================] done 2622 | Read small blocks count: 400 / 400 [=============================================================] done 2623 | Stat small files count: 400 / 400 [=============================================================] done 2624 | Benchmark finished! 2625 | BlockSize: 1 MiB, BigFileSize: 1024 MiB, SmallFileSize: 128 KiB, SmallFileCount: 100, NumThreads: 4 2626 | Time used: 32.4 s, CPU: 97.4%, Memory: 527.6 MiB 2627 | +------------------+------------------+--------------+ 2628 | | ITEM | VALUE | COST | 2629 | +------------------+------------------+--------------+ 2630 | | Write big file | 1374.08 MiB/s | 2.98 s/file | 2631 | | Read big file | 152.23 MiB/s | 26.91 s/file | 2632 | | Write small file | 780.3 files/s | 5.13 ms/file | 2633 | | Read small file | 8000.9 files/s | 0.50 ms/file | 2634 | | Stat file | 27902.2 files/s | 0.14 ms/file | 2635 | | FUSE operation | 71649 operations | 3.06 ms/op | 2636 | | Update meta | 6057 operations | 2.50 ms/op | 2637 | | Put object | 1106 operations | 547.32 ms/op | 2638 | | Get object | 1030 operations | 301.80 ms/op | 2639 | | Delete object | 29 operations | 234.02 ms/op | 2640 | | Write into cache | 1424 operations | 12.91 ms/op | 2641 | | Read from cache | 400 operations | 0.04 ms/op | 2642 | +------------------+------------------+--------------+ 2643 | ``` 2644 | 2645 | direct Cloudflare R2 storage object benchmark with location hint North American East 2646 | 2647 | ``` 2648 | juicefs objbench --storage s3 --access-key $cfaccesskey --secret-key $cfsecretkey https://${cfbucketname}.${cfaccountid}.r2.cloudflarestorage.com -p 1 2649 | Start Functional Testing ... 2650 | +----------+---------------------+--------------------------------------------------+ 2651 | | CATEGORY | TEST | RESULT | 2652 | +----------+---------------------+--------------------------------------------------+ 2653 | | basic | create a bucket | pass | 2654 | | basic | put an object | pass | 2655 | | basic | get an object | pass | 2656 | | basic | get non-exist | pass | 2657 | | basic | get partial object | failed to get object with the offset out of r... | 2658 | | basic | head an object | pass | 2659 | | basic | delete an object | pass | 2660 | | basic | delete non-exist | pass | 2661 | | basic | list objects | the result for list is incorrect | 2662 | | basic | special key | list encode file failed SerializationError: f... | 2663 | | sync | put a big object | pass | 2664 | | sync | put an empty object | pass | 2665 | | sync | multipart upload | pass | 2666 | | sync | change owner/group | not support | 2667 | | sync | change permission | not support | 2668 | | sync | change mtime | not support | 2669 | +----------+---------------------+--------------------------------------------------+ 2670 | 2671 | Start Performance Testing ... 
2672 | 2023/05/23 04:38:31.529817 juicefs[3658965] : The keys are out of order: marker "", last "19" current "1" [sync.go:132] 2673 | 2023/05/23 04:38:31.641211 juicefs[3658965] : The keys are out of order: marker "", last "19" current "1" [sync.go:132] 2674 | 2023/05/23 04:38:42.854394 juicefs[3658965] : The keys are out of order: marker "", last "19" current "1" [sync.go:132] 2675 | put small objects count: 100 / 100 [==============================================================] done 2676 | get small objects count: 100 / 100 [==============================================================] done 2677 | upload objects count: 256 / 256 [==============================================================] done 2678 | download objects count: 256 / 256 [==============================================================] done 2679 | list objects count: 100 / 100 [==============================================================] done 2680 | head objects count: 100 / 100 [==============================================================] done 2681 | delete objects count: 100 / 100 [==============================================================] done 2682 | Benchmark finished! block-size: 4096 KiB, big-object-size: 1024 MiB, small-object-size: 128 KiB, small-objects: 100, NumThreads: 1 2683 | +--------------------+------------------+------------------+ 2684 | | ITEM | VALUE | COST | 2685 | +--------------------+------------------+------------------+ 2686 | | upload objects | 7.56 MiB/s | 528.88 ms/object | 2687 | | download objects | 12.41 MiB/s | 322.35 ms/object | 2688 | | put small objects | 2.6 objects/s | 390.11 ms/object | 2689 | | get small objects | 5.8 objects/s | 171.27 ms/object | 2690 | | list objects | 873.36 objects/s | 114.50 ms/op | 2691 | | head objects | 13.4 objects/s | 74.84 ms/object | 2692 | | delete objects | 4.3 objects/s | 230.17 ms/object | 2693 | | change permissions | not support | not support | 2694 | | change owner/group | not support | not support | 2695 | | update mtime | not support | not support | 2696 | +--------------------+------------------+------------------+ 2697 | ``` 2698 | 2699 | ### with R2 bucket created with location hint North American West 2700 | 2701 | with R2 bucket created on Cloudflare dashboard with location hint North American West and default 1024MB big file. 2702 | 2703 | ``` 2704 | juicefs bench -p 4 /home/juicefs_mount/ 2705 | Write big blocks count: 4096 / 4096 [===========================================================] done 2706 | Read big blocks count: 4096 / 4096 [===========================================================] done 2707 | Write small blocks count: 400 / 400 [=============================================================] done 2708 | Read small blocks count: 400 / 400 [=============================================================] done 2709 | Stat small files count: 400 / 400 [=============================================================] done 2710 | Benchmark finished! 
2711 | BlockSize: 1 MiB, BigFileSize: 1024 MiB, SmallFileSize: 128 KiB, SmallFileCount: 100, NumThreads: 4 2712 | Time used: 44.1 s, CPU: 70.9%, Memory: 646.6 MiB 2713 | +------------------+------------------+--------------+ 2714 | | ITEM | VALUE | COST | 2715 | +------------------+------------------+--------------+ 2716 | | Write big file | 1382.61 MiB/s | 2.96 s/file | 2717 | | Read big file | 106.13 MiB/s | 38.60 s/file | 2718 | | Write small file | 742.0 files/s | 5.39 ms/file | 2719 | | Read small file | 5259.6 files/s | 0.76 ms/file | 2720 | | Stat file | 25240.3 files/s | 0.16 ms/file | 2721 | | FUSE operation | 71790 operations | 4.33 ms/op | 2722 | | Update meta | 6123 operations | 2.24 ms/op | 2723 | | Put object | 1072 operations | 787.82 ms/op | 2724 | | Get object | 1057 operations | 320.67 ms/op | 2725 | | Delete object | 10 operations | 426.32 ms/op | 2726 | | Write into cache | 1424 operations | 16.86 ms/op | 2727 | | Read from cache | 400 operations | 0.05 ms/op | 2728 | +------------------+------------------+--------------+ 2729 | ``` 2730 | 2731 | with R2 bucket created on Cloudflare dashboard with location hint North American West and default 1MB big file. 2732 | 2733 | ``` 2734 | juicefs bench -p 4 /home/juicefs_mount/ --big-file-size 1 2735 | Write big blocks count: 4 / 4 [==============================================================] done 2736 | Read big blocks count: 4 / 4 [==============================================================] done 2737 | Write small blocks count: 400 / 400 [=============================================================] done 2738 | Read small blocks count: 400 / 400 [=============================================================] done 2739 | Stat small files count: 400 / 400 [=============================================================] done 2740 | Benchmark finished! 
2741 | BlockSize: 1 MiB, BigFileSize: 1 MiB, SmallFileSize: 128 KiB, SmallFileCount: 100, NumThreads: 4
2742 | Time used: 1.7 s, CPU: 102.6%, Memory: 154.9 MiB
2743 | +------------------+-----------------+--------------+
2744 | | ITEM | VALUE | COST |
2745 | +------------------+-----------------+--------------+
2746 | | Write big file | 230.82 MiB/s | 0.02 s/file |
2747 | | Read big file | 1276.38 MiB/s | 0.00 s/file |
2748 | | Write small file | 675.7 files/s | 5.92 ms/file |
2749 | | Read small file | 7833.1 files/s | 0.51 ms/file |
2750 | | Stat file | 28226.1 files/s | 0.14 ms/file |
2751 | | FUSE operation | 5756 operations | 0.41 ms/op |
2752 | | Update meta | 5770 operations | 0.70 ms/op |
2753 | | Put object | 118 operations | 242.35 ms/op |
2754 | | Get object | 0 operations | 0.00 ms/op |
2755 | | Delete object | 95 operations | 83.94 ms/op |
2756 | | Write into cache | 404 operations | 0.14 ms/op |
2757 | | Read from cache | 408 operations | 0.06 ms/op |
2758 | +------------------+-----------------+--------------+
2759 | ```
2760 | 
2761 | ### with R2 bucket created on server
2762 | 
2763 | with R2 bucket created on server, the R2 location is automatically chosen by Cloudflare
2764 | 
2765 | ```
2766 | juicefs bench -p 4 /home/juicefs_mount/
2767 | Write big blocks count: 4096 / 4096 [===========================================================] done
2768 | Read big blocks count: 4096 / 4096 [===========================================================] done
2769 | Write small blocks count: 400 / 400 [=============================================================] done
2770 | Read small blocks count: 400 / 400 [=============================================================] done
2771 | Stat small files count: 400 / 400 [=============================================================] done
2772 | Benchmark finished!
2773 | BlockSize: 1 MiB, BigFileSize: 1024 MiB, SmallFileSize: 128 KiB, SmallFileCount: 100, NumThreads: 4
2774 | Time used: 68.4 s, CPU: 48.6%, Memory: 557.8 MiB
2775 | +------------------+------------------+---------------+
2776 | | ITEM | VALUE | COST |
2777 | +------------------+------------------+---------------+
2778 | | Write big file | 973.94 MiB/s | 4.21 s/file |
2779 | | Read big file | 66.39 MiB/s | 61.69 s/file |
2780 | | Write small file | 783.3 files/s | 5.11 ms/file |
2781 | | Read small file | 5335.7 files/s | 0.75 ms/file |
2782 | | Stat file | 22921.0 files/s | 0.17 ms/file |
2783 | | FUSE operation | 72092 operations | 6.83 ms/op |
2784 | | Update meta | 6213 operations | 3.92 ms/op |
2785 | | Put object | 1065 operations | 1207.74 ms/op |
2786 | | Get object | 1077 operations | 785.13 ms/op |
2787 | | Delete object | 27 operations | 250.50 ms/op |
2788 | | Write into cache | 1424 operations | 18.18 ms/op |
2789 | | Read from cache | 400 operations | 0.05 ms/op |
2790 | +------------------+------------------+---------------+
2791 | ```
2792 | 
2793 | direct Cloudflare R2 storage object benchmark with the R2 location automatically chosen by Cloudflare
2794 | 
2795 | ```
2796 | juicefs objbench --storage s3 --access-key $cfaccesskey --secret-key $cfsecretkey https://${cfbucketname}.${cfaccountid}.r2.cloudflarestorage.com -p 1
2797 | 
2798 | Start Functional Testing ...
2799 | +----------+---------------------+--------------------------------------------------+
2800 | | CATEGORY | TEST | RESULT |
2801 | +----------+---------------------+--------------------------------------------------+
2802 | | basic | create a bucket | pass |
2803 | | basic | put an object | pass |
2804 | | basic | get an object | pass |
2805 | | basic | get non-exist | pass |
2806 | | basic | get partial object | failed to get object with the offset out of r... |
2807 | | basic | head an object | pass |
2808 | | basic | delete an object | pass |
2809 | | basic | delete non-exist | pass |
2810 | | basic | list objects | the result for list is incorrect |
2811 | | basic | special key | list encode file failed SerializationError: f... |
2812 | | sync | put a big object | pass |
2813 | | sync | put an empty object | pass |
2814 | | sync | multipart upload | pass |
2815 | | sync | change owner/group | not support |
2816 | | sync | change permission | not support |
2817 | | sync | change mtime | not support |
2818 | +----------+---------------------+--------------------------------------------------+
2819 | 
2820 | Start Performance Testing ...
2821 | 2023/05/21 21:20:52.072515 juicefs[3620125] : The keys are out of order: marker "", last "19" current "1" [sync.go:132]
2822 | 2023/05/21 21:20:52.361774 juicefs[3620125] : The keys are out of order: marker "", last "19" current "1" [sync.go:132]
2823 | 
2824 | 2023/05/21 21:21:22.543272 juicefs[3620125] : The keys are out of order: marker "", last "19" current "1" [sync.go:132]
2825 | put small objects count: 100 / 100 [==============================================================] done
2826 | get small objects count: 100 / 100 [==============================================================] done
2827 | upload objects count: 256 / 256 [==============================================================] done
2828 | download objects count: 256 / 256 [==============================================================] done
2829 | list objects count: 100 / 100 [==============================================================] done
2830 | head objects count: 100 / 100 [==============================================================] done
2831 | delete objects count: 100 / 100 [==============================================================] done
2832 | Benchmark finished! block-size: 4096 KiB, big-object-size: 1024 MiB, small-object-size: 128 KiB, small-objects: 100, NumThreads: 1
2833 | +--------------------+------------------+-------------------+
2834 | | ITEM | VALUE | COST |
2835 | +--------------------+------------------+-------------------+
2836 | | upload objects | 3.26 MiB/s | 1228.16 ms/object |
2837 | | download objects | 4.22 MiB/s | 946.83 ms/object |
2838 | | put small objects | 1.3 objects/s | 768.52 ms/object |
2839 | | get small objects | 2.0 objects/s | 503.87 ms/object |
2840 | | list objects | 325.12 objects/s | 307.58 ms/op |
2841 | | head objects | 4.3 objects/s | 231.59 ms/object |
2842 | | delete objects | 3.5 objects/s | 283.57 ms/object |
2843 | | change permissions | not support | not support |
2844 | | change owner/group | not support | not support |
2845 | | update mtime | not support | not support |
2846 | +--------------------+------------------+-------------------+
2847 | ```
2848 | 
2849 | ### File copy tests
2850 | 
2851 | Comparing JuiceFS mount with R2 storage `/home/juicefs_mount/` versus direct R2 storage bucket `s3://${cfbucketname_raw}` for reads and writes.
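
The copy tests below reference an `$url` endpoint and a `${cfbucketname_raw}` bucket variable assumed to have been set earlier in the shell session. A minimal sketch of that assumed setup (the `juicefs-raw` bucket name and account ID here are placeholders, substitute your own):

```
# Assumed shell setup for the copy tests (placeholder values).
# Cloudflare account ID and a raw non-sharded R2 bucket used for
# the direct `aws s3 cp` comparison runs.
cfaccountid='CF_ACCOUNT_ID'
cfbucketname_raw='juicefs-raw'

# S3-compatible R2 endpoint passed via `--endpoint-url=$url`;
# the `r2` profile is an AWS CLI profile holding the R2 access keys.
url="https://${cfaccountid}.r2.cloudflarestorage.com"
```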
2852 | 
2853 | 
2854 | Write tests
2855 | 
2856 | ```
2857 | wget https://www.php.net/distributions/php-8.2.6.tar.gz
2858 | 
2859 | ls -lah php-8.2.6.tar.gz
2860 | -rw-r--r-- 1 root root 19M May 9 11:10 php-8.2.6.tar.gz
2861 | 
2862 | 1st run
2863 | sync && echo 3 > /proc/sys/vm/drop_caches
2864 | time \cp -f php-8.2.6.tar.gz /home/juicefs_mount/
2865 | 
2866 | real 0m0.040s
2867 | user 0m0.001s
2868 | sys 0m0.012s
2869 | 
2870 | 2nd run
2871 | time \cp -f php-8.2.6.tar.gz /home/juicefs_mount/
2872 | 
2873 | real 0m0.024s
2874 | user 0m0.000s
2875 | sys 0m0.012s
2876 | 
2877 | 1st run
2878 | sync && echo 3 > /proc/sys/vm/drop_caches
2879 | time aws s3 cp --profile r2 --endpoint-url=$url php-8.2.6.tar.gz s3://${cfbucketname_raw}
2880 | upload: ./php-8.2.6.tar.gz to s3://${cfbucketname_raw}/php-8.2.6.tar.gz
2881 | 
2882 | real 0m2.343s
2883 | user 0m0.430s
2884 | sys 0m0.082s
2885 | 
2886 | 2nd run
2887 | time aws s3 cp --profile r2 --endpoint-url=$url php-8.2.6.tar.gz s3://${cfbucketname_raw}
2888 | upload: ./php-8.2.6.tar.gz to s3://${cfbucketname_raw}/php-8.2.6.tar.gz
2889 | 
2890 | real 0m1.350s
2891 | user 0m0.431s
2892 | sys 0m0.058s
2893 | ```
2894 | 
2895 | Read tests
2896 | 
2897 | ```
2898 | 1st run
2899 | sync && echo 3 > /proc/sys/vm/drop_caches
2900 | time \cp -f /home/juicefs_mount/php-8.2.6.tar.gz .
2901 | 
2902 | real 0m2.334s
2903 | user 0m0.001s
2904 | sys 0m0.016s
2905 | 
2906 | 2nd run
2907 | time \cp -f /home/juicefs_mount/php-8.2.6.tar.gz .
2908 | 
2909 | real 0m0.025s
2910 | user 0m0.000s
2911 | sys 0m0.016s
2912 | 
2913 | 1st run
2914 | sync && echo 3 > /proc/sys/vm/drop_caches
2915 | time aws s3 cp --profile r2 --endpoint-url=$url s3://${cfbucketname_raw}/php-8.2.6.tar.gz .
2916 | download: s3://${cfbucketname_raw}/php-8.2.6.tar.gz to ./php-8.2.6.tar.gz
2917 | 
2918 | real 0m1.449s
2919 | user 0m0.432s
2920 | sys 0m0.084s
2921 | 
2922 | 2nd run
2923 | time aws s3 cp --profile r2 --endpoint-url=$url s3://${cfbucketname_raw}/php-8.2.6.tar.gz .
2924 | download: s3://${cfbucketname_raw}/php-8.2.6.tar.gz to ./php-8.2.6.tar.gz
2925 | 
2926 | real 0m0.959s
2927 | user 0m0.405s
2928 | sys 0m0.075s
2929 | ```
2930 | 
2931 | | Test | Throughput (MB/s) | Time (seconds) |
2932 | | ---- | ----------------- | -------------- |
2933 | | **Write to JuiceFS mounted S3 (1st run)** | 19MB/0.040s = 475 MB/s | 0.040 |
2934 | | **Write to JuiceFS mounted S3 (2nd run)** | 19MB/0.024s = 791.67 MB/s | 0.024 |
2935 | | **Write to S3 (1st run)** | 19MB/2.343s = 8.11 MB/s | 2.343 |
2936 | | **Write to S3 (2nd run)** | 19MB/1.350s = 14.07 MB/s | 1.350 |
2937 | | **Read from JuiceFS mounted S3 (1st run)** | 19MB/2.334s = 8.14 MB/s | 2.334 |
2938 | | **Read from JuiceFS mounted S3 (2nd run)** | 19MB/0.025s = 760 MB/s | 0.025 |
2939 | | **Read from S3 (1st run)** | 19MB/1.449s = 13.11 MB/s | 1.449 |
2940 | | **Read from S3 (2nd run)** | 19MB/0.959s = 19.81 MB/s | 0.959 |
2941 | 

Note: the JuiceFS mount's near-instant write and second-run read times are served by JuiceFS's local cache layer rather than by a synchronous round trip to R2, which is why they outpace the direct `aws s3 cp` runs.

2942 | ### fio test for E-2276G server
2943 | 
2944 | Pre-warmed up cache directory fio test
2945 | 
2946 | ```
2947 | ls -lah /home/juicefs_mount/fio
2948 | total 4.1G
2949 | drwxr-xr-x 2 root root 4.0K May 21 22:38 .
2950 | drwxrwxrwx 3 root root 4.0K May 21 22:37 ..
2951 | -rw-r--r-- 1 root root 1.0G May 21 22:38 sequential-read.0.0 2952 | -rw-r--r-- 1 root root 1.0G May 21 22:38 sequential-read.1.0 2953 | -rw-r--r-- 1 root root 1.0G May 21 22:38 sequential-read.2.0 2954 | -rw-r--r-- 1 root root 1.0G May 21 22:38 sequential-read.3.0 2955 | ``` 2956 | ``` 2957 | juicefs warmup -p 4 /home/juicefs_mount/fio 2958 | Warming up count: 4 0.02/s 2959 | Warming up bytes: 4.00 GiB (4294967296 Bytes) 16.59 MiB/s 2960 | 2023/05/21 22:47:02.773883 juicefs[3622249] : Successfully warmed up 4 files (4294967296 bytes) [warmup.go:233] 2961 | ``` 2962 | ``` 2963 | fio --name=sequential-read --directory=/home/juicefs_mount/fio --rw=read --refill_buffers --bs=4M --size=1G --numjobs=4 2964 | sequential-read: (g=0): rw=read, bs=(R) 4096KiB-4096KiB, (W) 4096KiB-4096KiB, (T) 4096KiB-4096KiB, ioengine=psync, iodepth=1 2965 | ... 2966 | fio-3.19 2967 | Starting 4 processes 2968 | Jobs: 3 (f=3): [_(1),R(3)][-.-%][r=2291MiB/s][r=572 IOPS][eta 00m:00s] 2969 | sequential-read: (groupid=0, jobs=1): err= 0: pid=3622348: Sun May 21 22:47:28 2023 2970 | read: IOPS=135, BW=542MiB/s (568MB/s)(1024MiB/1890msec) 2971 | clat (usec): min=4835, max=13800, avg=7004.83, stdev=1154.13 2972 | lat (usec): min=4836, max=13801, avg=7006.45, stdev=1154.05 2973 | clat percentiles (usec): 2974 | | 1.00th=[ 5080], 5.00th=[ 5473], 10.00th=[ 5735], 20.00th=[ 6063], 2975 | | 30.00th=[ 6390], 40.00th=[ 6587], 50.00th=[ 6849], 60.00th=[ 7111], 2976 | | 70.00th=[ 7439], 80.00th=[ 7832], 90.00th=[ 8356], 95.00th=[ 8979], 2977 | | 99.00th=[11076], 99.50th=[11731], 99.90th=[13829], 99.95th=[13829], 2978 | | 99.99th=[13829] 2979 | bw ( KiB/s): min=493799, max=589824, per=25.20%, avg=553928.67, stdev=52399.21, samples=3 2980 | iops : min= 120, max= 144, avg=135.00, stdev=13.08, samples=3 2981 | lat (msec) : 10=98.83%, 20=1.17% 2982 | cpu : usr=0.64%, sys=64.69%, ctx=3015, majf=0, minf=1036 2983 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 2984 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 2985 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 2986 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0 2987 | latency : target=0, window=0, percentile=100.00%, depth=1 2988 | sequential-read: (groupid=0, jobs=1): err= 0: pid=3622349: Sun May 21 22:47:28 2023 2989 | read: IOPS=134, BW=538MiB/s (564MB/s)(1024MiB/1905msec) 2990 | clat (usec): min=3199, max=11916, avg=7060.50, stdev=1274.27 2991 | lat (usec): min=3199, max=11916, avg=7062.11, stdev=1274.34 2992 | clat percentiles (usec): 2993 | | 1.00th=[ 3687], 5.00th=[ 5407], 10.00th=[ 5669], 20.00th=[ 6128], 2994 | | 30.00th=[ 6456], 40.00th=[ 6718], 50.00th=[ 6980], 60.00th=[ 7242], 2995 | | 70.00th=[ 7504], 80.00th=[ 7832], 90.00th=[ 8455], 95.00th=[ 9110], 2996 | | 99.00th=[11600], 99.50th=[11731], 99.90th=[11863], 99.95th=[11863], 2997 | | 99.99th=[11863] 2998 | bw ( KiB/s): min=481137, max=581632, per=24.88%, avg=546977.33, stdev=57045.78, samples=3 2999 | iops : min= 117, max= 142, avg=133.33, stdev=14.15, samples=3 3000 | lat (msec) : 4=1.17%, 10=95.70%, 20=3.12% 3001 | cpu : usr=0.84%, sys=64.29%, ctx=2994, majf=0, minf=1036 3002 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 3003 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 3004 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 3005 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0 3006 | latency : target=0, window=0, 
percentile=100.00%, depth=1 3007 | sequential-read: (groupid=0, jobs=1): err= 0: pid=3622350: Sun May 21 22:47:28 2023 3008 | read: IOPS=134, BW=538MiB/s (564MB/s)(1024MiB/1905msec) 3009 | clat (usec): min=3188, max=15334, avg=7060.55, stdev=1465.48 3010 | lat (usec): min=3189, max=15337, avg=7062.32, stdev=1465.47 3011 | clat percentiles (usec): 3012 | | 1.00th=[ 3523], 5.00th=[ 5211], 10.00th=[ 5669], 20.00th=[ 6063], 3013 | | 30.00th=[ 6390], 40.00th=[ 6652], 50.00th=[ 6849], 60.00th=[ 7177], 3014 | | 70.00th=[ 7439], 80.00th=[ 7832], 90.00th=[ 8455], 95.00th=[ 9765], 3015 | | 99.00th=[12518], 99.50th=[13042], 99.90th=[15270], 99.95th=[15270], 3016 | | 99.99th=[15270] 3017 | bw ( KiB/s): min=468476, max=594449, per=24.69%, avg=542724.33, stdev=65937.74, samples=3 3018 | iops : min= 114, max= 145, avg=132.33, stdev=16.26, samples=3 3019 | lat (msec) : 4=1.17%, 10=94.14%, 20=4.69% 3020 | cpu : usr=0.53%, sys=64.29%, ctx=2892, majf=0, minf=1036 3021 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 3022 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 3023 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 3024 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0 3025 | latency : target=0, window=0, percentile=100.00%, depth=1 3026 | sequential-read: (groupid=0, jobs=1): err= 0: pid=3622351: Sun May 21 22:47:28 2023 3027 | read: IOPS=134, BW=537MiB/s (563MB/s)(1024MiB/1908msec) 3028 | clat (usec): min=1314, max=18340, avg=7077.81, stdev=1606.56 3029 | lat (usec): min=1314, max=18341, avg=7079.39, stdev=1606.52 3030 | clat percentiles (usec): 3031 | | 1.00th=[ 2507], 5.00th=[ 5211], 10.00th=[ 5669], 20.00th=[ 6128], 3032 | | 30.00th=[ 6259], 40.00th=[ 6652], 50.00th=[ 6980], 60.00th=[ 7308], 3033 | | 70.00th=[ 7570], 80.00th=[ 7963], 90.00th=[ 8586], 95.00th=[ 9503], 3034 | | 99.00th=[11994], 99.50th=[12518], 99.90th=[18220], 99.95th=[18220], 3035 | | 99.99th=[18220] 3036 | bw ( KiB/s): min=474806, max=573440, per=24.54%, avg=539421.67, stdev=55984.95, samples=3 3037 | iops : min= 115, max= 140, avg=131.33, stdev=14.15, samples=3 3038 | lat (msec) : 2=0.78%, 4=1.95%, 10=93.75%, 20=3.52% 3039 | cpu : usr=0.63%, sys=63.56%, ctx=2996, majf=0, minf=1036 3040 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 3041 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 3042 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 3043 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0 3044 | latency : target=0, window=0, percentile=100.00%, depth=1 3045 | 3046 | Run status group 0 (all jobs): 3047 | READ: bw=2147MiB/s (2251MB/s), 537MiB/s-542MiB/s (563MB/s-568MB/s), io=4096MiB (4295MB), run=1890-1908msec 3048 | ``` 3049 | 3050 | ## On Intel Core i7 4790K 4C/8T, 32GB memory and 2x 240GB SSD raid 1 3051 | 3052 | ``` 3053 | juicefs bench -p 4 /home/juicefs_mount/ 3054 | Write big blocks count: 4096 / 4096 [======================================================] done 3055 | Read big blocks count: 4096 / 4096 [======================================================] done 3056 | Write small blocks count: 400 / 400 [========================================================] done 3057 | Read small blocks count: 400 / 400 [========================================================] done 3058 | Stat small files count: 400 / 400 [========================================================] done 3059 | Benchmark finished! 
3060 | BlockSize: 1 MiB, BigFileSize: 1024 MiB, SmallFileSize: 128 KiB, SmallFileCount: 100, NumThreads: 4 3061 | Time used: 29.5 s, CPU: 51.7%, Memory: 1317.1 MiB 3062 | +------------------+------------------+---------------+ 3063 | | ITEM | VALUE | COST | 3064 | +------------------+------------------+---------------+ 3065 | | Write big file | 253.86 MiB/s | 16.13 s/file | 3066 | | Read big file | 418.69 MiB/s | 9.78 s/file | 3067 | | Write small file | 312.3 files/s | 12.81 ms/file | 3068 | | Read small file | 5727.4 files/s | 0.70 ms/file | 3069 | | Stat file | 29605.6 files/s | 0.14 ms/file | 3070 | | FUSE operation | 71271 operations | 1.95 ms/op | 3071 | | Update meta | 1289 operations | 74.78 ms/op | 3072 | | Put object | 204 operations | 1214.46 ms/op | 3073 | | Get object | 143 operations | 1032.30 ms/op | 3074 | | Delete object | 0 operations | 0.00 ms/op | 3075 | | Write into cache | 1567 operations | 1808.73 ms/op | 3076 | | Read from cache | 1286 operations | 62.66 ms/op | 3077 | +------------------+------------------+---------------+ 3078 | ``` 3079 | ``` 3080 | juicefs stats /home/juicefs_mount 3081 | ------usage------ ----------fuse--------- ----meta--- -blockcache ---object-- 3082 | cpu mem buf | ops lat read write| ops lat | read write| get put 3083 | 0.0% 33M 0 | 0 0 0 0 | 0 0 | 0 0 | 0 0 3084 | 0.1% 33M 0 | 0 0 0 0 | 0 0 | 0 0 | 0 0 3085 | 0.2% 34M 0 | 1 0.21 0 0 | 1 0.20 | 0 0 | 0 0 3086 | 2.1% 34M 0 | 5 1.68 0 0 | 5 1.67 | 0 0 | 0 0 3087 | 0.2% 34M 0 | 1 0.73 0 0 | 1 0.73 | 0 0 | 0 0 3088 | 114% 176M 64M|4533 0.06 0 564M| 18 4.32 | 0 560M| 0 0 3089 | 195% 1119M 1028M| 10K 0.37 0 1332M| 2 400 | 0 1328M| 0 0 3090 | 27.6% 1138M 1056M| 277 10.5 0 34M| 1 1811 | 0 32M| 0 36M 3091 | 84.2% 1147M 1028M|6455 0.73 0 806M| 2 301 | 0 812M| 0 28M 3092 | 19.3% 1153M 1056M| 619 4.38 0 77M| 0 0 | 0 80M| 0 8192K 3093 | 38.6% 1157M 1060M| 561 9.76 0 70M| 1 301 | 0 64M| 0 48M 3094 | 25.5% 1163M 1056M| 260 10.6 0 32M| 1 5486 | 0 32M| 0 24M 3095 | 62.3% 1175M 892M|3173 1.63 0 396M| 2 3413 | 0 392M| 0 44M 3096 | 54.3% 1177M 1032M|3834 0.61 0 479M| 1 5033 | 0 488M| 0 20M 3097 | 40.5% 1190M 1032M| 554 9.79 0 69M| 3 3926 | 0 64M| 0 36M 3098 | 22.8% 1195M 1040M| 266 10.5 0 33M| 1 6543 | 0 36M| 0 28M 3099 | 41.5% 1203M 804M|1595 2.23 0 199M| 1 300 | 0 208M| 0 36M 3100 | 11.2% 1204M 364M| 0 0 0 0 | 2 2520 | 0 0 | 0 24M 3101 | 20.4% 1204M 252M| 1 300 0 0 | 2 2847 | 0 0 | 0 36M 3102 | 9.6% 1205M 48M| 0 0 0 0 | 1 6478 | 0 0 | 0 24M 3103 | 8.3% 1206M 40M| 1 301 0 0 | 2 3465 | 0 0 | 0 36M 3104 | 11.3% 1207M 48M| 8 2465 0 0 | 48 7895 | 0 0 | 0 20M 3105 | 15.9% 1214M 144M| 336 0.91 40M 0 | 22 0.47 | 40M 0 | 0 36M 3106 | 20.8% 1218M 152M| 67 73.2 8192K 0 | 3 0.23 | 0 32M| 32M 24M 3107 | 32.4% 1222M 196M| 546 18.6 68M 0 | 2 0.19 | 0 88M| 88M 32M 3108 | 40.8% 1224M 200M| 869 11.1 108M 0 | 8 0.21 | 0 88M| 88M 8192K 3109 | 37.0% 1226M 200M| 674 8.98 84M 0 | 3 0.22 | 0 104M| 104M 28M 3110 | 35.3% 1229M 196M| 804 11.8 100M 0 | 8 0.23 |8192K 88M| 88M 16M 3111 | 78.0% 1232M 192M|3917 2.04 488M 0 | 10 0.25 | 400M 108M| 108M 24M 3112 | 45.2% 1235M 200M|2330 3.53 291M 0 | 9 0.22 | 196M 60M| 60M 24M 3113 | ------usage------ ----------fuse--------- ----meta--- -blockcache ---object-- 3114 | cpu mem buf | ops lat read write| ops lat | read write| get put 3115 | 115% 1240M 168M|9542 0.89 1191M 0 | 21 0.26 |1164M 4096K|4096K 32M 3116 | 95.8% 1244M 168M|8292 0.66 1036M 0 | 21 0.21 |1036M 0 | 0 28M 3117 | 105% 1263M 48M|6479 0.47 680M 21M| 699 2.63 | 680M 21M| 0 44M 3118 | 47.1% 1280M 48M|1372 1.60 0 28M| 913 2.34 | 0 
28M| 0 24M 3119 | 56.4% 1310M 48M|2959 0.19 50M 0 |2141 0.25 | 50M 0 | 0 40M 3120 | 19.9% 1317M 48M| 286 0.61 0 0 | 285 0.61 | 0 0 | 0 36M 3121 | 9.4% 1318M 48M| 1 0.21 0 0 | 1 0.21 | 0 0 | 0 36M 3122 | 9.2% 1319M 48M| 0 0 0 0 | 0 0 | 0 0 | 0 36M 3123 | 9.6% 1319M 48M| 1 0.21 0 0 | 2 0.24 | 0 0 | 0 32M 3124 | 9.8% 1321M 48M| 0 0 0 0 | 0 0 | 0 0 | 0 40M 3125 | 11.0% 1321M 48M| 1 0.66 0 0 | 1 0.64 | 0 0 | 0 40M 3126 | 9.4% 1322M 48M| 0 0 0 0 | 0 0 | 0 0 | 0 36M 3127 | 11.0% 1323M 48M| 1 0.20 0 0 | 1 0.20 | 0 0 | 0 44M 3128 | 9.4% 1324M 48M| 0 0 0 0 | 0 0 | 0 0 | 0 36M 3129 | 8.8% 1325M 48M| 1 0.21 0 0 | 1 0.20 | 0 0 | 0 32M 3130 | 10.5% 1326M 48M| 0 0 0 0 | 0 0 | 0 0 | 0 40M 3131 | 10.5% 1327M 48M| 1 0.22 0 0 | 1 0.21 | 0 0 | 0 40M 3132 | 11.3% 1328M 48M| 0 0 0 0 | 0 0 | 0 0 | 0 44M 3133 | 10.5% 1328M 48M| 1 0.22 0 0 | 2 0.23 | 0 0 | 0 40M 3134 | 10.4% 1329M 48M| 0 0 0 0 | 0 0 | 0 0 | 0 40M 3135 | 10.3% 1330M 48M| 1 0.23 0 0 | 1 0.23 | 0 0 | 0 40M 3136 | 10.7% 1331M 48M| 0 0 0 0 | 0 0 | 0 0 | 0 40M 3137 | 10.3% 1332M 48M| 1 0.22 0 0 | 1 0.22 | 0 0 | 0 40M 3138 | 10.2% 1333M 48M| 0 0 0 0 | 0 0 | 0 0 | 0 40M 3139 | 9.4% 1335M 48M| 1 0.22 0 0 | 1 0.21 | 0 0 | 0 36M 3140 | 10.3% 1335M 48M| 0 0 0 0 | 0 0 | 0 0 | 0 40M 3141 | 10.3% 1336M 48M| 1 0.22 0 0 | 1 0.21 | 0 0 | 0 40M 3142 | 9.6% 1337M 48M| 0 0 0 0 | 1 0.27 | 0 0 | 0 36M 3143 | 10.3% 1338M 48M| 1 0.21 0 0 | 1 0.20 | 0 0 | 0 40M 3144 | 7.0% 1338M 48M| 0 0 0 0 | 0 0 | 0 0 | 0 32M 3145 | ``` 3146 | 3147 | ### fio test 3148 | 3149 | Pre-warmed up cache directory fio test 3150 | 3151 | ``` 3152 | ls -lah /home/juicefs_mount/fio 3153 | total 4.1G 3154 | drwxr-xr-x 2 root root 4.0K May 26 01:23 . 3155 | drwxrwxrwx 3 root root 4.0K May 26 01:15 .. 3156 | -rw-r--r-- 1 root root 1.0G May 26 01:16 sequential-read.0.0 3157 | -rw-r--r-- 1 root root 1.0G May 26 01:20 sequential-read.1.0 3158 | -rw-r--r-- 1 root root 1.0G May 26 01:24 sequential-read.2.0 3159 | -rw-r--r-- 1 root root 1.0G May 26 01:23 sequential-read.3.0 3160 | ``` 3161 | ``` 3162 | juicefs warmup -p 2 /home/juicefs_mount/fio 3163 | Warmed up paths count: 1 / 1 [==============================================================] done 3164 | 2022/05/26 01:38:00.362641 juicefs[45285] : Successfully warmed up 1 paths [warmup.go:209] 3165 | ``` 3166 | ``` 3167 | fio --name=sequential-read --directory=/home/juicefs_mount/fio --rw=read --refill_buffers --bs=4M --size=1G --numjobs=4 3168 | 3169 | sequential-read: (g=0): rw=read, bs=(R) 4096KiB-4096KiB, (W) 4096KiB-4096KiB, (T) 4096KiB-4096KiB, ioengine=psync, iodepth=1 3170 | ... 
3171 | fio-3.7 3172 | Starting 4 processes 3173 | Jobs: 4 (f=4) 3174 | sequential-read: (groupid=0, jobs=1): err= 0: pid=47804: Thu May 26 01:38:12 2022 3175 | read: IOPS=179, BW=716MiB/s (751MB/s)(1024MiB/1430msec) 3176 | clat (usec): min=1688, max=15592, avg=5571.03, stdev=1390.95 3177 | lat (usec): min=1689, max=15592, avg=5572.39, stdev=1390.89 3178 | clat percentiles (usec): 3179 | | 1.00th=[ 2278], 5.00th=[ 3884], 10.00th=[ 4359], 20.00th=[ 4621], 3180 | | 30.00th=[ 4948], 40.00th=[ 5276], 50.00th=[ 5473], 60.00th=[ 5669], 3181 | | 70.00th=[ 5932], 80.00th=[ 6325], 90.00th=[ 6783], 95.00th=[ 7439], 3182 | | 99.00th=[ 9241], 99.50th=[14615], 99.90th=[15533], 99.95th=[15533], 3183 | | 99.99th=[15533] 3184 | bw ( KiB/s): min=704512, max=720896, per=24.30%, avg=712704.00, stdev=11585.24, samples=2 3185 | iops : min= 172, max= 176, avg=174.00, stdev= 2.83, samples=2 3186 | lat (msec) : 2=0.78%, 4=4.69%, 10=93.75%, 20=0.78% 3187 | cpu : usr=0.14%, sys=46.61%, ctx=2730, majf=0, minf=1055 3188 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 3189 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 3190 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 3191 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0 3192 | latency : target=0, window=0, percentile=100.00%, depth=1 3193 | sequential-read: (groupid=0, jobs=1): err= 0: pid=47805: Thu May 26 01:38:12 2022 3194 | read: IOPS=180, BW=721MiB/s (756MB/s)(1024MiB/1420msec) 3195 | clat (usec): min=2722, max=12203, avg=5530.93, stdev=1193.63 3196 | lat (usec): min=2723, max=12204, avg=5532.24, stdev=1193.64 3197 | clat percentiles (usec): 3198 | | 1.00th=[ 3490], 5.00th=[ 4080], 10.00th=[ 4359], 20.00th=[ 4686], 3199 | | 30.00th=[ 4948], 40.00th=[ 5145], 50.00th=[ 5407], 60.00th=[ 5604], 3200 | | 70.00th=[ 5866], 80.00th=[ 6128], 90.00th=[ 6849], 95.00th=[ 7635], 3201 | | 99.00th=[11994], 99.50th=[12125], 99.90th=[12256], 99.95th=[12256], 3202 | | 99.99th=[12256] 3203 | bw ( KiB/s): min=696320, max=737280, per=24.44%, avg=716800.00, stdev=28963.09, samples=2 3204 | iops : min= 170, max= 180, avg=175.00, stdev= 7.07, samples=2 3205 | lat (msec) : 4=3.52%, 10=95.31%, 20=1.17% 3206 | cpu : usr=0.00%, sys=47.71%, ctx=2751, majf=0, minf=1054 3207 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0% 3208 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 3209 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0% 3210 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0 3211 | latency : target=0, window=0, percentile=100.00%, depth=1 3212 | sequential-read: (groupid=0, jobs=1): err= 0: pid=47806: Thu May 26 01:38:12 2022 3213 | read: IOPS=179, BW=716MiB/s (751MB/s)(1024MiB/1430msec) 3214 | clat (usec): min=1880, max=13391, avg=5570.19, stdev=1200.55 3215 | lat (usec): min=1881, max=13393, avg=5571.52, stdev=1200.50 3216 | clat percentiles (usec): 3217 | | 1.00th=[ 2540], 5.00th=[ 4113], 10.00th=[ 4424], 20.00th=[ 4752], 3218 | | 30.00th=[ 5014], 40.00th=[ 5211], 50.00th=[ 5473], 60.00th=[ 5735], 3219 | | 70.00th=[ 5997], 80.00th=[ 6259], 90.00th=[ 6849], 95.00th=[ 7177], 3220 | | 99.00th=[ 8717], 99.50th=[12387], 99.90th=[13435], 99.95th=[13435], 3221 | | 99.99th=[13435] 3222 | bw ( KiB/s): min=688128, max=737280, per=24.30%, avg=712704.00, stdev=34755.71, samples=2 3223 | iops : min= 168, max= 180, avg=174.00, stdev= 8.49, samples=2 3224 | lat (msec) : 2=0.39%, 4=3.52%, 10=95.31%, 20=0.78% 3225 
| cpu : usr=0.56%, sys=46.61%, ctx=2806, majf=0, minf=1055
3226 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
3227 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
3228 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
3229 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0
3230 | latency : target=0, window=0, percentile=100.00%, depth=1
3231 | sequential-read: (groupid=0, jobs=1): err= 0: pid=47807: Thu May 26 01:38:12 2022
3232 | read: IOPS=179, BW=719MiB/s (754MB/s)(1024MiB/1425msec)
3233 | clat (usec): min=2478, max=11410, avg=5550.24, stdev=1014.45
3234 | lat (usec): min=2480, max=11411, avg=5551.59, stdev=1014.37
3235 | clat percentiles (usec):
3236 | | 1.00th=[ 3392], 5.00th=[ 4146], 10.00th=[ 4424], 20.00th=[ 4817],
3237 | | 30.00th=[ 5080], 40.00th=[ 5276], 50.00th=[ 5473], 60.00th=[ 5669],
3238 | | 70.00th=[ 5866], 80.00th=[ 6259], 90.00th=[ 6718], 95.00th=[ 7111],
3239 | | 99.00th=[ 8225], 99.50th=[ 9241], 99.90th=[11469], 99.95th=[11469],
3240 | | 99.99th=[11469]
3241 | bw ( KiB/s): min=720896, max=761856, per=25.28%, avg=741376.00, stdev=28963.09, samples=2
3242 | iops : min= 176, max= 186, avg=181.00, stdev= 7.07, samples=2
3243 | lat (msec) : 4=4.30%, 10=95.31%, 20=0.39%
3244 | cpu : usr=0.14%, sys=46.98%, ctx=2771, majf=0, minf=1054
3245 | IO depths : 1=100.0%, 2=0.0%, 4=0.0%, 8=0.0%, 16=0.0%, 32=0.0%, >=64=0.0%
3246 | submit : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
3247 | complete : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
3248 | issued rwts: total=256,0,0,0 short=0,0,0,0 dropped=0,0,0,0
3249 | latency : target=0, window=0, percentile=100.00%, depth=1
3250 |
3251 | Run status group 0 (all jobs):
3252 | READ: bw=2864MiB/s (3003MB/s), 716MiB/s-721MiB/s (751MB/s-756MB/s), io=4096MiB (4295MB), run=1420-1430msec
3253 | ```
3254 |
3255 | # Destroying JuiceFS Filesystem
3256 |
3257 | You need to get the metadata engine's UUID (by piping `juicefs status` output through the `jq` JSON tool) and pass it to the `juicefs destroy` command. Note that for Cloudflare R2, the [`destroy`](https://juicefs.com/docs/community/how_to_setup_object_storage/#r2) command might not work 100%.
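
In general terms, the teardown flow is always the same: stop anything using the mount, look up the volume UUID from `juicefs status`, then pass that UUID to `juicefs destroy`. A minimal sketch of the pattern, assuming the service names and cache path used elsewhere in this readme; `META_URL` is a placeholder for your metadata engine URL:

```
# Sketch only - substitute your own metadata engine URL
META_URL="sqlite3:///home/juicefs/myjuicefs.db"

# Stop the mount and S3 gateway services holding the volume open
systemctl stop juicefs.service juicefs-gateway.service

# Extract the volume UUID from the juicefs status JSON output
uuid=$(juicefs status "$META_URL" | jq -r '.Setting.UUID')

# Destroy the volume: deletes ALL objects in the data storage and ALL metadata entries
echo y | juicefs destroy "$META_URL" "$uuid"

# The local cache directory must be cleaned up manually
rm -rf /home/juicefs_cache/*
```

The concrete per-engine recipes follow.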
3258 |
3259 |
3260 | For sqlite3 metadata cache setup:
3261 |
3262 | ```
3263 | juicefs rmr /home/juicefs_mount/fio
3264 | uuid=$(juicefs status sqlite3:///home/juicefs/myjuicefs.db | jq -r '.Setting.UUID')
3265 | systemctl stop juicefs.service juicefs-gateway.service
3266 | echo y | juicefs destroy sqlite3:///home/juicefs/myjuicefs.db $uuid
3267 | rm -rf /home/juicefs_cache/*
3268 | rm -f /home/juicefs/myjuicefs.db
3269 | ```
3270 |
3271 | For Redis metadata cache setup:
3272 |
3273 | ```
3274 | juicefs rmr /home/juicefs_mount/fio
3275 | uuid=$(juicefs status redis://:password@localhost:6479/1 | jq -r '.Setting.UUID')
3276 | systemctl stop juicefs.service juicefs-gateway.service
3277 | echo y | juicefs destroy redis://:password@localhost:6479/1 $uuid
3278 | rm -rf /home/juicefs_cache/*
3279 | redis-cli -a password -h localhost -p 6479 flushall
3280 | ```
3281 |
3282 | For Redis metadata cache cleanup if only using one R2 bucket:
3283 |
3284 | ```
3285 | # remove Cloudflare R2 bucket metadata from bucket s3://juicefs/myjuicefs
3286 | aws s3 rm --recursive --profile r2 --endpoint-url=$url s3://$cfbucketname/myjuicefs
3287 | ```
3288 |
3289 | For Redis metadata cache cleanup with multiple R2 buckets in a sharded configuration, use this repo's `bulk-delete-buckets-r2.sh` script. The example below removes 61x R2 sharded buckets named `juicefs-shard-*`, from `juicefs-shard-0` to `juicefs-shard-60`, for the AWS CLI `r2` profile with the AWS endpoint URL `https://cfaccountid.r2.cloudflarestorage.com`:
3290 |
3291 | ```
3292 | ./bulk-delete-buckets-r2.sh r2 juicefs-shard- 61 https://cfaccountid.r2.cloudflarestorage.com
3293 | ```
3294 |
3295 | ```
3296 | echo y | juicefs destroy sqlite3:///home/juicefs/myjuicefs.db $uuid
3297 | 2022/05/25 04:22:02.572467 juicefs[25759] : Meta address: sqlite3:///home/juicefs/myjuicefs.db [interface.go:385]
3298 | volume name: myjuicefs
3299 | volume UUID: 8e5d920c-1aee-4c9c-ac37-feb8c924f4a2
3300 | data storage: s3://juicefs/myjuicefs/
3301 | used bytes: 13042229248
3302 | used inodes: 1222
3303 | WARNING: The target volume will be destoried permanently, including:
3304 | WARNING: 1. ALL objects in the data storage: s3://juicefs/myjuicefs/
3305 | WARNING: 2. ALL entries in the metadata engine: sqlite3:///home/juicefs/myjuicefs.db
3306 | Proceed anyway? [y/N]:
3307 | Deleted objects count: 4282
3308 | 2022/05/25 04:25:38.067123 juicefs[25759] : The volume has been destroyed! You may need to delete cache directory manually. [destroy.go:211]
3309 | ```
3310 |
3311 | # Backup JuiceFS Metadata Script
3312 |
3313 | Note that for Cloudflare R2, you need to [disable automatic metadata backups](https://juicefs.com/docs/community/how_to_setup_object_storage/#r2). You can instead use the `backup-juicefs-metadata.sh` shell script to set up a cronjob that backs up the JuiceFS mount's metadata.
3314 |
3315 | ## backup-juicefs-metadata.sh Usage:
3316 |
3317 | ```
3318 | ./backup-juicefs-metadata.sh
3319 | No arguments supplied. Please provide the metadata source as an argument.
3320 |
3321 | Examples:
3322 |
3323 | ./backup-juicefs-metadata.sh sqlite3:///home/juicefs/myjuicefs.db
3324 | ./backup-juicefs-metadata.sh redis://:password@localhost:6479/1
3325 | ```
3326 |
3327 | ## backup-juicefs-metadata.sh Example Backup
3328 |
3329 | The backup script uses the [`juicefs dump`](https://juicefs.com/docs/community/metadata_dump_load#recovery-and-migration) command to back up the JuiceFS mount's metadata. You can edit the script's backup directory variable `BACKUP_DIR="/home/juicefs_metadata_backups"` to point at your desired location. Ideally, back up to a path on the JuiceFS mount itself, i.e. `BACKUP_DIR="/home/juicefs_mount/juicefs_metadata_backups"`.
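
A dump produced by this script can later be restored into an **empty** metadata engine with [`juicefs load`](https://juicefs.com/docs/community/metadata_dump_load#recovery-and-migration). A minimal sketch, assuming the Redis metadata engine from this readme and the example dump file name shown below; `/tmp/meta-dump.json` is just a scratch path:

```
# Sketch only - decompress the backup first, since the script compresses its dumps
zcat /home/juicefs_mount/juicefs_metadata_backups/meta-dump-20230526081031.json.gz > /tmp/meta-dump.json

# Load the dump into an empty metadata engine
juicefs load redis://:password@localhost:6479/1 /tmp/meta-dump.json
```

If the backups live on the JuiceFS mount itself, copy the latest dump off the mount before tearing the filesystem down; otherwise the backup disappears with the volume.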
3330 |
3331 | ```
3332 | ./backup-juicefs-metadata.sh redis://:password@localhost:6479/1
3333 | ```
3334 | ```
3335 | ./backup-juicefs-metadata.sh redis://:password@localhost:6479/1
3336 | 2023/05/26 07:52:14.359874 juicefs[3792668] : Meta address: redis://:****@localhost:6479/1 [interface.go:401]
3337 | 2023/05/26 07:52:14.360591 juicefs[3792668] : Ping redis: 21.753µs [redis.go:2904]
3338 | 2023/05/26 07:52:14.360826 juicefs[3792668] : Secret key is removed for the sake of safety [redis.go:3236]
3339 | Dumped entries count: 0 / 0 [--------------------------------------------------------------] done
3340 | 2023/05/26 07:52:14.361536 juicefs[3792668] : Dump metadata into /home/juicefs_mount/juicefs_metadata_backups/meta-dump-20230526075214.json succeed [dump.go:76]
3341 | Backup successful!
3342 | Deleted backups older than 30 days.
3343 | Backup metadata file: /home/juicefs_mount/juicefs_metadata_backups/meta-dump-20230526081031.json.gz
3344 | ```
3345 |
3346 | Inspecting the JuiceFS mount's S3 Gateway:
3347 |
3348 | ```
3349 | aws --endpoint-url http://localhost:3777 s3 ls --recursive myjuicefs
3350 |
3351 | 2023-05-26 08:10:31 728 juicefs_metadata_backups/meta-dump-20230526081031.json.gz
3352 | ```
3353 |
3354 | Inspecting the JuiceFS mount's metadata backup file `/home/juicefs_mount/juicefs_metadata_backups/meta-dump-20230526081031.json.gz`:
3355 |
3356 | ```
3357 | zcat /home/juicefs_mount/juicefs_metadata_backups/meta-dump-20230526081031.json.gz | jq -r
3358 | ```
3359 |
3360 | ```
3361 | zcat /home/juicefs_mount/juicefs_metadata_backups/meta-dump-20230526081031.json.gz | jq -r
3362 | {
3363 |   "Setting": {
3364 |     "Name": "myjuicefs",
3365 |     "UUID": "UUID-UUID-UUID-UUID-UUID",
3366 |     "Storage": "s3",
3367 |     "Bucket": "https://juicefs-shard-%d.cf_account_id.r2.cloudflarestorage.com",
3368 |     "AccessKey": "cfaccesskey",
3369 |     "SecretKey": "removed",
3370 |     "BlockSize": 4096,
3371 |     "Compression": "none",
3372 |     "Shards": 61,
3373 |     "KeyEncrypted": true,
3374 |     "MetaVersion": 1
3375 |   },
3376 |   "Counters": {
3377 |     "usedSpace": 0,
3378 |     "usedInodes": 0,
3379 |     "nextInodes": 27001,
3380 |     "nextChunk": 30001,
3381 |     "nextSession": 14,
3382 |     "nextTrash": 0
3383 |   },
3384 |   "Sustained": [],
3385 |   "DelFiles": [],
3386 |   "FSTree": {
3387 |     "attr": {
3388 |       "inode": 1,
3389 |       "type": "directory",
3390 |       "mode": 511,
3391 |       "uid": 0,
3392 |       "gid": 0,
3393 |       "atime": 1685068161,
3394 |       "mtime": 1685105530,
3395 |       "ctime": 1685105530,
3396 |       "mtimensec": 446053614,
3397 |       "ctimensec": 446053614,
3398 |       "nlink": 2,
3399 |       "length": 0
3400 |     },
3401 |     "xattrs": [
3402 |       {
3403 |         "name": "lastBackup",
3404 |         "value": "2023-05-26T05:50:33-05:00"
3405 |       }
3406 |     ],
3407 |     "entries": {}
3408 |   },
3409 |   "Trash": {
3410 |     "attr": {
3411 |       "inode": 9223372032828243968,
3412 |       "type": "directory",
3413 |       "mode": 0,
3414 |       "uid": 0,
3415 |       "gid": 0,
3416 |       "atime": 0,
3417 |       "mtime": 0,
3418 |       "ctime": 0,
3419 |       "nlink": 1,
3420 |       "length": 0
3421 |     },
3422 |     "entries": {}
3423 |   }
3424 | }
3425 | ```
3426 |
3427 | ## JuiceFS Backup Metadata Cronjob
3428 |
3429 | Cronjob wrapper script `backup-juicefs-metadata-wrapper.sh`:
3430 |
3431 | ```bash
3432 | #!/bin/bash
3433 | /home/juicefs/backup-juicefs-metadata.sh redis://:password@localhost:6479/1
3434 | ```
3435 |
3436 | ```
3437 | chmod +x /home/juicefs/backup-juicefs-metadata-wrapper.sh
3438 | ```
3439 |
3440 | ```
3441 | 10 * * * * /home/juicefs/backup-juicefs-metadata-wrapper.sh >/dev/null 2>&1
3442 | ```
--------------------------------------------------------------------------------
/sharded-buckets-info-r2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Update PATH to include /usr/local/bin
4 | export PATH=$PATH:/usr/local/bin
5 |
6 | # Check if required arguments are passed
7 | if [ $# -eq 0 ]
8 | then
9 | echo "No arguments supplied. Please provide the following:"
10 | echo
11 | echo "aws cli profile name i.e. r2"
12 | echo "s3 sharded bucket prefix"
13 | echo "and total shard count as an argument."
14 | echo "r2 endpoint-url i.e. https://your_cf_account_id.r2.cloudflarestorage.com/"
15 | echo
16 | echo "Example: if your JuiceFS sharded bucket name prefix is:"
17 | echo "juicefs-shard-% for juicefs-shard-0, juicefs-shard-1 ... juicefs-shard-60 etc"
18 | echo
19 | echo "$0 r2 juicefs-shard- 61 https://your_cf_account_id.r2.cloudflarestorage.com/"
20 | exit 1
21 | fi
22 |
23 | AWS_PROFILE=$1
24 | BUCKET_PREFIX=$2
25 | SHARD_COUNT=$(($3-1))
26 | ENDPOINT=$4
27 |
28 | LOG_FILE="bucket_info.log"
29 | rm -f "$LOG_FILE"
30 |
31 | # Initialize total counters
32 | total_all_files=0
33 | total_all_size=0
34 |
35 | i=0
36 | while [ $i -le $SHARD_COUNT ]
37 | do
38 |   # Fetch the bucket listing from AWS
39 |   aws_output=$(aws s3 ls --recursive --profile "$AWS_PROFILE" --endpoint-url="$ENDPOINT" "s3://${BUCKET_PREFIX}$i")
40 |
41 |   # Compute the total number of files (grep -c . counts non-empty lines, so an empty bucket yields 0)
42 |   total_files=$(echo "$aws_output" | grep -c .)
43 |
44 |   # Compute the total size of all files (total+0 prints 0 instead of an empty string for an empty bucket)
45 |   total_size=$(echo "$aws_output" | awk '{ total += $3 } END { print total+0 }')
46 |
47 |   # Output the results
48 |   echo "Bucket: ${BUCKET_PREFIX}$i, Total Files: $total_files, Total Size: $total_size" | tee -a "$LOG_FILE"
49 |
50 |   # Update total counters
51 |   total_all_files=$((total_all_files + total_files))
52 |   total_all_size=$((total_all_size + total_size))
53 |
54 |   ((i++))
55 | done
56 |
57 | # Output total counters
58 | echo "Total for all buckets, Total Files: $total_all_files, Total Size: $total_all_size" | tee -a "$LOG_FILE"
59 |
--------------------------------------------------------------------------------