├── hooks
│   └── pre-commit
├── web
│   ├── src
│   │   ├── vite-env.d.ts
│   │   ├── assets
│   │   │   ├── LOGO.zip
│   │   │   └── logo.png
│   │   ├── main.tsx
│   │   ├── i18n
│   │   │   ├── index.ts
│   │   │   ├── zh.ts
│   │   │   └── en.ts
│   │   ├── index.css
│   │   ├── App.css
│   │   └── App.tsx
│   ├── favicon.ico
│   ├── public
│   │   └── favicon.ico
│   ├── tsconfig.node.json
│   ├── .gitignore
│   ├── index.html
│   ├── tsconfig.json
│   ├── package.json
│   └── vite.config.ts
├── src
│   ├── util
│   │   ├── mod.rs
│   │   └── http.rs
│   ├── store
│   │   ├── mod.rs
│   │   └── blob.rs
│   ├── config
│   │   ├── mod.rs
│   │   └── load_config.rs
│   ├── task_local
│   │   ├── mod.rs
│   │   └── macros.rs
│   ├── ui
│   │   ├── util.rs
│   │   ├── layer_detail.rs
│   │   ├── layers.rs
│   │   ├── image_detail.rs
│   │   ├── files.rs
│   │   └── mod.rs
│   ├── image
│   │   ├── mod.rs
│   │   ├── layer.rs
│   │   ├── oci_image.rs
│   │   └── docker.rs
│   ├── middleware.rs
│   ├── dist.rs
│   ├── error.rs
│   ├── controller.rs
│   └── main.rs
├── assets
│   ├── diving-web.png
│   └── diving-terminal.gif
├── entrypoint.sh
├── .gitignore
├── config.yml
├── .github
│   └── workflows
│       ├── test.yml
│       ├── upload_asset.sh
│       └── publish.yml
├── Makefile
├── Dockerfile
├── README-zh.md
├── .vscode
│   └── launch.json
├── Cargo.toml
├── README.md
└── LICENSE

/hooks/pre-commit:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | make fmt && make lint
--------------------------------------------------------------------------------
/web/src/vite-env.d.ts:
--------------------------------------------------------------------------------
1 | /// <reference types="vite/client" />
2 | 
--------------------------------------------------------------------------------
/web/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vicanso/diving-rs/HEAD/web/favicon.ico
--------------------------------------------------------------------------------
/src/util/mod.rs:
--------------------------------------------------------------------------------
1 | mod http;
2 | 
3 | pub use self::http::set_no_cache_if_not_exist;
4 | 
--------------------------------------------------------------------------------
/assets/diving-web.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vicanso/diving-rs/HEAD/assets/diving-web.png
--------------------------------------------------------------------------------
/web/public/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vicanso/diving-rs/HEAD/web/public/favicon.ico
--------------------------------------------------------------------------------
/web/src/assets/LOGO.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vicanso/diving-rs/HEAD/web/src/assets/LOGO.zip
--------------------------------------------------------------------------------
/web/src/assets/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vicanso/diving-rs/HEAD/web/src/assets/logo.png
--------------------------------------------------------------------------------
/assets/diving-terminal.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vicanso/diving-rs/HEAD/assets/diving-terminal.gif
--------------------------------------------------------------------------------
/src/store/mod.rs:
--------------------------------------------------------------------------------
1 | mod blob;
2 | 
3 | pub use blob::{clear_blob_files, get_blob_from_file,
save_blob_to_file}; 4 | -------------------------------------------------------------------------------- /entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | if [ "${1:0:1}" = '-' ]; then 5 | set -- diving "$@" 6 | fi 7 | 8 | exec "$@" -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # These are backup files generated by rustfmt 2 | **/*.rs.bk 3 | 4 | 5 | # Added by cargo 6 | 7 | /target 8 | node_modules 9 | dist -------------------------------------------------------------------------------- /src/config/mod.rs: -------------------------------------------------------------------------------- 1 | mod load_config; 2 | 3 | pub use self::load_config::{ 4 | get_highest_user_wasted_percent, get_highest_wasted_bytes, get_layer_path, 5 | get_lowest_efficiency, must_load_config, 6 | }; 7 | -------------------------------------------------------------------------------- /web/tsconfig.node.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "composite": true, 4 | "module": "ESNext", 5 | "moduleResolution": "Node", 6 | "allowSyntheticDefaultImports": true 7 | }, 8 | "include": ["vite.config.ts"] 9 | } 10 | -------------------------------------------------------------------------------- /web/src/main.tsx: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import ReactDOM from "react-dom/client"; 3 | import App from "./App"; 4 | import "./index.css"; 5 | 6 | ReactDOM.createRoot(document.getElementById("root") as HTMLElement).render( 7 | 8 | 9 | , 10 | ); 11 | -------------------------------------------------------------------------------- /config.yml: -------------------------------------------------------------------------------- 1 | # default is ~/.diving/layers 2 | layer_path: /opt/diving/layers 3 | # default is 90d 4 | layer_ttl: 180d 5 | # no default value 6 | # threads: 2 7 | # If the efficiency is measured below X%, mark as failed. 8 | lowest_efficiency: 0.95 9 | # If the amount of wasted space is at least X or larger than X, mark as failed. 10 | highest_wasted_bytes: 20MB -------------------------------------------------------------------------------- /web/.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | yarn-debug.log* 6 | yarn-error.log* 7 | pnpm-debug.log* 8 | lerna-debug.log* 9 | 10 | node_modules 11 | dist 12 | dist-ssr 13 | *.local 14 | 15 | # Editor directories and files 16 | .vscode/* 17 | !.vscode/extensions.json 18 | .idea 19 | .DS_Store 20 | *.suo 21 | *.ntvs* 22 | *.njsproj 23 | *.sln 24 | *.sw? 25 | -------------------------------------------------------------------------------- /src/task_local/mod.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | pub mod macros; 3 | 4 | use nanoid::nanoid; 5 | pub use tracing::info; 6 | 7 | pub fn clone_value_from_task_local(value: &T) -> T 8 | where 9 | T: Clone, 10 | { 11 | value.clone() 12 | } 13 | 14 | tokio::task_local! 
{
15 |     pub static TRACE_ID: String;
16 | }
17 | 
18 | pub fn generate_trace_id() -> String {
19 |     nanoid!(6)
20 | }
21 | 
--------------------------------------------------------------------------------
/src/ui/util.rs:
--------------------------------------------------------------------------------
1 | use ratatui::{prelude::*, widgets::*};
2 | 
3 | use unicode_width::UnicodeWidthStr;
4 | 
5 | // Calculate the display width of a string (CJK-aware)
6 | pub fn get_width(str: &str) -> u16 {
7 |     UnicodeWidthStr::width_cjk(str) as u16
8 | }
9 | 
10 | // Create a bordered block with a bold title
11 | pub fn create_block(title: &str) -> Block<'_> {
12 |     Block::default().borders(Borders::ALL).title(Span::styled(
13 |         title,
14 |         Style::default().add_modifier(Modifier::BOLD),
15 |     ))
16 | }
17 | 
--------------------------------------------------------------------------------
/web/src/i18n/index.ts:
--------------------------------------------------------------------------------
1 | import en from "./en";
2 | import zh from "./zh";
3 | 
4 | function i18nGet(name: string) {
5 |   if (window.navigator.language?.includes("zh")) {
6 |     // eslint-disable-next-line @typescript-eslint/ban-ts-comment
7 |     // @ts-ignore
8 |     return zh[name] || "";
9 |   }
10 |   // eslint-disable-next-line @typescript-eslint/ban-ts-comment
11 |   // @ts-ignore
12 |   return en[name] || "";
13 | }
14 | export default i18nGet;
15 | 
--------------------------------------------------------------------------------
/web/index.html:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | 
4 | 
5 | 
6 | 
7 | 
8 | Diving
9 | 
10 | 
11 | 
12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /src/image/mod.rs: -------------------------------------------------------------------------------- 1 | mod docker; 2 | mod layer; 3 | mod oci_image; 4 | 5 | pub use docker::{ 6 | analyze_docker_image, parse_image_info, DockerAnalyzeResult, DockerAnalyzeSummary, 7 | }; 8 | pub use layer::{ 9 | get_file_content_from_layer, get_file_content_from_tar, get_file_size_from_tar, 10 | get_files_from_layer, 11 | }; 12 | pub use oci_image::{ 13 | convert_files_to_file_tree, find_file_tree_item, FileTreeItem, ImageConfig, ImageFileInfo, 14 | ImageIndex, ImageLayer, ImageManifest, ImageManifestConfig, Op, 15 | MEDIA_TYPE_DOCKER_SCHEMA2_MANIFEST, MEDIA_TYPE_IMAGE_INDEX, MEDIA_TYPE_MANIFEST_LIST, 16 | }; 17 | -------------------------------------------------------------------------------- /web/src/index.css: -------------------------------------------------------------------------------- 1 | :root { 2 | font-family: Inter, system-ui, Avenir, Helvetica, Arial, sans-serif; 3 | line-height: 1.5; 4 | font-weight: 400; 5 | 6 | color-scheme: light dark; 7 | 8 | font-synthesis: none; 9 | text-rendering: optimizeLegibility; 10 | -webkit-font-smoothing: antialiased; 11 | -moz-osx-font-smoothing: grayscale; 12 | -webkit-text-size-adjust: 100%; 13 | } 14 | 15 | a { 16 | font-weight: 500; 17 | color: #646cff; 18 | text-decoration: inherit; 19 | } 20 | a:hover { 21 | color: #535bf2; 22 | } 23 | 24 | body { 25 | margin: 0; 26 | min-width: 320px; 27 | min-height: 100vh; 28 | } 29 | -------------------------------------------------------------------------------- /web/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ESNext", 4 | "useDefineForClassFields": true, 5 | "lib": ["DOM", "DOM.Iterable", "ESNext"], 6 | "allowJs": false, 7 | "skipLibCheck": true, 8 | "esModuleInterop": false, 9 | "allowSyntheticDefaultImports": true, 10 | "strict": true, 11 | "forceConsistentCasingInFileNames": true, 12 | "module": "ESNext", 13 | "moduleResolution": "Node", 14 | "resolveJsonModule": true, 15 | "isolatedModules": true, 16 | "noEmit": true, 17 | "jsx": "react-jsx" 18 | }, 19 | "include": ["src"], 20 | "references": [{ "path": "./tsconfig.node.json" }] 21 | } 22 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: "publish" 2 | on: 3 | push: 4 | # tags: ["v[0-9]+.[0-9]+.[0-9]+*"] 5 | branches: [ main ] 6 | 7 | env: 8 | GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} 9 | CARGO_TERM_COLOR: always 10 | 11 | jobs: 12 | windows: 13 | runs-on: windows-latest 14 | defaults: 15 | run: 16 | shell: bash 17 | steps: 18 | - uses: actions/checkout@v3 19 | - name: setup node 20 | uses: actions/setup-node@v3 21 | - uses: actions-rs/toolchain@v1 22 | with: 23 | toolchain: stable 24 | - name: build-web 25 | run: make build-web 26 | - name: release 27 | run: make release -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | lint: 2 | cargo clippy 3 | 4 | fmt: 5 | cargo fmt 6 | 7 | build-web: 8 | rm -rf dist \ 9 | && cd web \ 10 | && yarn install --network-timeout 600000 && yarn build \ 11 | && cp -rf dist ../ 12 | 13 | dev: 14 | cargo run -- redis:alpine?arch=amd64 15 | dev-web: 16 | cargo watch -w src 
-x 'run -- --mode=web' 17 | dev-docker: 18 | cargo run -- docker://redis:alpine 19 | dev-ci: 20 | CI=true cargo run -- redis:alpine?arch=amd64 21 | 22 | udeps: 23 | cargo +nightly udeps 24 | 25 | # 如果要使用需注释 profile.release 中的 strip 26 | bloat: 27 | cargo bloat --release --crates 28 | 29 | outdated: 30 | cargo outdated 31 | 32 | release: 33 | cargo build --release 34 | ls -lh target/release 35 | 36 | hooks: 37 | cp hooks/* .git/hooks/ -------------------------------------------------------------------------------- /web/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "web", 3 | "private": true, 4 | "version": "0.0.0", 5 | "type": "module", 6 | "scripts": { 7 | "format": "prettier --write src/*.ts src/*.tsx src/**/*.ts src/**/*.tsx src/**/**/*.tsx src/*.css", 8 | "dev": "vite", 9 | "build": "tsc && vite build", 10 | "preview": "vite preview" 11 | }, 12 | "dependencies": { 13 | "antd": "^5.16.0", 14 | "axios": "^1.6.8", 15 | "pretty-bytes": "^6.1.1", 16 | "react": "^18.2.0", 17 | "react-dom": "^18.2.0" 18 | }, 19 | "devDependencies": { 20 | "@types/react": "^18.2.74", 21 | "@types/react-dom": "^18.2.24", 22 | "@vitejs/plugin-react-swc": "^3.6.0", 23 | "prettier": "^3.2.5", 24 | "typescript": "^5.4.3", 25 | "vite": "^5.2.8" 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /web/vite.config.ts: -------------------------------------------------------------------------------- 1 | import { defineConfig } from 'vite' 2 | import react from '@vitejs/plugin-react-swc' 3 | 4 | // https://vitejs.dev/config/ 5 | export default defineConfig({ 6 | plugins: [react()], 7 | base: "./", 8 | build: { 9 | chunkSizeWarningLimit: 1024 * 1024, 10 | rollupOptions: { 11 | output: { 12 | manualChunks: { 13 | common: [ 14 | "axios", 15 | "pretty-bytes", 16 | ], 17 | ui: [ 18 | "react", 19 | "react-dom", 20 | ], 21 | antd: [ 22 | "antd", 23 | ] 24 | }, 25 | }, 26 | }, 27 | }, 28 | server: { 29 | proxy: { 30 | "/api": { 31 | target: "http://127.0.0.1:7001", 32 | }, 33 | }, 34 | } 35 | }) 36 | -------------------------------------------------------------------------------- /src/task_local/macros.rs: -------------------------------------------------------------------------------- 1 | // task local log 2 | #[macro_export] 3 | macro_rules! tl_info { 4 | ($($arg:tt)*) => ( 5 | let trace_id = TRACE_ID.with(clone_value_from_task_local); 6 | info!( 7 | traceId = trace_id, 8 | $($arg)* 9 | ) 10 | ); 11 | } 12 | 13 | #[macro_export] 14 | macro_rules! tl_error { 15 | ($($arg:tt)*) => ( 16 | let trace_id = TRACE_ID.with(clone_value_from_task_local); 17 | error!( 18 | traceId = trace_id, 19 | $($arg)* 20 | ) 21 | ); 22 | } 23 | 24 | #[macro_export] 25 | macro_rules! 
tl_warn { 26 | ($($arg:tt)*) => ( 27 | let trace_id = TRACE_ID.with(clone_value_from_task_local); 28 | warn!( 29 | traceId = trace_id, 30 | $($arg)* 31 | ) 32 | ); 33 | } 34 | -------------------------------------------------------------------------------- /web/src/i18n/zh.ts: -------------------------------------------------------------------------------- 1 | export default { 2 | analyzeButton: "开始分析", 3 | imageInputPlaceholder: "请输入镜像名称", 4 | imageAnalyzeDesc: "请输入需要分析镜像名称,例如:", 5 | imageSlowDesc: "首次分析镜像需要先下载分层数据,因此会较慢(可能大于10分钟)", 6 | imageSummaryTitle: "镜像概要", 7 | imageScoreLabel: "分数", 8 | imageSizeLabel: "镜像大小", 9 | otherLayerSizeLabel: "其它层总大小(不包括第一层)", 10 | wastedSizeLabel: "浪费的空间大小", 11 | osArchLabel: "系统/架构", 12 | createdLabel: "创建于", 13 | commandLabel: "命令", 14 | layerContentTitle: "分层数据内容", 15 | permissionLabel: "权限", 16 | sizeLabel: "大小", 17 | fileTreeLabel: "目录树", 18 | layerLabel: "分层", 19 | modificationLabel: "更新或删除", 20 | expandLabel: "展开所有目录", 21 | keywordsLabel: "关键字", 22 | wastedSummaryTitle: "浪费的空间概要", 23 | totalSizeLabel: "总大小", 24 | countLabel: "次数", 25 | pathLabel: "路径", 26 | latestAnalyzeImagesTitle: "最近分析的镜像列表", 27 | modifiedAddedLargeFileTitle: "修改或新增的大文件", 28 | }; 29 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:18-alpine as webbuilder 2 | 3 | COPY . /diving-rs 4 | RUN apk update \ 5 | && apk add git make \ 6 | && cd /diving-rs \ 7 | && make build-web 8 | 9 | FROM rust:alpine as builder 10 | 11 | COPY --from=webbuilder /diving-rs /diving-rs 12 | 13 | RUN apk update \ 14 | && apk add git make build-base pkgconfig 15 | RUN rustup target list --installed 16 | RUN cd /diving-rs \ 17 | && make release 18 | 19 | FROM alpine 20 | 21 | EXPOSE 7001 22 | 23 | # tzdata 安装所有时区配置或可根据需要只添加所需时区 24 | 25 | RUN addgroup -g 1000 rust \ 26 | && adduser -u 1000 -G rust -s /bin/sh -D rust \ 27 | && apk add --no-cache ca-certificates tzdata 28 | 29 | COPY --from=builder /diving-rs/target/release/diving /usr/local/bin/diving 30 | COPY --from=builder /diving-rs/entrypoint.sh /entrypoint.sh 31 | 32 | ENV RUST_ENV=production 33 | 34 | USER rust 35 | 36 | WORKDIR /home/rust 37 | 38 | HEALTHCHECK --timeout=10s --interval=10s CMD [ "wget", "http://127.0.0.1:7001/ping", "-q", "-O", "-"] 39 | 40 | CMD ["diving", "--mode", "web", "--listen", "0.0.0.0:7001"] 41 | 42 | ENTRYPOINT ["/entrypoint.sh"] 43 | -------------------------------------------------------------------------------- /web/src/i18n/en.ts: -------------------------------------------------------------------------------- 1 | export default { 2 | analyzeButton: "Analyze", 3 | imageInputPlaceholder: "input the name of image", 4 | imageAnalyzeDesc: 5 | "Input the name of image to explore each layer in a docker image, for example:", 6 | imageSlowDesc: 7 | "The first time may be slow(more than 10 minutes) because download the layer data", 8 | imageSummaryTitle: "Image Summary", 9 | imageScoreLabel: "Efficiency Score", 10 | imageSizeLabel: "Image Size", 11 | otherLayerSizeLabel: "Other Layer Size", 12 | wastedSizeLabel: "Wasted Size", 13 | osArchLabel: "OS/ARCH", 14 | createdLabel: "Created", 15 | commandLabel: "Command", 16 | layerContentTitle: "Layer Content", 17 | permissionLabel: "Permission", 18 | sizeLabel: "Size", 19 | fileTreeLabel: "FileTree", 20 | layerLabel: "Layer", 21 | modificationLabel: "Modifications", 22 | expandLabel: "Expand", 23 | keywordsLabel: "Keywords", 24 | 
wastedSummaryTitle: "Wasted Summary", 25 | totalSizeLabel: "Total Size", 26 | countLabel: "Count", 27 | pathLabel: "Path", 28 | latestAnalyzeImagesTitle: "Latest Analyze Images", 29 | modifiedAddedLargeFileTitle: "Modified Or Add Large File", 30 | }; 31 | -------------------------------------------------------------------------------- /src/util/http.rs: -------------------------------------------------------------------------------- 1 | use axum::http::{header::HeaderName, HeaderMap, HeaderValue}; 2 | use std::str::FromStr; 3 | 4 | use crate::error::{HTTPError, HTTPResult}; 5 | 6 | /// 插入HTTP头 7 | pub fn insert_header( 8 | headers: &mut HeaderMap, 9 | name: &str, 10 | value: &str, 11 | ) -> HTTPResult<()> { 12 | // 如果失败则不设置 13 | let header_name = HeaderName::from_str(name) 14 | .map_err(|err| HTTPError::new_with_category(&err.to_string(), "invalidHeaderName"))?; 15 | let header_value = HeaderValue::from_str(value) 16 | .map_err(|err| HTTPError::new_with_category(&err.to_string(), "invalidHeaderValue"))?; 17 | headers.insert(header_name, header_value); 18 | Ok(()) 19 | } 20 | /// HTTP头不存在时才设置 21 | pub fn set_header_if_not_exist( 22 | headers: &mut HeaderMap, 23 | name: &str, 24 | value: &str, 25 | ) -> HTTPResult<()> { 26 | let current = headers.get(name); 27 | if current.is_some() { 28 | return Ok(()); 29 | } 30 | insert_header(headers, name, value) 31 | } 32 | 33 | /// 如果未设置cache-control,则设置为no-cache 34 | pub fn set_no_cache_if_not_exist(headers: &mut HeaderMap) { 35 | // 因为只会字符导致设置错误 36 | // 因此此处理不会出错 37 | let _ = set_header_if_not_exist(headers, "Cache-Control", "no-cache"); 38 | } 39 | -------------------------------------------------------------------------------- /src/middleware.rs: -------------------------------------------------------------------------------- 1 | use crate::{task_local::*, tl_info}; 2 | use axum::{body::Body, http::Request, middleware::Next, response::Response}; 3 | use axum_client_ip::InsecureClientIp; 4 | use chrono::Utc; 5 | 6 | use crate::task_local::{generate_trace_id, TRACE_ID}; 7 | use crate::util::set_no_cache_if_not_exist; 8 | 9 | pub async fn access_log( 10 | InsecureClientIp(ip): InsecureClientIp, 11 | req: Request, 12 | next: Next, 13 | ) -> Response { 14 | let started_at = Utc::now().timestamp_millis(); 15 | let path = req.uri().path().to_string(); 16 | let uri = req.uri().to_string(); 17 | let method = req.method().to_string(); 18 | let resp = next.run(req).await; 19 | if path != "/ping" { 20 | let status = resp.status().as_u16(); 21 | let cost = Utc::now().timestamp_millis() - started_at; 22 | tl_info!( 23 | category = "accessLog", 24 | ip = ip.to_string(), 25 | method, 26 | uri, 27 | status, 28 | cost, 29 | ); 30 | } 31 | resp 32 | } 33 | 34 | pub async fn entry(req: Request, next: Next) -> Response { 35 | TRACE_ID 36 | .scope(generate_trace_id(), async { 37 | let mut resp = next.run(req).await; 38 | 39 | let headers = resp.headers_mut(); 40 | set_no_cache_if_not_exist(headers); 41 | resp 42 | }) 43 | .await 44 | } 45 | -------------------------------------------------------------------------------- /README-zh.md: -------------------------------------------------------------------------------- 1 | # diving-rs 2 | 3 | 用于展示docker镜像的每一层文件列表,它更快更简单,使用rust语言开发。它支持两种模式:命令行(默认模式)以及web模式,无需依赖任何东西,包括docker客户端。 4 | 5 | `diving-rs`支持多个平台,包括:linux,windows,macos,可以在[release page](https://github.com/vicanso/diving-rs/releases)下载获取。 6 | 7 | 需要注意:由于镜像分层数据需要从镜像源下载,如docker hub,下载时长需要较长时间,如果超时则再次尝试即可,建议下载程序在本机执行。而对于私有化部署的镜像源,则可将diving的镜像部署运行在可访问镜像源的机器即可。 8 | 9 | ## 
config 10 | 11 | 默认配置文件为`~/.diving/config.yml`,其配置选项如下: 12 | 13 | - `layer_path`: 分层数据缓存的目录,默认为`~/.diving/layers` 14 | - `layer_ttl`: 分层数据缓存的有效期, 默认为`90d`,如果90天未再访问则该layer被清除 15 | 16 | ## terminal 17 | 18 | 镜像数据支持三种数据源模式,具体形式如下: 19 | 20 | - `registry` 简写的形式为docker registry,私有或其它的registry则使用完整地址 21 | - `docker` 基于本地安装了docker客户端的形式 22 | - `file` 基于本地导出的tar包 23 | 24 | ```bash 25 | diving redis:alpine 26 | 27 | diving quay.io/prometheus/node-exporter 28 | 29 | diving docker://redis:alpine 30 | 31 | diving file:///tmp/redis.tar 32 | 33 | CI=true diving redis:alpine 34 | ``` 35 | 36 | - `Current Layer Contents` 仅显示当前层的所有文件 37 | - `Press 1` 仅显示当前`修改或删除` 的文件 38 | - `Press 2` 仅显示当前层大于1MB的文件 39 | - `Press Esc or 0` 重置显示模式 40 | 41 | ![](./assets/diving-terminal.gif) 42 | 43 | ## web 44 | 45 | ```bash 46 | docker run -d --restart=always \ 47 | -p 7001:7001 \ 48 | -v $PWD/diving:/home/rust/.diving \ 49 | --name diving \ 50 | vicanso/diving 51 | ``` 52 | 53 | 需要注意,镜像非使用root运行,因此挂载的目录需要添加对应的读写权限,否则会启动失败。 54 | 55 | 在浏览器中打开`http://127.0.0.1:7001/`即可。 56 | 57 | ![](./assets/diving-web.png) -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "type": "lldb", 9 | "request": "launch", 10 | "name": "Debug executable 'diving'", 11 | "cargo": { 12 | "args": [ 13 | "build", 14 | "--bin=diving", 15 | "--package=diving" 16 | ], 17 | "filter": { 18 | "name": "diving", 19 | "kind": "bin" 20 | } 21 | }, 22 | "args": [], 23 | "cwd": "${workspaceFolder}" 24 | }, 25 | { 26 | "type": "lldb", 27 | "request": "launch", 28 | "name": "Debug unit tests in executable 'diving'", 29 | "cargo": { 30 | "args": [ 31 | "test", 32 | "--no-run", 33 | "--bin=diving", 34 | "--package=diving" 35 | ], 36 | "filter": { 37 | "name": "diving", 38 | "kind": "bin" 39 | } 40 | }, 41 | "args": [], 42 | "cwd": "${workspaceFolder}" 43 | } 44 | ] 45 | } -------------------------------------------------------------------------------- /src/ui/layer_detail.rs: -------------------------------------------------------------------------------- 1 | use chrono::{DateTime, Local, TimeZone}; 2 | use ratatui::{prelude::*, widgets::*}; 3 | 4 | use super::util; 5 | use crate::image::ImageLayer; 6 | 7 | pub struct DetailWidget<'a> { 8 | // 组件高度 9 | pub height: u16, 10 | // 组件 11 | pub widget: Paragraph<'a>, 12 | } 13 | pub struct DetailWidgetOption { 14 | pub width: u16, 15 | } 16 | // 创建layer详细信息的widget 17 | pub fn new_layer_detail_widget(layer: &ImageLayer, opt: DetailWidgetOption) -> DetailWidget<'_> { 18 | let cmd = layer.cmd.clone(); 19 | let detail_word_width = util::get_width(&cmd); 20 | let mut create_at = layer.created.clone(); 21 | if let Ok(value) = DateTime::parse_from_rfc3339(&layer.created) { 22 | create_at = Local 23 | .timestamp_opt(value.timestamp(), 0) 24 | .single() 25 | .unwrap() 26 | .to_rfc3339(); 27 | }; 28 | 29 | let paragraph = Paragraph::new(Line::from(vec![ 30 | Span::styled("Created:", Style::default().add_modifier(Modifier::BOLD)), 31 | Span::from(create_at), 32 | Span::styled("Command:", Style::default().add_modifier(Modifier::BOLD)), 33 | Span::from(cmd), 34 | ])) 35 | .block(util::create_block(" Layer Details ")) 36 | .alignment(Alignment::Left) 
37 |     .wrap(Wrap { trim: true });
38 |     // split the left panel
39 |     let mut detail_height = detail_word_width / opt.width;
40 |     if detail_word_width % opt.width != 0 {
41 |         detail_height += 1;
42 |     }
43 |     // title + command tag + created tag + created time + border bottom
44 |     detail_height += 5;
45 |     DetailWidget {
46 |         height: detail_height,
47 |         widget: paragraph,
48 |     }
49 | }
50 | 
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "diving"
3 | version = "0.7.2"
4 | authors = ["Tree Xie "]
5 | edition = "2021"
6 | keywords = ["diving", "image", "dive"]
7 | license = "Apache-2.0"
8 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
9 | 
10 | [dependencies]
11 | axum = "0.8.4"
12 | axum-client-ip = "0.7.0"
13 | bytes = "1.10.1"
14 | bytesize = { version = "2.0.1", features = ["serde"] }
15 | chrono = "0.4.41"
16 | clap = { version = "4.5.45", features = ["derive"] }
17 | colored = "3.0.0"
18 | config = "0.15.14"
19 | crossterm = "0.29.0"
20 | futures = "0.3.31"
21 | glob = "0.3.3"
22 | hex = "0.4.3"
23 | home = "0.5.11"
24 | http = "1.3.1"
25 | humantime = "2.2.0"
26 | libflate = "2.1.0"
27 | lru = "0.16.0"
28 | mime = "0.3.17"
29 | mime_guess = "2.0.5"
30 | nanoid = "0.4.0"
31 | once_cell = "1.21.3"
32 | pad = "0.1.6"
33 | ratatui = "0.30.0-alpha.5"
34 | regex = "1.11.2"
35 | reqwest = { version = "0.12.23", default-features = false, features = [
36 |     "rustls-tls",
37 |     "json",
38 | ] }
39 | rust-embed = { version = "8.7.2", features = ["compression", "mime-guess"] }
40 | serde = { version = "1.0.219", features = ["derive"] }
41 | serde_json = "1.0.143"
42 | serde_repr = "0.1.20"
43 | signal-hook = { version = "0.3.18", default-features = false }
44 | signal-hook-registry = "1.4.6"
45 | snafu = "0.8.9"
46 | substring = "1.4.5"
47 | tar = "0.4.44"
48 | tempfile = "3.21.0"
49 | textwrap = "0.16.2"
50 | time = "0.3.43"
51 | tokio = { version = "1.47.1", features = [
52 |     "macros",
53 |     "rt",
54 |     "rt-multi-thread",
55 |     "net",
56 |     "signal",
57 |     "fs",
58 | ] }
59 | tokio-cron-scheduler = "0.14.0"
60 | tower = { version = "0.5.2", features = ["timeout"] }
61 | tracing = "0.1.41"
62 | tracing-subscriber = { version = "0.3.19", features = ["local-time"] }
63 | unicode-width = "0.2.0"
64 | unix_mode = "0.1.4"
65 | zstd = "0.13.3"
66 | 
67 | [profile.release]
68 | codegen-units = 1
69 | lto = true
70 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # diving-rs
2 | 
3 | [中文](./README-zh.md)
4 | 
5 | Explore each layer in a docker image. It is fast and simple, developed with Rust. There are two modes: terminal (default) and web.
6 | 
7 | It does not depend on anything, including the docker client.
8 | 
9 | It supports multiple platforms: linux, windows and macos; you can get it from the [release page](https://github.com/vicanso/diving-rs/releases).
10 | 
11 | Note: Since the layer data needs to be downloaded from the source, such as Docker Hub, it may take a long time; if it times out, please try again. It is recommended to run the download locally. For image sources deployed privately, you can deploy the Diving image on a machine that can access the image source.
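
For example, a privately deployed registry can be analyzed by passing the full image reference (the registry host below is only a placeholder):

```bash
# run this on a machine that can reach the private registry
diving registry.example.com/my-team/my-app:latest
```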
12 | 
13 | ## config
14 | 
15 | The config file is `~/.diving/config.yml`; the options are:
16 | 
17 | - `layer_path`: The path of the layer cache, default is `~/.diving/layers`
18 | - `layer_ttl`: The ttl of a layer, default is `90d`. A layer will be purged if it is not accessed again for 90 days
19 | 
20 | ## terminal
21 | 
22 | Three data source modes are supported for analyzing an image. The specific forms are as follows:
23 | 
24 | - `registry` get the image from a docker registry or another registry
25 | - `docker` get the image from the local docker client
26 | - `file` get the image from a local tar file
27 | 
28 | ```bash
29 | diving redis:alpine
30 | 
31 | diving quay.io/prometheus/node-exporter
32 | 
33 | diving docker://redis:alpine
34 | 
35 | diving file:///tmp/redis.tar
36 | 
37 | CI=true diving redis:alpine
38 | ```
39 | 
40 | - `Current Layer Contents` only show the files of the current layer
41 | - `Press 1` only show the `Modified/Removed` files of the current layer
42 | - `Press 2` only show the files >= 1MB
43 | - `Press Esc or 0` reset the view mode
44 | 
45 | ![](./assets/diving-terminal.gif)
46 | 
47 | ## web
48 | 
49 | ```bash
50 | docker run -d --restart=always \
51 |   -p 7001:7001 \
52 |   -v $PWD/diving:/home/rust/.diving \
53 |   --name diving \
54 |   vicanso/diving
55 | ```
56 | 
57 | Note that the container does not run as root, so the mounted directory needs read and write permission (r+w), otherwise it will fail to start.
58 | 
59 | Open `http://127.0.0.1:7001/` in the browser.
60 | 
61 | ![](./assets/diving-web.png)
--------------------------------------------------------------------------------
/src/dist.rs:
--------------------------------------------------------------------------------
1 | use axum::response::{IntoResponse, Response};
2 | use hex::encode;
3 | use http::{header, StatusCode};
4 | use rust_embed::{EmbeddedFile, RustEmbed};
5 | use std::convert::From;
6 | 
7 | #[derive(RustEmbed)]
8 | #[folder = "dist/"]
9 | struct Assets;
10 | 
11 | pub struct StaticFile(Option<EmbeddedFile>);
12 | 
13 | impl IntoResponse for StaticFile {
14 |     fn into_response(self) -> Response {
15 |         if self.0.is_none() {
16 |             return StatusCode::NOT_FOUND.into_response();
17 |         }
18 |         // file is guaranteed to be Some at this point
19 |         let file = self.0.unwrap();
20 |         // the hash is generated from the file content
21 |         let str = &encode(file.metadata.sha256_hash())[0..8];
22 |         let mime_type = file.metadata.mimetype();
23 |         // length + part of the hash
24 |         let entity_tag = format!(r#""{:x}-{str}""#, file.data.len());
25 |         // html is the entry point of the web page; to avoid serving stale content after an update
26 |         // its max-age is set to 0
27 |         // other js and css files carry a version in their names, so they are not affected
28 |         let max_age = if mime_type.contains("text/html") {
29 |             0
30 |         } else {
31 |             365 * 24 * 3600
32 |         };
33 | 
34 |         // use a shorter ttl for the cache server
35 |         let server_max_age = 600;
36 |         let s_max_age = if max_age > server_max_age {
37 |             Some(server_max_age)
38 |         } else {
39 |             None
40 |         };
41 | 
42 |         let mut max_age = format!("public, max-age={}", max_age);
43 |         if let Some(s_max_age) = s_max_age {
44 |             max_age = format!("{max_age}, s-maxage={s_max_age}");
45 |         }
46 |         // compression of static files is handled by the cache server in front
47 |         (
48 |             [
49 |                 // content type
50 |                 (header::CONTENT_TYPE, mime_type.to_string()),
51 |                 // why Last-Modified is not set:
52 |                 // https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching#heuristic_caching
53 |                 // e tag
54 |                 (header::ETAG, entity_tag),
55 |                 // max age
56 |                 (header::CACHE_CONTROL, max_age),
57 |             ],
58 |             file.data,
59 |         )
60 |             .into_response()
61 |     }
62 | }
63 | 
64 | // Get an embedded asset file
65 | fn get_asset(file_path: &str) -> Option<EmbeddedFile> {
66 |     Assets::get(file_path)
67 | }
68 | 
69 | // Get a static file response
70 | pub fn get_static_file(file_path: &str) -> StaticFile {
71 |     let file = get_asset(file_path);
72 | 
StaticFile(file) 73 | } 74 | -------------------------------------------------------------------------------- /src/ui/layers.rs: -------------------------------------------------------------------------------- 1 | use super::util; 2 | use crate::image::ImageLayer; 3 | use bytesize::ByteSize; 4 | use pad::PadStr; 5 | use ratatui::{prelude::*, widgets::*}; 6 | 7 | pub struct LayersWidget<'a> { 8 | // 组件高度 9 | pub height: u16, 10 | // 组件 11 | pub widget: Table<'a>, 12 | } 13 | pub struct LayersWidgetOption { 14 | pub is_active: bool, 15 | pub selected_layer: usize, 16 | } 17 | // 创建layer列表的widget 18 | pub fn new_layers_widget<'a>(layers: &[ImageLayer], opt: LayersWidgetOption) -> LayersWidget<'a> { 19 | let mut row_max_counts = [0, 0, 0]; 20 | let mut row_data_list = vec![]; 21 | // 生成表格数据,并计算每列最大宽度 22 | for (index, item) in layers.iter().enumerate() { 23 | let no = format!("{}", index + 1); 24 | // TODO 是否调整为1024 25 | let arr = vec![no, ByteSize(item.size).to_string(), item.cmd.clone()]; 26 | for (i, value) in arr.iter().enumerate() { 27 | if row_max_counts[i] < value.len() { 28 | row_max_counts[i] = value.len() 29 | } 30 | } 31 | row_data_list.push(arr) 32 | } 33 | 34 | let mut rows = vec![]; 35 | for (index, arr) in row_data_list.into_iter().enumerate() { 36 | let mut cells = vec![]; 37 | // 前两列填充空格 38 | for (i, value) in arr.into_iter().enumerate() { 39 | if i != 2 { 40 | cells.push(Cell::from(value.pad_to_width_with_alignment( 41 | row_max_counts[i], 42 | pad::Alignment::Right, 43 | ))); 44 | } else { 45 | cells.push(Cell::from(value)); 46 | } 47 | } 48 | let mut style = Style::default(); 49 | if index == opt.selected_layer { 50 | style = style.bg(Color::White).fg(Color::Black); 51 | } 52 | 53 | rows.push(Row::new(cells).style(style).height(1)) 54 | } 55 | 56 | let headers = ["Index", "Size", "Command"] 57 | .iter() 58 | .map(|h| Cell::from(*h).style(Style::default().add_modifier(Modifier::BOLD))); 59 | // title + header + border bottom 60 | let height = 3 + rows.len(); 61 | let header = Row::new(headers).height(1); 62 | // TODO 如何调整生命周期 63 | let mut title = " Layers "; 64 | if opt.is_active { 65 | title = " ● Layers "; 66 | } 67 | let widget = Table::new( 68 | rows, 69 | [ 70 | Constraint::Length(5), 71 | Constraint::Length(10), 72 | Constraint::Fill(1), 73 | ], 74 | ) 75 | .header(header) 76 | .block(util::create_block(title)); 77 | 78 | LayersWidget { 79 | height: height as u16, 80 | widget, 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use axum::{ 2 | http::{header, Method, StatusCode, Uri}, 3 | response::{IntoResponse, Response}, 4 | BoxError, Json, 5 | }; 6 | use http::HeaderValue; 7 | use serde::{Deserialize, Serialize}; 8 | use tracing::error; 9 | 10 | #[derive(Debug, Clone, Serialize, Deserialize)] 11 | #[serde(rename_all = "camelCase")] 12 | pub struct HTTPError { 13 | // 出错信息 14 | pub message: String, 15 | // 出错类型 16 | pub category: String, 17 | // 出错码 18 | pub code: String, 19 | // HTTP状态码 20 | pub status: u16, 21 | } 22 | 23 | pub type HTTPResult = Result; 24 | 25 | impl Default for HTTPError { 26 | fn default() -> Self { 27 | // 因为默认status为400,因此需要单独实现default 28 | HTTPError { 29 | message: "".to_string(), 30 | category: "".to_string(), 31 | // 默认使用400为状态码 32 | status: 400, 33 | code: "".to_string(), 34 | } 35 | } 36 | } 37 | 38 | impl HTTPError { 39 | pub fn new(message: &str) -> Self { 40 | Self { 41 | message: 
message.to_string(), 42 | ..Default::default() 43 | } 44 | } 45 | pub fn new_with_category(message: &str, category: &str) -> Self { 46 | Self { 47 | message: message.to_string(), 48 | category: category.to_string(), 49 | ..Default::default() 50 | } 51 | } 52 | pub fn new_with_category_status(message: &str, category: &str, status: u16) -> Self { 53 | Self { 54 | message: message.to_string(), 55 | category: category.to_string(), 56 | status, 57 | ..Default::default() 58 | } 59 | } 60 | } 61 | 62 | impl IntoResponse for HTTPError { 63 | fn into_response(self) -> Response { 64 | let status = match StatusCode::from_u16(self.status) { 65 | Ok(status) => status, 66 | Err(_) => StatusCode::BAD_REQUEST, 67 | }; 68 | // 对于出错设置为no-cache 69 | let mut res = Json(self).into_response(); 70 | res.headers_mut() 71 | .insert(header::CACHE_CONTROL, HeaderValue::from_static("no-cache")); 72 | (status, res).into_response() 73 | } 74 | } 75 | 76 | pub async fn handle_error( 77 | // `Method` and `Uri` are extractors so they can be used here 78 | method: Method, 79 | uri: Uri, 80 | // the last argument must be the error itself 81 | err: BoxError, 82 | ) -> HTTPError { 83 | error!("method:{}, uri:{}, error:{}", method, uri, err.to_string()); 84 | if err.is::() { 85 | return HTTPError::new_with_category_status("Request took too long", "timeout", 408); 86 | } 87 | HTTPError::new(&err.to_string()) 88 | } 89 | -------------------------------------------------------------------------------- /.github/workflows/upload_asset.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Assure parameters are correct. 4 | if [ $# -lt 2 ]; then 5 | echo "Usage: upload_asset.sh " 6 | exit 1 7 | fi 8 | 9 | repo="vicanso/diving-rs" 10 | file_path=$1 11 | bearer=$2 12 | 13 | echo "Starting asset upload from $file_path to $repo." 14 | 15 | # Get the release for this tag. 16 | tag="$(git describe --tags --abbrev=0)" 17 | 18 | # Make sure the git tag could be determined. 19 | if [ -z "$tag" ]; then 20 | printf "\e[31mError: Unable to find git tag\e[0m\n" 21 | exit 1 22 | fi 23 | 24 | echo "Git tag: $tag" 25 | 26 | # Get the upload URL for the current tag. 27 | # 28 | # Since this might be a draft release, we can't just use the /releases/tags/:tag 29 | # endpoint which only shows published releases. 30 | echo "Checking for existing release..." 31 | upload_url=$(\ 32 | curl \ 33 | -H "Authorization: Bearer $bearer" \ 34 | "https://api.github.com/repos/$repo/releases" \ 35 | 2> /dev/null \ 36 | | grep -E "(upload_url|tag_name)" \ 37 | | paste - - \ 38 | | grep -e "tag_name\": \"$tag\"" \ 39 | | head -n 1 \ 40 | | sed 's/.*\(https.*assets\).*/\1/' \ 41 | ) 42 | 43 | # Create a new release if we didn't find one for this tag. 44 | if [ -z "$upload_url" ]; then 45 | echo "No release found." 46 | echo "Creating new release..." 47 | 48 | # Create new release. 49 | response=$( 50 | curl -f \ 51 | -X POST \ 52 | -H "Authorization: Bearer $bearer" \ 53 | -d "{\"tag_name\":\"$tag\",\"draft\":true}" \ 54 | "https://api.github.com/repos/$repo/releases" \ 55 | 2> /dev/null\ 56 | ) 57 | 58 | # Abort if the release could not be created. 59 | if [ $? -ne 0 ]; then 60 | printf "\e[31mError: Unable to create new release.\e[0m\n" 61 | exit 1; 62 | fi 63 | 64 | # Extract upload URL from new release. 65 | upload_url=$(\ 66 | echo "$response" \ 67 | | grep "upload_url" \ 68 | | sed 's/.*: "\(.*\){.*/\1/' \ 69 | ) 70 | fi 71 | 72 | # Propagate error if no URL for asset upload could be found. 
73 | if [ -z "$upload_url" ]; then
74 |     printf "\e[31mError: Unable to find release upload url.\e[0m\n"
75 |     exit 2
76 | fi
77 | 
78 | # Upload the file to the tag's release.
79 | file_name=${file_path##*/}
80 | echo "Uploading asset $file_name to $upload_url..."
81 | curl -v -f \
82 |     --http1.1 \
83 |     -X POST \
84 |     -H "Authorization: Bearer $bearer" \
85 |     -H "Content-Type: application/octet-stream" \
86 |     --data-binary @"$file_path" \
87 |     "$upload_url?name=$file_name" \
88 |     || { \
89 |         printf "\e[31mError: Unable to upload asset.\e[0m\n" \
90 |         && exit 3; \
91 |     }
92 | 
93 | printf "\e[32mSuccess\e[0m\n"
--------------------------------------------------------------------------------
/src/config/load_config.rs:
--------------------------------------------------------------------------------
1 | use bytesize::ByteSize;
2 | use config::{Config, File};
3 | use home::home_dir;
4 | use once_cell::sync::OnceCell;
5 | use serde::{Deserialize, Serialize};
6 | use std::{fs, path::PathBuf};
7 | 
8 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)]
9 | pub struct DivingConfig {
10 |     pub layer_path: Option<String>,
11 |     pub layer_ttl: Option<String>,
12 |     pub threads: Option<usize>,
13 |     pub lowest_efficiency: Option<f64>,
14 |     pub highest_wasted_bytes: Option<ByteSize>,
15 |     pub highest_user_wasted_percent: Option<f64>,
16 | }
17 | 
18 | pub fn must_load_config() -> &'static DivingConfig {
19 |     static DIVING_CONFIG: OnceCell<DivingConfig> = OnceCell::new();
20 |     DIVING_CONFIG.get_or_init(|| {
21 |         let config_file = get_config_path().join("config.yml");
22 |         if !config_file.exists() {
23 |             fs::File::create(config_file.clone()).unwrap();
24 |         }
25 |         Config::builder()
26 |             .add_source(File::from(config_file))
27 |             .build()
28 |             .unwrap()
29 |             .try_deserialize::<DivingConfig>()
30 |             .unwrap()
31 |     })
32 | }
33 | 
34 | // Get or initialize the config directory
35 | pub fn get_config_path() -> &'static PathBuf {
36 |     static CONFIG_PATH: OnceCell<PathBuf> = OnceCell::new();
37 |     CONFIG_PATH.get_or_init(|| {
38 |         let dir = home_dir().unwrap();
39 |         let config_path = dir.join(".diving");
40 |         fs::create_dir_all(config_path.clone()).unwrap();
41 |         config_path
42 |     })
43 | }
44 | 
45 | // Get or initialize the layer directory
46 | pub fn get_layer_path() -> &'static PathBuf {
47 |     // Read from config, fall back to the default if not set
48 |     static LAYER_PATH: OnceCell<PathBuf> = OnceCell::new();
49 |     LAYER_PATH.get_or_init(|| {
50 |         let config_path = get_config_path();
51 |         let config = must_load_config();
52 |         let file = config
53 |             .layer_path
54 |             .clone()
55 |             .unwrap_or_else(|| "layers".to_string());
56 |         let layer_path = config_path.join(file);
57 |         fs::create_dir_all(layer_path.clone()).unwrap();
58 |         layer_path
59 |     })
60 | }
61 | 
62 | pub fn get_lowest_efficiency() -> f64 {
63 |     let config = must_load_config();
64 |     if let Some(lowest_efficiency) = config.lowest_efficiency {
65 |         return lowest_efficiency;
66 |     }
67 |     0.95
68 | }
69 | 
70 | pub fn get_highest_wasted_bytes() -> u64 {
71 |     let config = must_load_config();
72 |     if let Some(highest_wasted_bytes) = config.highest_wasted_bytes {
73 |         return highest_wasted_bytes.0;
74 |     }
75 |     20 * 1024 * 1024
76 | }
77 | 
78 | pub fn get_highest_user_wasted_percent() -> f64 {
79 |     let config = must_load_config();
80 |     if let Some(highest_user_wasted_percent) = config.highest_user_wasted_percent {
81 |         return highest_user_wasted_percent;
82 |     }
83 |     0.2
84 | }
85 | 
--------------------------------------------------------------------------------
/src/store/blob.rs:
--------------------------------------------------------------------------------
1 | use bytes::Bytes;
2 | use chrono::{DateTime, Utc};
3 | use glob::glob;
4 
| use snafu::{ResultExt, Snafu}; 5 | use std::{path::PathBuf, time::Duration}; 6 | use tokio::fs; 7 | 8 | use crate::config::{get_layer_path, must_load_config}; 9 | use crate::error::HTTPError; 10 | 11 | pub type Result = std::result::Result; 12 | 13 | #[derive(Debug, Snafu)] 14 | pub enum Error { 15 | #[snafu(display("Write file {} fail: {}", file, source))] 16 | Write { 17 | source: std::io::Error, 18 | file: String, 19 | }, 20 | #[snafu(display("Read file {} fail: {}", file, source))] 21 | Read { 22 | source: std::io::Error, 23 | file: String, 24 | }, 25 | #[snafu(display("Glob {} fail: {}", path, source))] 26 | Pattern { 27 | source: glob::PatternError, 28 | path: String, 29 | }, 30 | #[snafu(display("IO {} fail: {}", file, source))] 31 | IO { 32 | source: std::io::Error, 33 | file: String, 34 | }, 35 | } 36 | 37 | impl From for HTTPError { 38 | fn from(err: Error) -> Self { 39 | // 对于部分error单独转换 40 | HTTPError::new_with_category(&err.to_string(), "blob") 41 | } 42 | } 43 | 44 | // 将blob数据保存至文件 45 | pub async fn save_blob_to_file(digest: &str, data: &Bytes) -> Result<()> { 46 | let file = get_layer_path().join(digest); 47 | fs::write(file.clone(), data).await.context(WriteSnafu { 48 | file: file.to_string_lossy(), 49 | }) 50 | } 51 | 52 | // 从文件中读取blob数据 53 | pub async fn get_blob_from_file(digest: &str) -> Result> { 54 | let file = get_layer_path().join(digest); 55 | fs::read(file.clone()).await.context(ReadSnafu { 56 | file: file.to_string_lossy(), 57 | }) 58 | } 59 | 60 | async fn clear_blob(file: PathBuf, expired: i64) -> Result<()> { 61 | let meta = fs::metadata(file.clone()).await.context(IOSnafu { 62 | file: file.to_string_lossy(), 63 | })?; 64 | // 优先用访问时间,再取修改时间 65 | let time = meta.accessed().or(meta.modified()).context(IOSnafu { 66 | file: file.to_string_lossy(), 67 | })?; 68 | 69 | // 未过期 70 | let t: DateTime = DateTime::from(time); 71 | if t.timestamp() > expired { 72 | return Ok(()); 73 | } 74 | fs::remove_file(file.clone()).await.context(IOSnafu { 75 | file: file.to_string_lossy(), 76 | })?; 77 | Ok(()) 78 | } 79 | 80 | // 启动时清除较早下载的blob 81 | pub async fn clear_blob_files() -> Result<()> { 82 | let path = get_layer_path().to_str().unwrap_or_default(); 83 | if path.is_empty() { 84 | return Ok(()); 85 | } 86 | let layer_ttl = must_load_config() 87 | .layer_ttl 88 | .clone() 89 | .unwrap_or_else(|| "90d".to_string()); 90 | let ttl = layer_ttl 91 | .parse::() 92 | .unwrap_or_else(|_| Duration::from_secs(90 * 24 * 3600).into()); 93 | 94 | let expired = Utc::now().timestamp() - ttl.as_secs() as i64; 95 | 96 | let value = path.to_string() + "/*"; 97 | for entry in (glob(value.as_str()).context(PatternSnafu { 98 | path: value.to_string(), 99 | })?) 
100 | .flatten() 101 | { 102 | // 清除失败忽略 103 | let _ = clear_blob(entry, expired).await; 104 | } 105 | Ok(()) 106 | } 107 | -------------------------------------------------------------------------------- /web/src/App.css: -------------------------------------------------------------------------------- 1 | .contentWrapper { 2 | width: 1200px; 3 | margin: auto; 4 | padding-bottom: 30px; 5 | } 6 | .header { 7 | background-color: #fff !important; 8 | color: #171b21 !important; 9 | border-bottom: 1px solid #f0f0f0; 10 | } 11 | .header.dark { 12 | background-color: #171b21 !important; 13 | color: #fff !important; 14 | border-bottom: none; 15 | } 16 | 17 | .logo { 18 | float: left; 19 | width: 150px; 20 | height: 31px; 21 | margin: 16px 24px 16px 0; 22 | line-height: 18px; 23 | font-weight: 900; 24 | cursor: pointer; 25 | } 26 | .logo span { 27 | font-size: 18px; 28 | } 29 | 30 | .search { 31 | float: left; 32 | margin-top: 10px; 33 | width: 900px; 34 | } 35 | 36 | .mtop30 { 37 | margin-top: 30px; 38 | } 39 | 40 | .fixSearch { 41 | position: fixed; 42 | width: 800px; 43 | left: 50%; 44 | top: 200px; 45 | margin-left: -400px; 46 | } 47 | .fixSearch .ant-input-affix-wrapper { 48 | padding: 15px; 49 | } 50 | .fixSearch .ant-btn { 51 | height: 55px; 52 | } 53 | .fixSearch .desc { 54 | text-align: center; 55 | margin-top: 40px; 56 | } 57 | .fixSearch .ant-typography { 58 | font-size: 16px; 59 | line-height: 2em; 60 | } 61 | 62 | ul.fileTree li:nth-child(odd) { 63 | background-color: rgb(241, 245, 249); 64 | } 65 | ul.fileTree.dark li:nth-child(odd) { 66 | background-color: rgba(0, 0, 0, 0.6); 67 | } 68 | ul.fileTree li { 69 | list-style: none; 70 | padding: 8px 10px; 71 | } 72 | 73 | .fileTree { 74 | margin: 0; 75 | padding: 0; 76 | } 77 | .fileTree .icon { 78 | padding: 0 5px; 79 | vertical-align: middle; 80 | } 81 | .fileTree .modified { 82 | color: #faad14; 83 | } 84 | .fileTree .removed { 85 | color: #ff4d4f; 86 | } 87 | .fileTree .download { 88 | display: none; 89 | } 90 | .fileTree li:hover .download { 91 | display: inline; 92 | margin-left: 10px; 93 | } 94 | .fileTree li span { 95 | display: inline-block; 96 | } 97 | .fileTree li span:nth-child(1) { 98 | width: 100px; 99 | } 100 | .fileTree li span:nth-child(2) { 101 | width: 50px; 102 | text-align: right; 103 | } 104 | .fileTree li span:nth-child(3) { 105 | width: 80px; 106 | text-align: right; 107 | margin-right: 20px; 108 | } 109 | .wastedList { 110 | margin: 0; 111 | padding: 0; 112 | } 113 | 114 | ul.wastedList li:nth-child(odd) { 115 | background-color: rgb(241, 245, 249); 116 | } 117 | ul.wastedList.dark li:nth-child(odd) { 118 | background-color: rgba(0, 0, 0, 0.6); 119 | } 120 | ul.wastedList li { 121 | list-style: none; 122 | padding: 8px 10px; 123 | } 124 | 125 | .wastedList li span { 126 | display: inline-block; 127 | } 128 | .wastedList li span:nth-child(1) { 129 | text-align: right; 130 | width: 80px; 131 | } 132 | .wastedList li span:nth-child(2) { 133 | text-align: right; 134 | width: 60px; 135 | margin-right: 30px; 136 | } 137 | 138 | .bigModifiedFileList { 139 | margin: 0; 140 | padding: 0; 141 | } 142 | ul.bigModifiedFileList li:nth-child(odd) { 143 | background-color: rgb(241, 245, 249); 144 | } 145 | ul.bigModifiedFileList.dark li:nth-child(odd) { 146 | background-color: rgba(0, 0, 0, 0.6); 147 | } 148 | ul.bigModifiedFileList li { 149 | list-style: none; 150 | padding: 8px 10px; 151 | } 152 | .bigModifiedFileList li span { 153 | display: inline-block; 154 | } 155 | .bigModifiedFileList li span { 156 | display: 
inline-block; 157 | } 158 | .bigModifiedFileList li span:nth-child(1) { 159 | text-align: right; 160 | width: 80px; 161 | } 162 | .bigModifiedFileList li span:nth-child(2) { 163 | text-align: right; 164 | width: 120px; 165 | margin-right: 30px; 166 | } 167 | 168 | .command { 169 | margin-bottom: 15px; 170 | } 171 | .command .ant-card-body { 172 | padding: 15px; 173 | } 174 | .command .bold { 175 | font-weight: 900; 176 | margin-right: 5px; 177 | } 178 | 179 | .analyzeImages { 180 | position: fixed; 181 | bottom: 0; 182 | right: 0; 183 | } 184 | -------------------------------------------------------------------------------- /src/controller.rs: -------------------------------------------------------------------------------- 1 | use crate::dist::{get_static_file, StaticFile}; 2 | use crate::error::HTTPResult; 3 | use crate::image::{ 4 | analyze_docker_image, get_file_content_from_layer, parse_image_info, DockerAnalyzeResult, 5 | }; 6 | use crate::store::get_blob_from_file; 7 | use axum::response::{IntoResponse, Response}; 8 | use axum::{extract::Query, routing::get, Json, Router}; 9 | use http::header; 10 | use http::Uri; 11 | use lru::LruCache; 12 | use once_cell::sync::OnceCell; 13 | use serde::{Deserialize, Serialize}; 14 | use std::num::NonZeroUsize; 15 | use std::sync::Mutex; 16 | 17 | const VERSION: &str = env!("CARGO_PKG_VERSION"); 18 | type JSONResult = HTTPResult>; 19 | 20 | pub fn new_router() -> Router { 21 | Router::new() 22 | .route("/ping", get(ping)) 23 | .route("/api/analyze", get(analyze)) 24 | .route("/api/file", get(get_file)) 25 | .route("/api/latest-images", get(get_latest_images)) 26 | .fallback(get(serve)) 27 | } 28 | 29 | async fn ping() -> &'static str { 30 | "pong" 31 | } 32 | 33 | #[derive(Debug, Deserialize)] 34 | #[serde(rename_all = "camelCase")] 35 | struct AnalyzeParams { 36 | image: String, 37 | } 38 | 39 | fn get_latest_image_cache() -> &'static Mutex> { 40 | static LATEST_IMAGE_CACHE: OnceCell>> = OnceCell::new(); 41 | LATEST_IMAGE_CACHE.get_or_init(|| { 42 | let c = LruCache::new(NonZeroUsize::new(5).unwrap()); 43 | Mutex::new(c) 44 | }) 45 | } 46 | fn add_to_latest_image_cache(name: &String) { 47 | if let Ok(mut cache) = get_latest_image_cache().lock() { 48 | cache.put(name.to_string(), "".to_string()); 49 | } 50 | } 51 | 52 | async fn analyze(Query(params): Query) -> JSONResult { 53 | let image_info = parse_image_info(¶ms.image); 54 | let result = analyze_docker_image(image_info).await?; 55 | add_to_latest_image_cache(¶ms.image); 56 | Ok(Json(result)) 57 | } 58 | 59 | #[derive(Debug, Serialize)] 60 | #[serde(rename_all = "camelCase")] 61 | struct LatestImageResp { 62 | pub images: Vec, 63 | pub version: String, 64 | } 65 | 66 | async fn get_latest_images() -> JSONResult { 67 | let mut image_list = vec![]; 68 | if let Ok(cache) = get_latest_image_cache().lock() { 69 | for (name, _) in cache.iter() { 70 | image_list.push(name.clone()); 71 | } 72 | } 73 | Ok(Json(LatestImageResp { 74 | images: image_list, 75 | version: VERSION.to_string(), 76 | })) 77 | } 78 | 79 | #[derive(Debug, Deserialize)] 80 | #[serde(rename_all = "camelCase")] 81 | struct GetFileParams { 82 | digest: String, 83 | media_type: String, 84 | file: String, 85 | } 86 | 87 | struct DownloadFile { 88 | name: String, 89 | content: Vec, 90 | } 91 | impl IntoResponse for DownloadFile { 92 | fn into_response(self) -> Response { 93 | let disposition = format!("attachment; filename=\"{}\"", self.name); 94 | ( 95 | [ 96 | ( 97 | header::CONTENT_TYPE, 98 | mime::APPLICATION_OCTET_STREAM.as_ref(), 99 
| ), 100 | (header::CONTENT_DISPOSITION, disposition.as_str()), 101 | ], 102 | self.content, 103 | ) 104 | .into_response() 105 | } 106 | } 107 | 108 | async fn get_file(Query(params): Query) -> HTTPResult { 109 | let buf = get_blob_from_file(¶ms.digest).await?; 110 | let content = get_file_content_from_layer(&buf, ¶ms.media_type, ¶ms.file).await?; 111 | let name = params.file.split('/').next_back().unwrap_or_default(); 112 | Ok(DownloadFile { 113 | name: name.to_string(), 114 | content, 115 | }) 116 | } 117 | 118 | async fn serve(uri: Uri) -> StaticFile { 119 | let mut filename = &uri.path()[1..]; 120 | // html无版本号,因此不设置缓存 121 | if filename.is_empty() { 122 | filename = "index.html"; 123 | } 124 | get_static_file(filename) 125 | } 126 | -------------------------------------------------------------------------------- /src/ui/image_detail.rs: -------------------------------------------------------------------------------- 1 | use bytesize::ByteSize; 2 | use pad::PadStr; 3 | use ratatui::{prelude::*, widgets::*}; 4 | 5 | use super::util; 6 | use crate::image::DockerAnalyzeSummary; 7 | 8 | pub struct ImageDetailWidget<'a> { 9 | pub widget: Paragraph<'a>, 10 | } 11 | 12 | pub struct ImageDetailWidgetOption { 13 | pub name: String, 14 | pub arch: String, 15 | pub os: String, 16 | pub total_size: u64, 17 | pub size: u64, 18 | pub summary: DockerAnalyzeSummary, 19 | } 20 | 21 | pub fn new_image_detail_widget<'a>(opt: ImageDetailWidgetOption) -> ImageDetailWidget<'a> { 22 | let total_size = opt.total_size; 23 | let size = opt.size; 24 | let wasted_size = opt.summary.wasted_size; 25 | let score = opt.summary.score; 26 | let wasted_list = opt.summary.wasted_list; 27 | 28 | // let mut wasted_list: Vec = vec![]; 29 | // let mut wasted_size = 0; 30 | // for file in opt.file_summary_list.iter() { 31 | // let mut found = false; 32 | // let info = &file.info; 33 | // wasted_size += info.size; 34 | // for wasted in wasted_list.iter_mut() { 35 | // if wasted.path == info.path { 36 | // found = true; 37 | // wasted.count += 1; 38 | // wasted.total_size += info.size; 39 | // } 40 | // } 41 | // if !found { 42 | // wasted_list.push(ImageFileWastedSummary { 43 | // path: info.path.clone(), 44 | // count: 1, 45 | // total_size: info.size, 46 | // }); 47 | // } 48 | // } 49 | // wasted_list.sort_by(|a, b| b.total_size.cmp(&a.total_size)); 50 | 51 | // let mut score = 100 - wasted_size * 100 / total_size; 52 | // // 有浪费空间,则分数-1 53 | // if wasted_size != 0 { 54 | // score -= 1; 55 | // } 56 | 57 | // 生成浪费空间的文件列表 58 | let space_span = Span::from(" "); 59 | let headers = ["Count", "Total Space", "Path"]; 60 | let mut name = opt.name; 61 | if !opt.arch.is_empty() { 62 | name += &format!("({}/{})", opt.os, opt.arch); 63 | } 64 | let mut spans_list = vec![ 65 | Line::from(vec![ 66 | Span::styled( 67 | "Image name: ", 68 | Style::default().add_modifier(Modifier::BOLD), 69 | ), 70 | Span::from(name), 71 | ]), 72 | Line::from(vec![ 73 | Span::styled( 74 | "Total Image size: ", 75 | Style::default().add_modifier(Modifier::BOLD), 76 | ), 77 | Span::from(format!("{} / {}", ByteSize(total_size), ByteSize(size),)), 78 | ]), 79 | Line::from(vec![ 80 | Span::styled( 81 | "Potential wasted space: ", 82 | Style::default().add_modifier(Modifier::BOLD), 83 | ), 84 | Span::from(ByteSize(wasted_size).to_string()), 85 | ]), 86 | Line::from(vec![ 87 | Span::styled( 88 | "Image efficiency score: ", 89 | Style::default().add_modifier(Modifier::BOLD), 90 | ), 91 | Span::from(format!("{score} %")), 92 | ]), 93 | Line::from(vec![]), 94 | 
Line::from(vec![ 95 | Span::styled(headers[0], Style::default().add_modifier(Modifier::BOLD)), 96 | space_span.clone(), 97 | Span::styled(headers[1], Style::default().add_modifier(Modifier::BOLD)), 98 | space_span.clone(), 99 | Span::styled(headers[2], Style::default().add_modifier(Modifier::BOLD)), 100 | ]), 101 | ]; 102 | 103 | let count_pad_width = headers[0].len(); 104 | let size_pad_width = headers[1].len(); 105 | 106 | for wasted in wasted_list.iter() { 107 | let count_str = format!("{}", wasted.count) 108 | .pad_to_width_with_alignment(count_pad_width, pad::Alignment::Right); 109 | let size_str = ByteSize(wasted.total_size) 110 | .to_string() 111 | .pad_to_width_with_alignment(size_pad_width, pad::Alignment::Right); 112 | spans_list.push(Line::from(vec![ 113 | Span::from(count_str), 114 | space_span.clone(), 115 | Span::from(size_str), 116 | space_span.clone(), 117 | Span::from(format!("/{}", wasted.path)), 118 | ])) 119 | } 120 | 121 | let widget = Paragraph::new(spans_list).block(util::create_block(" Image Details ")); 122 | ImageDetailWidget { widget } 123 | } 124 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: "publish" 2 | on: 3 | push: 4 | tags: ["v[0-9]+.[0-9]+.[0-9]+*"] 5 | # branches: [ main ] 6 | 7 | env: 8 | GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} 9 | CARGO_TERM_COLOR: always 10 | 11 | jobs: 12 | macos: 13 | runs-on: macos-latest 14 | steps: 15 | - uses: actions/checkout@v3 16 | - name: setup node 17 | uses: actions/setup-node@v3 18 | - uses: actions-rs/toolchain@v1 19 | with: 20 | toolchain: stable 21 | - name: build-web 22 | run: make build-web 23 | - name: Install target 24 | run: | 25 | rustup update 26 | rustup target add aarch64-apple-darwin 27 | rustup target add x86_64-apple-darwin 28 | - name: release 29 | run: | 30 | cargo build --release --target=aarch64-apple-darwin 31 | mv target/aarch64-apple-darwin/release/diving ./diving-darwin-aarch64 32 | ./.github/workflows/upload_asset.sh ./diving-darwin-aarch64 $GITHUB_TOKEN 33 | 34 | cargo build --release --target=x86_64-apple-darwin 35 | mv target/x86_64-apple-darwin/release/diving ./diving-darwin-x86 36 | ./.github/workflows/upload_asset.sh ./diving-darwin-x86 $GITHUB_TOKEN 37 | 38 | windows: 39 | runs-on: windows-latest 40 | defaults: 41 | run: 42 | shell: bash 43 | steps: 44 | - uses: actions/checkout@v3 45 | - name: setup node 46 | uses: actions/setup-node@v3 47 | - uses: actions-rs/toolchain@v1 48 | with: 49 | toolchain: stable 50 | - name: build-web 51 | run: make build-web 52 | - name: release 53 | run: make release 54 | - name: Upload Assets 55 | run: | 56 | mv target/release/diving.exe ./diving-windows.exe 57 | ./.github/workflows/upload_asset.sh ./diving-windows.exe $GITHUB_TOKEN 58 | 59 | linux: 60 | runs-on: ubuntu-latest 61 | steps: 62 | - uses: actions/checkout@v3 63 | - name: setup node 64 | uses: actions/setup-node@v3 65 | - name: build-web 66 | run: make build-web 67 | - name: release 68 | uses: addnab/docker-run-action@v3 69 | with: 70 | image: messense/rust-musl-cross:x86_64-musl 71 | options: -v ${{ github.workspace }}:/home/rust/src 72 | run: | 73 | make release 74 | mv target/x86_64-unknown-linux-musl/release/diving ./diving-linux-x86 75 | - name: Upload Assets 76 | run: | 77 | ./.github/workflows/upload_asset.sh ./diving-linux-x86 $GITHUB_TOKEN 78 | 79 | linux-aarch64: 80 | runs-on: ubuntu-latest 81 | steps: 82 | - uses: actions/checkout@v3 
83 | - name: setup node 84 | uses: actions/setup-node@v3 85 | - name: build-web 86 | run: make build-web 87 | - name: release 88 | uses: addnab/docker-run-action@v3 89 | with: 90 | image: messense/rust-musl-cross:aarch64-musl 91 | options: -v ${{ github.workspace }}:/home/rust/src 92 | run: | 93 | make release 94 | mv target/aarch64-unknown-linux-musl/release/diving ./diving-linux-aarch64 95 | - name: Upload Assets 96 | run: | 97 | ./.github/workflows/upload_asset.sh ./diving-linux-aarch64 $GITHUB_TOKEN 98 | docker: 99 | runs-on: ubuntu-latest 100 | timeout-minutes: 3600 101 | steps: 102 | - name: Change Swap Space 103 | run: | 104 | swapon --show=NAME | tail -n 1 105 | df -lh 106 | du -sh /usr/local/lib/android 107 | sudo rm -rf /usr/local/lib/android 108 | export SWAP_FILE=$(swapon --show=NAME | tail -n 1) 109 | sudo swapoff $SWAP_FILE 110 | sudo rm $SWAP_FILE 111 | export SWAP_FILE=/swapfile 112 | sudo fallocate -l 16G $SWAP_FILE 113 | sudo chmod 600 $SWAP_FILE 114 | sudo mkswap $SWAP_FILE 115 | sudo swapon $SWAP_FILE 116 | - name: Swap space report after modification 117 | shell: bash 118 | run: | 119 | echo "Memory and swap:" 120 | free -h 121 | echo 122 | swapon --show 123 | df -lh 124 | echo 125 | - name: Checkout 126 | uses: actions/checkout@v3 127 | - name: Docker meta 128 | id: meta 129 | uses: docker/metadata-action@v4 130 | with: 131 | images: | 132 | vicanso/diving 133 | tags: | 134 | type=semver,pattern={{version}} 135 | - name: Set up QEMU 136 | uses: docker/setup-qemu-action@v2 137 | - name: Set up Docker Buildx 138 | id: buildx 139 | uses: docker/setup-buildx-action@v2 140 | - name: Available platforms 141 | run: echo ${{ steps.buildx.outputs.platforms }} 142 | - name: Login to Docker Hub 143 | uses: docker/login-action@v2 144 | with: 145 | username: ${{ secrets.DOCKER_HUB_USERNAME }} 146 | password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} 147 | - name: Build and push 148 | id: docker_build 149 | uses: docker/build-push-action@v4 150 | with: 151 | platforms: linux/amd64, linux/arm64 152 | push: true 153 | tags: ${{ steps.meta.outputs.tags }} 154 | - name: Image digest 155 | run: | 156 | echo ${{ steps.docker_build.outputs.digest }} -------------------------------------------------------------------------------- /src/ui/files.rs: -------------------------------------------------------------------------------- 1 | use bytesize::ByteSize; 2 | use pad::PadStr; 3 | use ratatui::{prelude::*, widgets::*}; 4 | 5 | use crate::image::{FileTreeItem, Op}; 6 | 7 | use super::util; 8 | 9 | pub struct FilesWidgetOption { 10 | pub is_active: bool, 11 | pub selected_layer: usize, 12 | pub area: Rect, 13 | pub mode: u8, 14 | } 15 | 16 | pub struct FilesWidget<'a> { 17 | // 文件总数 18 | pub file_count: usize, 19 | // 组件 20 | pub files: List<'a>, 21 | // 文件列表展示区域 22 | pub files_area: Rect, 23 | pub block: Block<'a>, 24 | // block 展示区域 25 | pub block_area: Rect, 26 | pub content: Paragraph<'a>, 27 | // 内容展示区域 28 | pub content_area: Rect, 29 | } 30 | 31 | fn is_modified_or_removed(item: &FileTreeItem) -> bool { 32 | if item.op == Op::Removed || item.op == Op::Modified { 33 | return true; 34 | } 35 | // 如果子元素有此类型,也要展示 36 | for child in item.children.iter() { 37 | if is_modified_or_removed(child) { 38 | return true; 39 | } 40 | } 41 | false 42 | } 43 | fn add_to_file_tree_view( 44 | mode: u8, 45 | width_list: Vec, 46 | list: &mut Vec, 47 | items: &[FileTreeItem], 48 | is_last_list: Vec, 49 | ) -> usize { 50 | let mut count = 0; 51 | let space_span = Span::from(" "); 52 | let permission_width = 
width_list[0]; 53 | let id_width = width_list[1]; 54 | let size_width = width_list[2]; 55 | 56 | let get_file_mode_str = |mode: &str| -> String { 57 | mode.pad_to_width_with_alignment(permission_width, pad::Alignment::Middle) 58 | }; 59 | let get_id_str = 60 | |id: &str| -> String { id.pad_to_width_with_alignment(id_width, pad::Alignment::Right) }; 61 | let get_size_str = |size: u64| -> String { 62 | ByteSize(size) 63 | .to_string() 64 | .pad_to_width_with_alignment(size_width, pad::Alignment::Right) 65 | }; 66 | let get_padding_str = |list: &[bool], is_last: bool| -> String { 67 | let mut arr: Vec<String> = list 68 | .iter() 69 | .map(|is_last| if is_last.to_owned() { " " } else { "│ " }.to_string()) 70 | .collect(); 71 | if is_last { 72 | arr.push("└── ".to_string()); 73 | } else { 74 | arr.push("├── ".to_string()); 75 | } 76 | arr.join("") 77 | }; 78 | 79 | let max = items.len(); 80 | 81 | for (index, item) in items.iter().enumerate() { 82 | match mode { 83 | // only show modified or removed files 84 | 1 => { 85 | if !is_modified_or_removed(item) { 86 | continue; 87 | } 88 | } 89 | // only show files of at least 1MB 90 | 2 => { 91 | if item.size < 1024 * 1024 { 92 | continue; 93 | } 94 | } 95 | _ => {} 96 | } 97 | let mut style = Style::default(); 98 | match item.op { 99 | Op::Modified => style = style.fg(Color::Yellow), 100 | Op::Removed => style = style.fg(Color::Red), 101 | _ => {} 102 | } 103 | let id = format!("{}:{}", item.uid, item.gid); 104 | let is_last = index == max - 1; 105 | let padding = get_padding_str(&is_last_list, is_last); 106 | let mut name = item.name.clone(); 107 | if !item.link.is_empty() { 108 | name = format!("{name} → {}", item.link); 109 | } 110 | list.push(ListItem::new(Line::from(vec![ 111 | Span::styled(get_file_mode_str(&item.mode), style), 112 | space_span.clone(), 113 | Span::styled(get_id_str(&id), style), 114 | space_span.clone(), 115 | Span::styled(get_size_str(item.size), style), 116 | space_span.clone(), 117 | // padding 118 | Span::from(padding), 119 | Span::styled(name, style), 120 | ]))); 121 | count += 1; 122 | if !item.children.is_empty() { 123 | let mut tmp = is_last_list.clone(); 124 | tmp.push(is_last); 125 | 126 | // if none of the children qualified for the list, 127 | // remove the current element as well 128 | let child_append_count = 129 | add_to_file_tree_view(mode, width_list.clone(), list, &item.children, tmp); 130 | if child_append_count == 0 { 131 | list.pop(); 132 | count -= 1; 133 | } 134 | } 135 | } 136 | count 137 | } 138 | 139 | pub fn new_files_widget( 140 | file_tree_list: &[Vec<FileTreeItem>], 141 | opt: FilesWidgetOption, 142 | ) -> FilesWidget<'_> { 143 | // TODO: figure out how to adjust the lifetimes 144 | let mut title = " Current Layer Contents "; 145 | if opt.is_active { 146 | title = " ● Current Layer Contents "; 147 | } 148 | let chunks = Layout::default() 149 | .direction(Direction::Vertical) 150 | .margin(1) 151 | .constraints([Constraint::Length(2), Constraint::Length(u16::MAX)].as_ref()) 152 | .split(opt.area); 153 | 154 | let space_span = Span::from(" "); 155 | let name_list = ["Permission", " UID:GID ", " Size", "FileTree"]; 156 | let mode_tips = format!( 157 | "Esc|0: All 1: Modified/Removed 2: File >= 1MB | Current: {}", 158 | opt.mode 159 | ); 160 | let content = Paragraph::new(vec![ 161 | Line::from(vec![Span::styled( 162 | mode_tips, 163 | Style::default().add_modifier(Modifier::BOLD), 164 | )]), 165 | Line::from(vec![ 166 | Span::from(name_list[0]), 167 | space_span.clone(), 168 | Span::from(name_list[1]), 169 | space_span.clone(), 170 | Span::from(name_list[2]), 171 | space_span.clone(), 172 | Span::from(name_list[3]), 173 | ]), 174 | ]); 175 | 176 | let mut
list = vec![]; 177 | 178 | let width_list: Vec<usize> = name_list.iter().map(|item| item.len()).collect(); 179 | let file_tree_items = &file_tree_list[opt.selected_layer]; 180 | 181 | add_to_file_tree_view(opt.mode, width_list, &mut list, file_tree_items, vec![]); 182 | 183 | let file_count = list.len(); 184 | let files = List::new(list).highlight_style(Style::default().bg(Color::White).fg(Color::Black)); 185 | FilesWidget { 186 | file_count, 187 | files, 188 | files_area: chunks[1], 189 | block: util::create_block(title), 190 | block_area: opt.area, 191 | content, 192 | content_area: chunks[0], 193 | } 194 | } 195 | -------------------------------------------------------------------------------- /src/image/layer.rs: -------------------------------------------------------------------------------- 1 | use crate::error::HTTPError; 2 | use bytes::Bytes; 3 | use libflate::gzip::Decoder; 4 | use serde::{Deserialize, Serialize}; 5 | use snafu::{ResultExt, Snafu}; 6 | use std::fs::File; 7 | use std::{io::Read, path::Path}; 8 | use tar::Archive; 9 | 10 | use super::ImageFileInfo; 11 | 12 | #[derive(Debug, Snafu)] 13 | pub enum Error { 14 | #[snafu(display("File not found"))] 15 | NotFound, 16 | #[snafu(display("Read fail: {}", source))] 17 | Read { source: std::io::Error }, 18 | #[snafu(display("Gzip decode fail: {}", source))] 19 | GzipDecode { source: std::io::Error }, 20 | #[snafu(display("Zstd decode fail: {}", source))] 21 | ZstdDecode { source: std::io::Error }, 22 | #[snafu(display("Tar fail: {}", source))] 23 | Tar { source: std::io::Error }, 24 | } 25 | 26 | impl From<Error> for HTTPError { 27 | fn from(err: Error) -> Self { 28 | // some errors are converted separately by category 29 | HTTPError::new_with_category(&err.to_string(), "layer") 30 | } 31 | } 32 | pub type Result<T, E = Error> = std::result::Result<T, E>; 33 | 34 | // decompress gzip data 35 | fn gunzip(data: &[u8]) -> Result<Vec<u8>> { 36 | let mut decoder = Decoder::new(data).context(GzipDecodeSnafu {})?; 37 | let mut decode_data = vec![]; 38 | let _ = decoder 39 | .read_to_end(&mut decode_data) 40 | .context(GzipDecodeSnafu {})?; 41 | Ok(Bytes::copy_from_slice(&decode_data).to_vec()) 42 | } 43 | 44 | // decompress zstd data 45 | pub fn zstd_decode(data: &[u8]) -> Result<Vec<u8>> { 46 | let mut buf = vec![]; 47 | zstd::stream::copy_decode(data, &mut buf).context(ZstdDecodeSnafu {})?; 48 | Ok(buf) 49 | } 50 | 51 | // read the size of a file from a tar archive 52 | pub async fn get_file_size_from_tar(tar: &str, filename: &str) -> Result<u64> { 53 | let file = File::open(tar).context(TarSnafu {})?; 54 | let mut a = Archive::new(file); 55 | for file in a.entries().context(TarSnafu {})? { 56 | let file = file.context(TarSnafu {})?; 57 | let name = file 58 | .path() 59 | .context(TarSnafu {})? 60 | .to_string_lossy() 61 | .to_string(); 62 | if name == filename { 63 | return Ok(file.size()); 64 | } 65 | } 66 | Ok(0) 67 | } 68 | 69 | // read the content of a file from a tar archive 70 | pub async fn get_file_content_from_tar(tar: &str, filename: &str) -> Result<Vec<u8>> { 71 | let file = File::open(tar).context(TarSnafu {})?; 72 | let mut a = Archive::new(file); 73 | let mut content = vec![]; 74 | for file in a.entries().context(TarSnafu {})? { 75 | let mut file = file.context(TarSnafu {})?; 76 | let name = file 77 | .path() 78 | .context(TarSnafu {})?
79 | .to_string_lossy() 80 | .to_string(); 81 | if name == filename { 82 | file.read_to_end(&mut content).context(ReadSnafu {})?; 83 | break; 84 | } 85 | } 86 | if content.is_empty() { 87 | return Err(Error::NotFound {}); 88 | } 89 | Ok(content) 90 | } 91 | 92 | // read a single file from the layer data 93 | pub async fn get_file_content_from_layer( 94 | data: &[u8], 95 | media_type: &str, 96 | filename: &str, 97 | ) -> Result<Vec<u8>> { 98 | let buf; 99 | let mut a = if media_type.contains("gzip") { 100 | buf = gunzip(data)?; 101 | Archive::new(&buf[..]) 102 | } else if media_type.contains("zstd") { 103 | buf = zstd_decode(data)?; 104 | Archive::new(&buf[..]) 105 | } else { 106 | Archive::new(data) 107 | }; 108 | let mut content = vec![]; 109 | for file in a.entries().context(TarSnafu {})? { 110 | let mut file = file.context(TarSnafu {})?; 111 | let name = file 112 | .path() 113 | .context(TarSnafu {})? 114 | .to_string_lossy() 115 | .to_string(); 116 | if name == filename { 117 | file.read_to_end(&mut content).context(ReadSnafu {})?; 118 | break; 119 | } 120 | } 121 | if content.is_empty() { 122 | return Err(Error::NotFound {}); 123 | } 124 | Ok(content) 125 | } 126 | 127 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 128 | pub struct ImageLayerInfo { 129 | // original size 130 | pub size: u64, 131 | // size after decompression 132 | pub unpack_size: u64, 133 | // file list 134 | pub files: Vec<ImageFileInfo>, 135 | } 136 | 137 | // read the information of every file from the layer data 138 | // "application/vnd.oci.image.layer.v1.tar+gzip", 139 | pub async fn get_files_from_layer(data: &[u8], media_type: &str) -> Result<ImageLayerInfo> { 140 | let buf; 141 | let size = data.len() as u64; 142 | let mut unpack_size = size; 143 | // TODO: support gzip, zstd, etc. 144 | let mut a = if media_type.contains("gzip") { 145 | buf = gunzip(data)?; 146 | unpack_size = buf.len() as u64; 147 | Archive::new(&buf[..]) 148 | } else if media_type.contains("zstd") { 149 | buf = zstd_decode(data)?; 150 | unpack_size = buf.len() as u64; 151 | Archive::new(&buf[..]) 152 | } else { 153 | Archive::new(data) 154 | }; 155 | 156 | let mut files = vec![]; 157 | for file in a.entries().context(TarSnafu {})? { 158 | let file = file.context(TarSnafu {})?; 159 | let header = file.header(); 160 | // skip directories 161 | if header.entry_type().is_dir() { 162 | continue; 163 | } 164 | let mut link = "".to_string(); 165 | 166 | if let Some(value) = file.link_name().context(TarSnafu {})? { 167 | link = value.to_string_lossy().to_string() 168 | } 169 | let mut path = file 170 | .path() 171 | .context(TarSnafu {})? 172 | .to_string_lossy() 173 | .to_string(); 174 | let mut is_whiteout = None; 175 | // To implement this kind of deletion, AuFS creates a whiteout file in the read-write layer that "masks" the file in the read-only layer. 176 | // .wh.
177 | // usr/local/bin/.wh.static 178 | if let Some(filename) = Path::new(&path).file_name() { 179 | let name = filename.to_string_lossy(); 180 | let prefix = ".wh."; 181 | if name.starts_with(prefix) { 182 | path = path.replace(name.to_string().as_str(), &name.replace(prefix, "")); 183 | is_whiteout = Some(true); 184 | } 185 | } 186 | let mode = header.mode().context(TarSnafu {})?; 187 | let info = ImageFileInfo { 188 | path, 189 | link, 190 | size: file.size(), 191 | mode: unix_mode::to_string(mode), 192 | uid: header.uid().context(TarSnafu {})?, 193 | gid: header.gid().context(TarSnafu {})?, 194 | is_whiteout, 195 | }; 196 | files.push(info); 197 | } 198 | Ok(ImageLayerInfo { 199 | files, 200 | unpack_size, 201 | size, 202 | }) 203 | } 204 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use axum::{error_handling::HandleErrorLayer, middleware::from_fn, Router}; 2 | use bytesize::ByteSize; 3 | use clap::Parser; 4 | use colored::*; 5 | use std::fs; 6 | use std::net::SocketAddr; 7 | use std::time::Duration; 8 | use std::{env, str::FromStr}; 9 | use tokio::signal; 10 | use tokio_cron_scheduler::{Job, JobScheduler}; 11 | use tower::ServiceBuilder; 12 | use tracing::Level; 13 | use tracing::{error, info}; 14 | use tracing_subscriber::FmtSubscriber; 15 | 16 | mod config; 17 | mod controller; 18 | mod dist; 19 | mod error; 20 | mod image; 21 | mod middleware; 22 | mod store; 23 | mod task_local; 24 | mod ui; 25 | mod util; 26 | 27 | use controller::new_router; 28 | use image::{analyze_docker_image, parse_image_info}; 29 | use middleware::{access_log, entry}; 30 | use store::clear_blob_files; 31 | use task_local::{generate_trace_id, TRACE_ID}; 32 | 33 | /// A tool for exploring each layer in a docker image. 34 | /// It can run in terminal or as a web service. 
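/// 
/// A minimal usage sketch, based only on the `Args` definition that follows
/// (the image name and listen address are illustrative examples, not values
/// from this repo):
/// 
///     diving redis:alpine                              # analyze an image in terminal mode (default)
///     diving --mode web --listen 0.0.0.0:7001          # run as a web service
///     diving --output-file result.json redis:alpine    # write the analysis result as JSON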
35 | #[derive(Parser, Debug)] 36 | #[command(author, version, about, long_about = None)] 37 | struct Args { 38 | /// Running mode of diving, terminal or web 39 | #[arg(short, long, default_value = "terminal")] 40 | mode: String, 41 | image: Option<String>, 42 | /// The listen addr of web mode 43 | #[arg(short, long, default_value = "127.0.0.1:7001")] 44 | listen: String, 45 | /// The result output file 46 | #[arg(short, long)] 47 | output_file: Option<String>, 48 | } 49 | 50 | impl Args { 51 | fn is_terminal_type(&self) -> bool { 52 | self.mode == "terminal" 53 | } 54 | } 55 | 56 | fn init_logger() { 57 | let mut level = Level::INFO; 58 | if let Ok(log_level) = env::var("LOG_LEVEL") { 59 | if let Ok(value) = Level::from_str(log_level.as_str()) { 60 | level = value; 61 | } 62 | } 63 | let timer = tracing_subscriber::fmt::time::OffsetTime::local_rfc_3339().unwrap_or_else(|_| { 64 | tracing_subscriber::fmt::time::OffsetTime::new( 65 | time::UtcOffset::from_hms(0, 0, 0).unwrap(), 66 | time::format_description::well_known::Rfc3339, 67 | ) 68 | }); 69 | let env = std::env::var("RUST_ENV").unwrap_or_default(); 70 | let subscriber = FmtSubscriber::builder() 71 | .with_max_level(level) 72 | .with_timer(timer) 73 | .with_ansi(env != "production") 74 | .finish(); 75 | tracing::subscriber::set_global_default(subscriber).expect("setting default subscriber failed"); 76 | } 77 | 78 | async fn start_scheduler() { 79 | let scheduler = JobScheduler::new().await.unwrap(); 80 | scheduler 81 | .add( 82 | // TODO: make this configurable later 83 | Job::new_async("@hourly", |_, _| { 84 | Box::pin(async { 85 | let result = clear_blob_files().await; 86 | if let Err(err) = result { 87 | error!(err = err.to_string(), "clear blob files fail") 88 | } else { 89 | info!("clear blob files success") 90 | } 91 | }) 92 | }) 93 | .unwrap(), 94 | ) 95 | .await 96 | .unwrap(); 97 | scheduler.start().await.unwrap(); 98 | } 99 | 100 | fn is_ci() -> bool { 101 | env::var_os("CI").unwrap_or_default() == "true" 102 | } 103 | 104 | // analyze the image (errors are returned as plain strings) 105 | async fn analyze(image: String, output_file: String) -> Result<(), String> { 106 | // in command line mode, clear expired data first 107 | clear_blob_files().await.map_err(|item| item.to_string())?; 108 | let image_info = parse_image_info(&image); 109 | let result = analyze_docker_image(image_info) 110 | .await 111 | .map_err(|item| item.to_string())?; 112 | if is_ci() || !output_file.is_empty() { 113 | let summary = result.summary(); 114 | let lowest_efficiency = (config::get_lowest_efficiency() * 100.0) as u64; 115 | let highest_wasted_bytes = config::get_highest_wasted_bytes(); 116 | let highest_user_wasted_percent = config::get_highest_user_wasted_percent(); 117 | println!("{}", "Analyze result:".bold().green()); 118 | println!(" efficiency: {} %", summary.score); 119 | println!( 120 | " wasted bytes: {} bytes ({})", 121 | summary.wasted_size, 122 | ByteSize(summary.wasted_size) 123 | ); 124 | 125 | let mut passed = true; 126 | if summary.score < lowest_efficiency { 127 | println!( 128 | "{}: lowest efficiency check, lowest: {}", 129 | "FAIL".red(), 130 | lowest_efficiency 131 | ); 132 | passed = false; 133 | } 134 | if summary.wasted_size > highest_wasted_bytes { 135 | println!( 136 | "{}: highest wasted bytes check, highest: {}", 137 | "FAIL".red(), 138 | ByteSize(highest_wasted_bytes) 139 | ); 140 | passed = false; 141 | } 142 | if summary.wasted_percent > highest_user_wasted_percent { 143 | println!( 144 | "{}: highest user wasted percent check, highest: {:.2}", 145 | "FAIL".red(), 146 | highest_user_wasted_percent 147 | ); 148 | passed =
false; 149 | } 150 | if !output_file.is_empty() { 151 | fs::write( 152 | output_file, 153 | serde_json::to_string(&result).map_err(|err| err.to_string())?, 154 | ) 155 | .map_err(|err| err.to_string())?; 156 | } else if !passed { 157 | return Err("CI check fail".to_string()); 158 | } 159 | } else { 160 | ui::run_app(result).map_err(|item| item.to_string())?; 161 | } 162 | Ok(()) 163 | } 164 | 165 | #[tokio::main] 166 | async fn run() { 167 | // make sure the config can be loaded at startup 168 | config::must_load_config(); 169 | let args = Args::parse(); 170 | if args.is_terminal_type() { 171 | if let Some(value) = args.image { 172 | TRACE_ID 173 | .scope(generate_trace_id(), async { 174 | if let Err(err) = analyze(value, args.output_file.unwrap_or_default()).await { 175 | error!(err, "analyze image fail"); 176 | std::process::exit(1) 177 | } 178 | }) 179 | .await; 180 | } else { 181 | error!("image can not be nil") 182 | } 183 | } else { 184 | start_scheduler().await; 185 | // build our application with a route 186 | let app = Router::new() 187 | .merge(new_router()) 188 | .layer( 189 | ServiceBuilder::new() 190 | .layer(HandleErrorLayer::new(error::handle_error)) 191 | .timeout(Duration::from_secs(10 * 60)), 192 | ) 193 | // layers added later run first 194 | .layer(from_fn(access_log)) 195 | .layer(from_fn(entry)); 196 | 197 | info!("listening on http://{}", args.listen); 198 | let listener = tokio::net::TcpListener::bind(args.listen).await.unwrap(); 199 | 200 | axum::serve( 201 | listener, 202 | app.into_make_service_with_connect_info::<SocketAddr>(), 203 | ) 204 | .with_graceful_shutdown(shutdown_signal()) 205 | .await 206 | .unwrap(); 207 | } 208 | } 209 | 210 | async fn shutdown_signal() { 211 | let ctrl_c = async { 212 | signal::ctrl_c() 213 | .await 214 | .expect("failed to install Ctrl+C handler"); 215 | }; 216 | 217 | #[cfg(unix)] 218 | let terminate = async { 219 | // TODO: set the ping status here later if needed 220 | signal::unix::signal(signal::unix::SignalKind::terminate()) 221 | .expect("failed to install signal handler") 222 | .recv() 223 | .await; 224 | }; 225 | 226 | #[cfg(not(unix))] 227 | let terminate = std::future::pending::<()>(); 228 | 229 | tokio::select! { 230 | _ = ctrl_c => {}, 231 | _ = terminate => {}, 232 | } 233 | 234 | info!("signal received, starting graceful shutdown"); 235 | } 236 | 237 | fn main() { 238 | // Because we need to get the local offset before Tokio spawns any threads, our `main` 239 | // function cannot use `tokio::main`.
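    // A rough sketch of the same idea written out explicitly, assuming tokio's
    // default multi-threaded runtime; this only illustrates why `main` stays
    // synchronous and is not how this crate is wired:
    //
    //     fn main() {
    //         init_logger(); // local UTC offset is captured here, before any worker threads exist
    //         tokio::runtime::Runtime::new().unwrap().block_on(async { /* ... */ });
    //     }
    //
    // Here the `#[tokio::main]` attribute lives on `run()` instead, which `main`
    // calls after `init_logger()`.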
240 | std::panic::set_hook(Box::new(|e| { 241 | error!(category = "panic", message = e.to_string(),); 242 | std::process::exit(1); 243 | })); 244 | init_logger(); 245 | run(); 246 | } 247 | -------------------------------------------------------------------------------- /src/image/oci_image.rs: -------------------------------------------------------------------------------- 1 | use serde::{Deserialize, Serialize}; 2 | use serde_repr::{Deserialize_repr, Serialize_repr}; 3 | use std::collections::HashMap; 4 | 5 | pub static MEDIA_TYPE_IMAGE_INDEX: &str = "application/vnd.oci.image.index.v1+json"; 6 | 7 | pub static MEDIA_TYPE_DOCKER_SCHEMA2_MANIFEST: &str = 8 | "application/vnd.docker.distribution.manifest.v2+json"; 9 | pub static MEDIA_TYPE_MANIFEST_LIST: &str = 10 | "application/vnd.docker.distribution.manifest.list.v2+json"; 11 | 12 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 13 | #[serde(rename_all = "camelCase")] 14 | pub struct ImageFileInfo { 15 | // file path 16 | pub path: String, 17 | // file link 18 | pub link: String, 19 | // file size 20 | pub size: u64, 21 | // unix mode 22 | pub mode: String, 23 | pub uid: u64, 24 | pub gid: u64, 25 | // whether the file is a whiteout (marks a deletion) 26 | pub is_whiteout: Option<bool>, 27 | } 28 | 29 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 30 | #[serde(rename_all = "camelCase")] 31 | pub struct ImageLayer { 32 | // created time 33 | pub created: String, 34 | pub digest: String, 35 | // the command that created this layer 36 | pub cmd: String, 37 | // size of the layer 38 | pub size: u64, 39 | // media type 40 | pub media_type: String, 41 | // size of the layer after unpacking 42 | pub unpack_size: u64, 43 | // whether the layer is empty (no file operations) 44 | pub empty: bool, 45 | } 46 | 47 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 48 | #[serde(rename_all = "camelCase")] 49 | pub struct ImageFileSummary { 50 | // the layer it belongs to 51 | pub layer_index: usize, 52 | // operation 53 | pub op: Op, 54 | // file info 55 | pub info: ImageFileInfo, 56 | } 57 | 58 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 59 | #[serde(rename_all = "camelCase")] 60 | pub struct ImageIndex { 61 | // media type 62 | pub media_type: String, 63 | // schema version 64 | pub schema_version: i64, 65 | // manifests of the image 66 | pub manifests: Vec<ImageIndexManifest>, 67 | } 68 | 69 | impl ImageIndex { 70 | // return the matching manifest; if none matches, return the first one 71 | pub fn guess_manifest(&self, arch: &str) -> ImageIndexManifest { 72 | let os = "linux"; 73 | let mut os_match_manifests = vec![]; 74 | let mut architecture = arch.to_string(); 75 | if architecture.is_empty() { 76 | architecture = "amd64".to_string(); 77 | let arch = std::env::consts::ARCH; 78 | if arch.contains("arm") || arch.contains("aarch64") { 79 | architecture = "arm64".to_string() 80 | } 81 | } 82 | for item in &self.manifests { 83 | if item.platform.os != os { 84 | continue; 85 | } 86 | if item.platform.architecture == architecture { 87 | return item.clone(); 88 | } 89 | os_match_manifests.push(item) 90 | } 91 | // if any manifest matches the os, return that one 92 | if !os_match_manifests.is_empty() { 93 | return os_match_manifests[0].clone(); 94 | } 95 | self.manifests[0].clone() 96 | } 97 | } 98 | 99 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 100 | #[serde(rename_all = "camelCase")] 101 | pub struct ImageIndexManifest { 102 | // media type 103 | pub media_type: String, 104 | // digest of the content 105 | pub digest: String, 106 | // size 107 | pub size: i64, 108 | // platform 109 | pub platform: ImageIndexPlatform, 110 | pub annotations: Option<ImageIndexAnnotations>, 111 | } 112 | 113 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 114 | #[serde(rename_all = "camelCase")] 115 | pub
struct ImageIndexPlatform { 116 | // architecture 117 | pub architecture: String, 118 | pub os: String, 119 | pub variant: Option<String>, 120 | } 121 | 122 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 123 | #[serde(rename_all = "camelCase")] 124 | pub struct ImageIndexAnnotations { 125 | #[serde(rename = "vnd.docker.reference.digest")] 126 | pub vnd_docker_reference_digest: Option<String>, 127 | #[serde(rename = "vnd.docker.reference.type")] 128 | pub vnd_docker_reference_type: Option<String>, 129 | } 130 | 131 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 132 | #[serde(rename_all = "camelCase")] 133 | pub struct ImageManifest { 134 | pub media_type: String, 135 | pub schema_version: i64, 136 | pub config: ImageManifestConfig, 137 | // layer information of the files 138 | pub layers: Vec<ImageManifestLayer>, 139 | } 140 | 141 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 142 | #[serde(rename_all = "camelCase")] 143 | pub struct ImageManifestConfig { 144 | pub media_type: String, 145 | pub digest: String, 146 | pub size: i64, 147 | } 148 | 149 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 150 | #[serde(rename_all = "camelCase")] 151 | pub struct ImageManifestLayer { 152 | pub media_type: String, 153 | pub digest: String, 154 | pub size: u64, 155 | } 156 | 157 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 158 | #[serde(rename_all = "camelCase")] 159 | pub struct ImageExtraInfo { 160 | #[serde(rename = "User")] 161 | pub user: Option<String>, 162 | #[serde(rename = "Env")] 163 | pub env: Option<Vec<String>>, 164 | #[serde(rename = "Labels")] 165 | pub labels: Option<HashMap<String, String>>, 166 | } 167 | 168 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 169 | #[serde(rename_all = "camelCase")] 170 | pub struct ImageConfig { 171 | // architecture 172 | pub architecture: String, 173 | // created time 174 | pub created: String, 175 | // history records 176 | pub history: Vec<ImageHistory>, 177 | pub os: String, 178 | pub rootfs: ImageRootfs, 179 | // image config info (there are more fields that are not read) 180 | pub config: Option<ImageExtraInfo>, 181 | } 182 | 183 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 184 | #[serde(rename_all = "camelCase")] 185 | pub struct ImageHistory { 186 | pub created: String, 187 | #[serde(rename = "created_by")] 188 | pub created_by: Option<String>, 189 | #[serde(rename = "empty_layer")] 190 | pub empty_layer: Option<bool>, 191 | pub comment: Option<String>, 192 | } 193 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 194 | #[serde(rename_all = "camelCase")] 195 | pub struct ImageRootfs { 196 | #[serde(rename = "type")] 197 | pub type_field: String, 198 | #[serde(rename = "diff_ids")] 199 | pub diff_ids: Vec<String>, 200 | } 201 | 202 | #[derive(Default, Debug, Clone, PartialEq, Serialize_repr, Deserialize_repr)] 203 | #[repr(u8)] 204 | pub enum Op { 205 | #[default] 206 | None, 207 | Removed, 208 | Modified, 209 | Added, 210 | } 211 | 212 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 213 | #[serde(rename_all = "camelCase")] 214 | pub struct FileTreeItem { 215 | // file or directory name 216 | pub name: String, 217 | // link 218 | pub link: String, 219 | // file size 220 | pub size: u64, 221 | // unix mode 222 | pub mode: String, 223 | pub uid: u64, 224 | pub gid: u64, 225 | // operation: removed, modified, etc. 226 | pub op: Op, 227 | // child files 228 | pub children: Vec<FileTreeItem>, 229 | } 230 | 231 | // find a file in the file tree 232 | pub fn find_file_tree_item(items: &[FileTreeItem], path_list: Vec<&str>) -> Option<FileTreeItem> { 233 | if path_list.is_empty() { 234 | return None; 235 | } 236 | let is_last = path_list.len() == 1; 237 | let path =
path_list.first().unwrap().to_string(); 238 | for item in items.iter() { 239 | if item.name == path { 240 | if is_last { 241 | return Some(item.clone()); 242 | } 243 | return find_file_tree_item(&item.children, path_list[1..].to_vec()); 244 | } 245 | } 246 | None 247 | } 248 | 249 | // 添加文件至文件树 250 | fn add_file(items: &mut Vec, name_list: Vec<&str>, item: FileTreeItem) { 251 | // 文件 252 | if name_list.is_empty() { 253 | items.push(item); 254 | return; 255 | } 256 | // 目录 257 | let name = name_list[0]; 258 | let mut found_index = -1; 259 | // 是否已存在此目录 260 | for (index, dir) in items.iter_mut().enumerate() { 261 | if dir.name == name { 262 | dir.size += item.size; 263 | found_index = index as i64; 264 | } 265 | } 266 | // 不存在则插入 267 | if found_index < 0 { 268 | found_index = items.len() as i64; 269 | let mut op = Op::None; 270 | if item.op == Op::Modified { 271 | op = Op::Modified; 272 | } 273 | items.push(FileTreeItem { 274 | name: name.to_string(), 275 | size: item.size, 276 | op, 277 | // TODO 其它属性 278 | ..Default::default() 279 | }); 280 | } 281 | if let Some(file_tree_item) = items.get_mut(found_index as usize) { 282 | // 子目录 283 | add_file(&mut file_tree_item.children, name_list[1..].to_vec(), item); 284 | } 285 | } 286 | 287 | // 将文件转换为文件树 288 | pub fn convert_files_to_file_tree( 289 | files: &[ImageFileInfo], 290 | file_summary_list: &[ImageFileSummary], 291 | ) -> Vec { 292 | let mut file_tree: Vec = vec![]; 293 | for file in files.iter() { 294 | let arr: Vec<&str> = file.path.split('/').collect(); 295 | if arr.is_empty() { 296 | continue; 297 | } 298 | let mut op = Op::None; 299 | if file.is_whiteout.is_some() { 300 | op = Op::Removed; 301 | } else if file_summary_list 302 | .iter() 303 | .any(|item| item.info.path == file.path) 304 | { 305 | op = Op::Modified; 306 | } 307 | 308 | let size = arr.len(); 309 | add_file( 310 | &mut file_tree, 311 | arr[0..size - 1].to_vec(), 312 | FileTreeItem { 313 | // 已保证不会为空 314 | name: arr[size - 1].to_string(), 315 | link: file.link.clone(), 316 | size: file.size, 317 | mode: file.mode.clone(), 318 | uid: file.uid, 319 | gid: file.gid, 320 | op, 321 | ..Default::default() 322 | }, 323 | ) 324 | } 325 | file_tree 326 | } 327 | -------------------------------------------------------------------------------- /src/ui/mod.rs: -------------------------------------------------------------------------------- 1 | use self::image_detail::ImageDetailWidgetOption; 2 | use crate::image::{DockerAnalyzeResult, DockerAnalyzeSummary, FileTreeItem, ImageLayer}; 3 | use crossterm::{ 4 | event::{self, Event, KeyCode, KeyModifiers}, 5 | execute, 6 | terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen}, 7 | }; 8 | use ratatui::{prelude::*, widgets::*}; 9 | use std::process; 10 | use std::process::Command; 11 | use std::sync::atomic; 12 | use std::sync::mpsc::sync_channel; 13 | use std::{error::Error, io}; 14 | 15 | mod files; 16 | mod image_detail; 17 | mod layer_detail; 18 | mod layers; 19 | mod util; 20 | 21 | #[derive(Default, Debug, Clone)] 22 | struct WidgetState { 23 | name: String, 24 | arch: String, 25 | os: String, 26 | active_list: Vec, 27 | // 选中的区域 28 | active: String, 29 | selected_layer: usize, 30 | // 镜像大小 31 | size: u64, 32 | // 镜像解压大小 33 | total_size: u64, 34 | // 镜像层的信息 35 | layers: Vec, 36 | // 每层对应的文件树 37 | file_tree_list: Vec>, 38 | // 文件列表的状态 39 | files_state: ListState, 40 | // 文件列表项总数 41 | file_count: usize, 42 | // 文件树模式 43 | file_tree_mode: u8, 44 | summary: DockerAnalyzeSummary, 45 | } 46 | 47 | static 
LAYERS_WIDGET: &str = "layers"; 48 | static FILES_WIDGET: &str = "files"; 49 | 50 | impl WidgetState { 51 | fn next_widget(&mut self) { 52 | let found = self 53 | .active_list 54 | .iter() 55 | .position(|x| *x == self.active) 56 | .unwrap_or(0); 57 | if found >= self.active_list.len() - 1 { 58 | self.active = self.active_list[0].clone(); 59 | } else { 60 | self.active = self.active_list[found + 1].clone(); 61 | } 62 | if self.is_files_widget_active() { 63 | self.select_file(0); 64 | } else { 65 | self.files_state.select(None); 66 | } 67 | } 68 | // layers widget是否活动状态 69 | fn is_layers_widget_active(&self) -> bool { 70 | self.active == LAYERS_WIDGET 71 | } 72 | fn is_files_widget_active(&self) -> bool { 73 | self.active == FILES_WIDGET 74 | } 75 | fn select_file(&mut self, offset: i64) { 76 | let mut value = 0; 77 | if let Some(v) = self.files_state.selected() { 78 | value = v as i64; 79 | } 80 | value += offset; 81 | 82 | if value >= self.file_count as i64 { 83 | return; 84 | } 85 | // 如果offset为0,选择第一个文件 86 | if value < 0 || offset == 0 { 87 | value = 0 88 | } 89 | self.files_state.select(Some(value as usize)); 90 | } 91 | fn select_next(&mut self) { 92 | if self.is_files_widget_active() { 93 | self.select_file(1); 94 | return; 95 | } 96 | 97 | if self.is_layers_widget_active() && self.selected_layer < self.layers.len() - 1 { 98 | self.selected_layer += 1; 99 | } 100 | } 101 | fn select_prev(&mut self) { 102 | if self.is_files_widget_active() { 103 | self.select_file(-1); 104 | return; 105 | } 106 | if self.is_layers_widget_active() && self.selected_layer > 0 { 107 | self.selected_layer -= 1; 108 | } 109 | } 110 | fn change_file_tree_mode(&mut self, mode: u8) { 111 | self.file_tree_mode = mode; 112 | } 113 | } 114 | 115 | pub fn run_app(result: DockerAnalyzeResult) -> Result<(), Box> { 116 | // setup terminal 117 | enable_raw_mode()?; 118 | let mut stdout = io::stdout(); 119 | execute!(stdout, EnterAlternateScreen)?; 120 | let backend = CrosstermBackend::new(stdout); 121 | let mut terminal = Terminal::new(backend)?; 122 | 123 | let hidden = atomic::AtomicBool::default(); 124 | 125 | // create app and run it 126 | let summary = result.summary(); 127 | let mut state = WidgetState { 128 | name: result.name, 129 | arch: result.arch, 130 | os: result.os, 131 | layers: result.layers, 132 | selected_layer: 0, 133 | file_tree_list: result.file_tree_list, 134 | size: result.size, 135 | total_size: result.total_size, 136 | // 可以选中的widget列表顺序 137 | active_list: vec![LAYERS_WIDGET.to_string(), FILES_WIDGET.to_string()], 138 | active: LAYERS_WIDGET.to_string(), 139 | summary, 140 | ..Default::default() 141 | }; 142 | let (tx, rx) = sync_channel::(1); 143 | #[cfg(not(windows))] 144 | let signal = Some(unsafe { 145 | signal_hook_registry::register(signal_hook::consts::SIGCONT, move || { 146 | // 事件触发失败则直接退出 147 | // 因此使用unwrap 148 | tx.send(true).unwrap(); 149 | }) 150 | }?); 151 | #[cfg(windows)] 152 | let signal = None; 153 | 154 | loop { 155 | if hidden.load(atomic::Ordering::Relaxed) { 156 | // 等待fg事件,出错直接退出 157 | // 因此使用unwrap 158 | rx.recv().unwrap(); 159 | enable_raw_mode()?; 160 | execute!(terminal.backend_mut(), EnterAlternateScreen)?; 161 | terminal.hide_cursor()?; 162 | hidden.store(false, atomic::Ordering::Relaxed); 163 | terminal.clear()?; 164 | } 165 | terminal.draw(|f| draw_widgets(f, &mut state))?; 166 | 167 | if let Event::Key(key) = event::read()? 
{ 168 | match key.code { 169 | // make dev的形式下不可用 170 | // suspend 171 | KeyCode::Char('z') => { 172 | // 只针对类unix系统 173 | if cfg!(unix) && key.modifiers.contains(KeyModifiers::CONTROL) { 174 | hidden.store(true, atomic::Ordering::Relaxed); 175 | 176 | disable_raw_mode()?; 177 | execute!(terminal.backend_mut(), LeaveAlternateScreen)?; 178 | terminal.show_cursor()?; 179 | 180 | let mut kill = Command::new("kill") 181 | .args(["-s", "STOP", &process::id().to_string()]) 182 | .spawn()?; 183 | kill.wait()?; 184 | 185 | continue; 186 | } 187 | } 188 | // 退出 189 | KeyCode::Char('c') => { 190 | if key.modifiers.contains(KeyModifiers::CONTROL) { 191 | break; 192 | } 193 | } 194 | // 退出 195 | KeyCode::Char('q') => break, 196 | KeyCode::Tab => state.next_widget(), 197 | // 左右均下一组件,因为只有两个组件 198 | KeyCode::Right => state.next_widget(), 199 | KeyCode::Left => state.next_widget(), 200 | // 组件中的上下移动 201 | KeyCode::Down => state.select_next(), 202 | KeyCode::Up => state.select_prev(), 203 | // 文件树模式选择 204 | KeyCode::Char('0') => state.change_file_tree_mode(0), 205 | KeyCode::Char('1') => state.change_file_tree_mode(1), 206 | KeyCode::Char('2') => state.change_file_tree_mode(2), 207 | KeyCode::Esc => state.change_file_tree_mode(0), 208 | 209 | _ => continue, 210 | } 211 | } 212 | } 213 | 214 | // restore terminal 215 | disable_raw_mode()?; 216 | execute!(terminal.backend_mut(), LeaveAlternateScreen)?; 217 | terminal.show_cursor()?; 218 | if let Some(value) = signal { 219 | signal_hook_registry::unregister(value); 220 | } 221 | Ok(()) 222 | } 223 | 224 | fn draw_widgets(f: &mut Frame, state: &mut WidgetState) { 225 | let chunks = Layout::default() 226 | .direction(Direction::Horizontal) 227 | .constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref()) 228 | .split(f.area()); 229 | 230 | let layers_widget = layers::new_layers_widget( 231 | &state.layers, 232 | layers::LayersWidgetOption { 233 | is_active: state.is_layers_widget_active(), 234 | selected_layer: state.selected_layer, 235 | }, 236 | ); 237 | let layer = state 238 | .layers 239 | .get(state.selected_layer) 240 | .unwrap_or_else(|| &state.layers[0]); 241 | let detail_widget = layer_detail::new_layer_detail_widget( 242 | layer, 243 | layer_detail::DetailWidgetOption { 244 | width: chunks[0].width, 245 | }, 246 | ); 247 | 248 | let left_chunks = Layout::default() 249 | .direction(Direction::Vertical) 250 | .constraints( 251 | [ 252 | Constraint::Length(layers_widget.height), 253 | Constraint::Length(detail_widget.height), 254 | Constraint::Fill(1), 255 | ] 256 | .as_ref(), 257 | ) 258 | .split(chunks[0]); 259 | let image_detail_widget = image_detail::new_image_detail_widget(ImageDetailWidgetOption { 260 | name: state.name.clone(), 261 | arch: state.arch.clone(), 262 | os: state.os.clone(), 263 | total_size: state.total_size, 264 | size: state.size, 265 | summary: state.summary.clone(), 266 | }); 267 | f.render_widget(layers_widget.widget, left_chunks[0]); 268 | f.render_widget(detail_widget.widget, left_chunks[1]); 269 | f.render_widget(image_detail_widget.widget, left_chunks[2]); 270 | 271 | // 文件列表 272 | let files_widget = files::new_files_widget( 273 | &state.file_tree_list, 274 | files::FilesWidgetOption { 275 | is_active: state.is_files_widget_active(), 276 | selected_layer: state.selected_layer, 277 | area: chunks[1], 278 | mode: state.file_tree_mode, 279 | }, 280 | ); 281 | if state.file_count != files_widget.file_count { 282 | state.file_count = files_widget.file_count; 283 | } 284 | f.render_widget(files_widget.block, 
files_widget.block_area); 285 | f.render_widget(files_widget.content, files_widget.content_area); 286 | f.render_stateful_widget( 287 | files_widget.files, 288 | files_widget.files_area, 289 | &mut state.files_state, 290 | ); 291 | } 292 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /web/src/App.tsx: -------------------------------------------------------------------------------- 1 | import { Component, ReactNode } from "react"; 2 | import { 3 | ConfigProvider, 4 | theme, 5 | Card, 6 | Layout, 7 | Input, 8 | message, 9 | Descriptions, 10 | Form, 11 | Select, 12 | Col, 13 | Row, 14 | Checkbox, 15 | Typography, 16 | Space, 17 | List, 18 | } from "antd"; 19 | import axios, { AxiosError } from "axios"; 20 | import prettyBytes from "pretty-bytes"; 21 | import i18nGet from "./i18n"; 22 | 23 | import "./App.css"; 24 | 25 | const { Option } = Select; 26 | const { defaultAlgorithm, darkAlgorithm } = theme; 27 | const { Header, Content } = Layout; 28 | const { Search } = Input; 29 | const { Paragraph } = Typography; 30 | 31 | interface ModifiedFile { 32 | digest: string; 33 | path: string; 34 | size: number; 35 | } 36 | 37 | interface ImageAnalyzeResult { 38 | name: string; 39 | arch: string; 40 | os: string; 41 | layers: Layer[]; 42 | size: number; 43 | totalSize: number; 44 | fileTreeList: FileTreeList[][]; 45 | fileSummaryList: FileSummaryList[]; 46 | bigModifiedFileList: ModifiedFile[]; 47 | } 48 | 49 | interface Layer { 50 | created: string; 51 | digest: string; 52 | mediaType: string; 53 | cmd: string; 54 | size: number; 55 | unpackSize: number; 56 | empty: boolean; 57 | } 58 | 59 | interface FileTreeList { 60 | key: string; 61 | name: string; 62 | link: string; 63 | size: number; 64 | mode: string; 65 | uid: number; 66 | gid: number; 67 | op: number; 68 | children: FileTreeList[]; 69 | } 70 | 71 | interface FileSummaryList { 72 | layerIndex: number; 73 | op: number; 74 | info: Info; 75 | } 76 | 77 | interface Info { 78 | path: string; 79 | link: string; 80 | size: number; 81 | mode: string; 82 | uid: number; 83 | gid: number; 84 | isWhiteout: any; 85 | } 86 | interface FileWastedSummary { 87 | path: string; 88 | totalSize: number; 89 | count: number; 90 | } 91 | 92 | const plusOutlined = ( 93 | 103 | ); 104 | const minusOutlined = ( 105 | 115 | ); 116 | 117 | const isDarkMode = () => 118 | window.matchMedia("(prefers-color-scheme: dark)").matches; 119 | 120 | const getLogoIcon = (isDarkMode: boolean) => { 121 | let color = `rgb(0, 0, 0)`; 122 | if (isDarkMode) { 123 | color = `rgb(255, 255, 255)`; 124 | } 125 | return ( 126 | 134 | 135 | 136 | 137 | 138 | ); 139 | }; 140 | 141 | const getGithubIcon = (isDarkMode: boolean) => { 142 | if (window.location.host.indexOf("diving") === -1) { 143 | return; 144 | } 145 | let color = `rgb(0, 0, 0)`; 146 | if (isDarkMode) { 147 | color = `rgb(255, 255, 255)`; 148 | } 149 | return ( 150 | 159 | 170 | 171 | ); 172 | }; 173 | 174 | const getDownloadIcon = () => { 175 | const color = `#646cff`; 176 | return ( 177 | 183 | 190 | 197 | 204 | 205 | ); 206 | }; 207 | 208 | const getImageSummary = (result: ImageAnalyzeResult) => { 209 | let wastedSize = 0; 210 | let wastedList: FileWastedSummary[] = []; 211 | // 计算浪费的空间以及文件 212 | result.fileSummaryList.forEach((item) => { 213 | const { size, path } = item.info; 214 | const found = wastedList.find((item) => item.path === path); 215 | if (found) { 216 | found.count++; 217 | found.totalSize += size; 218 | } else { 219 | wastedList.push({ 220 | path, 221 | count: 1, 222 | totalSize: size, 223 | }); 224 | } 225 | wastedSize += size; 226 | }); 227 | wastedList.sort((a, b) => { 228 | return b.totalSize - a.totalSize; 229 | }); 230 | 231 | // 除去第一个不为0的layer大小 232 | let firstNotEmptyLayerSize = 0; 233 | 
result.layers.forEach((item) => { 234 | if (firstNotEmptyLayerSize != 0) { 235 | return; 236 | } 237 | firstNotEmptyLayerSize = item.size; 238 | }); 239 | const otherLayerSize = result.totalSize - firstNotEmptyLayerSize; 240 | 241 | const score = (100 - (wastedSize * 100) / result.totalSize).toFixed(2); 242 | 243 | const imageDescriptions = { 244 | score: `${score}%`, 245 | size: `${prettyBytes(result.totalSize)} / ${prettyBytes(result.size)}`, 246 | otherSize: prettyBytes(otherLayerSize), 247 | wastedSize: prettyBytes(wastedSize), 248 | osArch: `${result.os}/${result.arch}`, 249 | created: result.layers[result.layers.length - 1].created, 250 | }; 251 | return { 252 | wastedList, 253 | imageDescriptions, 254 | }; 255 | }; 256 | 257 | const addKeyToFileTreeItem = (items: FileTreeList[], prefix: string) => { 258 | items.forEach((item) => { 259 | let key = item.name; 260 | if (prefix) { 261 | key = `${prefix}/${key}`; 262 | } 263 | item.key = key; 264 | addKeyToFileTreeItem(item.children, key); 265 | }); 266 | }; 267 | 268 | interface FileTreeViewOption { 269 | expandAll: boolean; 270 | expandItems: string[]; 271 | sizeLimit: number; 272 | onlyModifiedRemoved: boolean; 273 | keyword: string; 274 | } 275 | 276 | const opRemoved = 1; 277 | const opModified = 2; 278 | 279 | const isModifiedRemoved = (item: FileTreeList) => { 280 | const arr = [opRemoved, opModified]; 281 | if (arr.includes(item.op)) { 282 | return true; 283 | } 284 | // 如果子元素符合,则也符合 285 | for (let i = 0; i < item.children.length; i++) { 286 | const { op } = item.children[i]; 287 | if (arr.includes(op)) { 288 | return true; 289 | } 290 | } 291 | return false; 292 | }; 293 | 294 | const isMatchKeyword = (item: FileTreeList, keyword: string) => { 295 | if (item.name.includes(keyword)) { 296 | return true; 297 | } 298 | // 如果子元素符合,则也符合 299 | for (let i = 0; i < item.children.length; i++) { 300 | if (isMatchKeyword(item.children[i], keyword)) { 301 | return true; 302 | } 303 | } 304 | return false; 305 | }; 306 | 307 | const addToFileTreeView = ( 308 | onToggleExpand: (key: string) => void, 309 | layer: Layer, 310 | list: JSX.Element[], 311 | items: FileTreeList[], 312 | isLastList: boolean[], 313 | opt: FileTreeViewOption, 314 | ) => { 315 | if (!items) { 316 | return 0; 317 | } 318 | const max = items.length; 319 | let count = 0; 320 | const isExpandAll = () => { 321 | if (opt.expandAll || opt.keyword) { 322 | return true; 323 | } 324 | return false; 325 | }; 326 | 327 | const shouldExpand = (key: string) => { 328 | if (isExpandAll()) { 329 | return true; 330 | } 331 | if (opt.expandItems?.includes(key)) { 332 | return true; 333 | } 334 | return false; 335 | }; 336 | items.forEach((item, index) => { 337 | // 如果限制了大小 338 | if (opt.sizeLimit && item.size < opt.sizeLimit) { 339 | return; 340 | } 341 | // 如果仅展示更新、删除选项 342 | if (opt.onlyModifiedRemoved && !isModifiedRemoved(item)) { 343 | return; 344 | } 345 | // 如果指定关键字筛选 346 | if (opt.keyword && !isMatchKeyword(item, opt.keyword)) { 347 | return; 348 | } 349 | const id = `${item.uid}:${item.gid}`; 350 | const isLast = index === max - 1; 351 | let name = item.name; 352 | if (item.link) { 353 | name = `${name} → ${item.link}`; 354 | } 355 | const padding = isLastList.length * 30; 356 | 357 | let className = ""; 358 | if (item.op === opRemoved) { 359 | className = "removed"; 360 | } else if (item.op === opModified) { 361 | className = "modified"; 362 | } 363 | let icon: JSX.Element = <>; 364 | if (item.children.length) { 365 | const { key } = item; 366 | if (isExpandAll() || 
opt.expandItems?.includes(key)) { 367 | icon = minusOutlined; 368 | } else { 369 | icon = plusOutlined; 370 | } 371 | icon = ( 372 | { 376 | e.preventDefault(); 377 | onToggleExpand(key); 378 | }} 379 | > 380 | {icon} 381 | 382 | ); 383 | } 384 | let downloadIcon: JSX.Element = <>; 385 | if (item.children.length === 0 && item.size > 0) { 386 | downloadIcon = ( 387 | 391 | {getDownloadIcon()} 392 | 393 | ); 394 | } 395 | list.push( 396 |
  • 397 | {item.mode} 398 | {id} 399 | {prettyBytes(item.size)} 400 | 406 | {icon} 407 | {name} 408 | {downloadIcon} 409 | 410 |
  • , 411 | ); 412 | count++; 413 | if (item.children.length && shouldExpand(item.key)) { 414 | const tmp = isLastList.slice(0); 415 | tmp.push(isLast); 416 | const childAppendCount = addToFileTreeView( 417 | onToggleExpand, 418 | layer, 419 | list, 420 | item.children, 421 | tmp, 422 | opt, 423 | ); 424 | // 如果子文件一个都没有插入 425 | // 也未指定keyword 426 | // 则将当前目录也删除 427 | if (childAppendCount === 0 && opt.keyword === "") { 428 | list.pop(); 429 | count -= 1; 430 | } 431 | } 432 | }); 433 | return count; 434 | }; 435 | 436 | interface ImageDescriptions { 437 | score: string; 438 | size: string; 439 | otherSize: string; 440 | wastedSize: string; 441 | osArch: string; 442 | created: string; 443 | } 444 | interface AppState { 445 | version: string; 446 | gotResult: boolean; 447 | loading: boolean; 448 | imageDescriptions: ImageDescriptions; 449 | layers: Layer[]; 450 | currentLayer: number; 451 | fileTreeList: FileTreeList[][]; 452 | fileTreeViewOption: FileTreeViewOption; 453 | wastedList: FileWastedSummary[]; 454 | imageName: string; 455 | arch: string; 456 | latestAnalyzeImages: string[]; 457 | bigModifiedFileList: ModifiedFile[]; 458 | } 459 | 460 | interface LatestImages { 461 | images: string[]; 462 | version: string; 463 | } 464 | 465 | interface App { 466 | state: AppState; 467 | } 468 | const amd64Arch = "amd64"; 469 | const arm64Arch = "arm64"; 470 | const request = axios.create({ 471 | timeout: 600 * 1000, 472 | baseURL: "./api", 473 | }); 474 | 475 | class App extends Component { 476 | constructor(props: any) { 477 | super(props); 478 | const urlInfo = new URL(window.location.href); 479 | const image = urlInfo.searchParams.get("image") || ""; 480 | let arch = urlInfo.searchParams.get("arch") || amd64Arch; 481 | if ([amd64Arch, arm64Arch].indexOf(arch) === -1) { 482 | arch = amd64Arch; 483 | } 484 | this.state = { 485 | gotResult: false, 486 | loading: false, 487 | imageDescriptions: {} as ImageDescriptions, 488 | layers: [], 489 | currentLayer: 0, 490 | fileTreeList: [], 491 | fileTreeViewOption: {} as FileTreeViewOption, 492 | wastedList: [], 493 | imageName: image, 494 | arch, 495 | latestAnalyzeImages: [], 496 | bigModifiedFileList: [], 497 | version: "", 498 | }; 499 | } 500 | async componentDidMount() { 501 | if (this.state.imageName) { 502 | this.onSearch(this.state.imageName); 503 | } 504 | const { data } = await request.get("/latest-images", { 505 | timeout: 5 * 1000, 506 | }); 507 | this.setState({ 508 | latestAnalyzeImages: data.images, 509 | version: data.version, 510 | }); 511 | } 512 | async onSearch(value: String) { 513 | const image = value.trim(); 514 | if (!image) { 515 | return; 516 | } 517 | const { arch } = this.state; 518 | const url = `./?image=${image}&arch=${arch}`; 519 | if (window.location.href !== url) { 520 | window.history.pushState(null, "", url); 521 | } 522 | 523 | this.setState({ 524 | imageName: image, 525 | loading: true, 526 | }); 527 | try { 528 | let url = `/analyze?image=${image}`; 529 | if (!/^(file|docker):\/\//.test(image) && arch) { 530 | url += `?arch=${arch}`; 531 | } 532 | const { data } = await request.get(url, { 533 | timeout: 10 * 60 * 1000, 534 | }); 535 | // 为每个file tree item增加key 536 | data.fileTreeList.forEach((fileTree) => { 537 | addKeyToFileTreeItem(fileTree, ""); 538 | }); 539 | 540 | const result = getImageSummary(data); 541 | this.setState({ 542 | imageDescriptions: result.imageDescriptions, 543 | wastedList: result.wastedList, 544 | fileTreeList: data.fileTreeList, 545 | layers: data.layers, 546 | currentLayer: 0, 547 | 
gotResult: true, 548 | bigModifiedFileList: data.bigModifiedFileList, 549 | }); 550 | } catch (err: any) { 551 | let msg = err?.message as string; 552 | let axiosErr = err as AxiosError; 553 | if (axiosErr?.response?.data) { 554 | let data = axiosErr.response.data as { 555 | message: string; 556 | }; 557 | msg = data.message || ""; 558 | } 559 | message.error(msg || "analyze image fail", 10); 560 | } finally { 561 | this.setState({ 562 | loading: false, 563 | }); 564 | } 565 | } 566 | render(): ReactNode { 567 | const { 568 | imageName, 569 | gotResult, 570 | loading, 571 | imageDescriptions, 572 | layers, 573 | currentLayer, 574 | fileTreeList, 575 | fileTreeViewOption, 576 | wastedList, 577 | arch, 578 | latestAnalyzeImages, 579 | bigModifiedFileList, 580 | version, 581 | } = this.state; 582 | const onToggleExpand = (key: string) => { 583 | const opt = Object.assign({}, this.state.fileTreeViewOption); 584 | const items = opt.expandItems || []; 585 | const index = items.indexOf(key); 586 | if (index === -1) { 587 | items.push(key); 588 | } else { 589 | items.splice(index, 1); 590 | } 591 | opt.expandItems = items; 592 | this.setState({ 593 | fileTreeViewOption: opt, 594 | }); 595 | }; 596 | 597 | const selectLayer = (index: number) => { 598 | this.setState({ 599 | currentLayer: index, 600 | }); 601 | }; 602 | 603 | const getImageSummaryView = () => { 604 | const imageSummary = ( 605 | 606 | 607 | {imageDescriptions["score"]} 608 | 609 | 610 | {imageDescriptions["size"]} 611 | 612 | 613 | {imageDescriptions["otherSize"]} 614 | 615 | 616 | {imageDescriptions["wastedSize"]} 617 | 618 | 619 | {imageDescriptions["osArch"]} 620 | 621 | 622 | {new Date(imageDescriptions["created"]).toLocaleString()} 623 | 624 | 625 | ); 626 | return
    {imageSummary}
    ; 627 | }; 628 | 629 | const layerOptions = layers.map((item, index) => { 630 | let { digest } = item; 631 | if (digest) { 632 | digest = digest.replace("sha256:", "").substring(0, 8); 633 | } 634 | if (!digest) { 635 | digest = "none"; 636 | } 637 | const size = item.size || 0; 638 | let sizeDesc = ""; 639 | if (size > 0) { 640 | sizeDesc = ` (${prettyBytes(size)})`; 641 | } 642 | 643 | let label = `${index + 1}: ${digest.toUpperCase()}${sizeDesc}`; 644 | return { 645 | value: index, 646 | label, 647 | }; 648 | }); 649 | 650 | const sizeOptions = [ 651 | 0, 652 | 10 * 1000, 653 | 30 * 1000, 654 | 100 * 1000, 655 | 500 * 1000, 656 | 1000 * 1000, 657 | 10 * 1000 * 1000, 658 | ].map((size) => { 659 | let label = `>= ${prettyBytes(size)}`; 660 | if (size === 0) { 661 | label = "No Limit"; 662 | } 663 | return { 664 | value: size, 665 | label, 666 | }; 667 | }); 668 | 669 | const fileTreeViewList = [] as JSX.Element[]; 670 | addToFileTreeView( 671 | onToggleExpand, 672 | layers[currentLayer], 673 | fileTreeViewList, 674 | fileTreeList[currentLayer], 675 | [], 676 | fileTreeViewOption, 677 | ); 678 | 679 | const layerFilter = ( 680 | 681 | 682 | 683 | { 699 | const opt = Object.assign({}, this.state.fileTreeViewOption); 700 | opt.sizeLimit = limit; 701 | this.setState({ 702 | fileTreeViewOption: opt, 703 | }); 704 | }} 705 | /> 706 | 707 | 708 | 709 | 710 | { 712 | const opt = Object.assign({}, fileTreeViewOption); 713 | opt.onlyModifiedRemoved = e.target.checked; 714 | this.setState({ 715 | fileTreeViewOption: opt, 716 | }); 717 | }} 718 | > 719 | {i18nGet("modificationLabel")} 720 | 721 | 722 | 723 | 724 | 725 | { 727 | const opt = Object.assign({}, fileTreeViewOption); 728 | opt.expandAll = e.target.checked; 729 | this.setState({ 730 | fileTreeViewOption: opt, 731 | }); 732 | }} 733 | > 734 | {i18nGet("expandLabel")} 735 | 736 | 737 | 738 | 739 | 740 | { 744 | const opt = Object.assign({}, fileTreeViewOption); 745 | opt.keyword = e.target.value.trim(); 746 | this.setState({ 747 | fileTreeViewOption: opt, 748 | }); 749 | }} 750 | /> 751 | 752 | 753 | 754 | ); 755 | 756 | const getLayerContentView = () => { 757 | let fileTreeListClassName = "fileTree"; 758 | if (isDarkMode()) { 759 | fileTreeListClassName += " dark"; 760 | } 761 | 762 | const layerInfo = layers[currentLayer]; 763 | 764 | const cmd = ( 765 | <> 766 | 767 | 768 | 769 | {i18nGet("createdLabel")}: 770 | {new Date(layerInfo.created).toLocaleString()} 771 | 772 | 773 | {i18nGet("commandLabel")}: 774 | {layerInfo.cmd} 775 | 776 | 777 | 778 | 779 | ); 780 | return ( 781 |
    782 | 783 | {layerFilter} 784 | {cmd} 785 |
      786 |
    • 787 | {i18nGet("permissionLabel")} 788 | UID:GID 789 | {i18nGet("sizeLabel")} 790 | {i18nGet("fileTreeLabel")} 791 |
    • 792 | {fileTreeViewList} 793 |
    794 |
    795 |
    796 | ); 797 | }; 798 | 799 | const getWastedSummaryView = () => { 800 | const arr = wastedList.filter((item) => item.totalSize > 0); 801 | if (arr.length === 0) { 802 | return <></>; 803 | } 804 | const list = arr.map((item) => { 805 | return ( 806 |
  • 807 | {prettyBytes(item.totalSize)} 808 | {item.count} 809 | /{item.path} 810 |
  • 811 | ); 812 | }); 813 | let className = "wastedList"; 814 | if (isDarkMode()) { 815 | className += " dark"; 816 | } 817 | return ( 818 |
    819 | 820 |
      821 |
    • 822 | {i18nGet("totalSizeLabel")} 823 | {i18nGet("countLabel")} 824 | {i18nGet("pathLabel")} 825 |
    • 826 | {list} 827 |
    828 |
    829 |
    830 | ); 831 | }; 832 | 833 | const getBigModifiedFileView = () => { 834 | if (bigModifiedFileList.length === 0) { 835 | return <></>; 836 | } 837 | const arr = bigModifiedFileList.slice(0); 838 | arr.sort((item1, item2) => { 839 | return item2.size - item1.size; 840 | }); 841 | const list = arr.map((item) => { 842 | let { digest } = item; 843 | if (digest) { 844 | digest = digest.replace("sha256:", "").substring(0, 8); 845 | } 846 | return ( 847 |
  • 848 | {digest.toUpperCase()} 849 | {prettyBytes(item.size)} 850 | /{item.path} 851 |
  • 852 | ); 853 | }); 854 | let className = "bigModifiedFileList"; 855 | if (isDarkMode()) { 856 | className += " dark"; 857 | } 858 | return ( 859 |
    860 | 861 |
      862 |
    • 863 | {i18nGet("layerLabel")} 864 | {i18nGet("totalSizeLabel")} 865 | {i18nGet("pathLabel")} 866 |
    • 867 | {list} 868 |
    869 |
    870 |
    871 | ); 872 | }; 873 | const getSearchView = () => { 874 | const size = "large"; 875 | const selectBefore = ( 876 | 891 | ); 892 | return ( 893 | 904 | ); 905 | }; 906 | let headerClass = "header"; 907 | if (isDarkMode()) { 908 | headerClass += " dark"; 909 | } 910 | 911 | const getLatestAnalyzeImagesView = () => { 912 | if (latestAnalyzeImages.length === 0) { 913 | return <>; 914 | } 915 | return ( 916 | {i18nGet("latestAnalyzeImagesTitle")}} 921 | dataSource={latestAnalyzeImages} 922 | renderItem={(item) => ( 923 | 924 | 925 | { 928 | const arr = item.split("?"); 929 | const image = arr[0]; 930 | let arch = amd64Arch; 931 | if (arr[1]) { 932 | const result = /arch=(\S+)/.exec(arr[1]); 933 | if (result && result.length === 2) { 934 | arch = result[1]; 935 | } 936 | } 937 | window.location.href = `/?image=${image}&arch=${arch}`; 938 | e.preventDefault(); 939 | }} 940 | > 941 | {item} 942 | 943 | {" "} 944 | 945 | )} 946 | /> 947 | ); 948 | }; 949 | 950 | return ( 951 | 956 | 957 | {getGithubIcon(isDarkMode())} 958 |
    959 |
    960 |
    { 963 | window.location.href = "/"; 964 | }} 965 | > 966 | 967 | {getLogoIcon(isDarkMode())} 968 | Diving {version} 969 | 970 |
    971 | {gotResult &&
    {getSearchView()}
    } 972 |
    973 |
    974 | {!gotResult && ( 975 |
    976 | {getSearchView()} 977 |
    978 | 979 | {i18nGet("imageAnalyzeDesc")} 980 |
    981 | redis:alpine, vicanso/diving 982 |
    983 | quay.io/prometheus/node-exporter 984 |
    985 | dragonwell-registry.cn-hangzhou.cr.aliyuncs.com/dragonwell/dragonwell 986 |
    987 | xxx.com/user/image:tag 988 |
    989 | {i18nGet("imageSlowDesc")} 990 |
    991 |
    992 |
    993 | )} 994 | {gotResult && ( 995 | 996 |
    997 | {getImageSummaryView()} 998 | {getLayerContentView()} 999 | {getWastedSummaryView()} 1000 | {getBigModifiedFileView()} 1001 |
    1002 |
    1003 | )} 1004 | {getLatestAnalyzeImagesView()} 1005 |
    1006 |
    1007 | ); 1008 | } 1009 | } 1010 | 1011 | export default App; 1012 | -------------------------------------------------------------------------------- /src/image/docker.rs: -------------------------------------------------------------------------------- 1 | use crate::config::must_load_config; 2 | use crate::{task_local::*, tl_info}; 3 | use chrono::{DateTime, Utc}; 4 | use http::StatusCode; 5 | use lru::LruCache; 6 | use once_cell::sync::OnceCell; 7 | use regex::Regex; 8 | use reqwest::Client; 9 | use serde::{de::DeserializeOwned, Deserialize, Serialize}; 10 | use serde_json::Value; 11 | use snafu::{ResultExt, Snafu}; 12 | use std::io::Write; 13 | use std::process::{Command, Stdio}; 14 | use std::{collections::HashMap, num::NonZeroUsize, str::FromStr, sync::Mutex, time::Duration}; 15 | use substring::Substring; 16 | 17 | use super::{get_file_content_from_tar, get_file_size_from_tar, get_files_from_layer}; 18 | use super::{ 19 | layer::ImageLayerInfo, 20 | oci_image::{ImageFileSummary, ImageManifestLayer}, 21 | FileTreeItem, ImageConfig, ImageIndex, ImageLayer, ImageManifest, ImageManifestConfig, Op, 22 | MEDIA_TYPE_DOCKER_SCHEMA2_MANIFEST, MEDIA_TYPE_IMAGE_INDEX, MEDIA_TYPE_MANIFEST_LIST, 23 | }; 24 | use crate::{ 25 | error::HTTPError, 26 | image::{convert_files_to_file_tree, find_file_tree_item, ImageFileInfo}, 27 | store::{get_blob_from_file, save_blob_to_file}, 28 | }; 29 | 30 | #[derive(Debug, Snafu)] 31 | pub enum Error { 32 | #[snafu(display("IO fail: {source}"))] 33 | IO { source: std::io::Error }, 34 | #[snafu(display("Build request {} fail: {}", url, source))] 35 | Build { source: reqwest::Error, url: String }, 36 | #[snafu(display("Request {} fail: {}", url, source))] 37 | Request { source: reqwest::Error, url: String }, 38 | #[snafu(display("Parse {} json fail: {}", url, source))] 39 | Json { source: reqwest::Error, url: String }, 40 | #[snafu(display("Serde json {category} fail: {source}"))] 41 | SerdeJson { 42 | source: serde_json::Error, 43 | category: String, 44 | }, 45 | #[snafu(display("Layer handle fail: {}", source))] 46 | Layer { source: super::layer::Error }, 47 | #[snafu(display("Request {} code: {} fail: {}", url, code, message))] 48 | Docker { 49 | message: String, 50 | code: String, 51 | url: String, 52 | }, 53 | #[snafu(display("{message}"))] 54 | Whatever { message: String }, 55 | } 56 | 57 | impl From for HTTPError { 58 | fn from(err: Error) -> Self { 59 | // 对于部分error单独转换 60 | HTTPError::new_with_category(&err.to_string(), "docker") 61 | } 62 | } 63 | 64 | pub type Result = std::result::Result; 65 | 66 | static REGISTRY: &str = "https://index.docker.io/v2"; 67 | 68 | static REGISTRY_LOCAL_FILE: &str = "local-file"; 69 | static REGISTRY_LOCAL_DOCKER: &str = "local-docker"; 70 | 71 | #[derive(Debug, Clone, Default)] 72 | pub struct ImageInfo { 73 | // 镜像对应的registry 74 | pub registry: String, 75 | // 镜像用户 76 | pub user: String, 77 | // 镜像名称 78 | pub name: String, 79 | // 镜像版本 80 | pub tag: String, 81 | // 镜像架构 82 | pub arch: String, 83 | } 84 | 85 | static FILE_PROTOCOL: &str = "file://"; 86 | static LOCAL_DOCKER_PROTOCOL: &str = "docker://"; 87 | 88 | pub fn parse_image_info(image: &str) -> ImageInfo { 89 | let mut value = image.to_string(); 90 | if value.starts_with(FILE_PROTOCOL) { 91 | return ImageInfo { 92 | registry: REGISTRY_LOCAL_FILE.to_string(), 93 | name: value.replace(FILE_PROTOCOL, ""), 94 | ..Default::default() 95 | }; 96 | } 97 | if value.starts_with(LOCAL_DOCKER_PROTOCOL) { 98 | return ImageInfo { 99 | registry: 
REGISTRY_LOCAL_DOCKER.to_string(), 100 | name: value.replace(LOCAL_DOCKER_PROTOCOL, ""), 101 | ..Default::default() 102 | }; 103 | } 104 | let mut arch = "".to_string(); 105 | if let Some(index) = value.find('?') { 106 | let query = value.substring(index + 1, value.len()); 107 | for item in query.split('&') { 108 | let arr: Vec<&str> = item.split('=').collect(); 109 | if arr.len() == 2 && arr[0] == "arch" { 110 | arch = arr[1].to_string(); 111 | } 112 | } 113 | value = value.substring(0, index).to_string(); 114 | } 115 | if !value.contains(':') { 116 | value += ":latest"; 117 | } 118 | 119 | let mut values: Vec<&str> = value.split(&['/', ':']).collect(); 120 | let tag = values.pop().unwrap_or_default().to_string(); 121 | let mut registry = REGISTRY.to_string(); 122 | let mut user = "library".to_string(); 123 | let mut name = "".to_string(); 124 | match values.len() { 125 | 1 => { 126 | name = values[0].to_string(); 127 | } 128 | 2 => { 129 | user = values[0].to_string(); 130 | name = values[1].to_string(); 131 | } 132 | 3 => { 133 | // 默认仅支持https v2 134 | registry = format!("https://{}/v2", values[0]); 135 | user = values[1].to_string(); 136 | name = values[2].to_string(); 137 | } 138 | _ => {} 139 | } 140 | 141 | ImageInfo { 142 | registry, 143 | user, 144 | name, 145 | tag, 146 | arch, 147 | } 148 | } 149 | 150 | #[derive(Debug, Clone, Default)] 151 | pub struct AuthInfo { 152 | pub auth: String, 153 | pub service: String, 154 | pub scope: String, 155 | } 156 | 157 | fn parse_auth_info(auth: &str) -> Result { 158 | let re = 159 | Regex::new("(?P\\S+?)=\"(?P\\S+?)\",?").map_err(|err| Error::Whatever { 160 | message: err.to_string(), 161 | })?; 162 | let mut auth_info = AuthInfo { 163 | ..Default::default() 164 | }; 165 | for caps in re.captures_iter(auth) { 166 | let value = caps["value"].to_string(); 167 | match &caps["key"] { 168 | "realm" => auth_info.auth = value, 169 | "service" => auth_info.service = value, 170 | "scope" => auth_info.scope = value, 171 | _ => {} 172 | } 173 | } 174 | 175 | Ok(auth_info) 176 | } 177 | 178 | #[derive(Debug, Clone, Default)] 179 | pub struct DockerClient { 180 | registry: String, 181 | } 182 | 183 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 184 | pub struct DockerTokenInfo { 185 | token: String, 186 | expires_in: Option, 187 | issued_at: Option, 188 | } 189 | 190 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 191 | pub struct ImageManifestCacheInfo { 192 | expired_at: i64, 193 | manifest: ImageManifest, 194 | } 195 | 196 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 197 | #[serde(rename_all = "camelCase")] 198 | pub struct BigModifiedFileInfo { 199 | pub path: String, 200 | pub size: u64, 201 | pub digest: String, 202 | } 203 | 204 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 205 | #[serde(rename_all = "camelCase")] 206 | pub struct DockerAnalyzeResult { 207 | // 镜像名称 208 | pub name: String, 209 | // 架构 210 | pub arch: String, 211 | // 系统 212 | pub os: String, 213 | // 运行用户 214 | pub user: String, 215 | // 环境变量 216 | pub envs: Vec, 217 | // 镜像label 218 | pub labels: Vec, 219 | // 镜像分层数据 220 | pub layers: Vec, 221 | // 镜像大小 222 | pub size: u64, 223 | // 镜像分层解压大小 224 | pub total_size: u64, 225 | // 镜像分层对应的文件树 226 | pub file_tree_list: Vec>, 227 | // 镜像删除与更新文件汇总 228 | pub file_summary_list: Vec, 229 | // 本次镜像变化的大文件 230 | pub big_modified_file_list: Vec, 231 | } 232 | 233 | #[derive(Default, Debug, Clone, PartialEq, Serialize)] 234 | pub struct 
ImageFileWastedSummary { 235 | pub path: String, 236 | pub total_size: u64, 237 | pub count: u32, 238 | } 239 | 240 | #[derive(Default, Debug, Clone, Serialize)] 241 | pub struct DockerAnalyzeSummary { 242 | pub wasted_list: Vec<ImageFileWastedSummary>, 243 | pub wasted_size: u64, 244 | pub wasted_percent: f64, 245 | pub score: u64, 246 | } 247 | 248 | impl DockerAnalyzeResult { 249 | pub fn summary(&self) -> DockerAnalyzeSummary { 250 | let mut wasted_list: Vec<ImageFileWastedSummary> = vec![]; 251 | let mut wasted_size = 0; 252 | for file in self.file_summary_list.iter() { 253 | let mut found = false; 254 | let info = &file.info; 255 | wasted_size += info.size; 256 | for wasted in wasted_list.iter_mut() { 257 | if wasted.path == info.path { 258 | found = true; 259 | wasted.count += 1; 260 | wasted.total_size += info.size; 261 | } 262 | } 263 | if !found { 264 | wasted_list.push(ImageFileWastedSummary { 265 | path: info.path.clone(), 266 | count: 1, 267 | total_size: info.size, 268 | }); 269 | } 270 | } 271 | wasted_list.sort_by(|a, b| b.total_size.cmp(&a.total_size)); 272 | 273 | let mut score = 100 - wasted_size * 100 / self.total_size; 274 | // If any space is wasted, subtract 1 from the score 275 | if wasted_size != 0 { 276 | score -= 1; 277 | } 278 | DockerAnalyzeSummary { 279 | wasted_list, 280 | wasted_size, 281 | wasted_percent: (wasted_size as f64) / (self.total_size as f64), 282 | score, 283 | } 284 | } 285 | } 286 | 287 | impl DockerTokenInfo { 288 | // Check whether the docker token has expired 289 | fn expired(&self) -> bool { 290 | let issued_at = self.issued_at.clone().unwrap_or_default(); 291 | if let Ok(value) = DateTime::<Utc>::from_str(&issued_at) { 292 | // The token will be used for later requests, 293 | // so treat it as expired 10 seconds early to avoid it becoming invalid mid-request 294 | let offset = (self.expires_in.unwrap_or(600) - 10) as i64; 295 | let now = Utc::now().timestamp(); 296 | return value.timestamp() + offset <= now; 297 | } 298 | false 299 | } 300 | } 301 | 302 | // Get the docker token cache instance 303 | fn get_docker_token_cache() -> &'static Mutex<LruCache<String, DockerTokenInfo>> { 304 | static DOCKER_TOKEN_CACHE: OnceCell<Mutex<LruCache<String, DockerTokenInfo>>> = OnceCell::new(); 305 | DOCKER_TOKEN_CACHE.get_or_init(|| { 306 | let c = LruCache::new(NonZeroUsize::new(100).unwrap()); 307 | Mutex::new(c) 308 | }) 309 | } 310 | 311 | // Get the docker token from the cache 312 | fn get_docker_token_from_cache(key: &String) -> Option<DockerTokenInfo> { 313 | if let Ok(mut cache) = get_docker_token_cache().lock() { 314 | if let Some(info) = cache.get(key) { 315 | return Some(info.clone()); 316 | } 317 | } 318 | Option::None 319 | } 320 | 321 | // Write the docker token to the cache 322 | fn set_docker_token_to_cache(key: &String, info: DockerTokenInfo) { 323 | // Ignore failures 324 | if let Ok(mut cache) = get_docker_token_cache().lock() { 325 | cache.put(key.to_string(), info); 326 | } 327 | } 328 | 329 | // Get the manifest cache instance 330 | fn get_manifest_cache() -> &'static Mutex<LruCache<String, ImageManifestCacheInfo>> { 331 | static MANIFEST_CACHE: OnceCell<Mutex<LruCache<String, ImageManifestCacheInfo>>> = 332 | OnceCell::new(); 333 | MANIFEST_CACHE.get_or_init(|| { 334 | let c = LruCache::new(NonZeroUsize::new(100).unwrap()); 335 | Mutex::new(c) 336 | }) 337 | } 338 | 339 | fn get_manifest_from_cache(key: &String) -> Option<ImageManifest> { 340 | if let Ok(mut cache) = get_manifest_cache().lock() { 341 | if let Some(info) = cache.get(key) { 342 | // The data has not expired 343 | if info.expired_at > Utc::now().timestamp() { 344 | return Some(info.manifest.clone()); 345 | } 346 | } 347 | } 348 | Option::None 349 | } 350 | 351 | fn set_manifest_to_cache(key: &String, manifest: ImageManifest, ttl_seconds: i64) { 352 | // Ignore failures 353 | if let Ok(mut cache) = get_manifest_cache().lock() { 354 | // Cache for 5 minutes 355 | cache.push( 356 | key.to_string(), 357 | ImageManifestCacheInfo { 358 | expired_at: Utc::now().timestamp() +
ttl_seconds, 359 | manifest, 360 | }, 361 | ); 362 | } 363 | } 364 | 365 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 366 | #[serde(rename_all = "camelCase")] 367 | struct DockerRequestErrorResp { 368 | pub errors: Vec, 369 | } 370 | 371 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 372 | #[serde(rename_all = "camelCase")] 373 | struct DockerRequestError { 374 | pub code: String, 375 | pub message: String, 376 | } 377 | 378 | fn get_value_from_json(v: &[u8], key: &str) -> Result { 379 | let mut root: Value = serde_json::from_slice(v).context(SerdeJsonSnafu { 380 | category: "get_from_json", 381 | })?; 382 | for k in key.split('.') { 383 | let value = root.get(k); 384 | if value.is_none() { 385 | return Ok("".to_string()); 386 | } 387 | root = value.unwrap().to_owned(); 388 | } 389 | Ok(root.as_str().unwrap_or("").to_string()) 390 | } 391 | 392 | fn add_to_file_summary( 393 | file_summary_list: &mut Vec, 394 | layer_index: usize, 395 | files: &[ImageFileInfo], 396 | file_tree_list: &[Vec], 397 | ) { 398 | for file in files.iter() { 399 | for items in file_tree_list.iter() { 400 | let arr: Vec<&str> = file.path.split('/').collect(); 401 | if let Some(found) = find_file_tree_item(items, arr) { 402 | // 以前已存在,因此为修改或删除 403 | // 文件删除 404 | let mut op = Op::Modified; 405 | let mut info = file.clone(); 406 | if file.is_whiteout.is_some() { 407 | op = Op::Removed; 408 | info.size = found.size; 409 | } 410 | file_summary_list.push(ImageFileSummary { 411 | layer_index, 412 | op, 413 | info, 414 | }); 415 | } 416 | } 417 | } 418 | } 419 | 420 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 421 | #[serde(rename_all = "camelCase")] 422 | pub struct LocalManifest { 423 | #[serde(rename = "Config")] 424 | pub config: String, 425 | #[serde(rename = "RepoTags")] 426 | pub repo_tags: Vec, 427 | #[serde(rename = "Layers")] 428 | pub layers: Vec, 429 | } 430 | 431 | impl From for ImageManifest { 432 | fn from(value: LocalManifest) -> Self { 433 | let layers = value 434 | .layers 435 | .iter() 436 | .map(|layer| ImageManifestLayer { 437 | media_type: "application/vnd.docker.image.rootfs.diff.tar".to_string(), 438 | digest: layer.to_string(), 439 | ..Default::default() 440 | }) 441 | .collect(); 442 | ImageManifest { 443 | media_type: MEDIA_TYPE_DOCKER_SCHEMA2_MANIFEST.to_string(), 444 | schema_version: 2, 445 | config: ImageManifestConfig { 446 | digest: value.config, 447 | ..Default::default() 448 | }, 449 | layers, 450 | } 451 | } 452 | } 453 | 454 | #[derive(Default, Debug, Clone, PartialEq, Serialize, Deserialize)] 455 | pub struct DockerImageParams { 456 | // 用户 457 | pub user: String, 458 | // 镜像 459 | pub img: String, 460 | // 镜像tag 461 | pub tag: String, 462 | // docker token 463 | pub token: String, 464 | // 镜像架构 465 | pub arch: String, 466 | } 467 | 468 | fn get_buf_from_local_docker(image: &str) -> Result> { 469 | tl_info!(image = image, "saving image"); 470 | let docker_save = Command::new("docker") 471 | .arg("save") 472 | .arg(image) 473 | .stdout(Stdio::piped()) 474 | .spawn() 475 | .map_err(|err| Error::IO { source: err })?; 476 | let output = docker_save 477 | .wait_with_output() 478 | .map_err(|err| Error::IO { source: err })?; 479 | if !output.status.success() { 480 | return Err(Error::Whatever { 481 | message: "docker save fail".to_string(), 482 | }); 483 | } 484 | tl_info!(image = image, "save image done"); 485 | Ok(output.stdout) 486 | } 487 | 488 | impl DockerClient { 489 | pub fn new(register: &str) -> Self { 490 | 
DockerClient { 491 | registry: register.to_string(), 492 | } 493 | } 494 | fn is_local(&self) -> bool { 495 | self.registry == REGISTRY_LOCAL_FILE 496 | } 497 | async fn get_local_manifest(&self, image: &str) -> Result { 498 | let data = get_file_content_from_tar(image, "manifest.json") 499 | .await 500 | .context(LayerSnafu {})?; 501 | 502 | let manifest_list = 503 | serde_json::from_slice::>(&data).context(SerdeJsonSnafu { 504 | category: "get_local_manifest", 505 | })?; 506 | if manifest_list.is_empty() { 507 | return Err(Error::Whatever { 508 | message: "Local Manifest Not Found".to_string(), 509 | }); 510 | } 511 | Ok(manifest_list[0].clone()) 512 | } 513 | async fn get_bytes( 514 | &self, 515 | url: String, 516 | headers: HashMap, 517 | ) -> Result { 518 | let mut builder = Client::builder() 519 | .build() 520 | .context(BuildSnafu { url: url.clone() })? 521 | .get(url.clone()); 522 | builder = builder.timeout(Duration::from_secs(30 * 60)); 523 | for (key, value) in headers { 524 | builder = builder.header(key, value); 525 | } 526 | let resp = builder 527 | .send() 528 | .await 529 | .context(RequestSnafu { url: url.clone() })?; 530 | if resp.status().as_u16() >= StatusCode::UNAUTHORIZED.as_u16() { 531 | let err = resp 532 | .json::() 533 | .await 534 | .context(JsonSnafu { url: url.clone() })?; 535 | return Err(Error::Docker { 536 | message: err.errors[0].message.clone(), 537 | code: err.errors[0].code.clone(), 538 | url: url.clone(), 539 | }); 540 | } 541 | 542 | let result = resp.bytes().await.context(JsonSnafu { url: url.clone() })?; 543 | Ok(result) 544 | } 545 | async fn get( 546 | &self, 547 | url: String, 548 | headers: HashMap, 549 | ) -> Result { 550 | let data = self.get_bytes(url.clone(), headers).await?; 551 | let result = serde_json::from_slice(&data).context(SerdeJsonSnafu { 552 | category: "request", 553 | })?; 554 | Ok(result) 555 | } 556 | // 获取manifest 557 | pub async fn get_manifest(&self, params: &DockerImageParams) -> Result { 558 | let img = ¶ms.img; 559 | let user = ¶ms.user; 560 | let tag = ¶ms.tag; 561 | let token = ¶ms.token; 562 | if self.is_local() { 563 | let local_manifest = self.get_local_manifest(img).await?; 564 | let mut image_manifest: ImageManifest = local_manifest.into(); 565 | for layer in image_manifest.layers.iter_mut() { 566 | let size = get_file_size_from_tar(img, &layer.digest) 567 | .await 568 | .context(LayerSnafu {})?; 569 | layer.size = size; 570 | } 571 | return Ok(image_manifest); 572 | } 573 | // TODO 如果tag非latest,是否可以缓存 574 | // 需要注意以命令行或以web server执行的程序生命周期的差别 575 | 576 | // 根据tag获取manifest 577 | let url = format!("{}/{user}/{img}/manifests/{tag}", self.registry); 578 | // 如果缓存中有,直接读取缓存 579 | let key = format!("{url}:{}", params.arch); 580 | if let Some(manifest) = get_manifest_from_cache(&key) { 581 | return Ok(manifest); 582 | } 583 | tl_info!(url = url, "getting manifest"); 584 | let mut headers = HashMap::new(); 585 | if !token.is_empty() { 586 | headers.insert("Authorization".to_string(), format!("Bearer {token}")); 587 | } 588 | // 支持的类型 589 | let accepts = [ 590 | MEDIA_TYPE_IMAGE_INDEX, 591 | MEDIA_TYPE_DOCKER_SCHEMA2_MANIFEST, 592 | MEDIA_TYPE_MANIFEST_LIST, 593 | ]; 594 | 595 | headers.insert("Accept".to_string(), accepts.join(", ")); 596 | let data = self.get_bytes(url.clone(), headers).await?; 597 | let media_type = get_value_from_json(&data, "mediaType")?; 598 | let resp: ImageManifest = if media_type == MEDIA_TYPE_DOCKER_SCHEMA2_MANIFEST { 599 | // docker的版本则可直接返回 600 | 
serde_json::from_slice(&data).context(SerdeJsonSnafu { 601 | category: "get_manifest_schema2", 602 | })? 603 | } else { 604 | let manifest = serde_json::from_slice::(&data) 605 | .context(SerdeJsonSnafu { 606 | category: "guess_manifest", 607 | })? 608 | .guess_manifest(¶ms.arch); 609 | tl_info!(arch = manifest.platform.architecture, "guess manifest"); 610 | let mut headers = HashMap::new(); 611 | if !token.is_empty() { 612 | headers.insert("Authorization".to_string(), format!("Bearer {token}")); 613 | } 614 | headers.insert("Accept".to_string(), manifest.media_type); 615 | // 根据digest再次获取 616 | let url = format!( 617 | "{}/{user}/{img}/manifests/{}", 618 | self.registry, manifest.digest 619 | ); 620 | let data = self.get_bytes(url.clone(), headers).await?; 621 | serde_json::from_slice(&data).context(SerdeJsonSnafu { 622 | category: "get_manifest", 623 | })? 624 | }; 625 | // 暂时有效期全部设置为5分钟 626 | // 后续考虑是否根据tag使用不同有效期 627 | set_manifest_to_cache(&key, resp.clone(), 5 * 60); 628 | tl_info!(url = url, "got manifest"); 629 | Ok(resp) 630 | } 631 | // 获取镜像的信息 632 | pub async fn get_image_config(&self, params: &DockerImageParams) -> Result { 633 | let img = ¶ms.img; 634 | let data = if self.is_local() { 635 | let local_manifest = self.get_local_manifest(img).await?; 636 | get_file_content_from_tar(img, &local_manifest.config) 637 | .await 638 | .context(LayerSnafu {})? 639 | } else { 640 | // 暂时只获取amd64, linux 641 | let manifest = self.get_manifest(params).await?; 642 | self.get_blob(params, &manifest.config.digest).await? 643 | }; 644 | 645 | let result = serde_json::from_slice(&data.to_vec()).context(SerdeJsonSnafu { 646 | category: "get_image_config", 647 | })?; 648 | Ok(result) 649 | } 650 | // 获取镜像分层的blob 651 | pub async fn get_blob(&self, params: &DockerImageParams, digest: &str) -> Result> { 652 | // 是否需要加锁避免同时读写 653 | // 忽略出错,如果出错直接从网络加载 654 | if let Ok(data) = get_blob_from_file(digest).await { 655 | return Ok(data); 656 | } 657 | let user = ¶ms.user; 658 | let img = ¶ms.img; 659 | let token = ¶ms.token; 660 | let url = format!("{}/{user}/{img}/blobs/{digest}", self.registry); 661 | tl_info!(url = url, "getting blob"); 662 | let mut headers = HashMap::new(); 663 | if !token.is_empty() { 664 | headers.insert("Authorization".to_string(), format!("Bearer {token}")); 665 | } 666 | let resp = self.get_bytes(url.clone(), headers).await?; 667 | 668 | // 出错忽略 669 | // 写入数据失败不影响后续 670 | let _ = save_blob_to_file(digest, &resp).await; 671 | tl_info!(url = url, "got blob"); 672 | Ok(resp.to_vec()) 673 | } 674 | async fn get_layer_files( 675 | &self, 676 | params: &DockerImageParams, 677 | layer: ImageManifestLayer, 678 | ) -> Result { 679 | let img = ¶ms.img; 680 | let buf = if self.is_local() { 681 | get_file_content_from_tar(img, &layer.digest) 682 | .await 683 | .context(LayerSnafu {})? 684 | } else { 685 | self.get_blob(params, &layer.digest).await? 
686 | }; 687 | 688 | let info = get_files_from_layer(&buf, &layer.media_type) 689 | .await 690 | .context(LayerSnafu {})?; 691 | Ok(info) 692 | } 693 | async fn get_all_layer_info( 694 | &self, 695 | params: DockerImageParams, 696 | layers: Vec<ImageManifestLayer>, 697 | ) -> Result<Vec<ImageLayerInfo>> { 698 | let s = self.clone(); 699 | let trace_id = TRACE_ID.with(clone_value_from_task_local); 700 | let result = std::thread::spawn(move || { 701 | let threads = must_load_config().threads.unwrap_or(layers.len()); 702 | // The new thread needs the trace id to be set again 703 | let runtime = tokio::runtime::Builder::new_multi_thread() 704 | .enable_all() 705 | .thread_name("getAllLayerInfo") 706 | .worker_threads(threads) 707 | .build() 708 | .expect("Creating tokio runtime"); 709 | runtime.block_on(async move { 710 | TRACE_ID 711 | .scope(trace_id, async { 712 | let mut handles = Vec::with_capacity(layers.len()); 713 | for layer in layers { 714 | handles.push(s.get_layer_files(&params, layer)); 715 | } 716 | 717 | let arr = futures::future::join_all(handles).await; 718 | let mut info_list = vec![]; 719 | for result in arr { 720 | let info = result?; 721 | info_list.push(info); 722 | } 723 | Ok::<Vec<ImageLayerInfo>, Error>(info_list) 724 | }) 725 | .await 726 | }) 727 | }) 728 | .join() 729 | .map_err(|_| Error::Whatever { 730 | message: "thread join error".to_string(), 731 | })?; 732 | let infos = result?; 733 | Ok(infos) 734 | } 735 | async fn get_auth_token(&self, params: &DockerImageParams) -> Result<String> { 736 | // Local files do not need a token 737 | if self.is_local() { 738 | return Ok("".to_string()); 739 | } 740 | let user = &params.user; 741 | let img = &params.img; 742 | let tag = &params.tag; 743 | let url = format!("{}/{user}/{img}/manifests/{tag}", self.registry); 744 | let mut builder = Client::builder() 745 | .build() 746 | .context(BuildSnafu { url: url.clone() })?
747 | .head(url.clone()); 748 | builder = builder.timeout(Duration::from_secs(5 * 60)); 749 | let resp = builder 750 | .send() 751 | .await 752 | .context(RequestSnafu { url: url.clone() })?; 753 | if resp.status().as_u16() == StatusCode::UNAUTHORIZED.as_u16() { 754 | if let Some(value) = resp.headers().get("www-authenticate") { 755 | let auth_info = parse_auth_info(value.to_str().unwrap_or_default())?; 756 | let url = format!( 757 | "{}?service={}&scope={}", 758 | auth_info.auth, auth_info.service, auth_info.scope 759 | ); 760 | let key = &url.clone(); 761 | if let Some(info) = get_docker_token_from_cache(&url) { 762 | if !info.expired() { 763 | return Ok(info.token); 764 | } 765 | } 766 | tl_info!(url = url, "getting token"); 767 | let mut resp = self 768 | .get::(url.clone(), HashMap::new()) 769 | .await?; 770 | if resp.issued_at.is_none() { 771 | resp.issued_at = Some(Utc::now().to_rfc3339()); 772 | } 773 | // 将token缓存,方便后续使用 774 | set_docker_token_to_cache(key, resp.clone()); 775 | tl_info!(url = url, "got token"); 776 | return Ok(resp.token); 777 | } 778 | } 779 | Ok("".to_string()) 780 | } 781 | pub async fn analyze(&self, params: &mut DockerImageParams) -> Result { 782 | let token = self.get_auth_token(params).await?; 783 | params.token = token; 784 | let manifest = self.get_manifest(params).await?; 785 | let config = self.get_image_config(params).await?; 786 | let user = ¶ms.user; 787 | let img = ¶ms.img; 788 | let tag = ¶ms.tag; 789 | 790 | let mut layers = vec![]; 791 | // let mut layer_infos = vec![]; 792 | let mut file_tree_list: Vec> = vec![]; 793 | let mut index = 0; 794 | let mut file_summary_list = vec![]; 795 | tl_info!(user = user, img = img, tag = tag, "analyzing image",); 796 | 797 | let mut image_size = 0; 798 | let mut image_total_size = 0; 799 | let info_list = self 800 | .get_all_layer_info(params.clone(), manifest.layers.clone()) 801 | .await?; 802 | let mut image_created = 0; 803 | if let Some(value) = config.history.last() { 804 | if let Ok(value) = DateTime::parse_from_rfc3339(&value.created) { 805 | image_created = value.timestamp(); 806 | } 807 | } 808 | let mut big_modified_file_list = vec![]; 809 | for (layer_index, history) in config.history.iter().enumerate() { 810 | let is_new = if let Ok(value) = DateTime::parse_from_rfc3339(&history.created) { 811 | // 如果5分钟内 812 | image_created - value.timestamp() < 300 813 | } else { 814 | false 815 | }; 816 | let empty = history.empty_layer.unwrap_or_default(); 817 | let mut digest = "".to_string(); 818 | let mut info = &ImageLayerInfo { 819 | ..Default::default() 820 | }; 821 | let mut media_type = "".to_string(); 822 | let mut size = 0; 823 | let mut file_tree = vec![]; 824 | // 只有非空的layer需要获取files 825 | if !empty { 826 | // manifest中的layer只对应非空的操作 827 | if let Some(value) = manifest.layers.get(index) { 828 | info = info_list.get(index).unwrap(); 829 | size = value.size; 830 | digest = value.digest.clone(); 831 | media_type = value.media_type.clone(); 832 | if layer_index != 0 { 833 | add_to_file_summary( 834 | &mut file_summary_list, 835 | layer_index, 836 | &info.files, 837 | &file_tree_list, 838 | ); 839 | } 840 | image_size += info.size; 841 | image_total_size += info.unpack_size; 842 | if is_new { 843 | for file in info.files.iter() { 844 | if file.size < 1000 * 1000 || !file.link.is_empty() { 845 | continue; 846 | } 847 | big_modified_file_list.push(BigModifiedFileInfo { 848 | path: file.path.clone(), 849 | size: file.size, 850 | digest: digest.clone(), 851 | }); 852 | } 853 | } 854 | // TODO 根据file 
summary判断文件是否更新或删除 855 | file_tree = convert_files_to_file_tree(&info.files, &file_summary_list); 856 | } 857 | index += 1; 858 | } 859 | 860 | let created_by = if let Some(ref value) = history.created_by { 861 | value.clone() 862 | } else { 863 | "".to_string() 864 | }; 865 | 866 | layers.push(ImageLayer { 867 | created: history.created.clone(), 868 | cmd: created_by, 869 | empty, 870 | digest, 871 | media_type, 872 | unpack_size: info.unpack_size, 873 | size, 874 | }); 875 | file_tree_list.push(file_tree); 876 | } 877 | 878 | tl_info!(user = user, img = img, tag = tag, "analyze image done",); 879 | let mut user = "".to_string(); 880 | let mut envs = vec![]; 881 | let mut labels = vec![]; 882 | if let Some(ref extra_info) = config.config { 883 | if let Some(ref value) = extra_info.user { 884 | user = value.to_string(); 885 | } 886 | if let Some(ref value) = extra_info.env { 887 | envs = value.clone(); 888 | } 889 | if let Some(ref value) = extra_info.labels { 890 | for (k, v) in value.iter() { 891 | labels.push(format!("{k}={v}")); 892 | } 893 | } 894 | } 895 | 896 | Ok(DockerAnalyzeResult { 897 | name: format!("{user}/{img}:{tag}"), 898 | arch: config.architecture, 899 | os: config.os, 900 | user, 901 | envs, 902 | labels, 903 | layers, 904 | size: image_size, 905 | total_size: image_total_size, 906 | file_tree_list, 907 | file_summary_list, 908 | big_modified_file_list, 909 | }) 910 | } 911 | } 912 | 913 | pub async fn analyze_docker_image(image_info: ImageInfo) -> Result { 914 | if image_info.registry == REGISTRY_LOCAL_DOCKER { 915 | let buf = get_buf_from_local_docker(&image_info.name)?; 916 | let mut tmpfile = tempfile::Builder::new().tempfile().unwrap(); 917 | let filename = tmpfile.path().to_string_lossy().to_string(); 918 | tl_info!("saving tmp file"); 919 | tmpfile.write_all(&buf).context(IOSnafu {})?; 920 | tmpfile.flush().context(IOSnafu {})?; 921 | tl_info!("save tmp file done"); 922 | 923 | let c = DockerClient::new(REGISTRY_LOCAL_FILE); 924 | c.analyze(&mut DockerImageParams { 925 | img: filename, 926 | ..Default::default() 927 | }) 928 | .await 929 | } else { 930 | let c = DockerClient::new(&image_info.registry); 931 | c.analyze(&mut DockerImageParams { 932 | user: image_info.user, 933 | img: image_info.name, 934 | tag: image_info.tag, 935 | arch: image_info.arch, 936 | ..Default::default() 937 | }) 938 | .await 939 | } 940 | } 941 | --------------------------------------------------------------------------------