├── .github
│   └── workflows
│       └── docker-publish.yml
├── Dockerfile
├── README.md
├── d2c.py
├── requirements.txt
└── run.sh

/.github/workflows/docker-publish.yml:
--------------------------------------------------------------------------------
name: Docker Multi-Platform Build

on:
  push:
    tags:
      - "v*.*.*"
  workflow_dispatch:

# Permissions needed to publish packages
permissions:
  contents: read
  packages: write

env:
  IMAGE_NAME: d2c

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          buildkitd-flags: --debug

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Login to Ali Registry
        uses: docker/login-action@v3
        with:
          registry: ${{ secrets.ALI_REGISTRY }}
          username: ${{ secrets.ALI_USERNAME }}
          password: ${{ secrets.ALI_PASSWORD }}

      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: |
            ghcr.io/${{ github.repository_owner }}/${{ env.IMAGE_NAME }}
            ${{ secrets.ALI_REGISTRY }}/cherry4nas/${{ env.IMAGE_NAME }}
          tags: |
            type=semver,pattern={{version}}
            type=raw,value=latest,enable={{is_default_branch}}
          labels: |
            maintainer="可爱的小cherry"

      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
# Multi-platform base image
FROM python:3.9-slim

# Default environment variables
ENV NAS=debian
ENV CRON="0 */12 * * *"
ENV NETWORK=true
ENV TZ=Asia/Shanghai

WORKDIR /app

# Build arguments provided by buildx
ARG TARGETPLATFORM
ARG BUILDPLATFORM

# Log the target platform for debugging multi-arch builds
RUN echo "Building for $TARGETPLATFORM on $BUILDPLATFORM"

# Use a China-mainland APT mirror
RUN echo "deb http://mirrors.aliyun.com/debian/ bullseye main non-free contrib" > /etc/apt/sources.list && \
    echo "deb http://mirrors.aliyun.com/debian-security bullseye-security main" >> /etc/apt/sources.list && \
    echo "deb http://mirrors.aliyun.com/debian/ bullseye-updates main non-free contrib" >> /etc/apt/sources.list

# Install required packages and the Docker CLI
RUN apt-get update && apt-get install -y --no-install-recommends \
    curl \
    ca-certificates \
    gnupg \
    cron \
    && mkdir -p /etc/apt/keyrings \
    && curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg \
    && chmod a+r /etc/apt/keyrings/docker.gpg \
    && echo \
    "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://mirrors.aliyun.com/docker-ce/linux/debian \
    $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
    tee /etc/apt/sources.list.d/docker.list > /dev/null \
    && apt-get update \
    && apt-get install -y --no-install-recommends \
    docker-ce-cli \
    docker-compose-plugin \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Use a China-mainland PyPI mirror and install the Python dependencies
RUN pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple \
    && pip install pyyaml croniter

# Copy the application files and make them executable
COPY . /app/
RUN chmod +x /app/d2c.py /app/run.sh

CMD ["/app/run.sh"]
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Docker Compose Generator

This tool reads the information of the existing Docker containers on a ZSpace (极空间) private-cloud NAS and automatically generates matching docker-compose.yaml files.

It groups related containers by their network relationships (custom networks or link connections) and writes one standalone docker-compose.yaml file per group.

In theory it works on any NAS, but a few features — such as command, performance limits, and entrypoint — were deliberately removed because ZSpace does not support them.

-------------------------------------

## Features

- Reads information for every Docker container on the system
- Analyzes the network relationships between containers (custom networks and link connections)
- Groups related containers by those relationships
- Generates one docker-compose.yaml file per group (named after the first container)
- Extracts a wide range of container settings, including:
  - Container name
  - Image
  - Port mappings
  - Environment variables
  - Volumes (volume/bind)
  - Networks (host/bridge/macvlan configured individually; other networks grouped by name)
  - Restart policy
  - Privileged mode
  - Device mounts
  - cap_add capabilities
  - ~~Performance limits~~ (not yet supported by ZSpace, temporarily removed)
  - command and entrypoint (not generated on ZOS)
  - Health checks
  - Other settings

# Usage

## 1. Deploy with Docker Compose (recommended)

Make sure Docker is installed before starting.

**🔻docker cli**
```
docker run -itd --name d2c \
  -v /var/run/docker.sock:/var/run/docker.sock:ro \
  -v /{path}:/app/compose \
  -e NAS=debian \
  -e CRON="0 */12 * * *" \
  -e NETWORK=true \
  -e TZ=Asia/Shanghai \
  crpi-xg6dfmt5h2etc7hg.cn-hangzhou.personal.cr.aliyuncs.com/cherry4nas/d2c:latest
```

All four `-e` flags are optional; the values shown are the defaults (see the environment variable notes below). The image above is the Alibaba Cloud mirror for users in mainland China; `ghcr.io/coracoo/d2c:latest` is the GitHub mirror.

**🔻docker-compose.yaml**
```
services:
  d2c:
    # Alibaba Cloud mirror (use in mainland China)
    image: crpi-xg6dfmt5h2etc7hg.cn-hangzhou.personal.cr.aliyuncs.com/cherry4nas/d2c:latest
    # GitHub mirror
    # image: ghcr.io/coracoo/d2c:latest
    container_name: d2c
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - /{path}:/app/compose
    environment:
      - NAS=debian
      - CRON=0 */12 * * *
      - NETWORK=true
      - TZ=Asia/Shanghai
```
### Environment variables

- `NAS`: NAS system type
  - `debian`: default, generates the full configuration
  - `zos`: ZSpace system; command and entrypoint are not generated

- `CRON`: schedule, as a standard cron expression (see the sketch after this list), e.g. `0 2 * * *` (every day at 02:00)
  - Default: `0 */12 * * *` (every 12 hours, starting at midnight)
  - `once`: run a single time, then exit

- `NETWORK`: how the bridge network is written out
  - `true`: default; bridge mode is written explicitly, so a recreated compose project stays on the bridge network
  - `false`: bridge mode is left implicit, so a recreated compose project follows compose's normal behavior and gets a new network

- `TZ`: time zone used for scheduling
  - Default: `Asia/Shanghai`
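`run.sh` validates the `CRON` value and computes the next run time with the croniter package. As a quick sanity check before deploying, here is a minimal sketch (assuming croniter is installed locally, e.g. `pip install croniter`):

```python
# Sketch: preview the next few run times for a CRON value before deploying.
from datetime import datetime
from croniter import croniter

expr = "0 */12 * * *"  # the same default that d2c uses

if not croniter.is_valid(expr):
    raise SystemExit(f"invalid cron expression: {expr}")

it = croniter(expr, datetime.now())
for _ in range(3):
    print(it.get_next(datetime))  # next three scheduled runs
```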
### Output directory

- `/app/compose`: the script's output directory (default `/app/compose`)
- `YYYY_MM_DD_HH_MM`: each run writes into a subdirectory named after the run time, e.g. `2023_05_04_15_00`

## 2. Run directly (requires Python)

If your system already has a Python environment, you can also run the script directly:

1. Make sure Python 3 and Docker are installed
2. Make sure the script is executable

```bash
chmod +x d2c.py
```

3. Install the required Python packages

```bash
pip install -r requirements.txt
```

4. Run the script

```bash
./run.sh
```

5. The script creates a `compose` folder in the current directory and generates the docker-compose.yaml files inside it

## Output

- A single standalone container produces a file named `{container_name}.yaml`
- A group of network-related containers produces a file named `{first_container_prefix}-group.yaml`
- All generated files are saved under `compose/<timestamp>` (see the example after this list)
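For illustration only — not output from a real system — a hypothetical standalone `nginx` container with one port mapping and one bind mount would produce roughly the following file:

```yaml
# compose/2023_05_04_15_00/nginx.yaml (hypothetical example)
version: '3'
services:
  nginx:
    container_name: nginx
    image: nginx:latest
    restart: unless-stopped
    ports:
      - 8080:80/tcp
    volumes:
      - /volume1/docker/nginx/conf:/etc/nginx/conf.d:rw
    network_mode: bridge
```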
## Notes

- The tool needs access to the Docker CLI to work
- The generated docker-compose.yaml files may need manual adjustment for specific needs
- Containers on the default bridge network with no explicit links may end up in different groups
- Custom networks are marked `external: true`, because the tool assumes they already exist
- When run via Docker, the host's Docker socket is mounted into the container so that container information can be read
- Scheduled runs are supported: every 12 hours by default, customizable via the CRON environment variable

# Changelog

## 2023-05-04 (v1.0.3)

1. Added generation of command and entrypoint; skipped when the NAS environment variable is set to ZOS
2. Added environment variables: NAS, CRON, TZ, NETWORK
3. Added scheduled execution with standard CRON expressions, plus one-shot runs (CRON=once)
4. Changed the yaml output path: files are organized under `./compose/` in `YYYY_MM_DD_HH_MM` timestamp directories
5. Improved log output; improved README.md
6. Added a GitHub Action that builds and pushes to GitHub and Alibaba Cloud automatically
7. Added support for the amd64/arm64/arm/v7 architectures
--------------------------------------------------------------------------------
/d2c.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

import json
import subprocess
import yaml
import os
import re
from collections import defaultdict


def run_command(command):
    """Run a shell command and return its output.

    When running inside a container, docker commands can only reach the
    host's Docker daemon if the Docker socket (/var/run/docker.sock) was
    mounted at container start.
    """
    # Check whether we are running inside a container
    in_container = os.path.exists('/.dockerenv')

    # For docker commands inside a container, make sure the host socket is available
    if in_container and command.startswith('docker'):
        if not os.path.exists('/var/run/docker.sock'):
            print("Error: Docker socket not found. Make sure the container was started with -v /var/run/docker.sock:/var/run/docker.sock")
            return None

    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, text=True)
    stdout, stderr = process.communicate()
    if process.returncode != 0:
        print(f"Command failed: {command}")
        print(f"Error output: {stderr}")
        return None
    return stdout


def get_containers():
    """Get information for all containers (including stopped ones)."""
    cmd = "docker ps -a --format '{{.ID}}'"
    output = run_command(cmd)
    if not output:
        return []

    container_ids = output.strip().split('\n')
    containers = []

    for container_id in container_ids:
        cmd = f"docker inspect {container_id}"
        output = run_command(cmd)
        if output:
            container_info = json.loads(output)
            # Check the container's network configuration
            container = container_info[0]

            # For stopped containers, try to recover network hints from the labels
            if not container['State']['Running']:
                if 'Labels' in container['Config']:
                    network_labels = {k: v for k, v in container['Config']['Labels'].items() if 'network' in k.lower()}
                    if network_labels:
                        print(f"Warning: container {container['Name']} is stopped, but network settings were found in its labels")
                    else:
                        print(f"Warning: container {container['Name']} is stopped; its network settings may be incomplete")

            containers.append(container)

    return containers
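
# For reference, the (heavily abridged) shape of the `docker inspect` JSON that
# the functions below consume — the field names come from the Docker Engine
# API, the values here are purely illustrative:
#
#   {
#     "Id": "abc123...",
#     "Name": "/nginx",
#     "State": {"Running": true},
#     "Config": {"Image": "nginx:latest", "Env": [...], "Labels": {...}},
#     "HostConfig": {"NetworkMode": "bridge", "RestartPolicy": {...}, "Links": null},
#     "Mounts": [...],
#     "NetworkSettings": {"Ports": {...}, "Networks": {"bridge": {...}}}
#   }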

def get_networks():
    """Get information for all custom networks."""
    cmd = "docker network ls --format '{{.ID}}'"
    output = run_command(cmd)
    if not output:
        return {}

    network_ids = output.strip().split('\n')
    networks = {}

    for network_id in network_ids:
        cmd = f"docker network inspect {network_id}"
        output = run_command(cmd)
        if output:
            network_info = json.loads(output)
            network_name = network_info[0]['Name']
            # Skip the default bridge, host and none networks
            if network_name not in ['bridge', 'host', 'none']:
                networks[network_name] = network_info[0]

    return networks


def group_containers_by_network(containers, networks):
    """Group containers by their network relationships."""
    # Initialize the grouping structures
    network_groups = defaultdict(list)
    container_to_networks = defaultdict(list)
    container_links = defaultdict(list)
    special_network_containers = []

    # Record which networks each container belongs to
    for container in containers:
        container_id = container['Id']

        # Check the network mode
        network_mode = container.get('HostConfig', {}).get('NetworkMode', '')

        # Check for a special network (bridge, host or macvlan)
        is_special_network = (
            network_mode in ['bridge', 'host'] or
            any(
                networks.get(net_name, {}).get('Driver', '') == 'macvlan'
                for net_name in container.get('NetworkSettings', {}).get('Networks', {})
            )
        )

        if is_special_network:
            special_network_containers.append(container_id)
            continue

        # Record custom network memberships
        for network_name, network_config in container.get('NetworkSettings', {}).get('Networks', {}).items():
            # Skip the default bridge, host and none networks
            if network_name not in ['bridge', 'host', 'none']:
                container_to_networks[container_id].append(network_name)
                network_groups[network_name].append(container_id)

        # Record container links
        for link in container.get('HostConfig', {}).get('Links', []) or []:
            linked_container = link.split(':')[0].lstrip('/')
            container_links[container_id].append(linked_container)

    # Merge groups that share containers
    merged_groups = []
    processed_networks = set()

    # First, group by custom network
    for network_name, container_ids in network_groups.items():
        if network_name in processed_networks:
            continue

        group = set(container_ids)
        processed_networks.add(network_name)

        # Find other networks that share containers with the current one
        for other_network, other_containers in network_groups.items():
            if other_network != network_name and other_network not in processed_networks:
                if any(c in group for c in other_containers):
                    group.update(other_containers)
                    processed_networks.add(other_network)

        merged_groups.append(list(group))

    # Handle containers connected via links without a shared custom network
    for container_id, linked_containers in container_links.items():
        if not any(container_id in group for group in merged_groups):
            # Collect all linked containers
            linked_group = {container_id}
            for linked in linked_containers:
                for c in containers:
                    if c['Name'].lstrip('/') == linked:
                        linked_group.add(c['Id'])

            # Try to merge into an existing group
            merged = False
            for i, group in enumerate(merged_groups):
                if any(c in group for c in linked_group):
                    merged_groups[i] = list(set(group).union(linked_group))
                    merged = True
                    break

            if not merged:
                merged_groups.append(list(linked_group))

    # Handle the remaining standalone containers
    standalone_containers = []
    for container in containers:
        container_id = container['Id']
        if not any(container_id in group for group in merged_groups) and container_id not in special_network_containers:
            standalone_containers.append(container_id)

    if standalone_containers:
        merged_groups.append(standalone_containers)

    # Each bridge/host/macvlan container becomes its own group
    for container_id in special_network_containers:
        merged_groups.append([container_id])

    return merged_groups
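
# Worked example (hypothetical): suppose net_a = {web, db}, net_b = {db, cache},
# and a standalone container "app" is linked to "web". The passes above produce:
#   1. net_a and net_b share "db", so they merge into {web, db, cache}
#   2. "app" is linked to "web", which is already grouped, so "app" joins it
#   => one group {web, db, cache, app}, written out as a single compose file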

def convert_container_to_service(container):
    """Convert a container's configuration into a docker-compose service definition."""
    service = {}

    # Read the NAS-related environment variables
    nas_env = os.getenv('NAS', 'debian').lower()
    network_env = os.getenv('NETWORK', 'true').lower() == 'true'
    print(f"NAS settings: 1. system type: {nas_env}; 2. NETWORK: {network_env}")

    # Debug: dump the raw container info
    # print(f"Container info: {container}")

    # Container name
    container_name = container['Name'].lstrip('/')
    service['container_name'] = container_name

    # Image
    service['image'] = container['Config']['Image']

    # Restart policy
    restart_policy = container['HostConfig'].get('RestartPolicy', {})
    if restart_policy and restart_policy.get('Name'):
        if restart_policy['Name'] != 'no':
            service['restart'] = restart_policy['Name']
            if restart_policy['Name'] == 'on-failure' and restart_policy.get('MaximumRetryCount'):
                service['restart'] = f"{restart_policy['Name']}:{restart_policy['MaximumRetryCount']}"

    # Port mappings
    port_mappings = {}
    for port in container['NetworkSettings'].get('Ports', {}) or {}:
        if container['NetworkSettings']['Ports'][port]:
            for binding in container['NetworkSettings']['Ports'][port]:
                # Extract the binding details
                host_ip = binding['HostIp']
                host_port = int(binding['HostPort'])  # convert to int for sorting
                container_port = port.split('/')[0]  # strip the protocol suffix
                protocol = port.split('/')[1] if '/' in port else 'tcp'

                # Normalize the host IP
                if host_ip in ['0.0.0.0', '::', '']:
                    key = f"{container_port}/{protocol}"
                else:
                    key = f"{host_ip}:{container_port}/{protocol}"

                # Deduplicate host ports with a set
                if key not in port_mappings:
                    port_mappings[key] = set()
                port_mappings[key].add(host_port)

    # Collapse consecutive host ports into ranges
    ports = []
    for container_port, host_ports in port_mappings.items():
        # Sort the host ports
        host_ports = sorted(list(host_ports))

        # Find runs of consecutive ports
        if len(host_ports) > 0:
            ranges = []
            start = host_ports[0]
            prev = start

            for curr in host_ports[1:]:
                if curr != prev + 1:
                    # Run broken: emit the previous range
                    if start == prev:
                        ranges.append(str(start))
                    else:
                        ranges.append(f"{start}-{prev}")
                    start = curr
                prev = curr

            # Emit the final range
            if start == prev:
                ranges.append(str(start))
            else:
                ranges.append(f"{start}-{prev}")

            # Build the port mapping strings
            if ':' in container_port:  # bound to a specific IP
                host_ip, port_proto = container_port.split(':', 1)
                for port_range in ranges:
                    ports.append(f"{host_ip}:{port_range}:{port_proto}")
            else:
                for port_range in ranges:
                    ports.append(f"{port_range}:{container_port}")

    if ports:
        service['ports'] = ports
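    # Worked example (hypothetical): host ports {8080, 8081, 8082, 9000} all
    # bound to container port 80/tcp collapse into:
    #   ports:
    #     - 8080-8082:80/tcp
    #     - 9000:80/tcp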
    # Environment variables (PATH is skipped)
    if container['Config'].get('Env'):
        env = {}
        for env_var in container['Config']['Env']:
            if '=' in env_var:
                key, value = env_var.split('=', 1)
                if key != 'PATH':  # skip the PATH variable
                    env[key] = value
        if env:
            service['environment'] = env

    # Volumes, both volume and bind mounts
    volumes = []
    for mount in container['Mounts']:
        mode_suffix = 'rw' if mount.get('RW', True) else 'ro'
        if mount['Type'] == 'bind':
            volumes.append(f"{mount['Source']}:{mount['Destination']}:{mode_suffix}")
        elif mount['Type'] == 'volume':
            volumes.append(f"{mount['Name']}:{mount['Destination']}:{mode_suffix}")
    if volumes:
        service['volumes'] = volumes

    # Unified network configuration handling
    network_mode = container['HostConfig'].get('NetworkMode', '')

    if network_mode == 'host':
        service['network_mode'] = 'host'
    elif network_mode.startswith('container:'):
        linked_container = network_mode.split(':')[1]
        service['network_mode'] = f"container:{linked_container}"
    elif network_mode == 'bridge':
        if network_env:
            service['network_mode'] = 'bridge'
    elif network_mode != 'default':
        service['networks'] = [network_mode]
    else:
        networks = []
        for network_name in container['NetworkSettings'].get('Networks', {}):
            if network_name not in ['bridge', 'host', 'none']:
                networks.append(network_name)
        if networks:
            service['networks'] = networks
        elif network_env:
            service['network_mode'] = 'bridge'

    # Container links; linked containers are combined into the same group
    links = container['HostConfig'].get('Links', [])
    if links:
        # Inspect reports links as "/source:/target/alias"; strip the path parts
        service['links'] = [f"{link.split(':')[0].lstrip('/')}:{link.split(':')[1].split('/')[-1]}" for link in links]

    # Privileged mode
    if container['HostConfig'].get('Privileged'):
        service['privileged'] = container['HostConfig']['Privileged']

    # Device mounts
    if container['HostConfig'].get('Devices'):
        devices = []
        for device in container['HostConfig']['Devices']:
            devices.append(f"{device['PathOnHost']}:{device['PathInContainer']}:{device['CgroupPermissions']}")
        service['devices'] = devices

    # Keep watchtower.enable labels
    if container['Config'].get('Labels'):
        labels = {}
        for label_key, label_value in container['Config']['Labels'].items():
            # Keep all watchtower-related labels
            if 'watchtower' in label_key.lower():
                labels[label_key] = label_value
            # Keep labels starting with com/org/io
            # elif label_key.startswith('com.') or label_key.startswith('org.') or label_key.startswith('io.'):
            #     labels[label_key] = label_value
        if labels:
            service['labels'] = labels

    # cap_add capabilities
    if container['HostConfig'].get('CapAdd'):
        caps = []
        if 'SYS_ADMIN' in container['HostConfig']['CapAdd']:
            service['security_opt'] = ['apparmor:unconfined']
            caps.append('SYS_ADMIN')
        if 'NET_ADMIN' in container['HostConfig']['CapAdd']:
            service['security_opt'] = ['apparmor:unconfined']
            caps.append('NET_ADMIN')
        if caps:
            service['cap_add'] = caps

    '''
    # Resource limits. ZSpace compose does not support them yet;
    # other NAS systems can use version 0.3.
    host_config = container.get('HostConfig', {})

    # CPU limits
    cpu_shares = host_config.get('CpuShares')
    cpu_period = host_config.get('CpuPeriod')
    cpu_quota = host_config.get('CpuQuota')
    cpuset_cpus = host_config.get('CpusetCpus')

    # Memory limits
    memory = host_config.get('Memory')
    memory_swap = host_config.get('MemorySwap')
    memory_reservation = host_config.get('MemoryReservation')

    # If any resource limit is set, add it to the service definition
    if any([cpu_shares, cpu_period, cpu_quota, cpuset_cpus, memory, memory_swap, memory_reservation]):
        deploy = {}
        resources = {'limits': {}, 'reservations': {}}

        # CPU settings
        if cpu_quota and cpu_period:
            # Convert the CPU quota into a number of cores
            cores = float(cpu_quota) / float(cpu_period)
            resources['limits']['cpus'] = str(cores)
        elif cpu_shares:
            # cpu_shares is a relative weight; 1024 is the default
            resources['limits']['cpus'] = str(float(cpu_shares) / 1024.0)

        if cpuset_cpus:
            resources['limits']['cpus'] = cpuset_cpus

        # Memory settings
        if memory and memory > 0:
            resources['limits']['memory'] = memory
        if memory_reservation and memory_reservation > 0:
            resources['reservations']['memory'] = memory_reservation

        # Only add the configuration when limits were actually set
        if resources['limits'] or resources['reservations']:
            deploy['resources'] = resources
            service['deploy'] = deploy
    '''

    # command and entrypoint; skipped on ZOS
    if nas_env != 'zos':
        # command
        if container['Config'].get('Cmd'):
            service['command'] = container['Config']['Cmd']

        # entrypoint
        if container['Config'].get('Entrypoint'):
            service['entrypoint'] = container['Config']['Entrypoint']

    # Health check configuration (inspect reports durations in nanoseconds)
    if container['Config'].get('Healthcheck'):
        hc = container['Config']['Healthcheck']

        def ns_to_duration(value):
            # Convert nanoseconds to a compose-style duration string like "30s"
            return f"{value // 1_000_000_000}s" if value else None

        healthcheck = {
            'test': hc.get('Test', []),
            'interval': ns_to_duration(hc.get('Interval')),
            'timeout': ns_to_duration(hc.get('Timeout')),
            'retries': hc.get('Retries')
        }
        # Drop None values
        healthcheck = {k: v for k, v in healthcheck.items() if v is not None}
        if healthcheck:
            service['healthcheck'] = healthcheck

    return service


def generate_compose_file(containers_group, all_containers, output_dir):
    """Generate a docker-compose.yaml file for a group of containers."""
    # Prefer the output directory from the environment, if set
    output_dir = os.getenv('OUTPUT_DIR', output_dir)

    # Make sure the output directory exists
    os.makedirs(output_dir, exist_ok=True)

    compose = {
        'version': '3',
        'services': {},
    }

    # Collect the custom networks used by the group
    networks = set()
    for container_id in containers_group:
        for container in all_containers:
            if container['Id'] == container_id:
                for network_name in container['NetworkSettings'].get('Networks', {}):
                    if network_name not in ['bridge', 'host', 'none']:
                        networks.add(network_name)

    if networks:
        compose['networks'] = {network: {'external': True} for network in networks}

    # Add the service definitions
    for container_id in containers_group:
        for container in all_containers:
            if container['Id'] == container_id:
                container_name = container['Name'].lstrip('/')
                service_name = re.sub(r'[^a-zA-Z0-9_]', '_', container_name)
                compose['services'][service_name] = convert_container_to_service(container)

    # Build the file name
    if len(containers_group) == 1:
        for container in all_containers:
            if container['Id'] == containers_group[0]:
                filename = f"{container['Name'].lstrip('/')}.yaml"
                break
    else:
        # Use the first container's name as the file name prefix
        for container in all_containers:
            if container['Id'] == containers_group[0]:
                prefix = container['Name'].lstrip('/').split('_')[0]
                filename = f"{prefix}-group.yaml"
                break

    # Write the file
    file_path = os.path.join(output_dir, filename)
    with open(file_path, 'w') as f:
        yaml.dump(compose, f, default_flow_style=False, sort_keys=False)

    print(f"Generated {file_path}")
    return file_path
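
# Worked example (hypothetical): a group whose containers share the custom
# network "media" gets that network marked as external in the generated file,
# because the tool assumes the network already exists on the host:
#
#   networks:
#     media:
#       external: true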

def main():
    print("Reading Docker container information...")
    containers = get_containers()
    if not containers:
        print("No Docker containers found")
        return

    print(f"Found {len(containers)} Docker containers")

    print("Reading network information...")
    networks = get_networks()
    print(f"Found {len(networks)} custom networks")

    print("Grouping containers by network relationships...")
    container_groups = group_containers_by_network(containers, networks)
    print(f"Grouping finished: {len(container_groups)} groups")

    # Default output directory (generate_compose_file prefers OUTPUT_DIR if set)
    output_dir = "compose"

    print("Generating docker-compose files...")
    generated_files = []
    for i, group in enumerate(container_groups):
        print(f"Processing group {i+1} with {len(group)} containers")
        file_path = generate_compose_file(group, containers, output_dir)
        generated_files.append(file_path)

    print("\nDone! Generated files:")
    for file_path in generated_files:
        print(f"- {file_path}")


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
pyyaml>=6.0.1
croniter>=1.0.0
--------------------------------------------------------------------------------
/run.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Read the CRON environment variable, falling back to the default
CRON=${CRON:-"0 */12 * * *"}
echo "Current CRON setting: ${CRON}"

# Use the current time (year_month_day_hour_minute) as the output directory name
TIMESTAMP=$(date +"%Y_%m_%d_%H_%M")
export OUTPUT_DIR="/app/compose/${TIMESTAMP}"

# Create the output directory
mkdir -p "$OUTPUT_DIR"

# Handle one-shot runs
if [ "$CRON" = "once" ]; then
    echo "Running once..."
    python3 ./d2c.py
    echo "Done! Check the generated files under ${OUTPUT_DIR}."
    exit 0
fi

# Validate the CRON expression format (digits plus , * / -)
if ! [[ "$CRON" =~ ^[0-9,*/-]+" "[0-9,*/-]+" "[0-9,*/-]+" "[0-9,*/-]+" "[0-9,*/-]+$ ]]; then
    echo "Error: invalid CRON expression: ${CRON}"
    echo "Falling back to the default: 0 */12 * * *"
    CRON="0 */12 * * *"
fi

# Compute the next run time
next_run=$(python3 -c "
from datetime import datetime
from croniter import croniter
base = datetime.now()
cron = croniter('${CRON}', base)
next_time = cron.get_next(datetime)
print(next_time.strftime('%Y-%m-%d %H:%M:%S'))
")

# Wrapper script so that scheduled runs also get a fresh timestamped output directory
cat > /app/cron-task.sh <<'EOF'
#!/bin/bash
export OUTPUT_DIR="/app/compose/$(date +"%Y_%m_%d_%H_%M")"
mkdir -p "$OUTPUT_DIR"
cd /app && /usr/local/bin/python3 /app/d2c.py
EOF
chmod +x /app/cron-task.sh

# Create the cron job (/etc/cron.d entries require a user field)
echo "${CRON} root /app/cron-task.sh >> /var/log/cron.log 2>&1" > /etc/cron.d/d2c-cron
chmod 0644 /etc/cron.d/d2c-cron

# Create the log file
touch /var/log/cron.log

# Start the cron service
cron

# First run of the generator script
python3 ./d2c.py

echo "Done! Check the generated files under the compose directory. Next run: ${next_run}"

# Keep streaming the log so the container stays alive
tail -f /var/log/cron.log
--------------------------------------------------------------------------------