├── .github
│   └── workflows
│       ├── dockerimage.yml
│       └── issue.yml
├── .gitignore
├── LICENSE
├── README.md
├── _config.yml
├── docker
│   ├── Dockerfile.all-py38-cpu
│   ├── Dockerfile.all-py38-cu113
│   ├── Dockerfile.chainer-py38-cpu
│   ├── Dockerfile.chainer-py38-cu113
│   ├── Dockerfile.darknet-cpu
│   ├── Dockerfile.darknet-cu113
│   ├── Dockerfile.keras-py38-cpu
│   ├── Dockerfile.keras-py38-cu113
│   ├── Dockerfile.mxnet-py38-cpu
│   ├── Dockerfile.mxnet-py38-cu113
│   ├── Dockerfile.paddle-py38-cpu
│   ├── Dockerfile.paddle-py38-cu113
│   ├── Dockerfile.pytorch-py38-cpu
│   ├── Dockerfile.pytorch-py38-cu113
│   ├── Dockerfile.tensorflow-py38-cpu
│   └── Dockerfile.tensorflow-py38-cu113
├── generator
│   ├── __init__.py
│   ├── core
│   │   ├── __init__.py
│   │   └── composer.py
│   ├── generate.py
│   └── modules
│       ├── __init__.py
│       ├── __module__.py
│       ├── boost.py
│       ├── caffe.py
│       ├── chainer.py
│       ├── cntk.py
│       ├── darknet.py
│       ├── jupyter.py
│       ├── jupyterlab.py
│       ├── keras.py
│       ├── lasagne.py
│       ├── mxnet.py
│       ├── onnx.py
│       ├── opencv.py
│       ├── paddle.py
│       ├── python.py
│       ├── pytorch.py
│       ├── sonnet.py
│       ├── tensorflow.py
│       ├── theano.py
│       ├── tools.py
│       └── torch.py
└── scripts
    ├── README.md
    ├── build.sh
    ├── clean.sh
    ├── gen-docker.sh
    ├── make-ci.py
    └── make-gen-docker.py
/.github/workflows/dockerimage.yml:
--------------------------------------------------------------------------------
1 | name: deepo CI
2 | on: [push]
3 | jobs:
4 |
5 | mxnet-py38-cu113_mxnet-py38_mxnet-cu113_mxnet:
6 | runs-on: ubuntu-latest
7 | steps:
8 | - uses: actions/checkout@master
9 | - name: Free disk space
10 | run: |
11 | df -h
12 | sudo apt-get autoremove -y
13 | sudo apt-get clean
14 | sudo swapoff -a
15 | sudo rm -f /swapfile
16 | docker rmi $(docker image ls -aq)
17 | df -h
18 | - name: Build docker image
19 | run: docker build -t ${{secrets.DOCKER_REPO}}:mxnet-py38-cu113 -t ${{secrets.DOCKER_REPO}}:mxnet-py38 -t ${{secrets.DOCKER_REPO}}:mxnet-cu113 -t ${{secrets.DOCKER_REPO}}:mxnet -f docker/Dockerfile.mxnet-py38-cu113 .
20 | - name: Deploy docker image
21 | run: |
22 | docker login -u ${{secrets.DOCKER_USER}} -p ${{secrets.DOCKER_PASS}}
23 | docker push ${{secrets.DOCKER_REPO}}:mxnet-py38-cu113
24 | docker push ${{secrets.DOCKER_REPO}}:mxnet-py38
25 | docker push ${{secrets.DOCKER_REPO}}:mxnet-cu113
26 | docker push ${{secrets.DOCKER_REPO}}:mxnet
27 |
28 |
29 | darknet-cpu:
30 | runs-on: ubuntu-latest
31 | steps:
32 | - uses: actions/checkout@master
33 | - name: Free disk space
34 | run: |
35 | df -h
36 | sudo apt-get autoremove -y
37 | sudo apt-get clean
38 | sudo swapoff -a
39 | sudo rm -f /swapfile
40 | docker rmi $(docker image ls -aq)
41 | df -h
42 | - name: Build docker image
43 | run: docker build -t ${{secrets.DOCKER_REPO}}:darknet-cpu -f docker/Dockerfile.darknet-cpu .
44 | - name: Deploy docker image
45 | run: |
46 | docker login -u ${{secrets.DOCKER_USER}} -p ${{secrets.DOCKER_PASS}}
47 | docker push ${{secrets.DOCKER_REPO}}:darknet-cpu
48 |
49 |
50 | pytorch-py38-cpu_pytorch-cpu:
51 | runs-on: ubuntu-latest
52 | steps:
53 | - uses: actions/checkout@master
54 | - name: Free disk space
55 | run: |
56 | df -h
57 | sudo apt-get autoremove -y
58 | sudo apt-get clean
59 | sudo swapoff -a
60 | sudo rm -f /swapfile
61 | docker rmi $(docker image ls -aq)
62 | df -h
63 | - name: Build docker image
64 | run: docker build -t ${{secrets.DOCKER_REPO}}:pytorch-py38-cpu -t ${{secrets.DOCKER_REPO}}:pytorch-cpu -f docker/Dockerfile.pytorch-py38-cpu .
65 | - name: Deploy docker image
66 | run: |
67 | docker login -u ${{secrets.DOCKER_USER}} -p ${{secrets.DOCKER_PASS}}
68 | docker push ${{secrets.DOCKER_REPO}}:pytorch-py38-cpu
69 | docker push ${{secrets.DOCKER_REPO}}:pytorch-cpu
70 |
71 |
72 | paddle-py38-cpu_paddle-cpu:
73 | runs-on: ubuntu-latest
74 | steps:
75 | - uses: actions/checkout@master
76 | - name: Free disk space
77 | run: |
78 | df -h
79 | sudo apt-get autoremove -y
80 | sudo apt-get clean
81 | sudo swapoff -a
82 | sudo rm -f /swapfile
83 | docker rmi $(docker image ls -aq)
84 | df -h
85 | - name: Build docker image
86 | run: docker build -t ${{secrets.DOCKER_REPO}}:paddle-py38-cpu -t ${{secrets.DOCKER_REPO}}:paddle-cpu -f docker/Dockerfile.paddle-py38-cpu .
87 | - name: Deploy docker image
88 | run: |
89 | docker login -u ${{secrets.DOCKER_USER}} -p ${{secrets.DOCKER_PASS}}
90 | docker push ${{secrets.DOCKER_REPO}}:paddle-py38-cpu
91 | docker push ${{secrets.DOCKER_REPO}}:paddle-cpu
92 |
93 |
94 | paddle-py38-cu113_paddle-py38_paddle-cu113_paddle:
95 | runs-on: ubuntu-latest
96 | steps:
97 | - uses: actions/checkout@master
98 | - name: Free disk space
99 | run: |
100 | df -h
101 | sudo apt-get autoremove -y
102 | sudo apt-get clean
103 | sudo swapoff -a
104 | sudo rm -f /swapfile
105 | docker rmi $(docker image ls -aq)
106 | df -h
107 | - name: Build docker image
108 | run: docker build -t ${{secrets.DOCKER_REPO}}:paddle-py38-cu113 -t ${{secrets.DOCKER_REPO}}:paddle-py38 -t ${{secrets.DOCKER_REPO}}:paddle-cu113 -t ${{secrets.DOCKER_REPO}}:paddle -f docker/Dockerfile.paddle-py38-cu113 .
109 | - name: Deploy docker image
110 | run: |
111 | docker login -u ${{secrets.DOCKER_USER}} -p ${{secrets.DOCKER_PASS}}
112 | docker push ${{secrets.DOCKER_REPO}}:paddle-py38-cu113
113 | docker push ${{secrets.DOCKER_REPO}}:paddle-py38
114 | docker push ${{secrets.DOCKER_REPO}}:paddle-cu113
115 | docker push ${{secrets.DOCKER_REPO}}:paddle
116 |
117 |
118 | darknet-cu113_darknet:
119 | runs-on: ubuntu-latest
120 | steps:
121 | - uses: actions/checkout@master
122 | - name: Free disk space
123 | run: |
124 | df -h
125 | sudo apt-get autoremove -y
126 | sudo apt-get clean
127 | sudo swapoff -a
128 | sudo rm -f /swapfile
129 | docker rmi $(docker image ls -aq)
130 | df -h
131 | - name: Build docker image
132 | run: docker build -t ${{secrets.DOCKER_REPO}}:darknet-cu113 -t ${{secrets.DOCKER_REPO}}:darknet -f docker/Dockerfile.darknet-cu113 .
133 | - name: Deploy docker image
134 | run: |
135 | docker login -u ${{secrets.DOCKER_USER}} -p ${{secrets.DOCKER_PASS}}
136 | docker push ${{secrets.DOCKER_REPO}}:darknet-cu113
137 | docker push ${{secrets.DOCKER_REPO}}:darknet
138 |
139 |
140 | chainer-py38-cpu_chainer-cpu:
141 | runs-on: ubuntu-latest
142 | steps:
143 | - uses: actions/checkout@master
144 | - name: Free disk space
145 | run: |
146 | df -h
147 | sudo apt-get autoremove -y
148 | sudo apt-get clean
149 | sudo swapoff -a
150 | sudo rm -f /swapfile
151 | docker rmi $(docker image ls -aq)
152 | df -h
153 | - name: Build docker image
154 | run: docker build -t ${{secrets.DOCKER_REPO}}:chainer-py38-cpu -t ${{secrets.DOCKER_REPO}}:chainer-cpu -f docker/Dockerfile.chainer-py38-cpu .
155 | - name: Deploy docker image
156 | run: |
157 | docker login -u ${{secrets.DOCKER_USER}} -p ${{secrets.DOCKER_PASS}}
158 | docker push ${{secrets.DOCKER_REPO}}:chainer-py38-cpu
159 | docker push ${{secrets.DOCKER_REPO}}:chainer-cpu
160 |
161 |
162 | keras-py38-cpu_keras-cpu:
163 | runs-on: ubuntu-latest
164 | steps:
165 | - uses: actions/checkout@master
166 | - name: Free disk space
167 | run: |
168 | df -h
169 | sudo apt-get autoremove -y
170 | sudo apt-get clean
171 | sudo swapoff -a
172 | sudo rm -f /swapfile
173 | docker rmi $(docker image ls -aq)
174 | df -h
175 | - name: Build docker image
176 | run: docker build -t ${{secrets.DOCKER_REPO}}:keras-py38-cpu -t ${{secrets.DOCKER_REPO}}:keras-cpu -f docker/Dockerfile.keras-py38-cpu .
177 | - name: Deploy docker image
178 | run: |
179 | docker login -u ${{secrets.DOCKER_USER}} -p ${{secrets.DOCKER_PASS}}
180 | docker push ${{secrets.DOCKER_REPO}}:keras-py38-cpu
181 | docker push ${{secrets.DOCKER_REPO}}:keras-cpu
182 |
183 |
184 | chainer-py38-cu113_chainer-py38_chainer-cu113_chainer:
185 | runs-on: ubuntu-latest
186 | steps:
187 | - uses: actions/checkout@master
188 | - name: Free disk space
189 | run: |
190 | df -h
191 | sudo apt-get autoremove -y
192 | sudo apt-get clean
193 | sudo swapoff -a
194 | sudo rm -f /swapfile
195 | docker rmi $(docker image ls -aq)
196 | df -h
197 | - name: Build docker image
198 | run: docker build -t ${{secrets.DOCKER_REPO}}:chainer-py38-cu113 -t ${{secrets.DOCKER_REPO}}:chainer-py38 -t ${{secrets.DOCKER_REPO}}:chainer-cu113 -t ${{secrets.DOCKER_REPO}}:chainer -f docker/Dockerfile.chainer-py38-cu113 .
199 | - name: Deploy docker image
200 | run: |
201 | docker login -u ${{secrets.DOCKER_USER}} -p ${{secrets.DOCKER_PASS}}
202 | docker push ${{secrets.DOCKER_REPO}}:chainer-py38-cu113
203 | docker push ${{secrets.DOCKER_REPO}}:chainer-py38
204 | docker push ${{secrets.DOCKER_REPO}}:chainer-cu113
205 | docker push ${{secrets.DOCKER_REPO}}:chainer
206 |
207 |
208 | keras-py38-cu113_keras-py38_keras-cu113_keras:
209 | runs-on: ubuntu-latest
210 | steps:
211 | - uses: actions/checkout@master
212 | - name: Free disk space
213 | run: |
214 | df -h
215 | sudo apt-get autoremove -y
216 | sudo apt-get clean
217 | sudo swapoff -a
218 | sudo rm -f /swapfile
219 | docker rmi $(docker image ls -aq)
220 | df -h
221 | - name: Build docker image
222 | run: docker build -t ${{secrets.DOCKER_REPO}}:keras-py38-cu113 -t ${{secrets.DOCKER_REPO}}:keras-py38 -t ${{secrets.DOCKER_REPO}}:keras-cu113 -t ${{secrets.DOCKER_REPO}}:keras -f docker/Dockerfile.keras-py38-cu113 .
223 | - name: Deploy docker image
224 | run: |
225 | docker login -u ${{secrets.DOCKER_USER}} -p ${{secrets.DOCKER_PASS}}
226 | docker push ${{secrets.DOCKER_REPO}}:keras-py38-cu113
227 | docker push ${{secrets.DOCKER_REPO}}:keras-py38
228 | docker push ${{secrets.DOCKER_REPO}}:keras-cu113
229 | docker push ${{secrets.DOCKER_REPO}}:keras
230 |
231 |
232 | all-py38-cpu_all-cpu_py38-cpu_cpu_all-jupyter-py38-cpu_all-jupyter-cpu:
233 | runs-on: ubuntu-latest
234 | steps:
235 | - uses: actions/checkout@master
236 | - name: Free disk space
237 | run: |
238 | df -h
239 | sudo apt-get autoremove -y
240 | sudo apt-get clean
241 | sudo swapoff -a
242 | sudo rm -f /swapfile
243 | docker rmi $(docker image ls -aq)
244 | df -h
245 | - name: Build docker image
246 | run: docker build -t ${{secrets.DOCKER_REPO}}:all-py38-cpu -t ${{secrets.DOCKER_REPO}}:all-cpu -t ${{secrets.DOCKER_REPO}}:py38-cpu -t ${{secrets.DOCKER_REPO}}:cpu -t ${{secrets.DOCKER_REPO}}:all-jupyter-py38-cpu -t ${{secrets.DOCKER_REPO}}:all-jupyter-cpu -f docker/Dockerfile.all-py38-cpu .
247 | - name: Deploy docker image
248 | run: |
249 | docker login -u ${{secrets.DOCKER_USER}} -p ${{secrets.DOCKER_PASS}}
250 | docker push ${{secrets.DOCKER_REPO}}:all-py38-cpu
251 | docker push ${{secrets.DOCKER_REPO}}:all-cpu
252 | docker push ${{secrets.DOCKER_REPO}}:py38-cpu
253 | docker push ${{secrets.DOCKER_REPO}}:cpu
254 | docker push ${{secrets.DOCKER_REPO}}:all-jupyter-py38-cpu
255 | docker push ${{secrets.DOCKER_REPO}}:all-jupyter-cpu
256 |
257 | - run: docker run ${{secrets.DOCKER_REPO}}:all-py38-cpu python -c "import tensorflow as m; print(m.__name__, ':', m.__version__);import mxnet as m; print(m.__name__, ':', m.__version__);from tensorflow import keras as m; print(m.__name__, ':', m.__version__);import torch as m; print(m.__name__, ':', m.__version__);import chainer as m; print(m.__name__, ':', m.__version__);import paddle as m; print(m.__name__, ':', m.__version__);"
258 | - run: docker run ${{secrets.DOCKER_REPO}}:all-py38-cpu darknet
259 |
260 |
261 | all-py38-cu113_all-py38_all-cu113_py38-cu113_all_cu113_py38_latest_all-jupyter-py38-cu113_all-jupyt:
262 | runs-on: ubuntu-latest
263 | steps:
264 | - uses: actions/checkout@master
265 | - name: Free disk space
266 | run: |
267 | df -h
268 | sudo apt-get autoremove -y
269 | sudo apt-get clean
270 | sudo swapoff -a
271 | sudo rm -f /swapfile
272 | docker rmi $(docker image ls -aq)
273 | df -h
274 | - name: Build docker image
275 | run: docker build -t ${{secrets.DOCKER_REPO}}:all-py38-cu113 -t ${{secrets.DOCKER_REPO}}:all-py38 -t ${{secrets.DOCKER_REPO}}:all-cu113 -t ${{secrets.DOCKER_REPO}}:py38-cu113 -t ${{secrets.DOCKER_REPO}}:all -t ${{secrets.DOCKER_REPO}}:cu113 -t ${{secrets.DOCKER_REPO}}:py38 -t ${{secrets.DOCKER_REPO}}:latest -t ${{secrets.DOCKER_REPO}}:all-jupyter-py38-cu113 -t ${{secrets.DOCKER_REPO}}:all-jupyter-py38 -t ${{secrets.DOCKER_REPO}}:all-jupyter-cu113 -t ${{secrets.DOCKER_REPO}}:all-jupyter -f docker/Dockerfile.all-py38-cu113 .
276 | - name: Deploy docker image
277 | run: |
278 | docker login -u ${{secrets.DOCKER_USER}} -p ${{secrets.DOCKER_PASS}}
279 | docker push ${{secrets.DOCKER_REPO}}:all-py38-cu113
280 | docker push ${{secrets.DOCKER_REPO}}:all-py38
281 | docker push ${{secrets.DOCKER_REPO}}:all-cu113
282 | docker push ${{secrets.DOCKER_REPO}}:py38-cu113
283 | docker push ${{secrets.DOCKER_REPO}}:all
284 | docker push ${{secrets.DOCKER_REPO}}:cu113
285 | docker push ${{secrets.DOCKER_REPO}}:py38
286 | docker push ${{secrets.DOCKER_REPO}}:latest
287 | docker push ${{secrets.DOCKER_REPO}}:all-jupyter-py38-cu113
288 | docker push ${{secrets.DOCKER_REPO}}:all-jupyter-py38
289 | docker push ${{secrets.DOCKER_REPO}}:all-jupyter-cu113
290 | docker push ${{secrets.DOCKER_REPO}}:all-jupyter
291 |
292 |
293 | mxnet-py38-cpu_mxnet-cpu:
294 | runs-on: ubuntu-latest
295 | steps:
296 | - uses: actions/checkout@master
297 | - name: Free disk space
298 | run: |
299 | df -h
300 | sudo apt-get autoremove -y
301 | sudo apt-get clean
302 | sudo swapoff -a
303 | sudo rm -f /swapfile
304 | docker rmi $(docker image ls -aq)
305 | df -h
306 | - name: Build docker image
307 | run: docker build -t ${{secrets.DOCKER_REPO}}:mxnet-py38-cpu -t ${{secrets.DOCKER_REPO}}:mxnet-cpu -f docker/Dockerfile.mxnet-py38-cpu .
308 | - name: Deploy docker image
309 | run: |
310 | docker login -u ${{secrets.DOCKER_USER}} -p ${{secrets.DOCKER_PASS}}
311 | docker push ${{secrets.DOCKER_REPO}}:mxnet-py38-cpu
312 | docker push ${{secrets.DOCKER_REPO}}:mxnet-cpu
313 |
314 |
315 | pytorch-py38-cu113_pytorch-py38_pytorch-cu113_pytorch:
316 | runs-on: ubuntu-latest
317 | steps:
318 | - uses: actions/checkout@master
319 | - name: Free disk space
320 | run: |
321 | df -h
322 | sudo apt-get autoremove -y
323 | sudo apt-get clean
324 | sudo swapoff -a
325 | sudo rm -f /swapfile
326 | docker rmi $(docker image ls -aq)
327 | df -h
328 | - name: Build docker image
329 | run: docker build -t ${{secrets.DOCKER_REPO}}:pytorch-py38-cu113 -t ${{secrets.DOCKER_REPO}}:pytorch-py38 -t ${{secrets.DOCKER_REPO}}:pytorch-cu113 -t ${{secrets.DOCKER_REPO}}:pytorch -f docker/Dockerfile.pytorch-py38-cu113 .
330 | - name: Deploy docker image
331 | run: |
332 | docker login -u ${{secrets.DOCKER_USER}} -p ${{secrets.DOCKER_PASS}}
333 | docker push ${{secrets.DOCKER_REPO}}:pytorch-py38-cu113
334 | docker push ${{secrets.DOCKER_REPO}}:pytorch-py38
335 | docker push ${{secrets.DOCKER_REPO}}:pytorch-cu113
336 | docker push ${{secrets.DOCKER_REPO}}:pytorch
337 |
338 |
339 | tensorflow-py38-cpu_tensorflow-cpu:
340 | runs-on: ubuntu-latest
341 | steps:
342 | - uses: actions/checkout@master
343 | - name: Free disk space
344 | run: |
345 | df -h
346 | sudo apt-get autoremove -y
347 | sudo apt-get clean
348 | sudo swapoff -a
349 | sudo rm -f /swapfile
350 | docker rmi $(docker image ls -aq)
351 | df -h
352 | - name: Build docker image
353 | run: docker build -t ${{secrets.DOCKER_REPO}}:tensorflow-py38-cpu -t ${{secrets.DOCKER_REPO}}:tensorflow-cpu -f docker/Dockerfile.tensorflow-py38-cpu .
354 | - name: Deploy docker image
355 | run: |
356 | docker login -u ${{secrets.DOCKER_USER}} -p ${{secrets.DOCKER_PASS}}
357 | docker push ${{secrets.DOCKER_REPO}}:tensorflow-py38-cpu
358 | docker push ${{secrets.DOCKER_REPO}}:tensorflow-cpu
359 |
360 |
361 | tensorflow-py38-cu113_tensorflow-py38_tensorflow-cu113_tensorflow:
362 | runs-on: ubuntu-latest
363 | steps:
364 | - uses: actions/checkout@master
365 | - name: Free disk space
366 | run: |
367 | df -h
368 | sudo apt-get autoremove -y
369 | sudo apt-get clean
370 | sudo swapoff -a
371 | sudo rm -f /swapfile
372 | docker rmi $(docker image ls -aq)
373 | df -h
374 | - name: Build docker image
375 | run: docker build -t ${{secrets.DOCKER_REPO}}:tensorflow-py38-cu113 -t ${{secrets.DOCKER_REPO}}:tensorflow-py38 -t ${{secrets.DOCKER_REPO}}:tensorflow-cu113 -t ${{secrets.DOCKER_REPO}}:tensorflow -f docker/Dockerfile.tensorflow-py38-cu113 .
376 | - name: Deploy docker image
377 | run: |
378 | docker login -u ${{secrets.DOCKER_USER}} -p ${{secrets.DOCKER_PASS}}
379 | docker push ${{secrets.DOCKER_REPO}}:tensorflow-py38-cu113
380 | docker push ${{secrets.DOCKER_REPO}}:tensorflow-py38
381 | docker push ${{secrets.DOCKER_REPO}}:tensorflow-cu113
382 | docker push ${{secrets.DOCKER_REPO}}:tensorflow
383 |
384 |
--------------------------------------------------------------------------------
/.github/workflows/issue.yml:
--------------------------------------------------------------------------------
1 | name: Close inactive issues
2 | on:
3 | schedule:
4 | - cron: "30 1 * * *"
5 |
6 | jobs:
7 | close-issues:
8 | runs-on: ubuntu-latest
9 | permissions:
10 | issues: write
11 | pull-requests: write
12 | steps:
13 | - uses: actions/stale@v3
14 | with:
15 | days-before-issue-stale: 180
16 | days-before-issue-close: 30
17 | stale-issue-label: "stale"
18 | stale-issue-message: "This issue is stale because it has been open for 180 days with no activity."
19 | close-issue-message: "This issue was closed because it has been inactive for 30 days since being marked as stale."
20 | days-before-pr-stale: -1
21 | days-before-pr-close: -1
22 | repo-token: ${{ secrets.GITHUB_TOKEN }}
23 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 |
27 | # PyInstaller
28 | # Usually these files are written by a python script from a template
29 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
30 | *.manifest
31 | *.spec
32 |
33 | # Installer logs
34 | pip-log.txt
35 | pip-delete-this-directory.txt
36 |
37 | # Unit test / coverage reports
38 | htmlcov/
39 | .tox/
40 | .coverage
41 | .coverage.*
42 | .cache
43 | nosetests.xml
44 | coverage.xml
45 | *,cover
46 | .hypothesis/
47 |
48 | # Translations
49 | *.mo
50 | *.pot
51 |
52 | # Django stuff:
53 | *.log
54 |
55 | # Sphinx documentation
56 | docs/_build/
57 |
58 | # PyBuilder
59 | target/
60 |
61 | # pyenv python configuration file
62 | .python-version
63 | .DS_Store
64 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 Ming
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 |
3 | 
4 | [](https://hub.docker.com/r/ufoym/deepo)
5 | 
6 | 
7 |
8 |
9 | ***PLEASE NOTE, THE DEEP LEARNING FRAMEWORK WAR IS OVER, THIS PROJECT IS NO LONGER BEING MAINTAINED.***
10 |
11 | ---
12 |
13 | ***Deepo*** is an open framework to assemble specialized [*docker*](http://www.docker.com/) images for deep learning research without pain. It provides a “lego set” of dozens of standard components for preparing deep learning tools and a framework for assembling them into custom docker images.
14 |
15 | At the core of Deepo is a Dockerfile generator that
16 | - allows you to [customize your deep learning environment](#Build) with Lego-like modules
17 |     - you define your environment in a single command line,
18 |     - then Deepo generates Dockerfiles with best practices
19 |     - and does all the configuration for you
20 | - automatically resolves the dependencies for you
21 |     - Deepo knows which combos (CUDA/cuDNN/Python/PyTorch/TensorFlow, ..., tons of dependencies) are compatible
22 |     - and will pick the right versions for you
23 |     - and arranges the sequence of installation steps using [topological sorting](https://en.wikipedia.org/wiki/Topological_sorting) (see the sketch below)
24 |
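The generator treats each tool as a module with its own dependencies and orders the generated Dockerfile by topologically sorting those dependencies. The snippet below is only a minimal sketch of that idea; the module names and the dependency map are made up for illustration, and the real composition logic lives in `generator/core/composer.py` and the modules under `generator/modules`.

```python
# Illustrative only: a toy dependency map and a topological sort,
# mirroring how a Dockerfile generator can order installation steps.
from graphlib import TopologicalSorter  # Python 3.9+ standard library

# module -> set of modules it must be installed after (hypothetical data)
deps = {
    "tools":   set(),
    "python":  {"tools"},
    "theano":  {"python"},
    "lasagne": {"theano"},
    "pytorch": {"python"},
}

# static_order() yields each module only after all of its dependencies,
# which is exactly the property the generated Dockerfile needs.
install_order = list(TopologicalSorter(deps).static_order())
print(install_order)  # one valid order, e.g. ['tools', 'python', 'theano', 'lasagne', 'pytorch']
```
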
25 | We also prepare a series of pre-built docker images that
26 | - allow you to instantly set up a common deep learning research environment
27 | - support almost all [commonly used deep learning frameworks](#Available-tags)
28 | - support [GPU acceleration](#GPU) (CUDA and cuDNN included) and also work in [CPU-only mode](#CPU)
29 | - work on Linux ([CPU version](#CPU)/[GPU version](#GPU)), Windows ([CPU version](#CPU)) and OS X ([CPU version](#CPU))
30 |
31 | ---
32 |
33 | # Table of contents
34 | - [Quick Start](#Quick-Start)
35 | - [GPU Version](#GPU)
36 | - [Installation](#Installation)
37 | - [Usage](#Usage)
38 | - [CPU Version](#CPU)
39 | - [Installation](#Installation-cpu)
40 | - [Usage](#Usage-cpu)
41 | - [Customization](#Customization)
42 | - [Unhappy with all-in-one solution?](#One)
43 | - [Jupyter support](#Jupyter)
44 | - [Build your own customized image](#Build)
45 | - [Comparison to Alternatives](#Comparison)
46 | - [Tags](#Tags)
47 | - [Available Tags](#Available-tags)
48 | - [Deprecated Tags](#Deprecated-tags)
49 | - [Citation](#Citation)
50 | - [Contributing](#Contributing)
51 | - [Licensing](#Licensing)
52 |
53 | ---
54 |
55 |
56 |
57 | # Quick Start
58 |
59 |
60 |
61 |
62 | ## GPU Version
63 |
64 |
65 |
66 | ### Installation
67 |
68 | #### Step 1. Install [Docker](https://docs.docker.com/engine/installation/) and [nvidia-docker](https://github.com/NVIDIA/nvidia-docker).
69 |
70 | #### Step 2. Obtain the all-in-one image from [Docker Hub](https://hub.docker.com/r/ufoym/deepo)
71 |
72 | ```bash
73 | docker pull ufoym/deepo
74 | ```
75 |
76 | Users in China who suffer from slow speeds when pulling the image from the public Docker registry can instead pull `deepo` images from the China registry mirror by specifying the full path, including the registry, in the `docker pull` command, for example:
77 |
78 | ```bash
79 | docker pull registry.docker-cn.com/ufoym/deepo
80 | ```
81 |
82 |
83 |
84 | ### Usage
85 |
86 | Now you can try this command:
87 | ```bash
88 | docker run --gpus all --rm ufoym/deepo nvidia-smi
89 | ```
90 | If this works, Deepo can use the GPU from inside the docker container.
91 | If it does not, search [the issues section on the nvidia-docker GitHub](https://github.com/NVIDIA/nvidia-docker/issues) -- many solutions are already documented. To get an interactive shell into a container that will not be automatically deleted after you exit, run
92 |
93 | ```bash
94 | docker run --gpus all -it ufoym/deepo bash
95 | ```
96 |
97 | If you want to share your data and configurations between the host (your machine or VM) and the container in which you are using Deepo, use the `-v` option, e.g.
98 | ```bash
99 | docker run --gpus all -it -v /host/data:/data -v /host/config:/config ufoym/deepo bash
100 | ```
101 | This will make `/host/data` from the host visible as `/data` in the container, and `/host/config` as `/config`. Such isolation reduces the chances of your containerized experiments overwriting or using the wrong data.
102 |
103 | Please note that some frameworks (e.g. PyTorch) use shared memory to share data between processes. If multiprocessing is used, the default shared-memory segment size the container runs with may not be enough, so you should increase it with either the `--ipc=host` or the `--shm-size` command-line option to `docker run`.
104 | ```bash
105 | docker run --gpus all -it --ipc=host ufoym/deepo bash
106 | ```
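Alternatively, you can raise the limit explicitly with `--shm-size` (the size below is only an example; pick whatever suits your workload):
```bash
docker run --gpus all -it --shm-size=8g ufoym/deepo bash
```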
107 |
108 |
109 |
110 |
111 | ## CPU Version
112 |
113 |
114 |
115 | ### Installation
116 |
117 | #### Step 1. Install [Docker](https://docs.docker.com/engine/installation/).
118 |
119 | #### Step 2. Obtain the all-in-one image from [Docker Hub](https://hub.docker.com/r/ufoym/deepo)
120 |
121 | ```bash
122 | docker pull ufoym/deepo:cpu
123 | ```
124 |
125 |
126 |
127 | ### Usage
128 |
129 | Now you can try this command:
130 | ```bash
131 | docker run -it ufoym/deepo:cpu bash
132 | ```
133 |
134 | If you want to share your data and configurations between the host (your machine or VM) and the container in which you are using Deepo, use the `-v` option, e.g.
135 | ```bash
136 | docker run -it -v /host/data:/data -v /host/config:/config ufoym/deepo:cpu bash
137 | ```
138 | This will make `/host/data` from the host visible as `/data` in the container, and `/host/config` as `/config`. Such isolation reduces the chances of your containerized experiments overwriting or using the wrong data.
139 |
140 | Please note that some frameworks (e.g. PyTorch) use shared memory to share data between processes. If multiprocessing is used, the default shared-memory segment size the container runs with may not be enough, so you should increase it with either the `--ipc=host` or the `--shm-size` command-line option to `docker run`.
141 | ```bash
142 | docker run -it --ipc=host ufoym/deepo:cpu bash
143 | ```
144 |
145 |
146 | _You are now ready to begin your journey._
147 |
148 |
149 | ```$ python```
150 | ```python
151 | >>> import tensorflow
152 | >>> import sonnet
153 | >>> import torch
154 | >>> import keras
155 | >>> import mxnet
156 | >>> import cntk
157 | >>> import chainer
158 | >>> import theano
159 | >>> import lasagne
160 | >>> import caffe
161 | >>> import paddle
162 | ```
163 |
164 | ```$ caffe --version```
165 | ```
166 | caffe version 1.0.0
167 | ```
168 |
169 | ```$ darknet```
170 | ```
171 | usage: darknet
172 | ```
173 |
174 |
175 |
176 | # Customization
177 |
178 | Note that `docker pull ufoym/deepo` mentioned in [Quick Start](#Quick-Start) will give you a standard image containing all available deep learning frameworks. You can customize your own environment as well.
179 |
180 |
181 |
182 | ## Unhappy with all-in-one solution?
183 |
184 | If you prefer a specific framework rather than an all-in-one image, just append a tag with the name of the framework.
185 | Take tensorflow for example:
186 | ```bash
187 | docker pull ufoym/deepo:tensorflow
188 | ```
189 |
190 |
191 |
192 | ## Jupyter support
193 |
194 | #### Step 1. Pull the all-in-one image
195 |
196 | ```bash
197 | docker pull ufoym/deepo
198 | ```
199 |
200 | #### Step 2. Run the image
201 | ```bash
202 | docker run --gpus all -it -p 8888:8888 -v /home/u:/root --ipc=host ufoym/deepo jupyter lab --no-browser --ip=0.0.0.0 --allow-root --LabApp.allow_origin='*' --LabApp.root_dir='/root'
203 | ```
204 |
205 |
206 |
207 |
208 | ## Build your own customized image with Lego-like modules
209 |
210 | #### Step 1. Prepare the generator
211 |
212 | ```bash
213 | git clone https://github.com/ufoym/deepo.git
214 | cd deepo/generator
215 | ```
216 |
217 | #### Step 2. Generate your customized Dockerfile
218 |
219 | For example, if you like `pytorch` and `lasagne`, then
220 | ```bash
221 | python generate.py Dockerfile pytorch lasagne
222 | ```
223 | or, with CUDA 11.1 and cuDNN 8:
224 | ```bash
225 | python generate.py Dockerfile pytorch lasagne --cuda-ver 11.1 --cudnn-ver 8
226 | ```
227 |
228 | This should generate a Dockerfile that contains everything needed to build `pytorch` and `lasagne`. Note that the generator handles dependency resolution automatically and topologically sorts the module list, so you don't need to worry about missing dependencies or the order of the list.
229 |
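You can list as many modules as you like, as long as each name matches a module under `generator/modules`; the particular combination and versions below are just an illustration:
```bash
python generate.py Dockerfile tensorflow opencv jupyter --cuda-ver 11.3 --cudnn-ver 8
```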
230 | You can also specify the version of Python:
231 | ```bash
232 | python generate.py Dockerfile pytorch lasagne python==3.6
233 | ```
234 |
235 | #### Step 3. Build your Dockerfile
236 |
237 | ```bash
238 | docker build -t my/deepo .
239 | ```
240 |
241 | This may take several minutes as it compiles a few libraries from scratch.
242 |
243 |
244 |
245 |
246 | # Comparison to alternatives
247 |
248 |
249 | . | modern-deep-learning | dl-docker | jupyter-deeplearning | Deepo
250 | :------------------------------------------------: | :------------------: | :----------------: | :------------------: | :----------------:
251 | [ubuntu](https://www.ubuntu.com) | 16.04 | 14.04 | 14.04 | 18.04
252 | [cuda](https://developer.nvidia.com/cuda-zone) | X | 8.0 | 6.5-8.0 | 8.0-10.2/None
253 | [cudnn](https://developer.nvidia.com/cudnn) | X | v5 | v2-5 | v7
254 | [onnx](https://onnx.ai) | X | X | X | O
255 | [theano](http://deeplearning.net/software/theano) | X | O | O | O
256 | [tensorflow](http://www.tensorflow.org) | O | O | O | O
257 | [sonnet](https://github.com/deepmind/sonnet) | X | X | X | O
258 | [pytorch](http://pytorch.org) | X | X | X | O
259 | [keras](https://keras.io) | O | O | O | O
260 | [lasagne](http://lasagne.readthedocs.io) | X | O | O | O
261 | [mxnet](http://mxnet.incubator.apache.org) | X | X | X | O
262 | [cntk](http://cntk.ai) | X | X | X | O
263 | [chainer](https://chainer.org) | X | X | X | O
264 | [caffe](http://caffe.berkeleyvision.org) | O | O | O | O
265 | [caffe2](https://caffe2.ai) | X | X | X | O
266 | [torch](http://torch.ch/) | X | O | O | O
267 | [darknet](https://pjreddie.com/darknet/) | X | X | X | O
268 | [paddlepaddle](https://www.paddlepaddle.org.cn/) | X | X | X | O
269 |
270 |
271 |
272 |
273 |
274 | # Tags
275 |
276 |
277 |
278 |
279 | ## Available Tags
280 |
281 |
282 | . | CUDA 11.3 / Python 3.8 | CPU-only / Python 3.8
283 | :------------------------------------------------: | :-------------------------------------------------------: | :-----------------------------------------:
284 | all-in-one | `latest` `all` `all-py38` `py38-cu113` `all-py38-cu113` | `all-py38-cpu` `all-cpu` `py38-cpu` `cpu`
285 | [TensorFlow](http://www.tensorflow.org) | `tensorflow-py38-cu113` `tensorflow-py38` `tensorflow` | `tensorflow-py38-cpu` `tensorflow-cpu`
286 | [PyTorch](http://pytorch.org) | `pytorch-py38-cu113` `pytorch-py38` `pytorch` | `pytorch-py38-cpu` `pytorch-cpu`
287 | [Keras](https://keras.io) | `keras-py38-cu113` `keras-py38` `keras` | `keras-py38-cpu` `keras-cpu`
288 | [MXNet](http://mxnet.incubator.apache.org) | `mxnet-py38-cu113` `mxnet-py38` `mxnet` | `mxnet-py38-cpu` `mxnet-cpu`
289 | [Chainer](https://chainer.org) | `chainer-py38-cu113` `chainer-py38` `chainer` | `chainer-py38-cpu` `chainer-cpu`
290 | [Darknet](https://pjreddie.com/darknet/) | `darknet-cu113` `darknet` | `darknet-cpu`
291 | [paddlepaddle](https://www.paddlepaddle.org.cn/) | `paddle-cu113` `paddle` | `paddle-cpu`
292 |
293 |
294 |
295 |
296 | ## Deprecated Tags
297 |
298 | . | CUDA 11.3 / Python 3.6 | CUDA 11.1 / Python 3.6 | CUDA 10.1 / Python 3.6 | CUDA 10.0 / Python 3.6 | CUDA 9.0 / Python 3.6 | CUDA 9.0 / Python 2.7 | CPU-only / Python 3.6 | CPU-only / Python 2.7
299 | :------------------------------------------------: | :----------------------------: | :----------------------------: | :----------------------------: | :----------------------------: | :------------------------------------------: | :--------------------------------: | :-----------------------------------------: | :----------------------------------------:
300 | all-in-one | `py36-cu113` `all-py36-cu113` | `py36-cu111` `all-py36-cu111` | `py36-cu101` `all-py36-cu101` | `py36-cu100` `all-py36-cu100` | `py36-cu90` `all-py36-cu90` | `all-py27-cu90` `all-py27` `py27-cu90` | | `all-py27-cpu` `py27-cpu`
301 | all-in-one with jupyter | | | | | `all-jupyter-py36-cu90` | `all-py27-jupyter` `py27-jupyter` | | `all-py27-jupyter-cpu` `py27-jupyter-cpu`
302 | [Theano](http://deeplearning.net/software/theano) | `theano-py36-cu113` | `theano-py36-cu111` | `theano-py36-cu101` | `theano-py36-cu100` | `theano-py36-cu90` | `theano-py27-cu90` `theano-py27` | | `theano-py27-cpu`
303 | [TensorFlow](http://www.tensorflow.org) | `tensorflow-py36-cu113` | `tensorflow-py36-cu111` | `tensorflow-py36-cu101` | `tensorflow-py36-cu100` | `tensorflow-py36-cu90` | `tensorflow-py27-cu90` `tensorflow-py27` | | `tensorflow-py27-cpu`
304 | [Sonnet](https://github.com/deepmind/sonnet) | `sonnet-py36-cu113` | `sonnet-py36-cu111` | `sonnet-py36-cu101` | `sonnet-py36-cu100` | `sonnet-py36-cu90` | `sonnet-py27-cu90` `sonnet-py27` | | `sonnet-py27-cpu`
305 | [PyTorch](http://pytorch.org) | `pytorch-py36-cu113` | `pytorch-py36-cu111` | `pytorch-py36-cu101` | `pytorch-py36-cu100` | `pytorch-py36-cu90` | `pytorch-py27-cu90` `pytorch-py27` | | `pytorch-py27-cpu`
306 | [Keras](https://keras.io) | `keras-py36-cu113` | `keras-py36-cu111` | `keras-py36-cu101` | `keras-py36-cu100` | `keras-py36-cu90` | `keras-py27-cu90` `keras-py27` | | `keras-py27-cpu`
307 | [Lasagne](http://lasagne.readthedocs.io) | `lasagne-py36-cu113` | `lasagne-py36-cu111` | `lasagne-py36-cu101` | `lasagne-py36-cu100` | `lasagne-py36-cu90` | `lasagne-py27-cu90` `lasagne-py27` | | `lasagne-py27-cpu`
308 | [MXNet](http://mxnet.incubator.apache.org) | `mxnet-py36-cu113` | `mxnet-py36-cu111` | `mxnet-py36-cu101` | `mxnet-py36-cu100` | `mxnet-py36-cu90` | `mxnet-py27-cu90` `mxnet-py27` | | `mxnet-py27-cpu`
309 | [CNTK](http://cntk.ai) | `cntk-py36-cu113` | `cntk-py36-cu111` | `cntk-py36-cu101` | `cntk-py36-cu100` | `cntk-py36-cu90` | `cntk-py27-cu90` `cntk-py27` | | `cntk-py27-cpu`
310 | [Chainer](https://chainer.org) | `chainer-py36-cu113` | `chainer-py36-cu111` | `chainer-py36-cu101` | `chainer-py36-cu100` | `chainer-py36-cu90` | `chainer-py27-cu90` `chainer-py27` | | `chainer-py27-cpu`
311 | [Caffe](http://caffe.berkeleyvision.org) | `caffe-py36-cu113` | `caffe-py36-cu111` | `caffe-py36-cu101` | `caffe-py36-cu100` | `caffe-py36-cu90` | `caffe-py27-cu90` `caffe-py27` | | `caffe-py27-cpu`
312 | [Caffe2](https://caffe2.ai) | | | | | `caffe2-py36-cu90` `caffe2-py36` `caffe2` | `caffe2-py27-cu90` `caffe2-py27` | `caffe2-py36-cpu` `caffe2-cpu` | `caffe2-py27-cpu`
313 | [Torch](http://torch.ch/) | `torch-cu113` | `torch-cu111` | `torch-cu101` | `torch-cu100` | `torch-cu90` | `torch-cu90` `torch` | | `torch-cpu`
314 | [Darknet](https://pjreddie.com/darknet/) | `darknet-cu113` | `darknet-cu111` | `darknet-cu101` | `darknet-cu100` | `darknet-cu90` | `darknet-cu90` `darknet` | | `darknet-cpu`
315 |
316 |
317 |
318 |
319 | # Citation
320 | ```
321 | @misc{ming2017deepo,
322 | author = {Ming Yang},
323 | title = {Deepo: set up deep learning environment in a single command line.},
324 | year = {2017},
325 | publisher = {GitHub},
326 | journal = {GitHub repository},
327 | howpublished = {\url{https://github.com/ufoym/deepo}}
328 | }
329 | ```
330 |
331 |
332 | # Contributing
333 |
334 | We appreciate all contributions. If you are planning to contribute back bug-fixes, please do so without any further discussion. If you plan to contribute new features, utility functions or extensions, please first open an issue and discuss the feature with us.
335 |
336 |
337 |
338 | # Licensing
339 |
340 | Deepo is [MIT licensed](https://github.com/ufoym/deepo/blob/master/LICENSE).
341 |
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-modernist
--------------------------------------------------------------------------------
/docker/Dockerfile.all-py38-cpu:
--------------------------------------------------------------------------------
1 | # ==================================================================
2 | # module list
3 | # ------------------------------------------------------------------
4 | # darknet latest (git)
5 | # python 3.8 (apt)
6 | # chainer latest (pip)
7 | # jupyter latest (pip)
8 | # mxnet latest (pip)
9 | # onnx latest (pip)
10 | # paddle latest (pip)
11 | # pytorch latest (pip)
12 | # tensorflow latest (pip)
13 | # jupyterlab latest (pip)
14 | # keras latest (pip)
15 | # ==================================================================
16 |
17 | FROM ubuntu:20.04
18 | ENV LANG C.UTF-8
19 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \
20 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
21 | GIT_CLONE="git clone --depth 10" && \
22 |
23 | rm -rf /var/lib/apt/lists/* \
24 | /etc/apt/sources.list.d/cuda.list \
25 | /etc/apt/sources.list.d/nvidia-ml.list && \
26 |
27 | apt-get update && \
28 |
29 | # ==================================================================
30 | # tools
31 | # ------------------------------------------------------------------
32 |
33 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
34 | build-essential \
35 | apt-utils \
36 | ca-certificates \
37 | wget \
38 | git \
39 | vim \
40 | libssl-dev \
41 | curl \
42 | unzip \
43 | unrar \
44 | cmake \
45 | && \
46 |
47 | # ==================================================================
48 | # darknet
49 | # ------------------------------------------------------------------
50 |
51 | $GIT_CLONE https://github.com/AlexeyAB/darknet ~/darknet && \
52 | cd ~/darknet && \
53 | sed -i 's/GPU=0/GPU=0/g' ~/darknet/Makefile && \
54 | sed -i 's/CUDNN=0/CUDNN=0/g' ~/darknet/Makefile && \
55 | make -j"$(nproc)" && \
56 | cp ~/darknet/include/* /usr/local/include && \
57 | cp ~/darknet/darknet /usr/local/bin && \
58 |
59 | # ==================================================================
60 | # python
61 | # ------------------------------------------------------------------
62 |
63 | apt-get update && \
64 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
65 | python3.8 \
66 | python3.8-dev \
67 | python3.8-distutils \
68 | && \
69 | wget -O ~/get-pip.py \
70 | https://bootstrap.pypa.io/get-pip.py && \
71 | python3.8 ~/get-pip.py && \
72 | ln -s /usr/bin/python3.8 /usr/local/bin/python && \
73 | $PIP_INSTALL \
74 | numpy \
75 | scipy \
76 | pandas \
77 | scikit-image \
78 | scikit-learn \
79 | matplotlib \
80 | Cython \
81 | tqdm \
82 | && \
83 | # ==================================================================
84 | # chainer
85 | # ------------------------------------------------------------------
86 |
87 | $PIP_INSTALL \
88 | chainer \
89 | && \
90 |
91 | # ==================================================================
92 | # jupyter
93 | # ------------------------------------------------------------------
94 |
95 | $PIP_INSTALL \
96 | jupyter \
97 | && \
98 |
99 | # ==================================================================
100 | # mxnet
101 | # ------------------------------------------------------------------
102 |
103 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
104 | libatlas-base-dev \
105 | graphviz \
106 | && \
107 |
108 | $PIP_INSTALL \
109 | mxnet \
110 | graphviz \
111 | && \
112 |
113 | # ==================================================================
114 | # onnx
115 | # ------------------------------------------------------------------
116 |
117 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
118 | protobuf-compiler \
119 | libprotoc-dev \
120 | && \
121 |
122 | $PIP_INSTALL \
123 | numpy \
124 | protobuf \
125 | onnx \
126 | onnxruntime \
127 | && \
128 |
129 | # ==================================================================
130 | # paddle
131 | # ------------------------------------------------------------------
132 |
133 | $PIP_INSTALL \
134 | paddlepaddle \
135 | && \
136 |
137 | # ==================================================================
138 | # pytorch
139 | # ------------------------------------------------------------------
140 |
141 | $PIP_INSTALL \
142 | future \
143 | numpy \
144 | protobuf \
145 | enum34 \
146 | pyyaml \
147 | typing \
148 | && \
149 | $PIP_INSTALL \
150 | --pre torch torchvision torchaudio -f \
151 | https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html \
152 | && \
153 |
154 | # ==================================================================
155 | # tensorflow
156 | # ------------------------------------------------------------------
157 |
158 | $PIP_INSTALL \
159 | tensorflow \
160 | && \
161 |
162 | # ==================================================================
163 | # jupyterlab
164 | # ------------------------------------------------------------------
165 |
166 | $PIP_INSTALL \
167 | jupyterlab \
168 | && \
169 |
170 | # ==================================================================
171 | # keras
172 | # ------------------------------------------------------------------
173 |
174 | # Now Keras comes packaged with TensorFlow 2
175 | # as tensorflow.keras. To start using Keras,
176 | # simply install TensorFlow 2.
177 |
178 | # ==================================================================
179 | # config & cleanup
180 | # ------------------------------------------------------------------
181 |
182 | ldconfig && \
183 | apt-get clean && \
184 | apt-get autoremove && \
185 | rm -rf /var/lib/apt/lists/* /tmp/* ~/*
186 |
187 | EXPOSE 8888 6006
188 |
--------------------------------------------------------------------------------
/docker/Dockerfile.all-py38-cu113:
--------------------------------------------------------------------------------
1 | # ==================================================================
2 | # module list
3 | # ------------------------------------------------------------------
4 | # darknet latest (git)
5 | # python 3.8 (apt)
6 | # chainer latest (pip)
7 | # jupyter latest (pip)
8 | # mxnet latest (pip)
9 | # onnx latest (pip)
10 | # paddle latest (pip)
11 | # pytorch latest (pip)
12 | # tensorflow latest (pip)
13 | # jupyterlab latest (pip)
14 | # keras latest (pip)
15 | # ==================================================================
16 |
17 | FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04
18 | ENV LANG C.UTF-8
19 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \
20 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
21 | GIT_CLONE="git clone --depth 10" && \
22 |
23 | rm -rf /var/lib/apt/lists/* \
24 | /etc/apt/sources.list.d/cuda.list \
25 | /etc/apt/sources.list.d/nvidia-ml.list && \
26 |
27 | apt-get update && \
28 |
29 | # ==================================================================
30 | # tools
31 | # ------------------------------------------------------------------
32 |
33 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
34 | build-essential \
35 | apt-utils \
36 | ca-certificates \
37 | wget \
38 | git \
39 | vim \
40 | libssl-dev \
41 | curl \
42 | unzip \
43 | unrar \
44 | cmake \
45 | && \
46 |
47 | # ==================================================================
48 | # darknet
49 | # ------------------------------------------------------------------
50 |
51 | $GIT_CLONE https://github.com/AlexeyAB/darknet ~/darknet && \
52 | cd ~/darknet && \
53 | sed -i 's/GPU=0/GPU=1/g' ~/darknet/Makefile && \
54 | sed -i 's/CUDNN=0/CUDNN=1/g' ~/darknet/Makefile && \
55 | make -j"$(nproc)" && \
56 | cp ~/darknet/include/* /usr/local/include && \
57 | cp ~/darknet/darknet /usr/local/bin && \
58 |
59 | # ==================================================================
60 | # python
61 | # ------------------------------------------------------------------
62 |
63 | apt-get update && \
64 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
65 | python3.8 \
66 | python3.8-dev \
67 | python3.8-distutils \
68 | && \
69 | wget -O ~/get-pip.py \
70 | https://bootstrap.pypa.io/get-pip.py && \
71 | python3.8 ~/get-pip.py && \
72 | ln -s /usr/bin/python3.8 /usr/local/bin/python && \
73 | $PIP_INSTALL \
74 | numpy \
75 | scipy \
76 | pandas \
77 | scikit-image \
78 | scikit-learn \
79 | matplotlib \
80 | Cython \
81 | tqdm \
82 | && \
83 | # ==================================================================
84 | # chainer
85 | # ------------------------------------------------------------------
86 |
87 | $PIP_INSTALL \
88 | cupy \
89 | chainer \
90 | && \
91 |
92 | # ==================================================================
93 | # jupyter
94 | # ------------------------------------------------------------------
95 |
96 | $PIP_INSTALL \
97 | jupyter \
98 | && \
99 |
100 | # ==================================================================
101 | # mxnet
102 | # ------------------------------------------------------------------
103 |
104 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
105 | libatlas-base-dev \
106 | graphviz \
107 | && \
108 |
109 | $PIP_INSTALL \
110 | mxnet-cu112 \
111 | graphviz \
112 | && \
113 |
114 | # ==================================================================
115 | # onnx
116 | # ------------------------------------------------------------------
117 |
118 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
119 | protobuf-compiler \
120 | libprotoc-dev \
121 | && \
122 |
123 | $PIP_INSTALL \
124 | numpy \
125 | protobuf \
126 | onnx \
127 | onnxruntime-gpu \
128 | && \
129 |
130 | # ==================================================================
131 | # paddle
132 | # ------------------------------------------------------------------
133 |
134 | $PIP_INSTALL \
135 | paddlepaddle-gpu \
136 | && \
137 |
138 | # ==================================================================
139 | # pytorch
140 | # ------------------------------------------------------------------
141 |
142 | $PIP_INSTALL \
143 | future \
144 | numpy \
145 | protobuf \
146 | enum34 \
147 | pyyaml \
148 | typing \
149 | && \
150 | $PIP_INSTALL \
151 | --pre torch torchvision torchaudio -f \
152 | https://download.pytorch.org/whl/nightly/cu113/torch_nightly.html \
153 | && \
154 |
155 | # ==================================================================
156 | # tensorflow
157 | # ------------------------------------------------------------------
158 |
159 | $PIP_INSTALL \
160 | tensorflow-gpu \
161 | && \
162 |
163 | # ==================================================================
164 | # jupyterlab
165 | # ------------------------------------------------------------------
166 |
167 | $PIP_INSTALL \
168 | jupyterlab \
169 | && \
170 |
171 | # ==================================================================
172 | # keras
173 | # ------------------------------------------------------------------
174 |
175 | # Now Keras comes packaged with TensorFlow 2
176 | # as tensorflow.keras. To start using Keras,
177 | # simply install TensorFlow 2.
178 |
179 | # ==================================================================
180 | # config & cleanup
181 | # ------------------------------------------------------------------
182 |
183 | ldconfig && \
184 | apt-get clean && \
185 | apt-get autoremove && \
186 | rm -rf /var/lib/apt/lists/* /tmp/* ~/*
187 |
188 | EXPOSE 8888 6006
189 |
--------------------------------------------------------------------------------
/docker/Dockerfile.chainer-py38-cpu:
--------------------------------------------------------------------------------
1 | # ==================================================================
2 | # module list
3 | # ------------------------------------------------------------------
4 | # python 3.8 (apt)
5 | # chainer latest (pip)
6 | # ==================================================================
7 |
8 | FROM ubuntu:20.04
9 | ENV LANG C.UTF-8
10 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \
11 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
12 | GIT_CLONE="git clone --depth 10" && \
13 |
14 | rm -rf /var/lib/apt/lists/* \
15 | /etc/apt/sources.list.d/cuda.list \
16 | /etc/apt/sources.list.d/nvidia-ml.list && \
17 |
18 | apt-get update && \
19 |
20 | # ==================================================================
21 | # tools
22 | # ------------------------------------------------------------------
23 |
24 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
25 | build-essential \
26 | apt-utils \
27 | ca-certificates \
28 | wget \
29 | git \
30 | vim \
31 | libssl-dev \
32 | curl \
33 | unzip \
34 | unrar \
35 | cmake \
36 | && \
37 |
38 | # ==================================================================
39 | # python
40 | # ------------------------------------------------------------------
41 |
42 | apt-get update && \
43 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
44 | python3.8 \
45 | python3.8-dev \
46 | python3.8-distutils \
47 | && \
48 | wget -O ~/get-pip.py \
49 | https://bootstrap.pypa.io/get-pip.py && \
50 | python3.8 ~/get-pip.py && \
51 | ln -s /usr/bin/python3.8 /usr/local/bin/python && \
52 | $PIP_INSTALL \
53 | numpy \
54 | scipy \
55 | pandas \
56 | scikit-image \
57 | scikit-learn \
58 | matplotlib \
59 | Cython \
60 | tqdm \
61 | && \
62 | # ==================================================================
63 | # chainer
64 | # ------------------------------------------------------------------
65 |
66 | $PIP_INSTALL \
67 | chainer \
68 | && \
69 |
70 | # ==================================================================
71 | # config & cleanup
72 | # ------------------------------------------------------------------
73 |
74 | ldconfig && \
75 | apt-get clean && \
76 | apt-get autoremove && \
77 | rm -rf /var/lib/apt/lists/* /tmp/* ~/*
78 |
--------------------------------------------------------------------------------
/docker/Dockerfile.chainer-py38-cu113:
--------------------------------------------------------------------------------
1 | # ==================================================================
2 | # module list
3 | # ------------------------------------------------------------------
4 | # python 3.8 (apt)
5 | # chainer latest (pip)
6 | # ==================================================================
7 |
8 | FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04
9 | ENV LANG C.UTF-8
10 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \
11 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
12 | GIT_CLONE="git clone --depth 10" && \
13 |
14 | rm -rf /var/lib/apt/lists/* \
15 | /etc/apt/sources.list.d/cuda.list \
16 | /etc/apt/sources.list.d/nvidia-ml.list && \
17 |
18 | apt-get update && \
19 |
20 | # ==================================================================
21 | # tools
22 | # ------------------------------------------------------------------
23 |
24 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
25 | build-essential \
26 | apt-utils \
27 | ca-certificates \
28 | wget \
29 | git \
30 | vim \
31 | libssl-dev \
32 | curl \
33 | unzip \
34 | unrar \
35 | cmake \
36 | && \
37 |
38 | # ==================================================================
39 | # python
40 | # ------------------------------------------------------------------
41 |
42 | apt-get update && \
43 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
44 | python3.8 \
45 | python3.8-dev \
46 | python3.8-distutils \
47 | && \
48 | wget -O ~/get-pip.py \
49 | https://bootstrap.pypa.io/get-pip.py && \
50 | python3.8 ~/get-pip.py && \
51 | ln -s /usr/bin/python3.8 /usr/local/bin/python && \
52 | $PIP_INSTALL \
53 | numpy \
54 | scipy \
55 | pandas \
56 | scikit-image \
57 | scikit-learn \
58 | matplotlib \
59 | Cython \
60 | tqdm \
61 | && \
62 | # ==================================================================
63 | # chainer
64 | # ------------------------------------------------------------------
65 |
66 | $PIP_INSTALL \
67 | cupy \
68 | chainer \
69 | && \
70 |
71 | # ==================================================================
72 | # config & cleanup
73 | # ------------------------------------------------------------------
74 |
75 | ldconfig && \
76 | apt-get clean && \
77 | apt-get autoremove && \
78 | rm -rf /var/lib/apt/lists/* /tmp/* ~/*
79 |
--------------------------------------------------------------------------------
/docker/Dockerfile.darknet-cpu:
--------------------------------------------------------------------------------
1 | # ==================================================================
2 | # module list
3 | # ------------------------------------------------------------------
4 | # darknet latest (git)
5 | # ==================================================================
6 |
7 | FROM ubuntu:20.04
8 | ENV LANG C.UTF-8
9 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \
10 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
11 | GIT_CLONE="git clone --depth 10" && \
12 |
13 | rm -rf /var/lib/apt/lists/* \
14 | /etc/apt/sources.list.d/cuda.list \
15 | /etc/apt/sources.list.d/nvidia-ml.list && \
16 |
17 | apt-get update && \
18 |
19 | # ==================================================================
20 | # tools
21 | # ------------------------------------------------------------------
22 |
23 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
24 | build-essential \
25 | apt-utils \
26 | ca-certificates \
27 | wget \
28 | git \
29 | vim \
30 | libssl-dev \
31 | curl \
32 | unzip \
33 | unrar \
34 | cmake \
35 | && \
36 |
37 | # ==================================================================
38 | # darknet
39 | # ------------------------------------------------------------------
40 |
41 | $GIT_CLONE https://github.com/AlexeyAB/darknet ~/darknet && \
42 | cd ~/darknet && \
43 | sed -i 's/GPU=0/GPU=0/g' ~/darknet/Makefile && \
44 | sed -i 's/CUDNN=0/CUDNN=0/g' ~/darknet/Makefile && \
45 | make -j"$(nproc)" && \
46 | cp ~/darknet/include/* /usr/local/include && \
47 | cp ~/darknet/darknet /usr/local/bin && \
48 |
49 | # ==================================================================
50 | # config & cleanup
51 | # ------------------------------------------------------------------
52 |
53 | ldconfig && \
54 | apt-get clean && \
55 | apt-get autoremove && \
56 | rm -rf /var/lib/apt/lists/* /tmp/* ~/*
57 |
--------------------------------------------------------------------------------
/docker/Dockerfile.darknet-cu113:
--------------------------------------------------------------------------------
1 | # ==================================================================
2 | # module list
3 | # ------------------------------------------------------------------
4 | # darknet latest (git)
5 | # ==================================================================
6 |
7 | FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04
8 | ENV LANG C.UTF-8
9 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \
10 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
11 | GIT_CLONE="git clone --depth 10" && \
12 |
13 | rm -rf /var/lib/apt/lists/* \
14 | /etc/apt/sources.list.d/cuda.list \
15 | /etc/apt/sources.list.d/nvidia-ml.list && \
16 |
17 | apt-get update && \
18 |
19 | # ==================================================================
20 | # tools
21 | # ------------------------------------------------------------------
22 |
23 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
24 | build-essential \
25 | apt-utils \
26 | ca-certificates \
27 | wget \
28 | git \
29 | vim \
30 | libssl-dev \
31 | curl \
32 | unzip \
33 | unrar \
34 | cmake \
35 | && \
36 |
37 | # ==================================================================
38 | # darknet
39 | # ------------------------------------------------------------------
40 |
41 | $GIT_CLONE https://github.com/AlexeyAB/darknet ~/darknet && \
42 | cd ~/darknet && \
43 | sed -i 's/GPU=0/GPU=1/g' ~/darknet/Makefile && \
44 | sed -i 's/CUDNN=0/CUDNN=1/g' ~/darknet/Makefile && \
45 | make -j"$(nproc)" && \
46 | cp ~/darknet/include/* /usr/local/include && \
47 | cp ~/darknet/darknet /usr/local/bin && \
48 |
49 | # ==================================================================
50 | # config & cleanup
51 | # ------------------------------------------------------------------
52 |
53 | ldconfig && \
54 | apt-get clean && \
55 | apt-get autoremove && \
56 | rm -rf /var/lib/apt/lists/* /tmp/* ~/*
57 |
--------------------------------------------------------------------------------
/docker/Dockerfile.keras-py38-cpu:
--------------------------------------------------------------------------------
1 | # ==================================================================
2 | # module list
3 | # ------------------------------------------------------------------
4 | # python 3.8 (apt)
5 | # tensorflow latest (pip)
6 | # keras latest (pip)
7 | # ==================================================================
8 |
9 | FROM ubuntu:20.04
10 | ENV LANG C.UTF-8
11 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \
12 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
13 | GIT_CLONE="git clone --depth 10" && \
14 |
15 | rm -rf /var/lib/apt/lists/* \
16 | /etc/apt/sources.list.d/cuda.list \
17 | /etc/apt/sources.list.d/nvidia-ml.list && \
18 |
19 | apt-get update && \
20 |
21 | # ==================================================================
22 | # tools
23 | # ------------------------------------------------------------------
24 |
25 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
26 | build-essential \
27 | apt-utils \
28 | ca-certificates \
29 | wget \
30 | git \
31 | vim \
32 | libssl-dev \
33 | curl \
34 | unzip \
35 | unrar \
36 | cmake \
37 | && \
38 |
39 | # ==================================================================
40 | # python
41 | # ------------------------------------------------------------------
42 |
43 | apt-get update && \
44 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
45 | python3.8 \
46 | python3.8-dev \
47 | python3.8-distutils \
48 | && \
49 | wget -O ~/get-pip.py \
50 | https://bootstrap.pypa.io/get-pip.py && \
51 | python3.8 ~/get-pip.py && \
52 | ln -s /usr/bin/python3.8 /usr/local/bin/python && \
53 | $PIP_INSTALL \
54 | numpy \
55 | scipy \
56 | pandas \
57 | scikit-image \
58 | scikit-learn \
59 | matplotlib \
60 | Cython \
61 | tqdm \
62 | && \
63 | # ==================================================================
64 | # tensorflow
65 | # ------------------------------------------------------------------
66 |
67 | $PIP_INSTALL \
68 | tensorflow \
69 | && \
70 |
71 | # ==================================================================
72 | # keras
73 | # ------------------------------------------------------------------
74 |
75 | # Now Keras comes packaged with TensorFlow 2
76 | # as tensorflow.keras. To start using Keras,
77 | # simply install TensorFlow 2.
78 |
79 | # ==================================================================
80 | # config & cleanup
81 | # ------------------------------------------------------------------
82 |
83 | ldconfig && \
84 | apt-get clean && \
85 | apt-get autoremove && \
86 | rm -rf /var/lib/apt/lists/* /tmp/* ~/*
87 |
88 | EXPOSE 6006
89 |
--------------------------------------------------------------------------------
/docker/Dockerfile.keras-py38-cu113:
--------------------------------------------------------------------------------
1 | # ==================================================================
2 | # module list
3 | # ------------------------------------------------------------------
4 | # python 3.8 (apt)
5 | # tensorflow latest (pip)
6 | # keras latest (pip)
7 | # ==================================================================
8 |
9 | FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04
10 | ENV LANG C.UTF-8
11 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \
12 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
13 | GIT_CLONE="git clone --depth 10" && \
14 |
15 | rm -rf /var/lib/apt/lists/* \
16 | /etc/apt/sources.list.d/cuda.list \
17 | /etc/apt/sources.list.d/nvidia-ml.list && \
18 |
19 | apt-get update && \
20 |
21 | # ==================================================================
22 | # tools
23 | # ------------------------------------------------------------------
24 |
25 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
26 | build-essential \
27 | apt-utils \
28 | ca-certificates \
29 | wget \
30 | git \
31 | vim \
32 | libssl-dev \
33 | curl \
34 | unzip \
35 | unrar \
36 | cmake \
37 | && \
38 |
39 | # ==================================================================
40 | # python
41 | # ------------------------------------------------------------------
42 |
43 | apt-get update && \
44 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
45 | python3.8 \
46 | python3.8-dev \
47 | python3.8-distutils \
48 | && \
49 | wget -O ~/get-pip.py \
50 | https://bootstrap.pypa.io/get-pip.py && \
51 | python3.8 ~/get-pip.py && \
52 | ln -s /usr/bin/python3.8 /usr/local/bin/python && \
53 | $PIP_INSTALL \
54 | numpy \
55 | scipy \
56 | pandas \
57 | scikit-image \
58 | scikit-learn \
59 | matplotlib \
60 | Cython \
61 | tqdm \
62 | && \
63 | # ==================================================================
64 | # tensorflow
65 | # ------------------------------------------------------------------
66 |
67 | $PIP_INSTALL \
68 | tensorflow-gpu \
69 | && \
70 |
71 | # ==================================================================
72 | # keras
73 | # ------------------------------------------------------------------
74 |
75 | # Now Keras comes packaged with TensorFlow 2
76 | # as tensorflow.keras. To start using Keras,
77 | # simply install TensorFlow 2.
78 |
79 | # ==================================================================
80 | # config & cleanup
81 | # ------------------------------------------------------------------
82 |
83 | ldconfig && \
84 | apt-get clean && \
85 | apt-get autoremove && \
86 | rm -rf /var/lib/apt/lists/* /tmp/* ~/*
87 |
88 | EXPOSE 6006
89 |
--------------------------------------------------------------------------------
/docker/Dockerfile.mxnet-py38-cpu:
--------------------------------------------------------------------------------
1 | # ==================================================================
2 | # module list
3 | # ------------------------------------------------------------------
4 | # python 3.8 (apt)
5 | # mxnet latest (pip)
6 | # ==================================================================
7 |
8 | FROM ubuntu:20.04
9 | ENV LANG C.UTF-8
10 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \
11 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
12 | GIT_CLONE="git clone --depth 10" && \
13 |
14 | rm -rf /var/lib/apt/lists/* \
15 | /etc/apt/sources.list.d/cuda.list \
16 | /etc/apt/sources.list.d/nvidia-ml.list && \
17 |
18 | apt-get update && \
19 |
20 | # ==================================================================
21 | # tools
22 | # ------------------------------------------------------------------
23 |
24 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
25 | build-essential \
26 | apt-utils \
27 | ca-certificates \
28 | wget \
29 | git \
30 | vim \
31 | libssl-dev \
32 | curl \
33 | unzip \
34 | unrar \
35 | cmake \
36 | && \
37 |
38 | # ==================================================================
39 | # python
40 | # ------------------------------------------------------------------
41 |
42 | apt-get update && \
43 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
44 | python3.8 \
45 | python3.8-dev \
46 | python3.8-distutils \
47 | && \
48 | wget -O ~/get-pip.py \
49 | https://bootstrap.pypa.io/get-pip.py && \
50 | python3.8 ~/get-pip.py && \
51 | ln -s /usr/bin/python3.8 /usr/local/bin/python && \
52 | $PIP_INSTALL \
53 | numpy \
54 | scipy \
55 | pandas \
56 | scikit-image \
57 | scikit-learn \
58 | matplotlib \
59 | Cython \
60 | tqdm \
61 | && \
62 | # ==================================================================
63 | # mxnet
64 | # ------------------------------------------------------------------
65 |
66 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
67 | libatlas-base-dev \
68 | graphviz \
69 | && \
70 |
71 | $PIP_INSTALL \
72 | mxnet \
73 | graphviz \
74 | && \
75 |
76 | # ==================================================================
77 | # config & cleanup
78 | # ------------------------------------------------------------------
79 |
80 | ldconfig && \
81 | apt-get clean && \
82 | apt-get autoremove && \
83 | rm -rf /var/lib/apt/lists/* /tmp/* ~/*
84 |
--------------------------------------------------------------------------------
/docker/Dockerfile.mxnet-py38-cu113:
--------------------------------------------------------------------------------
1 | # ==================================================================
2 | # module list
3 | # ------------------------------------------------------------------
4 | # python 3.8 (apt)
5 | # mxnet latest (pip)
6 | # ==================================================================
7 |
8 | FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04
9 | ENV LANG C.UTF-8
10 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \
11 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
12 | GIT_CLONE="git clone --depth 10" && \
13 |
14 | rm -rf /var/lib/apt/lists/* \
15 | /etc/apt/sources.list.d/cuda.list \
16 | /etc/apt/sources.list.d/nvidia-ml.list && \
17 |
18 | apt-get update && \
19 |
20 | # ==================================================================
21 | # tools
22 | # ------------------------------------------------------------------
23 |
24 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
25 | build-essential \
26 | apt-utils \
27 | ca-certificates \
28 | wget \
29 | git \
30 | vim \
31 | libssl-dev \
32 | curl \
33 | unzip \
34 | unrar \
35 | cmake \
36 | && \
37 |
38 | # ==================================================================
39 | # python
40 | # ------------------------------------------------------------------
41 |
42 | apt-get update && \
43 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
44 | python3.8 \
45 | python3.8-dev \
46 | python3.8-distutils \
47 | && \
48 | wget -O ~/get-pip.py \
49 | https://bootstrap.pypa.io/get-pip.py && \
50 | python3.8 ~/get-pip.py && \
51 | ln -s /usr/bin/python3.8 /usr/local/bin/python && \
52 | $PIP_INSTALL \
53 | numpy \
54 | scipy \
55 | pandas \
56 | scikit-image \
57 | scikit-learn \
58 | matplotlib \
59 | Cython \
60 | tqdm \
61 | && \
62 | # ==================================================================
63 | # mxnet
64 | # ------------------------------------------------------------------
65 |
66 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
67 | libatlas-base-dev \
68 | graphviz \
69 | && \
70 |
71 | $PIP_INSTALL \
72 | mxnet-cu112 \
73 | graphviz \
74 | && \
75 |
76 | # ==================================================================
77 | # config & cleanup
78 | # ------------------------------------------------------------------
79 |
80 | ldconfig && \
81 | apt-get clean && \
82 | apt-get autoremove && \
83 | rm -rf /var/lib/apt/lists/* /tmp/* ~/*
84 |
--------------------------------------------------------------------------------
/docker/Dockerfile.paddle-py38-cpu:
--------------------------------------------------------------------------------
1 | # ==================================================================
2 | # module list
3 | # ------------------------------------------------------------------
4 | # python 3.8 (apt)
5 | # paddle latest (pip)
6 | # ==================================================================
7 |
8 | FROM ubuntu:20.04
9 | ENV LANG C.UTF-8
10 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \
11 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
12 | GIT_CLONE="git clone --depth 10" && \
13 |
14 | rm -rf /var/lib/apt/lists/* \
15 | /etc/apt/sources.list.d/cuda.list \
16 | /etc/apt/sources.list.d/nvidia-ml.list && \
17 |
18 | apt-get update && \
19 |
20 | # ==================================================================
21 | # tools
22 | # ------------------------------------------------------------------
23 |
24 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
25 | build-essential \
26 | apt-utils \
27 | ca-certificates \
28 | wget \
29 | git \
30 | vim \
31 | libssl-dev \
32 | curl \
33 | unzip \
34 | unrar \
35 | cmake \
36 | && \
37 |
38 | # ==================================================================
39 | # python
40 | # ------------------------------------------------------------------
41 |
42 | apt-get update && \
43 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
44 | python3.8 \
45 | python3.8-dev \
46 | python3.8-distutils \
47 | && \
48 | wget -O ~/get-pip.py \
49 | https://bootstrap.pypa.io/get-pip.py && \
50 | python3.8 ~/get-pip.py && \
51 | ln -s /usr/bin/python3.8 /usr/local/bin/python && \
52 | $PIP_INSTALL \
53 | numpy \
54 | scipy \
55 | pandas \
56 | scikit-image \
57 | scikit-learn \
58 | matplotlib \
59 | Cython \
60 | tqdm \
61 | && \
62 | # ==================================================================
63 | # paddle
64 | # ------------------------------------------------------------------
65 |
66 | $PIP_INSTALL \
67 | paddlepaddle \
68 | && \
69 |
70 | # ==================================================================
71 | # config & cleanup
72 | # ------------------------------------------------------------------
73 |
74 | ldconfig && \
75 | apt-get clean && \
76 | apt-get autoremove && \
77 | rm -rf /var/lib/apt/lists/* /tmp/* ~/*
78 |
--------------------------------------------------------------------------------
/docker/Dockerfile.paddle-py38-cu113:
--------------------------------------------------------------------------------
1 | # ==================================================================
2 | # module list
3 | # ------------------------------------------------------------------
4 | # python 3.8 (apt)
5 | # paddle latest (pip)
6 | # ==================================================================
7 |
8 | FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04
9 | ENV LANG C.UTF-8
10 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \
11 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
12 | GIT_CLONE="git clone --depth 10" && \
13 |
14 | rm -rf /var/lib/apt/lists/* \
15 | /etc/apt/sources.list.d/cuda.list \
16 | /etc/apt/sources.list.d/nvidia-ml.list && \
17 |
18 | apt-get update && \
19 |
20 | # ==================================================================
21 | # tools
22 | # ------------------------------------------------------------------
23 |
24 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
25 | build-essential \
26 | apt-utils \
27 | ca-certificates \
28 | wget \
29 | git \
30 | vim \
31 | libssl-dev \
32 | curl \
33 | unzip \
34 | unrar \
35 | cmake \
36 | && \
37 |
38 | # ==================================================================
39 | # python
40 | # ------------------------------------------------------------------
41 |
42 | apt-get update && \
43 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
44 | python3.8 \
45 | python3.8-dev \
46 | python3.8-distutils \
47 | && \
48 | wget -O ~/get-pip.py \
49 | https://bootstrap.pypa.io/get-pip.py && \
50 | python3.8 ~/get-pip.py && \
51 | ln -s /usr/bin/python3.8 /usr/local/bin/python && \
52 | $PIP_INSTALL \
53 | numpy \
54 | scipy \
55 | pandas \
56 | scikit-image \
57 | scikit-learn \
58 | matplotlib \
59 | Cython \
60 | tqdm \
61 | && \
62 | # ==================================================================
63 | # paddle
64 | # ------------------------------------------------------------------
65 |
66 | $PIP_INSTALL \
67 | paddlepaddle-gpu \
68 | && \
69 |
70 | # ==================================================================
71 | # config & cleanup
72 | # ------------------------------------------------------------------
73 |
74 | ldconfig && \
75 | apt-get clean && \
76 | apt-get autoremove && \
77 | rm -rf /var/lib/apt/lists/* /tmp/* ~/*
78 |
--------------------------------------------------------------------------------
/docker/Dockerfile.pytorch-py38-cpu:
--------------------------------------------------------------------------------
1 | # ==================================================================
2 | # module list
3 | # ------------------------------------------------------------------
4 | # python 3.8 (apt)
5 | # pytorch latest (pip)
6 | # ==================================================================
7 |
8 | FROM ubuntu:20.04
9 | ENV LANG C.UTF-8
10 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \
11 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
12 | GIT_CLONE="git clone --depth 10" && \
13 |
14 | rm -rf /var/lib/apt/lists/* \
15 | /etc/apt/sources.list.d/cuda.list \
16 | /etc/apt/sources.list.d/nvidia-ml.list && \
17 |
18 | apt-get update && \
19 |
20 | # ==================================================================
21 | # tools
22 | # ------------------------------------------------------------------
23 |
24 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
25 | build-essential \
26 | apt-utils \
27 | ca-certificates \
28 | wget \
29 | git \
30 | vim \
31 | libssl-dev \
32 | curl \
33 | unzip \
34 | unrar \
35 | cmake \
36 | && \
37 |
38 | # ==================================================================
39 | # python
40 | # ------------------------------------------------------------------
41 |
42 | apt-get update && \
43 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
44 | python3.8 \
45 | python3.8-dev \
46 | python3.8-distutils \
47 | && \
48 | wget -O ~/get-pip.py \
49 | https://bootstrap.pypa.io/get-pip.py && \
50 | python3.8 ~/get-pip.py && \
51 | ln -s /usr/bin/python3.8 /usr/local/bin/python && \
52 | $PIP_INSTALL \
53 | numpy \
54 | scipy \
55 | pandas \
56 | scikit-image \
57 | scikit-learn \
58 | matplotlib \
59 | Cython \
60 | tqdm \
61 | && \
62 | # ==================================================================
63 | # pytorch
64 | # ------------------------------------------------------------------
65 |
66 | $PIP_INSTALL \
67 | future \
68 | numpy \
69 | protobuf \
70 | enum34 \
71 | pyyaml \
72 | typing \
73 | && \
74 | $PIP_INSTALL \
75 | --pre torch torchvision torchaudio -f \
76 | https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html \
77 | && \
78 |
79 | # ==================================================================
80 | # config & cleanup
81 | # ------------------------------------------------------------------
82 |
83 | ldconfig && \
84 | apt-get clean && \
85 | apt-get autoremove && \
86 | rm -rf /var/lib/apt/lists/* /tmp/* ~/*
87 |
--------------------------------------------------------------------------------
/docker/Dockerfile.pytorch-py38-cu113:
--------------------------------------------------------------------------------
1 | # ==================================================================
2 | # module list
3 | # ------------------------------------------------------------------
4 | # python 3.8 (apt)
5 | # pytorch latest (pip)
6 | # ==================================================================
7 |
8 | FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04
9 | ENV LANG C.UTF-8
10 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \
11 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
12 | GIT_CLONE="git clone --depth 10" && \
13 |
14 | rm -rf /var/lib/apt/lists/* \
15 | /etc/apt/sources.list.d/cuda.list \
16 | /etc/apt/sources.list.d/nvidia-ml.list && \
17 |
18 | apt-get update && \
19 |
20 | # ==================================================================
21 | # tools
22 | # ------------------------------------------------------------------
23 |
24 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
25 | build-essential \
26 | apt-utils \
27 | ca-certificates \
28 | wget \
29 | git \
30 | vim \
31 | libssl-dev \
32 | curl \
33 | unzip \
34 | unrar \
35 | cmake \
36 | && \
37 |
38 | # ==================================================================
39 | # python
40 | # ------------------------------------------------------------------
41 |
42 | apt-get update && \
43 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
44 | python3.8 \
45 | python3.8-dev \
46 | python3.8-distutils \
47 | && \
48 | wget -O ~/get-pip.py \
49 | https://bootstrap.pypa.io/get-pip.py && \
50 | python3.8 ~/get-pip.py && \
51 | ln -s /usr/bin/python3.8 /usr/local/bin/python && \
52 | $PIP_INSTALL \
53 | numpy \
54 | scipy \
55 | pandas \
56 | scikit-image \
57 | scikit-learn \
58 | matplotlib \
59 | Cython \
60 | tqdm \
61 | && \
62 | # ==================================================================
63 | # pytorch
64 | # ------------------------------------------------------------------
65 |
66 | $PIP_INSTALL \
67 | future \
68 | numpy \
69 | protobuf \
70 | enum34 \
71 | pyyaml \
72 | typing \
73 | && \
74 | $PIP_INSTALL \
75 | --pre torch torchvision torchaudio -f \
76 | https://download.pytorch.org/whl/nightly/cu113/torch_nightly.html \
77 | && \
78 |
79 | # ==================================================================
80 | # config & cleanup
81 | # ------------------------------------------------------------------
82 |
83 | ldconfig && \
84 | apt-get clean && \
85 | apt-get autoremove && \
86 | rm -rf /var/lib/apt/lists/* /tmp/* ~/*
87 |
--------------------------------------------------------------------------------
/docker/Dockerfile.tensorflow-py38-cpu:
--------------------------------------------------------------------------------
1 | # ==================================================================
2 | # module list
3 | # ------------------------------------------------------------------
4 | # python 3.8 (apt)
5 | # tensorflow latest (pip)
6 | # ==================================================================
7 |
8 | FROM ubuntu:20.04
9 | ENV LANG C.UTF-8
10 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \
11 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
12 | GIT_CLONE="git clone --depth 10" && \
13 |
14 | rm -rf /var/lib/apt/lists/* \
15 | /etc/apt/sources.list.d/cuda.list \
16 | /etc/apt/sources.list.d/nvidia-ml.list && \
17 |
18 | apt-get update && \
19 |
20 | # ==================================================================
21 | # tools
22 | # ------------------------------------------------------------------
23 |
24 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
25 | build-essential \
26 | apt-utils \
27 | ca-certificates \
28 | wget \
29 | git \
30 | vim \
31 | libssl-dev \
32 | curl \
33 | unzip \
34 | unrar \
35 | cmake \
36 | && \
37 |
38 | # ==================================================================
39 | # python
40 | # ------------------------------------------------------------------
41 |
42 | apt-get update && \
43 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
44 | python3.8 \
45 | python3.8-dev \
46 | python3.8-distutils \
47 | && \
48 | wget -O ~/get-pip.py \
49 | https://bootstrap.pypa.io/get-pip.py && \
50 | python3.8 ~/get-pip.py && \
51 | ln -s /usr/bin/python3.8 /usr/local/bin/python && \
52 | $PIP_INSTALL \
53 | numpy \
54 | scipy \
55 | pandas \
56 | scikit-image \
57 | scikit-learn \
58 | matplotlib \
59 | Cython \
60 | tqdm \
61 | && \
62 | # ==================================================================
63 | # tensorflow
64 | # ------------------------------------------------------------------
65 |
66 | $PIP_INSTALL \
67 | tensorflow \
68 | && \
69 |
70 | # ==================================================================
71 | # config & cleanup
72 | # ------------------------------------------------------------------
73 |
74 | ldconfig && \
75 | apt-get clean && \
76 | apt-get autoremove && \
77 | rm -rf /var/lib/apt/lists/* /tmp/* ~/*
78 |
79 | EXPOSE 6006
80 |
--------------------------------------------------------------------------------
/docker/Dockerfile.tensorflow-py38-cu113:
--------------------------------------------------------------------------------
1 | # ==================================================================
2 | # module list
3 | # ------------------------------------------------------------------
4 | # python 3.8 (apt)
5 | # tensorflow latest (pip)
6 | # ==================================================================
7 |
8 | FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu20.04
9 | ENV LANG C.UTF-8
10 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \
11 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
12 | GIT_CLONE="git clone --depth 10" && \
13 |
14 | rm -rf /var/lib/apt/lists/* \
15 | /etc/apt/sources.list.d/cuda.list \
16 | /etc/apt/sources.list.d/nvidia-ml.list && \
17 |
18 | apt-get update && \
19 |
20 | # ==================================================================
21 | # tools
22 | # ------------------------------------------------------------------
23 |
24 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
25 | build-essential \
26 | apt-utils \
27 | ca-certificates \
28 | wget \
29 | git \
30 | vim \
31 | libssl-dev \
32 | curl \
33 | unzip \
34 | unrar \
35 | cmake \
36 | && \
37 |
38 | # ==================================================================
39 | # python
40 | # ------------------------------------------------------------------
41 |
42 | apt-get update && \
43 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
44 | python3.8 \
45 | python3.8-dev \
46 | python3.8-distutils \
47 | && \
48 | wget -O ~/get-pip.py \
49 | https://bootstrap.pypa.io/get-pip.py && \
50 | python3.8 ~/get-pip.py && \
51 | ln -s /usr/bin/python3.8 /usr/local/bin/python && \
52 | $PIP_INSTALL \
53 | numpy \
54 | scipy \
55 | pandas \
56 | scikit-image \
57 | scikit-learn \
58 | matplotlib \
59 | Cython \
60 | tqdm \
61 | && \
62 | # ==================================================================
63 | # tensorflow
64 | # ------------------------------------------------------------------
65 |
66 | $PIP_INSTALL \
67 | tensorflow-gpu \
68 | && \
69 |
70 | # ==================================================================
71 | # config & cleanup
72 | # ------------------------------------------------------------------
73 |
74 | ldconfig && \
75 | apt-get clean && \
76 | apt-get autoremove && \
77 | rm -rf /var/lib/apt/lists/* /tmp/* ~/*
78 |
79 | EXPOSE 6006
80 |
--------------------------------------------------------------------------------
/generator/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ufoym/deepo/d8cbd2c6e063609c42e09b137f7f84f0b1015634/generator/__init__.py
--------------------------------------------------------------------------------
/generator/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ufoym/deepo/d8cbd2c6e063609c42e09b137f7f84f0b1015634/generator/core/__init__.py
--------------------------------------------------------------------------------
/generator/core/composer.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import textwrap
3 | import functools
4 |
5 |
6 | class Composer(object):
7 |
8 | def __init__(self, modules, cuda_ver, cudnn_ver, ubuntu_ver, versions={}):
9 | if len(modules) == 0:
10 | raise ValueError('Modules should contain at least one module')
11 | pending = self._traverse(modules)
12 | self.modules = [m for m in self._toposort(pending)]
13 | self.instances = self._get_instances(versions)
14 | self.cuda_ver = cuda_ver
15 | self.cudnn_ver = cudnn_ver
16 | self.ubuntu_ver = ubuntu_ver
17 |
18 | def get(self):
19 | return self.modules
20 |
21 | def ver(self, module):
22 | for ins in self.instances:
23 | if ins.__class__ is module:
24 | return ins.version
25 | return None
26 |
27 | def to_dockerfile(self):
28 | def _indent(n, s):
29 | prefix = ' ' * 4 * n
30 | return ''.join(prefix + l for l in s.splitlines(True))
31 |
32 | ports = ' '.join([str(p) for m in self.instances for p in m.expose()])
33 | return textwrap.dedent(''.join([
34 | _indent(3, ''.join([
35 | self._split('module list'),
36 | ''.join('# %s\n' % repr(m)
37 | for m in self.instances if repr(m)),
38 | self._split(),
39 | ])),
40 | r'''
41 | FROM %s
42 | ENV LANG C.UTF-8
43 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \
44 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \
45 | GIT_CLONE="git clone --depth 10" && \
46 |
47 | rm -rf /var/lib/apt/lists/* \
48 | /etc/apt/sources.list.d/cuda.list \
49 | /etc/apt/sources.list.d/nvidia-ml.list && \
50 |
51 | apt-get update && \
52 | ''' % ('ubuntu:%s' % self.ubuntu_ver if self.cuda_ver is None
53 | else 'nvidia/cuda:%s%s-devel-ubuntu%s' % (
54 | self.cuda_ver,
55 | '-cudnn%s' % self.cudnn_ver if self.cudnn_ver else '',
56 | self.ubuntu_ver)),
57 | '\n',
58 | '\n'.join([
59 | ''.join([
60 | _indent(3, self._split(m.name())),
61 | _indent(1, m.build()),
62 | ]) for m in self.instances
63 | ]),
64 | '\n',
65 | _indent(3, self._split('config & cleanup')),
66 | r'''
67 | ldconfig && \
68 | apt-get clean && \
69 | apt-get autoremove && \
70 | rm -rf /var/lib/apt/lists/* /tmp/* ~/*
71 | ''',
72 | r'''
73 | EXPOSE %s
74 | ''' % ports if ports else '',
75 | ]))
76 |
77 | def _traverse(self, modules):
78 | seen = set(modules)
79 | current_level = modules
80 | while current_level:
81 | next_level = []
82 | for module in current_level:
83 | yield module
84 | for child in (dep for dep in module.deps if dep not in seen):
85 | next_level.append(child)
86 | seen.add(child)
87 | current_level = next_level
88 |
89 | def _toposort(self, pending):
90 | data = {m: set(m.deps) for m in pending}
91 | for k, v in data.items():
92 | v.discard(k)
93 | extra_items_in_deps = functools.reduce(
94 | set.union, data.values()) - set(data.keys())
95 | data.update({item: set() for item in extra_items_in_deps})
96 | while True:
97 | ordered = set(item for item, dep in data.items() if len(dep) == 0)
98 | if not ordered:
99 | break
100 | for m in sorted(ordered, key=lambda m: m.__name__):
101 | yield m
102 | data = {
103 | item: (dep - ordered)
104 | for item, dep in data.items()
105 | if item not in ordered
106 | }
107 | if len(data) != 0:
108 | raise ValueError(
109 | 'Circular dependencies exist among these items: '
110 | '{{{}}}'.format(', '.join(
111 | '{!r}:{!r}'.format(
112 | key, value) for key, value in sorted(
113 | data.items()))))
114 |
115 | def _split(self, title=None):
116 | split_l = '# ' + '=' * 66 + '\n'
117 | split_s = '# ' + '-' * 66 + '\n'
118 | s = split_l if title is None else (
119 | split_l + '# %s\n' % title + split_s)
120 | return s
121 |
122 | def _get_instances(self, versions):
123 | inses = []
124 | for m in self.modules:
125 | ins = m(self)
126 | if m in versions:
127 | ins.version = versions[m]
128 | inses.append(ins)
129 | return inses
130 |
--------------------------------------------------------------------------------
/generator/generate.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """Console script for generator."""
4 | import argparse
5 | from core.composer import Composer
6 |
7 |
8 | def _import(name):
9 | mname = name.lower()
10 | cname = name.title()
11 | mod = __import__('modules.%s' % mname, fromlist=[cname])
12 | mod = getattr(mod, cname)
13 | return mod
14 |
15 |
16 | def main():
17 | """
18 | Generate a dockerfile according to the given modules to be installed.
19 | """
20 | parser = argparse.ArgumentParser(description='Composer')
21 | parser.add_argument('path')
22 | parser.add_argument('modules', nargs='*')
23 | parser.add_argument('--cuda-ver')
24 | parser.add_argument('--cudnn-ver')
25 | parser.add_argument('--ubuntu-ver', default='20.04')
26 | args = parser.parse_args()
27 |
28 | in_modules = []
29 | versions = {}
30 | for module in args.modules:
31 | terms = module.split('==')
32 | m = _import(terms[0])
33 | in_modules.append(m)
34 | if len(terms) > 1:
35 | versions[m] = terms[1]
36 | composer = Composer(in_modules, args.cuda_ver, args.cudnn_ver, args.ubuntu_ver, versions)
37 | with open(args.path, 'w') as f:
38 | f.write(composer.to_dockerfile())
39 |
40 |
41 | if __name__ == "__main__":
42 | main()
43 |
--------------------------------------------------------------------------------
/generator/modules/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ufoym/deepo/d8cbd2c6e063609c42e09b137f7f84f0b1015634/generator/modules/__init__.py
--------------------------------------------------------------------------------
/generator/modules/__module__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 |
4 | def parametrized(dec):
5 | def layer(*args, **kwargs):
6 | def repl(f):
7 | return dec(f, *args, **kwargs)
8 | return repl
9 | return layer
10 |
11 |
12 | @parametrized
13 | def dependency(module, *_deps):
14 | module.deps = _deps
15 | return module
16 |
17 |
18 | @parametrized
19 | def source(module, _source):
20 | module.source = _source
21 | return module
22 |
23 |
24 | @parametrized
25 | def version(module, _ver):
26 | module.version = _ver
27 | return module
28 |
29 |
30 | @dependency()
31 | @source('unknown')
32 | @version('latest')
33 | class Module(object):
34 |
35 | def __init__(self, composer):
36 | self.composer = composer
37 |
38 | def __repr__(self):
39 | return '%-13s %-6s (%s)' % (
40 | self.name(),
41 | self.version,
42 | self.source)
43 |
44 | def build(self):
45 | pass
46 |
47 | def expose(self):
48 | return []
49 |
50 | def name(self):
51 | return self.__class__.__name__.lower()
52 |
--------------------------------------------------------------------------------
/generator/modules/boost.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source
3 | from .tools import Tools
4 | from .python import Python
5 |
6 |
7 | @dependency(Tools, Python)
8 | @source('src')
9 | class Boost(Module):
10 |
11 | def __repr__(self):
12 | return ''
13 |
14 | # def build(self):
15 | # pyver = self.composer.ver(Python)
16 | # return r'''
17 | # DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
18 | # libboost-all-dev \
19 | # && \
20 | # ''' if pyver == '2.7' else (
21 | # r'''
22 | # wget -O ~/boost.tar.gz '''
23 | # + r'''https://dl.bintray.com/boostorg/release/1.69.0'''
24 | # + r'''/source/boost_1_69_0.tar.gz && \
25 | # tar -zxf ~/boost.tar.gz -C ~ && \
26 | # cd ~/boost_* && \
27 | # ./bootstrap.sh --with-python=python%s && \
28 | # ./b2 install -j"$(nproc)" --prefix=/usr/local && \
29 | # ''' % pyver
30 | # )
31 |
32 | def build(self):
33 | return r'''
34 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
35 | libboost-all-dev \
36 | && \
37 | '''
38 |
--------------------------------------------------------------------------------
/generator/modules/caffe.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source
3 | from .tools import Tools
4 | from .boost import Boost
5 | from .python import Python
6 | from .opencv import Opencv
7 |
8 |
9 | @dependency(Tools, Python, Boost, Opencv)
10 | @source('git')
11 | class Caffe(Module):
12 |
13 | def build(self):
14 | cpu_only = self.composer.cuda_ver is None
15 | return (r'''
16 | apt-get update && \
17 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
18 | caffe-%s \
19 | && \
20 | ''' % ('cpu' if cpu_only else 'cuda')
21 | ).rstrip()
22 |
--------------------------------------------------------------------------------
/generator/modules/chainer.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source
3 | from .python import Python
4 |
5 |
6 | @dependency(Python)
7 | @source('pip')
8 | class Chainer(Module):
9 |
10 | def build(self):
11 | return r'''
12 | $PIP_INSTALL \
13 | '''.rstrip() + (
14 | '' if self.composer.cuda_ver is None else \
15 | r'''
16 | cupy \
17 | '''.rstrip()
18 | ) + r'''
19 | chainer \
20 | && \
21 | '''
22 |
--------------------------------------------------------------------------------
/generator/modules/cntk.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source, version
3 | from .python import Python
4 | from .opencv import Opencv
5 |
6 |
7 | @dependency(Python, Opencv)
8 | @source('pip')
9 | class Cntk(Module):
10 |
11 | def build(self):
12 | return r'''
13 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
14 | openmpi-bin \
15 | libpng-dev \
16 | libjpeg-dev \
17 | libtiff-dev \
18 | && \
19 |
20 | # Fix ImportError for CNTK
21 | ln -s /usr/lib/x86_64-linux-gnu/libmpi_cxx.so.20 /usr/lib/x86_64-linux-gnu/libmpi_cxx.so.1 && \
22 | ln -s /usr/lib/x86_64-linux-gnu/libmpi.so.20.10.1 /usr/lib/x86_64-linux-gnu/libmpi.so.12 && \
23 |
24 | wget --no-verbose -O - https://github.com/01org/mkl-dnn/releases/download/v0.14/mklml_lnx_2018.0.3.20180406.tgz | tar -xzf - && \
25 | cp mklml*/* /usr/local -r && \
26 |
27 | wget --no-verbose -O - https://github.com/01org/mkl-dnn/archive/v0.14.tar.gz | tar -xzf - && \
28 | cd *-0.14 && mkdir build && cd build && \
29 | ln -s /usr/local external && \
30 | cmake -D CMAKE_BUILD_TYPE=RELEASE \
31 | -D CMAKE_INSTALL_PREFIX=/usr/local \
32 | .. && \
33 | make -j"$(nproc)" install && \
34 |
35 | $PIP_INSTALL \
36 | cntk%s \
37 | && \
38 | ''' % ('' if self.composer.cuda_ver is None else '-gpu')
39 |
--------------------------------------------------------------------------------
/generator/modules/darknet.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source
3 | from .tools import Tools
4 |
5 |
6 | @dependency(Tools)
7 | @source('git')
8 | class Darknet(Module):
9 |
10 | def build(self):
11 | use_gpu = 1 if self.composer.cuda_ver else 0
12 |
13 | return r'''
14 | $GIT_CLONE https://github.com/AlexeyAB/darknet ~/darknet && \
15 | cd ~/darknet && \
16 | sed -i 's/GPU=0/GPU=%d/g' ~/darknet/Makefile && \
17 | sed -i 's/CUDNN=0/CUDNN=%d/g' ~/darknet/Makefile && \
18 | make -j"$(nproc)" && \
19 | cp ~/darknet/include/* /usr/local/include && \
20 | cp ~/darknet/darknet /usr/local/bin && \
21 | ''' % (use_gpu, use_gpu)
22 |
--------------------------------------------------------------------------------
/generator/modules/jupyter.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source
3 | from .python import Python
4 |
5 |
6 | @dependency(Python)
7 | @source('pip')
8 | class Jupyter(Module):
9 |
10 | def build(self):
11 | return r'''
12 | $PIP_INSTALL \
13 | jupyter \
14 | && \
15 | '''
16 |
17 | def expose(self):
18 | return [
19 | 8888, # expose port for jupyter
20 | ]
21 |
--------------------------------------------------------------------------------
/generator/modules/jupyterlab.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source
3 | from .python import Python
4 | from .jupyter import Jupyter
5 |
6 |
7 | @dependency(Python, Jupyter)
8 | @source('pip')
9 | class Jupyterlab(Module):
10 |
11 | def build(self):
12 | return r'''
13 | $PIP_INSTALL \
14 | jupyterlab \
15 | && \
16 | '''
17 |
--------------------------------------------------------------------------------
/generator/modules/keras.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source
3 | from .python import Python
4 | from .tensorflow import Tensorflow
5 |
6 |
7 | @dependency(Python, Tensorflow)
8 | @source('pip')
9 | class Keras(Module):
10 |
11 | def build(self):
12 | return r'''
13 | # Now Keras comes packaged with TensorFlow 2
14 | # as tensorflow.keras. To start using Keras,
15 | # simply install TensorFlow 2.
16 | '''
17 |
--------------------------------------------------------------------------------
/generator/modules/lasagne.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source
3 | from .tools import Tools
4 | from .python import Python
5 | from .theano import Theano
6 |
7 |
8 | @dependency(Tools, Python, Theano)
9 | @source('git')
10 | class Lasagne(Module):
11 |
12 | def build(self):
13 | return r'''
14 | $GIT_CLONE https://github.com/Lasagne/Lasagne ~/lasagne && \
15 | cd ~/lasagne && \
16 | $PIP_INSTALL \
17 | . && \
18 | '''
19 |
--------------------------------------------------------------------------------
/generator/modules/mxnet.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source
3 | from .python import Python
4 |
5 |
6 | @dependency(Python)
7 | @source('pip')
8 | class Mxnet(Module):
9 |
10 | def build(self):
11 | cuver = '' if self.composer.cuda_ver is None else '-cu%s' % ''.join(self.composer.cuda_ver.split('.')[:2])
12 | if cuver == '-cu113':
13 | cuver = '-cu112' # mxnet does not support cu113
14 | return r'''
15 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
16 | libatlas-base-dev \
17 | graphviz \
18 | && \
19 |
20 | $PIP_INSTALL \
21 | mxnet%s \
22 | graphviz \
23 | && \
24 | ''' % cuver
25 |
--------------------------------------------------------------------------------
/generator/modules/onnx.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source
3 | from .python import Python
4 |
5 |
6 | @dependency(Python)
7 | @source('pip')
8 | class Onnx(Module):
9 |
10 | def build(self):
11 | return r'''
12 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
13 | protobuf-compiler \
14 | libprotoc-dev \
15 | && \
16 |
17 | $PIP_INSTALL \
18 | numpy \
19 | protobuf \
20 | onnx \
21 | onnxruntime%s \
22 | && \
23 | ''' % ('' if self.composer.cuda_ver is None else '-gpu')
24 |
--------------------------------------------------------------------------------
/generator/modules/opencv.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source, version
3 | from .tools import Tools
4 | from .boost import Boost
5 | from .python import Python
6 |
7 |
8 | @dependency(Tools, Python, Boost)
9 | @source('git')
10 | @version('4.5.4')
11 | class Opencv(Module):
12 |
13 | def build(self):
14 | return r'''
15 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
16 | libatlas-base-dev \
17 | libgflags-dev \
18 | libgoogle-glog-dev \
19 | libhdf5-serial-dev \
20 | libleveldb-dev \
21 | liblmdb-dev \
22 | libprotobuf-dev \
23 | libsnappy-dev \
24 | protobuf-compiler \
25 | && \
26 |
27 | $GIT_CLONE --branch %s https://github.com/opencv/opencv ~/opencv && \
28 | mkdir -p ~/opencv/build && cd ~/opencv/build && \
29 | cmake -D CMAKE_BUILD_TYPE=RELEASE \
30 | -D CMAKE_INSTALL_PREFIX=/usr/local \
31 | -D WITH_IPP=OFF \
32 | -D WITH_CUDA=OFF \
33 | -D WITH_OPENCL=OFF \
34 | -D BUILD_TESTS=OFF \
35 | -D BUILD_PERF_TESTS=OFF \
36 | -D BUILD_DOCS=OFF \
37 | -D BUILD_EXAMPLES=OFF \
38 | .. && \
39 | make -j"$(nproc)" install && \
40 | ln -s /usr/local/include/opencv4/opencv2 /usr/local/include/opencv2 && \
41 | ''' % self.version
42 |
--------------------------------------------------------------------------------
/generator/modules/paddle.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source, version
3 | from .python import Python
4 |
5 |
6 | @dependency(Python)
7 | @source('pip')
8 | class Paddle(Module):
9 |
10 | def build(self):
11 | return r'''
12 | $PIP_INSTALL \
13 | paddlepaddle%s \
14 | && \
15 | ''' % ('' if self.composer.cuda_ver is None else '-gpu')
16 |
--------------------------------------------------------------------------------
/generator/modules/python.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source, version
3 | from .tools import Tools
4 |
5 |
6 | @dependency(Tools)
7 | @version('3.8')
8 | @source('apt')
9 | class Python(Module):
10 |
11 | def __init__(self, manager, **args):
12 |         super(Python, self).__init__(manager, **args)
13 |         if tuple(int(v) for v in self.version.split('.')[:2]) < (3, 8):
14 | raise NotImplementedError('Only support python >= 3.8 currently.')
15 |
16 | def build(self):
17 | return (
18 | r'''
19 | apt-get update && \
20 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
21 | python%s \
22 | python%s-dev \
23 | python%s-distutils \
24 | && \
25 | wget -O ~/get-pip.py \
26 | https://bootstrap.pypa.io/get-pip.py && \
27 | python%s ~/get-pip.py && \
28 | ln -s /usr/bin/python%s /usr/local/bin/python && \
29 | ''' % tuple([self.version] * 5)
30 | ).rstrip() + (r'''
31 | $PIP_INSTALL \
32 | numpy \
33 | scipy \
34 | pandas \
35 | scikit-image \
36 | scikit-learn \
37 | matplotlib \
38 | Cython \
39 | tqdm \
40 | && \
41 | '''
42 | ).rstrip()
43 |
--------------------------------------------------------------------------------
/generator/modules/pytorch.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source
3 | from .python import Python
4 |
5 |
6 | @dependency(Python)
7 | @source('pip')
8 | class Pytorch(Module):
9 |
10 | def build(self):
11 | cuver = 'cpu' if self.composer.cuda_ver is None else 'cu%s' % ''.join(self.composer.cuda_ver.split('.')[:2])
12 | return r'''
13 | $PIP_INSTALL \
14 | future \
15 | numpy \
16 | protobuf \
17 | enum34 \
18 | pyyaml \
19 | typing \
20 | && \
21 | $PIP_INSTALL \
22 | --pre torch torchvision torchaudio -f \
23 | https://download.pytorch.org/whl/nightly/%s/torch_nightly.html \
24 | && \
25 | ''' % cuver
26 |
--------------------------------------------------------------------------------
/generator/modules/sonnet.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source
3 | from .python import Python
4 | from .tensorflow import Tensorflow
5 |
6 |
7 | @dependency(Python, Tensorflow)
8 | @source('pip')
9 | class Sonnet(Module):
10 |
11 | def build(self):
12 | return r'''
13 | $PIP_INSTALL \
14 | tensorflow_probability \
15 | "dm-sonnet>=2.0.0b0" --pre \
16 | && \
17 | '''
18 |
--------------------------------------------------------------------------------
/generator/modules/tensorflow.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source, version
3 | from .python import Python
4 |
5 |
6 | @dependency(Python)
7 | @version('latest')
8 | @source('pip')
9 | class Tensorflow(Module):
10 |
11 | def __init__(self, manager, **args):
12 |         super(Tensorflow, self).__init__(manager, **args)
13 | if self.version not in ('1.13.1', '2.0.0a0', 'latest'):
14 | raise NotImplementedError('unsupported tensorflow version')
15 |
16 | def build(self):
17 | is_gpu = '' if self.composer.cuda_ver is None else '-gpu'
18 | tensorflow_version = ('tensorflow%s' % is_gpu) if self.version == 'latest' else ("tensorflow%s==%s" % (is_gpu, self.version))
19 | return r'''
20 | $PIP_INSTALL \
21 | %s \
22 | && \
23 | ''' % tensorflow_version
24 |
25 | def expose(self):
26 | return [
27 | 6006, # expose port for TensorBoard
28 | ]
29 |
--------------------------------------------------------------------------------
/generator/modules/theano.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source, version
3 | from .tools import Tools
4 | from .python import Python
5 |
6 |
7 | @dependency(Tools, Python)
8 | @source('git')
9 | class Theano(Module):
10 |
11 | def build(self):
12 | return r'''
13 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
14 | libblas-dev \
15 | && \
16 | ''' + (
17 | '' if self.composer.cuda_ver is None else r'''
18 | wget -qO- https://github.com/Theano/libgpuarray/archive/v0.7.6.tar.gz | tar xz -C ~ && \
19 | cd ~/libgpuarray* && mkdir -p build && cd build && \
20 | cmake -D CMAKE_BUILD_TYPE=RELEASE \
21 | -D CMAKE_INSTALL_PREFIX=/usr/local \
22 | .. && \
23 | make -j"$(nproc)" install && \
24 | cd ~/libgpuarray* && \
25 | python setup.py build && \
26 | python setup.py install && \
27 |
28 | printf '[global]\nfloatX = float32\ndevice = cuda0\n\n[dnn]\n'''
29 | + r'''include_path = /usr/local/cuda/targets'''
30 | + r'''/x86_64-linux/include\n' > ~/.theanorc && \
31 | ''') + r'''
32 | $PIP_INSTALL \
33 | https://github.com/Theano/Theano/archive/master.zip \
34 | && \
35 | '''
36 |
--------------------------------------------------------------------------------
/generator/modules/tools.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, source
3 |
4 |
5 | @source('apt')
6 | class Tools(Module):
7 |
8 | def __repr__(self):
9 | return ''
10 |
11 | def build(self):
12 | return r'''
13 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
14 | build-essential \
15 | apt-utils \
16 | ca-certificates \
17 | wget \
18 | git \
19 | vim \
20 | libssl-dev \
21 | curl \
22 | unzip \
23 | unrar \
24 | cmake \
25 | && \
26 | '''
27 |
--------------------------------------------------------------------------------
/generator/modules/torch.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .__module__ import Module, dependency, source
3 | from .tools import Tools
4 |
5 |
6 | @dependency(Tools)
7 | @source('git')
8 | class Torch(Module):
9 |
10 | def build(self):
11 | return r'''
12 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \
13 | sudo \
14 | && \
15 |
16 | $GIT_CLONE https://github.com/nagadomi/distro.git ~/torch --recursive && \
17 | cd ~/torch && \
18 | bash install-deps && \
19 | sed -i 's/${THIS_DIR}\/install/\/usr\/local/g' ./install.sh && \
20 | ./install.sh && \
21 | '''
22 |
--------------------------------------------------------------------------------
/scripts/README.md:
--------------------------------------------------------------------------------
1 | # How to generate `docker/Dockerfile.*` and `.github/workflows/dockerimage.yml`
2 |
3 | #### Step 1. generate scripts for generating dockerfiles
4 |
5 | ```bash
6 | python make-gen-docker.py
7 | ```
8 |
9 | This should generate `gen-docker.sh`.
10 |
11 | #### Step 2. generate dockerfiles
12 |
13 | Run `gen-docker.sh`, which should generate `docker/Dockerfile.*`.
14 |
15 |
16 | #### Step 3. generate the CI configuration
17 |
18 | ```bash
19 | python make-ci.py
20 | ```
21 |
22 | This should generate the GitHub Actions workflow `.github/workflows/dockerimage.yml`.
23 |
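24 | #### Example: generating a single Dockerfile directly
25 |
26 | Each line in `gen-docker.sh` is a single call to the generator. For reference, here is one of those calls, copied from the generated `gen-docker.sh` (run it from the `scripts` directory so the relative paths resolve):
27 |
28 | ```bash
29 | python ../generator/generate.py ../docker/Dockerfile.pytorch-py38-cu113 pytorch python==3.8 --cuda-ver 11.3.1 --cudnn-ver 8
30 | ```
31 |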
--------------------------------------------------------------------------------
/scripts/build.sh:
--------------------------------------------------------------------------------
1 | # bash clean.sh
2 | python make-gen-docker.py
3 | bash gen-docker.sh
4 | python make-ci.py
5 |
--------------------------------------------------------------------------------
/scripts/clean.sh:
--------------------------------------------------------------------------------
1 | rm gen-docker.sh
2 | rm ../docker/*
3 | rm ../.github/workflows/dockerimage.yml
--------------------------------------------------------------------------------
/scripts/gen-docker.sh:
--------------------------------------------------------------------------------
1 | python ../generator/generate.py ../docker/Dockerfile.tensorflow-py38-cpu tensorflow python==3.8
2 | python ../generator/generate.py ../docker/Dockerfile.mxnet-py38-cpu mxnet python==3.8
3 | python ../generator/generate.py ../docker/Dockerfile.keras-py38-cpu keras python==3.8
4 | python ../generator/generate.py ../docker/Dockerfile.pytorch-py38-cpu pytorch python==3.8
5 | python ../generator/generate.py ../docker/Dockerfile.chainer-py38-cpu chainer python==3.8
6 | python ../generator/generate.py ../docker/Dockerfile.darknet-cpu darknet
7 | python ../generator/generate.py ../docker/Dockerfile.paddle-py38-cpu paddle python==3.8
8 | python ../generator/generate.py ../docker/Dockerfile.all-py38-cpu tensorflow mxnet keras pytorch chainer darknet paddle python==3.8 onnx jupyterlab
9 | python ../generator/generate.py ../docker/Dockerfile.tensorflow-py38-cu113 tensorflow python==3.8 --cuda-ver 11.3.1 --cudnn-ver 8
10 | python ../generator/generate.py ../docker/Dockerfile.mxnet-py38-cu113 mxnet python==3.8 --cuda-ver 11.3.1 --cudnn-ver 8
11 | python ../generator/generate.py ../docker/Dockerfile.keras-py38-cu113 keras python==3.8 --cuda-ver 11.3.1 --cudnn-ver 8
12 | python ../generator/generate.py ../docker/Dockerfile.pytorch-py38-cu113 pytorch python==3.8 --cuda-ver 11.3.1 --cudnn-ver 8
13 | python ../generator/generate.py ../docker/Dockerfile.chainer-py38-cu113 chainer python==3.8 --cuda-ver 11.3.1 --cudnn-ver 8
14 | python ../generator/generate.py ../docker/Dockerfile.darknet-cu113 darknet --cuda-ver 11.3.1 --cudnn-ver 8
15 | python ../generator/generate.py ../docker/Dockerfile.paddle-py38-cu113 paddle python==3.8 --cuda-ver 11.3.1 --cudnn-ver 8
16 | python ../generator/generate.py ../docker/Dockerfile.all-py38-cu113 tensorflow mxnet keras pytorch chainer darknet paddle python==3.8 onnx jupyterlab --cuda-ver 11.3.1 --cudnn-ver 8
17 |
--------------------------------------------------------------------------------
/scripts/make-ci.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """Generate dockerfiles & CI configuration."""
4 |
5 | import os
6 | import textwrap
7 |
8 |
9 | def indent(n, s):
10 | prefix = ' ' * 4 * n
11 | return ''.join(prefix + l for l in s.splitlines(True))
12 |
13 |
14 | def get_tags(postfix,
15 | default_mod='all',
16 | default_platform='cu',
17 | default_lang='py'):
18 |
19 | def is_default_mod(mod):
20 | return mod and default_mod in mod
21 | def is_default_platform(platform):
22 | return platform and default_platform in platform
23 | def is_default_lang(lang):
24 | return lang and default_lang in lang
25 |
26 | terms = postfix.split('-')
27 | if len(terms) == 2:
28 | mod, platform = terms
29 | pyver = None
30 | else:
31 | mod = '-'.join(terms[:-2])
32 | pyver, platform = terms[-2], terms[-1]
33 |
34 | tags = [postfix]
35 | if is_default_platform(platform):
36 | tags.append('-'.join(filter(None, (mod, pyver))))
37 | if is_default_lang(pyver):
38 | tags.append('-'.join(filter(None, (mod, platform))))
39 | if is_default_mod(mod):
40 | tags.append('-'.join(filter(None, (pyver, platform))))
41 | if is_default_platform(platform) and is_default_lang(pyver):
42 | tags.append(mod)
43 | if is_default_mod(mod) and is_default_lang(pyver):
44 | tags.append(platform)
45 | if is_default_mod(mod) and is_default_platform(platform):
46 | tags.append(pyver)
47 |     if is_default_mod(mod) and is_default_platform(platform) and is_default_lang(pyver):
48 |         tags.append('latest')
49 |
50 | if mod == 'all':
51 | for t in list(tags):
52 | t = t.replace('all', 'all-jupyter')
53 | if t not in tags:
54 | tags.append(t)
55 |
56 | # for t in list(tags):
57 | # if 'latest' not in t:
58 | # tags.append('%s-ver%s' % (t, datetime.datetime.now().strftime('%y%m%d')))
59 |
60 | return tags
61 |
62 |
63 | def get_job(tags):
64 | job_name = '_'.join(tags)[:99]
65 | build_scripts = indent(1, textwrap.dedent('''
66 | %s:
67 | runs-on: ubuntu-latest
68 | steps:
69 | - uses: actions/checkout@master
70 | - name: Free disk space
71 | run: |
72 | df -h
73 | sudo apt-get autoremove -y
74 | sudo apt-get clean
75 | sudo swapoff -a
76 | sudo rm -f /swapfile
77 | docker rmi $(docker image ls -aq)
78 | df -h
79 | - name: Build docker image
80 | run: docker build %s -f docker/Dockerfile.%s .
81 | - name: Deploy docker image
82 | run: |
83 | docker login -u ${{secrets.DOCKER_USER}} -p ${{secrets.DOCKER_PASS}}
84 | ''' % (
85 | job_name,
86 | ' '.join('-t ${{secrets.DOCKER_REPO}}:%s' % tag for tag in tags),
87 | tags[0])))
88 | is_all = False
89 | is_cpu = False
90 | for tag in tags:
91 | build_scripts += indent(4, 'docker push ${{secrets.DOCKER_REPO}}:%s\n' % tag)
92 | if 'all' in tag:
93 | is_all = True
94 | if 'cpu' in tag:
95 | is_cpu = True
96 | if is_all and is_cpu:
97 | test_scripts = textwrap.dedent('''
98 | import tensorflow as m; print(m.__name__, ':', m.__version__);
99 | import mxnet as m; print(m.__name__, ':', m.__version__);
100 | from tensorflow import keras as m; print(m.__name__, ':', m.__version__);
101 | import torch as m; print(m.__name__, ':', m.__version__);
102 | import chainer as m; print(m.__name__, ':', m.__version__);
103 | import paddle as m; print(m.__name__, ':', m.__version__);
104 | ''').replace('\n', '')
105 | run_prefix = '- run: docker run ${{secrets.DOCKER_REPO}}:%s ' % tags[0]
106 | build_scripts += indent(3, textwrap.dedent('''
107 | %s python -c "%s"
108 | %s darknet
109 | ''' % (run_prefix, test_scripts, run_prefix)))
110 |
111 | build_scripts += '\n'
112 | return job_name, build_scripts
113 |
114 |
115 | def write(f, scripts):
116 | for line in scripts.splitlines():
117 | f.write(line.rstrip())
118 | f.write('\n')
119 |
120 |
121 | def generate(ci_fname):
122 | with open(ci_fname, 'w') as f:
123 | write(f, textwrap.dedent('''
124 | name: deepo CI
125 | on: [push]
126 | jobs:
127 | ''')[1:])
128 |
129 | job_names = []
130 | for fn in os.listdir(os.path.join('..', 'docker')):
131 | postfix = fn.split('.')[-1]
132 | tags = get_tags(postfix)
133 | job_name, build_scripts = get_job(tags)
134 | job_names.append(job_name)
135 |
136 | with open(ci_fname, 'a') as f:
137 | write(f, build_scripts)
138 |
139 |
140 | if __name__ == '__main__':
141 | generate('../.github/workflows/dockerimage.yml')
142 |
--------------------------------------------------------------------------------
/scripts/make-gen-docker.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """Generate scripts for generating dockerfiles."""
4 |
5 | candidate_modules = [
6 | 'tensorflow',
7 | 'mxnet',
8 | 'keras',
9 | 'pytorch',
10 | 'chainer',
11 | 'darknet',
12 | 'paddle',
13 | # 'caffe',
14 | # 'theano',
15 | # 'lasagne',
16 | # 'cntk',
17 | # 'sonnet',
18 | # 'torch',
19 | ]
20 |
21 | non_python_modules = [
22 | 'torch',
23 | 'darknet',
24 | ]
25 |
26 | deprecated_modules = {
27 | 'torch': ('10.2', '7'),
28 | }
29 |
30 | pyvers = [
31 | # '2.7',
32 | # '3.6',
33 | '3.8',
34 | ]
35 |
36 |
37 | def get_command(modules, postfix, cuda_ver, cudnn_ver):
38 | cuver = 'cpu' if cuda_ver is None else 'cu%s' % ''.join(cuda_ver.split('.')[:2])
39 | postfix += '-%s' % cuver
40 | return 'python ../generator/generate.py ../docker/Dockerfile.%s %s%s%s\n' % (
41 | postfix,
42 | ' '.join(m for m in modules),
43 | '' if cuda_ver is None else ' --cuda-ver %s' % cuda_ver,
44 | '' if cudnn_ver is None else ' --cudnn-ver %s' % cudnn_ver,
45 | )
46 |
47 |
48 | def generate(f, cuda_ver=None, cudnn_ver=None):
49 |
50 | # single module
51 | for module in candidate_modules:
52 | _cuda_ver, _cudnn_ver = cuda_ver, cudnn_ver
53 | if None not in (cuda_ver, cudnn_ver): # with cuda
54 | if module in deprecated_modules:
55 | _cuda_ver, _cudnn_ver = deprecated_modules[module]
56 | if module in non_python_modules:
57 | modules = [module]
58 | f.write(get_command(modules, module, _cuda_ver, _cudnn_ver))
59 | else:
60 | for pyver in pyvers:
61 | modules = [module, 'python==%s' % pyver]
62 | postfix = '%s-py%s' % (
63 | module, pyver.replace('.', ''))
64 | f.write(get_command(modules, postfix, _cuda_ver, _cudnn_ver))
65 |
66 | # all modules
67 | for pyver in pyvers:
68 | modules = list(candidate_modules)
69 | if None not in (cuda_ver, cudnn_ver): # with cuda
70 | modules = [m for m in modules if m not in deprecated_modules]
71 | modules += ['python==%s' % pyver, 'onnx', 'jupyterlab']
72 | postfix = 'all-py%s' % pyver.replace('.', '')
73 | f.write(get_command(modules, postfix, cuda_ver, cudnn_ver))
74 |
75 |
76 | if __name__ == '__main__':
77 | with open('gen-docker.sh', 'w') as f:
78 | generate(f)
79 | # generate(f, '8.0', '6')
80 | # generate(f, '9.0', '7')
81 | # generate(f, '10.1', '7')
82 | # generate(f, '10.2', '7')
83 | # generate(f, '11.1', '8')
84 | generate(f, '11.3.1', '8')
85 |
--------------------------------------------------------------------------------