├── .editorconfig
├── .gitignore
├── LICENSE
├── README.md
├── scripts
│   ├── .ipynb_checkpoints
│   │   ├── build_all-checkpoint.py
│   │   └── build_config-checkpoint.yml
│   ├── build_all.py
│   └── build_config.yml
└── tensorflow
    ├── centos-6.6
    │   ├── Dockerfile
    │   ├── build.sh
    │   ├── build2.sh
    │   ├── cuda.sh
    │   └── docker-compose.yml
    ├── centos-7.4
    │   ├── Dockerfile
    │   ├── build.sh
    │   ├── build2.sh
    │   ├── cuda.sh
    │   └── docker-compose.yml
    ├── ubuntu-16.04
    │   ├── Dockerfile
    │   ├── build.sh
    │   ├── cuda.sh
    │   └── docker-compose.yml
    └── ubuntu-18.10
        ├── Dockerfile
        ├── build.sh
        ├── cuda.sh
        └── docker-compose.yml

--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | root = true
2 | 
3 | [*]
4 | end_of_line = lf
5 | insert_final_newline = true
6 | charset = utf-8
7 | indent_style = space
8 | indent_size = 4
9 | 
10 | [*.yml]
11 | indent_size = 2
12 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | wheels/
2 | cudnn/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 | 
3 | Copyright (c) 2018 Hadrien Mary
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Compile TensorFlow on Docker
2 | 
3 | Docker images to compile TensorFlow yourself.
4 | 
5 | TensorFlow only provides a limited set of official builds, and compiling it yourself for a particular configuration can be challenging. With the `Dockerfile`s in this repository, you should be able to compile TensorFlow on any Linux platform that runs Docker.
6 | 
7 | Compilation images are provided for Ubuntu 18.10, Ubuntu 16.04, CentOS 7.4 and CentOS 6.6.
8 | 
9 | ## Requirements
10 | 
11 | - `docker`
12 | - `docker-compose`
13 | 
14 | ## Usage
15 | 
16 | - Clone this repository:
17 | 
18 | ```bash
19 | git clone https://github.com/hadim/docker-tensorflow-builder.git
20 | ```
21 | 
22 | ### TensorFlow CPU
23 | 
24 | - Edit the `build.sh` file to modify TensorFlow compilation parameters. Then launch the build:
25 | 
26 | ```bash
27 | LINUX_DISTRO="ubuntu-16.04"
28 | # or LINUX_DISTRO="ubuntu-18.10"
29 | # or LINUX_DISTRO="centos-7.4"
30 | # or LINUX_DISTRO="centos-6.6"
31 | cd "tensorflow/$LINUX_DISTRO"
32 | 
33 | # Set env variables
34 | export PYTHON_VERSION=3.6
35 | export TF_VERSION_GIT_TAG=v1.13.1
36 | export BAZEL_VERSION=0.19
37 | export USE_GPU=0
38 | 
39 | # Build the Docker image
40 | docker-compose build
41 | 
42 | # Start the compilation
43 | docker-compose run tf
44 | 
45 | # You can also do:
46 | # docker-compose run tf bash
47 | # bash build.sh
48 | ```
49 | 
50 | ### TensorFlow GPU
51 | 
52 | - Edit the `build.sh` file to modify TensorFlow compilation parameters. Then launch the build:
53 | 
54 | ```bash
55 | LINUX_DISTRO="ubuntu-16.04"
56 | # or LINUX_DISTRO="ubuntu-18.10"
57 | # or LINUX_DISTRO="centos-7.4"
58 | # or LINUX_DISTRO="centos-6.6"
59 | cd "tensorflow/$LINUX_DISTRO"
60 | 
61 | # Set env variables
62 | export PYTHON_VERSION=3.6
63 | export TF_VERSION_GIT_TAG=v1.13.1
64 | export BAZEL_VERSION=0.19
65 | export USE_GPU=1
66 | export CUDA_VERSION=10.0
67 | export CUDNN_VERSION=7.5
68 | export NCCL_VERSION=2.4
69 | 
70 | # Build the Docker image
71 | docker-compose build
72 | 
73 | # Start the compilation
74 | docker-compose run tf
75 | 
76 | # You can also do:
77 | # docker-compose run tf bash
78 | # bash build.sh
79 | ```
80 | 
81 | ---
82 | 
83 | - Refer to the [tested build configurations](https://www.tensorflow.org/install/source#tested_build_configurations) to find out which `BAZEL_VERSION` you need for a given TensorFlow version.
84 | - Be patient; the compilation can take a long time.
85 | - Enjoy your Python wheels in the `wheels/` folder.
86 | - *Don't forget to remove the container to free up space after the build: `docker-compose rm --force`.*
87 | 
88 | ## Builds
89 | 
90 | | TensorFlow | Python | Distribution | Bazel | CUDA | cuDNN | NCCL | Comment |
91 | | --- | --- | --- | --- | --- | --- | --- | --- |
92 | | v2.0.0-alpha0 | 3.6 | Ubuntu 18.10 | 0.20 | 10.0 | 7.5 | 2.4 | seg fault error |
93 | | v2.0.0-alpha0 | 3.6 | Ubuntu 18.10 | 0.20 | - | - | - | OK |
94 | | v2.0.0-alpha0 | 3.6 | Ubuntu 16.04 | 0.20 | 10.0 | 7.5 | 2.4 | TODO |
95 | | v2.0.0-alpha0 | 3.6 | Ubuntu 16.04 | 0.20 | - | - | - | TODO |
96 | | 1.9.0 | 3.6 | Ubuntu 16.04 | 0.19 | - | - | - | OK |
97 | | 1.9.0 | 3.6 | Ubuntu 16.04 | 0.19 | 9.0 | 7.1 | - | OK |
98 | | 1.9.0 | 3.6 | Ubuntu 16.04 | 0.19 | 9.1 | 7.1 | - | OK |
99 | | 1.9.0 | 3.6 | Ubuntu 16.04 | 0.19 | 9.2 | 7.1 | - | OK |
100 | | 1.9.0 | 3.6 | CentOS 6.6 | 0.19 | - | - | - | OK |
101 | | 1.9.0 | 3.6 | CentOS 6.6 | 0.19 | 9.0 | 7.1 | - | OK |
102 | | 1.9.0 | 3.6 | CentOS 6.6 | 0.19 | 9.1 | 7.1 | - | OK |
103 | | 1.9.0 | 3.6 | CentOS 6.6 | 0.19 | 9.2 | 7.1 | - | OK |
104 | 
105 | ## Authors
106 | 
107 | - Hadrien Mary
108 | 
109 | ## License
110 | 
111 | MIT License. See [LICENSE](LICENSE).
112 | 
--------------------------------------------------------------------------------
/scripts/.ipynb_checkpoints/build_all-checkpoint.py:
--------------------------------------------------------------------------------
1 | # This is a work-in-progress.
2 | # The idea is to generate a combination of builds
3 | # with different parameters.
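# As a concrete illustration (values taken from build_config.yml, keys shown in
# arbitrary dict order), the `matrix` section expands with itertools.product into
# one build dict per combination, e.g.
#   {'python_version': 3.6, 'tf_version': 'v2.0.0-alpha0', 'use_gpu': True,
#    'cuda_version': 10.0, 'cudnn_version': 7.5}
# before the `exclude` rules are applied.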
4 | 5 | from pathlib import Path 6 | import yaml 7 | import itertools 8 | 9 | base_dir = Path('../') 10 | config_path = Path('build_config.yml') 11 | 12 | config = yaml.load(open(config_path)) 13 | matrix = config['matrix'] 14 | excluded = config['exclude'] 15 | 16 | # List all possible combinations 17 | matrix_keys = sorted(matrix) 18 | temp_builds = list(itertools.product(*(matrix[key] for key in matrix_keys))) 19 | 20 | # Exclude unwanted builds. 21 | builds = [] 22 | for temp_build in temp_builds: 23 | build = dict(zip(matrix_keys, temp_build)) 24 | 25 | do_exclude = False 26 | for exclude_build in excluded: 27 | exclude_count = 0 28 | for k, v in exclude_build.items(): 29 | if build[k] == v: 30 | exclude_count += 1 31 | if exclude_count == len(exclude_build): 32 | do_exclude = True 33 | 34 | if not do_exclude: 35 | builds.append(build) 36 | 37 | print(builds) -------------------------------------------------------------------------------- /scripts/.ipynb_checkpoints/build_config-checkpoint.yml: -------------------------------------------------------------------------------- 1 | matrix: 2 | python_version: 3 | - 3.6 4 | - 3.7 5 | tf_version: 6 | - v1.13.1 7 | - v2.0.0-alpha0 8 | use_gpu: 9 | - True 10 | - False 11 | cuda_version: 12 | - 9.2 13 | - 10.0 14 | cudnn_version: 15 | - 7.1 16 | - 7.5 17 | exclude: 18 | - tf_version: v1.13.1 19 | cuda_version: 10.0 20 | - cudnn_version: 7.1 21 | cuda_version: 10.0 -------------------------------------------------------------------------------- /scripts/build_all.py: -------------------------------------------------------------------------------- 1 | # This is a work-in-progress. 2 | # The idea is to generate a combination of builds 3 | # with different parameters. 4 | 5 | from pathlib import Path 6 | import yaml 7 | import itertools 8 | 9 | base_dir = Path('../') 10 | config_path = Path('build_config.yml') 11 | 12 | config = yaml.load(open(config_path)) 13 | matrix = config['matrix'] 14 | excluded = config['exclude'] 15 | 16 | # List all possible combinations 17 | matrix_keys = sorted(matrix) 18 | temp_builds = list(itertools.product(*(matrix[key] for key in matrix_keys))) 19 | 20 | # Exclude unwanted builds. 
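# A build is dropped when it matches every key/value pair of at least one entry
# in the `exclude` list. With the config in build_config.yml, for example,
# {'tf_version': 'v1.13.1', 'cuda_version': 10.0, ...} is excluded by the first
# rule, while {'tf_version': 'v1.13.1', 'cuda_version': 9.2, ...} is kept.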
21 | builds = [] 22 | for temp_build in temp_builds: 23 | build = dict(zip(matrix_keys, temp_build)) 24 | 25 | do_exclude = False 26 | for exclude_build in excluded: 27 | exclude_count = 0 28 | for k, v in exclude_build.items(): 29 | if build[k] == v: 30 | exclude_count += 1 31 | if exclude_count == len(exclude_build): 32 | do_exclude = True 33 | 34 | if not do_exclude: 35 | builds.append(build) 36 | 37 | print(builds) 38 | -------------------------------------------------------------------------------- /scripts/build_config.yml: -------------------------------------------------------------------------------- 1 | matrix: 2 | python_version: 3 | - 3.6 4 | - 3.7 5 | tf_version: 6 | - v1.13.1 7 | - v2.0.0-alpha0 8 | use_gpu: 9 | - True 10 | - False 11 | cuda_version: 12 | - 9.2 13 | - 10.0 14 | cudnn_version: 15 | - 7.1 16 | - 7.5 17 | exclude: 18 | - tf_version: v1.13.1 19 | cuda_version: 10.0 20 | - cudnn_version: 7.1 21 | cuda_version: 10.0 -------------------------------------------------------------------------------- /tensorflow/centos-6.6/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:6.6 2 | 3 | RUN yum update -y && \ 4 | yum install -y \ 5 | curl \ 6 | git \ 7 | wget \ 8 | tar \ 9 | bzip2 \ 10 | patch \ 11 | gcc \ 12 | gcc-c++ \ 13 | which && \ 14 | yum -y install centos-release-scl && \ 15 | yum -y install devtoolset-6-gcc devtoolset-6-gcc-c++ && \ 16 | yum clean all 17 | 18 | # Install Anaconda 19 | WORKDIR / 20 | RUN wget "https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh" -O "miniconda.sh" && \ 21 | bash "miniconda.sh" -b -p "/conda" && \ 22 | rm -f miniconda.sh && \ 23 | echo PATH='/conda/bin:$PATH' >> /root/.bashrc && \ 24 | /conda/bin/conda config --add channels conda-forge && \ 25 | /conda/bin/conda update --yes -n base conda && \ 26 | /conda/bin/conda update --all --yes 27 | 28 | COPY build.sh /build.sh 29 | COPY build2.sh /build2.sh 30 | COPY cuda.sh /cuda.sh 31 | 32 | CMD bash build.sh 33 | -------------------------------------------------------------------------------- /tensorflow/centos-6.6/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | source /root/.bashrc 5 | 6 | if [ "$USE_GPU" -eq "1" ]; then 7 | export CUDA_HOME="/usr/local/cuda" 8 | alias sudo="" 9 | source cuda.sh 10 | cuda.install $CUDA_VERSION $CUDNN_VERSION $NCCL_VERSION 11 | cd / 12 | fi 13 | 14 | # Enable GCC 6 15 | chmod +x /build2.sh 16 | scl enable devtoolset-6 ./build2.sh 17 | -------------------------------------------------------------------------------- /tensorflow/centos-6.6/build2.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -ex 3 | 4 | gcc --version 5 | 6 | # Install an appropriate Python environment 7 | conda config --add channels conda-forge 8 | conda create --yes -n tensorflow python==$PYTHON_VERSION 9 | source activate tensorflow 10 | conda install --yes numpy wheel bazel==$BAZEL_VERSION 11 | conda install --yes git 12 | pip install keras-applications keras-preprocessing 13 | 14 | # Compile TensorFlow 15 | 16 | # Here you can change the TensorFlow version you want to build. 17 | # You can also tweak the optimizations and various parameters for the build compilation. 18 | # See https://www.tensorflow.org/install/install_sources for more details. 
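# For example (illustrative overrides only): TF_VERSION_GIT_TAG normally comes from
# docker-compose and CC_OPT_FLAGS is set to "-march=native" further down, but you
# could pin another release and target a specific CPU generation with:
#   export TF_VERSION_GIT_TAG=v1.12.0
#   export CC_OPT_FLAGS="-march=broadwell -O3"
# TF_VERSION_GIT_TAG is consumed by the `git clone --branch` call below, and
# CC_OPT_FLAGS is read by ./configure.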
19 | 20 | cd / 21 | rm -fr tensorflow/ 22 | git clone --depth 1 --branch $TF_VERSION_GIT_TAG "https://github.com/tensorflow/tensorflow.git" 23 | 24 | TF_ROOT=/tensorflow 25 | cd $TF_ROOT 26 | 27 | # Python path options 28 | export PYTHON_BIN_PATH=$(which python) 29 | export PYTHON_LIB_PATH="$($PYTHON_BIN_PATH -c 'import site; print(site.getsitepackages()[0])')" 30 | export PYTHONPATH=${TF_ROOT}/lib 31 | export PYTHON_ARG=${TF_ROOT}/lib 32 | 33 | # Compilation parameters 34 | export TF_NEED_CUDA=0 35 | export TF_NEED_GCP=1 36 | export TF_CUDA_COMPUTE_CAPABILITIES=5.2,3.5 37 | export TF_NEED_HDFS=1 38 | export TF_NEED_OPENCL=0 39 | export TF_NEED_JEMALLOC=0 # Need to be disabled on CentOS 6.6 40 | export TF_ENABLE_XLA=0 41 | export TF_NEED_VERBS=0 42 | export TF_CUDA_CLANG=0 43 | export TF_DOWNLOAD_CLANG=0 44 | export TF_NEED_MKL=0 45 | export TF_DOWNLOAD_MKL=0 46 | export TF_NEED_MPI=0 47 | export TF_NEED_S3=1 48 | export TF_NEED_KAFKA=1 49 | export TF_NEED_GDR=0 50 | export TF_NEED_OPENCL_SYCL=0 51 | export TF_SET_ANDROID_WORKSPACE=0 52 | export TF_NEED_AWS=0 53 | export TF_NEED_IGNITE=0 54 | export TF_NEED_ROCM=0 55 | 56 | # Compiler options 57 | export GCC_HOST_COMPILER_PATH=$(which gcc) 58 | export LDFLAGS="-lm -lrt" 59 | 60 | # Here you can edit this variable to set any optimizations you want. 61 | export CC_OPT_FLAGS="-march=native" 62 | 63 | if [ "$USE_GPU" -eq "1" ]; then 64 | # Cuda parameters 65 | export CUDA_TOOLKIT_PATH=/usr/local/cuda 66 | export CUDNN_INSTALL_PATH=/usr/local/cuda 67 | export TF_CUDA_VERSION="$CUDA_VERSION" 68 | export TF_CUDNN_VERSION="$CUDNN_VERSION" 69 | export TF_NEED_CUDA=1 70 | export TF_NEED_TENSORRT=0 71 | export TF_NCCL_VERSION=1.3 72 | 73 | # Those two lines are important for the linking step. 74 | export LD_LIBRARY_PATH="$CUDA_TOOLKIT_PATH/lib64:${LD_LIBRARY_PATH}" 75 | ldconfig 76 | fi 77 | 78 | # Compilation 79 | ./configure 80 | 81 | if [ "$USE_GPU" -eq "1" ]; then 82 | 83 | bazel build --config=opt \ 84 | --config=cuda \ 85 | --linkopt="-lrt" \ 86 | --linkopt="-lm" \ 87 | --host_linkopt="-lrt" \ 88 | --host_linkopt="-lm" \ 89 | --action_env="LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" \ 90 | //tensorflow/tools/pip_package:build_pip_package 91 | 92 | PACKAGE_NAME=tensorflow-gpu 93 | SUBFOLDER_NAME="${TF_VERSION_GIT_TAG}-py${PYTHON_VERSION}-cuda${TF_CUDA_VERSION}-cudnn${TF_CUDNN_VERSION}" 94 | else 95 | 96 | bazel build --config=opt \ 97 | --linkopt="-lrt" \ 98 | --linkopt="-lm" \ 99 | --host_linkopt="-lrt" \ 100 | --host_linkopt="-lm" \ 101 | --action_env="LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" \ 102 | //tensorflow/tools/pip_package:build_pip_package 103 | 104 | PACKAGE_NAME=tensorflow 105 | SUBFOLDER_NAME="${TF_VERSION_GIT_TAG}-py${PYTHON_VERSION}" 106 | fi 107 | 108 | mkdir -p "/wheels/$SUBFOLDER_NAME" 109 | 110 | # Project name can only be set for TF > 1.8 111 | bazel-bin/tensorflow/tools/pip_package/build_pip_package "/wheels/$SUBFOLDER_NAME" --project_name "$PACKAGE_NAME" 112 | 113 | # Use the following for TF <= 1.8 114 | #bazel-bin/tensorflow/tools/pip_package/build_pip_package "/wheels/$SUBFOLDER_NAME" 115 | 116 | # Fix wheel folder permissions 117 | chmod -R 777 /wheels/ 118 | -------------------------------------------------------------------------------- /tensorflow/centos-6.6/cuda.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Cuda and friends installation done right. 
4 | # Switch default Cuda version using symbolic link: cuda.switch 9.2 5 | # Install Cuda: cuda.install.cuda 10.0 6 | # Install cuDNN to CUDA_HOME: cuda.install.cudnn 7.5 7 | # Install NCCL to CUDA_HOME: cuda.install.nccl 2.4 8 | # Install Cuda, cuDNN and NCCL: cuda.install 10.0 7.5 2.4 9 | 10 | # Author: Hadrien Mary 11 | # License: MIT License 12 | # Date: 11/03/2019 13 | 14 | is_cuda_home_set() { 15 | if [ -z "$CUDA_HOME" ]; then 16 | echo "CUDA_HOME is not set. Please set it:" 17 | echo 'export CUDA_HOME="/usr/local/cuda/"' 18 | return 1 19 | fi 20 | return 0 21 | } 22 | 23 | is_cuda_home_symbolic_link() { 24 | if [[ -e "${CUDA_HOME}" && -L "${CUDA_HOME}" ]]; then 25 | return 0 26 | elif [[ ! -d "${CUDA_HOME}" && ! -f "${CUDA_HOME}" ]]; then 27 | return 0 28 | else 29 | echo "CUDA_HOME is not a symbolic link." 30 | echo "Please make it a symbolic link." 31 | return 1 32 | fi 33 | } 34 | 35 | guess_cuda_version() { 36 | if ! is_cuda_home_set; then 37 | return 1 38 | fi 39 | 40 | if ! is_cuda_home_symbolic_link; then 41 | return 1 42 | fi 43 | 44 | POSSIBLE_CUDA_VERSION=$(cat "$CUDA_HOME/version.txt" | cut -d' ' -f 3 | cut -d'.' -f 1-2) 45 | echo $POSSIBLE_CUDA_VERSION 46 | } 47 | 48 | cuda.see() { 49 | if ! is_cuda_home_set; then 50 | return 1 51 | fi 52 | 53 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 54 | ls -l $PARENT_BASE_DIR 55 | return 0 56 | } 57 | 58 | cuda.switch() { 59 | if ! is_cuda_home_set; then 60 | return 1 61 | fi 62 | 63 | if ! is_cuda_home_symbolic_link; then 64 | return 1 65 | fi 66 | 67 | if [ -z "$1" ]; then 68 | echo "Please specify a Cuda version." 69 | echo "Usage: cuda.switch CUDA_VERSION" 70 | echo "Cuda version available: 9.0, 9.1, 9.2, 10.0, 10.1" 71 | return 1 72 | fi 73 | 74 | NEW_CUDA_VERSION="$1" 75 | NEW_CUDA_HOME="$CUDA_HOME-$NEW_CUDA_VERSION" 76 | 77 | if [ ! -d $NEW_CUDA_HOME ]; then 78 | echo "Cuda $NEW_CUDA_VERSION doesn't exist at $NEW_CUDA_HOME." 79 | return 1 80 | fi 81 | 82 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 83 | if [ ! -w "$PARENT_BASE_DIR" ]; then 84 | sudo rm -f $CUDA_HOME 85 | sudo ln -s $NEW_CUDA_HOME $CUDA_HOME 86 | else 87 | rm -f $CUDA_HOME 88 | ln -s $NEW_CUDA_HOME $CUDA_HOME 89 | fi 90 | echo "Default Cuda version is now $NEW_CUDA_VERSION at $NEW_CUDA_HOME" 91 | } 92 | 93 | cuda.install() { 94 | cuda.install.cuda $1 95 | cuda.install.cudnn $2 96 | cuda.install.nccl $3 97 | } 98 | 99 | cuda.install.cuda() { 100 | 101 | CUDA_VERSION="$1" 102 | if [ -z "$CUDA_VERSION" ]; then 103 | echo "Please specify a Cuda version." 104 | echo "Usage: cuda.install.cuda CUDA_VERSION" 105 | echo "Example: cuda.install.cuda 10.0" 106 | echo "Cuda version available: 9.0, 9.1, 9.2, 10.0, 9.2." 107 | return 1 108 | fi 109 | 110 | if ! is_cuda_home_set; then 111 | return 1 112 | fi 113 | 114 | if ! is_cuda_home_symbolic_link; then 115 | return 1 116 | fi 117 | 118 | CUDA_PATH="$CUDA_HOME-$CUDA_VERSION" 119 | if [ -d $CUDA_PATH ]; then 120 | echo "$CUDA_PATH exists. Please remove the previous Cuda folder first." 
121 | return 1 122 | fi 123 | 124 | # Setup Cuda URL 125 | if [ "$CUDA_VERSION" = "9.0" ]; then 126 | CUDA_URL="https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda_9.0.176_384.81_linux-run" 127 | elif [ "$CUDA_VERSION" = "9.1" ]; then 128 | CUDA_URL="https://developer.nvidia.com/compute/cuda/9.1/Prod/local_installers/cuda_9.1.85_387.26_linux" 129 | elif [ "$CUDA_VERSION" = "9.2" ]; then 130 | CUDA_URL="https://developer.nvidia.com/compute/cuda/9.2/Prod/local_installers/cuda_9.2.88_396.26_linux" 131 | elif [ "$CUDA_VERSION" = "10.0" ]; then 132 | CUDA_URL="https://developer.nvidia.com/compute/cuda/10.0/Prod/local_installers/cuda_10.0.130_410.48_linux" 133 | elif [ "$CUDA_VERSION" = "10.1" ]; then 134 | CUDA_URL="https://developer.nvidia.com/compute/cuda/10.1/Prod/local_installers/cuda_10.1.105_418.39_linux.run" 135 | else 136 | echo "Error: You need to set CUDA_VERSION to 9.0, 9.1, 9.2, 10.0 or 10.1." 137 | return 1 138 | fi 139 | 140 | CUDA_INSTALLER_PATH="/tmp/cuda.run" 141 | 142 | echo "Download Cuda $CUDA_VERSION." 143 | wget "$CUDA_URL" -O "$CUDA_INSTALLER_PATH" 144 | 145 | echo "Install Cuda $CUDA_VERSION." 146 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 147 | if [ ! -w "$PARENT_BASE_DIR" ]; then 148 | sudo bash "$CUDA_INSTALLER_PATH" --silent --toolkit --override --toolkitpath="$CUDA_PATH" 149 | else 150 | bash "$CUDA_INSTALLER_PATH" --silent --toolkit --override --toolkitpath="$CUDA_PATH" 151 | fi 152 | rm -f "$CUDA_INSTALLER_PATH" 153 | 154 | # Set the symbolic link. 155 | cuda.switch $CUDA_VERSION 156 | 157 | echo "Cuda $CUDA_VERSION is installed at $CUDA_PATH." 158 | 159 | return 0 160 | } 161 | 162 | cuda.install.cudnn() { 163 | # Install cuDNN in $CUDA_HOME 164 | 165 | if ! is_cuda_home_set; then 166 | return 1 167 | fi 168 | 169 | if ! is_cuda_home_symbolic_link; then 170 | return 1 171 | fi 172 | 173 | CUDA_VERSION="$(guess_cuda_version)" 174 | if [ -z "$CUDA_VERSION" ]; then 175 | echo "Can't guess the Cuda version from $CUDA_HOME." 176 | return 1 177 | fi 178 | 179 | CUDNN_VERSION="$1" 180 | if [ -z "$CUDNN_VERSION" ]; then 181 | echo "Please specify a cuDNN version." 182 | echo "Usage: cuda.install.cudnn CUDNN_VERSION" 183 | echo "Example: cuda.install.cudnn 7.5" 184 | echo "cuDNN version available: 7.0, 7.1, 7.4, 7.5." 185 | return 1 186 | fi 187 | 188 | # cuDNN 7.0 189 | if [ "$CUDNN_VERSION" = "7.0" ]; then 190 | 191 | if [ "$CUDA_VERSION" = "9.0" ]; then 192 | CUDNN_VERSION_DETAILED="7.0.5.15" 193 | elif [ "$CUDA_VERSION" = "9.1" ]; then 194 | CUDNN_VERSION_DETAILED="7.0.5.15" 195 | elif [ -n "$CUDNN_VERSION" ]; then 196 | echo "Error: cuDNN $CUDNN_VERSION is not compatible with Cuda $CUDA_VERSION." 197 | return 1 198 | fi 199 | 200 | # cuDNN 7.1 201 | elif [ "$CUDNN_VERSION" = "7.1" ]; then 202 | 203 | if [ "$CUDA_VERSION" = "9.0" ]; then 204 | CUDNN_VERSION_DETAILED="7.1.4.18" 205 | elif [ "$CUDA_VERSION" = "9.2" ]; then 206 | CUDNN_VERSION_DETAILED="7.1.4.18" 207 | elif [ -n "$CUDNN_VERSION" ]; then 208 | echo "Error: cuDNN $CUDNN_VERSION is not compatible with Cuda $CUDA_VERSION." 
209 | return 1 210 | fi 211 | 212 | # cuDNN 7.4 213 | elif [ "$CUDNN_VERSION" = "7.4" ]; then 214 | 215 | if [ "$CUDA_VERSION" = "9.0" ]; then 216 | CUDNN_VERSION_DETAILED="7.4.2.24" 217 | elif [ "$CUDA_VERSION" = "9.2" ]; then 218 | CUDNN_VERSION_DETAILED="7.4.2.24" 219 | elif [ "$CUDA_VERSION" = "10.0" ]; then 220 | CUDNN_VERSION_DETAILED="7.4.2.24" 221 | elif [ -n "$CUDNN_VERSION" ]; then 222 | echo "Error: cuDNN $CUDNN_VERSION is not compatible with Cuda $CUDA_VERSION." 223 | return 1 224 | fi 225 | 226 | # cuDNN 7.5 227 | elif [ "$CUDNN_VERSION" = "7.5" ]; then 228 | 229 | if [ "$CUDA_VERSION" = "9.0" ]; then 230 | CUDNN_VERSION_DETAILED="7.5.0.56" 231 | elif [ "$CUDA_VERSION" = "9.2" ]; then 232 | CUDNN_VERSION_DETAILED="7.5.0.56" 233 | elif [ "$CUDA_VERSION" = "10.0" ]; then 234 | CUDNN_VERSION_DETAILED="7.5.0.56" 235 | elif [ "$CUDA_VERSION" = "10.1" ]; then 236 | CUDNN_VERSION_DETAILED="7.5.0.56" 237 | elif [ -n "$CUDNN_VERSION" ]; then 238 | echo "Error: cuDNN $CUDNN_VERSION is not compatible with Cuda $CUDA_VERSION." 239 | return 1 240 | fi 241 | 242 | elif [ -n "$CUDNN_VERSION" ]; then 243 | echo "Error: You need to set CUDNN_VERSION to 7.0, 7.1, 7.4 or 7.5." 244 | return 1 245 | fi 246 | 247 | # Setup URLs 248 | CUDNN_URL="https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/libcudnn7_${CUDNN_VERSION_DETAILED}-1+cuda${CUDA_VERSION}_amd64.deb" 249 | CUDNN_URL_DEV="https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/libcudnn7-dev_${CUDNN_VERSION_DETAILED}-1+cuda${CUDA_VERSION}_amd64.deb" 250 | 251 | # Setup temporary paths 252 | CUDNN_TMP_PATH="/tmp/cudnn.deb" 253 | CUDNN_DEV_TMP_PATH="/tmp/cudnn-dev.deb" 254 | 255 | CUDNN_TMP_DIR_PATH="/tmp/cudnn" 256 | CUDNN_DEV_TMP_DIR_PATH="/tmp/cudnn-dev" 257 | 258 | echo "Download binaries." 259 | wget "$CUDNN_URL" -O "$CUDNN_TMP_PATH" 260 | wget "$CUDNN_URL_DEV" -O "$CUDNN_DEV_TMP_PATH" 261 | 262 | mkdir -p "$CUDNN_TMP_DIR_PATH" 263 | mkdir -p "$CUDNN_DEV_TMP_DIR_PATH" 264 | 265 | echo "Extract binaries." 266 | cd "$CUDNN_TMP_DIR_PATH" 267 | ar x "$CUDNN_TMP_PATH" 268 | tar -xJf data.tar.xz 269 | cd "$CUDNN_DEV_TMP_DIR_PATH" 270 | ar x "$CUDNN_DEV_TMP_PATH" 271 | tar -xJf data.tar.xz 272 | 273 | echo "Install cuDNN files." 274 | 275 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 276 | if [ ! 
-w "$PARENT_BASE_DIR" ]; then 277 | sudo mv $CUDNN_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libcudnn* "$CUDA_HOME/lib64/" 278 | sudo mv "$CUDNN_DEV_TMP_DIR_PATH/usr/include/x86_64-linux-gnu/cudnn_v7.h" "$CUDA_HOME/include/" 279 | sudo mv "$CUDNN_DEV_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libcudnn_static_v7.a" "$CUDA_HOME/lib64/" 280 | 281 | sudo rm -f "$CUDA_HOME/include/cudnn.h" 282 | sudo rm -f "$CUDA_HOME/lib64/libcudnn_static.a" 283 | 284 | sudo ln -s "$CUDA_HOME/include/cudnn_v7.h" "$CUDA_HOME/include/cudnn.h" 285 | sudo ln -s "$CUDA_HOME/lib64/libcudnn_static_v7.a" "$CUDA_HOME/lib64/libcudnn_static.a" 286 | else 287 | mv $CUDNN_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libcudnn* "$CUDA_HOME/lib64/" 288 | mv "$CUDNN_DEV_TMP_DIR_PATH/usr/include/x86_64-linux-gnu/cudnn_v7.h" "$CUDA_HOME/include/" 289 | mv "$CUDNN_DEV_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libcudnn_static_v7.a" "$CUDA_HOME/lib64/" 290 | 291 | rm -f "$CUDA_HOME/include/cudnn.h" 292 | rm -f "$CUDA_HOME/lib64/libcudnn_static.a" 293 | 294 | ln -s "$CUDA_HOME/include/cudnn_v7.h" "$CUDA_HOME/include/cudnn.h" 295 | ln -s "$CUDA_HOME/lib64/libcudnn_static_v7.a" "$CUDA_HOME/lib64/libcudnn_static.a" 296 | fi 297 | 298 | echo "Cleanup files." 299 | rm -fr "$CUDNN_TMP_DIR_PATH" 300 | rm -fr "$CUDNN_DEV_TMP_DIR_PATH" 301 | rm -f "$CUDNN_TMP_PATH" 302 | rm -f "$CUDNN_DEV_TMP_PATH" 303 | 304 | echo "cuDNN $CUDNN_VERSION is installed at $CUDA_HOME." 305 | } 306 | 307 | cuda.install.nccl() { 308 | # Install NCCL in $CUDA_HOME 309 | 310 | if ! is_cuda_home_set; then 311 | return 1 312 | fi 313 | 314 | if ! is_cuda_home_symbolic_link; then 315 | return 1 316 | fi 317 | 318 | CUDA_VERSION="$(guess_cuda_version)" 319 | if [ -z "$CUDA_VERSION" ]; then 320 | echo "Can't guess the Cuda version from $CUDA_HOME." 321 | return 1 322 | fi 323 | 324 | NCCL_VERSION="$1" 325 | if [ -z "$NCCL_VERSION" ]; then 326 | # echo "Please specify a NCCL version." 327 | # echo "Usage: cuda.install.nccl NCCL_VERSION" 328 | # echo "Example: cuda.install.nccl 2.4" 329 | # echo "NCCL version available: 2.1, 2.2, 2.3 and 2.4" 330 | # return 1 331 | # Default NCCL version 332 | NCCL_VERSION="2.4" 333 | fi 334 | 335 | # NCCL 2.1 336 | if [ "$NCCL_VERSION" = "2.1" ]; then 337 | 338 | 339 | if [ "$CUDA_VERSION" = "9.0" ]; then 340 | NCCL_VERSION_DETAILED="2.1.15-1" 341 | elif [ "$CUDA_VERSION" = "9.1" ]; then 342 | NCCL_VERSION_DETAILED="2.1.15-1" 343 | elif [ -n "$NCCL_VERSION" ]; then 344 | echo "Error: NCCL $NCCL_VERSION is not compatible with Cuda $CUDA_VERSION." 345 | return 1 346 | fi 347 | 348 | # NCCL 2.3 349 | elif [ "$NCCL_VERSION" = "2.2" ]; then 350 | 351 | # NCCL 2.2 352 | if [ "$CUDA_VERSION" = "9.0" ]; then 353 | NCCL_VERSION_DETAILED="2.2.13-1" 354 | elif [ "$CUDA_VERSION" = "9.2" ]; then 355 | NCCL_VERSION_DETAILED="2.2.13-1" 356 | elif [ -n "$NCCL_VERSION" ]; then 357 | echo "Error: NCCL $NCCL_VERSION is not compatible with Cuda $CUDA_VERSION." 358 | return 1 359 | fi 360 | 361 | # NCCL 2.3 362 | elif [ "$NCCL_VERSION" = "2.3" ]; then 363 | 364 | if [ "$CUDA_VERSION" = "9.0" ]; then 365 | NCCL_VERSION_DETAILED="2.3.7-1" 366 | elif [ "$CUDA_VERSION" = "9.2" ]; then 367 | NCCL_VERSION_DETAILED="2.3.7-1" 368 | elif [ "$CUDA_VERSION" = "10.0" ]; then 369 | NCCL_VERSION_DETAILED="2.3.7-1" 370 | elif [ -n "$NCCL_VERSION" ]; then 371 | echo "Error: NCCL $NCCL_VERSION is not compatible with Cuda $CUDA_VERSION." 
372 | return 1 373 | fi 374 | 375 | # NCCL 2.4 376 | elif [ "$NCCL_VERSION" = "2.4" ]; then 377 | 378 | if [ "$CUDA_VERSION" = "9.0" ]; then 379 | NCCL_VERSION_DETAILED="2.4.2-1" 380 | elif [ "$CUDA_VERSION" = "9.2" ]; then 381 | NCCL_VERSION_DETAILED="2.4.2-1" 382 | elif [ "$CUDA_VERSION" = "10.0" ]; then 383 | NCCL_VERSION_DETAILED="2.4.2-1" 384 | elif [ "$CUDA_VERSION" = "10.1" ]; then 385 | NCCL_VERSION_DETAILED="2.4.2-1" 386 | elif [ -n "$NCCL_VERSION" ]; then 387 | echo "Error: NCCL $NCCL_VERSION is not compatible with Cuda $CUDA_VERSION." 388 | return 1 389 | fi 390 | 391 | elif [ -n "$NCCL_VERSION" ]; then 392 | echo "Error: You need to set NCCL_VERSION to 2.1, 2.2, 2.3 and 2.4." 393 | return 1 394 | fi 395 | 396 | # Setup URLs 397 | NCCL_URL="https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/libnccl2_${NCCL_VERSION_DETAILED}+cuda${CUDA_VERSION}_amd64.deb" 398 | NCCL_URL_DEV="https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/libnccl-dev_${NCCL_VERSION_DETAILED}+cuda${CUDA_VERSION}_amd64.deb" 399 | 400 | # Setup temporary paths 401 | NCCL_TMP_PATH="/tmp/nccl.deb" 402 | NCCL_DEV_TMP_PATH="/tmp/nccl-dev.deb" 403 | 404 | NCCL_TMP_DIR_PATH="/tmp/nccl" 405 | NCCL_DEV_TMP_DIR_PATH="/tmp/nccl-dev" 406 | 407 | echo "Download binaries." 408 | wget "$NCCL_URL" -O "$NCCL_TMP_PATH" 409 | wget "$NCCL_URL_DEV" -O "$NCCL_DEV_TMP_PATH" 410 | 411 | mkdir -p "$NCCL_TMP_DIR_PATH" 412 | mkdir -p "$NCCL_DEV_TMP_DIR_PATH" 413 | 414 | echo "Extract binaries." 415 | cd "$NCCL_TMP_DIR_PATH" 416 | ar x "$NCCL_TMP_PATH" 417 | tar -xJf data.tar.xz 418 | cd "$NCCL_DEV_TMP_DIR_PATH" 419 | ar x "$NCCL_DEV_TMP_PATH" 420 | tar -xJf data.tar.xz 421 | 422 | echo "Install NCCL files." 423 | 424 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 425 | if [ ! -w "$PARENT_BASE_DIR" ]; then 426 | sudo mv $NCCL_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libnccl* "$CUDA_HOME/lib64/" 427 | sudo rm -f "$CUDA_HOME/include/nccl.h" 428 | sudo mv "$NCCL_DEV_TMP_DIR_PATH/usr/include/nccl.h" "$CUDA_HOME/include/nccl.h" 429 | sudo rm -f "$CUDA_HOME/lib64/libnccl_static.a" 430 | sudo mv "$NCCL_DEV_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libnccl_static.a" "$CUDA_HOME/lib64/libnccl_static.a" 431 | else 432 | mv $NCCL_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libnccl* "$CUDA_HOME/lib64/" 433 | rm -f "$CUDA_HOME/include/nccl.h" 434 | mv "$NCCL_DEV_TMP_DIR_PATH/usr/include/nccl.h" "$CUDA_HOME/include/nccl.h" 435 | rm -f "$CUDA_HOME/lib64/libnccl_static.a" 436 | mv "$NCCL_DEV_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libnccl_static.a" "$CUDA_HOME/lib64/libnccl_static.a" 437 | fi 438 | 439 | echo "Cleanup files." 440 | rm -fr "$NCCL_TMP_DIR_PATH" 441 | rm -fr "$NCCL_DEV_TMP_DIR_PATH" 442 | rm -f "$NCCL_TMP_PATH" 443 | rm -f "$NCCL_DEV_TMP_PATH" 444 | 445 | echo "NCCL $NCCL_VERSION is installed at $CUDA_HOME." 446 | } 447 | 448 | cuda.gcc.install() { 449 | 450 | if [ -z "$1" ]; then 451 | echo "Please specify a GCC version." 
452 | return 453 | fi 454 | export GCC_VERSION="$1" 455 | 456 | sudo apt install --yes gcc-$GCC_VERSION g++-$GCC_VERSION 457 | 458 | sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-$GCC_VERSION 10 459 | sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-$GCC_VERSION 10 460 | 461 | sudo update-alternatives --set gcc "/usr/bin/gcc-$GCC_VERSION" 462 | sudo update-alternatives --set g++ "/usr/bin/g++-$GCC_VERSION" 463 | } 464 | -------------------------------------------------------------------------------- /tensorflow/centos-6.6/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | tf: 4 | build: . 5 | stdin_open: true 6 | tty: true 7 | volumes: 8 | - ../../wheels:/wheels 9 | environment: 10 | - TF_VERSION_GIT_TAG=${TF_VERSION_GIT_TAG:?TF_VERSION_GIT_TAG} 11 | - PYTHON_VERSION=${PYTHON_VERSION:?PYTHON_VERSION} 12 | - BAZEL_VERSION=${BAZEL_VERSION:?BAZEL_VERSION} 13 | - USE_GPU=${USE_GPU-0} 14 | - CUDA_VERSION=${CUDA_VERSION-10.0} 15 | - CUDNN_VERSION=${CUDNN_VERSION-7.5} 16 | - NCCL_VERSION=${NCCL_VERSION-2.4} 17 | -------------------------------------------------------------------------------- /tensorflow/centos-7.4/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:7.4.1708 2 | 3 | RUN yum update -y && \ 4 | yum install -y \ 5 | curl \ 6 | git \ 7 | wget \ 8 | tar \ 9 | bzip2 \ 10 | patch \ 11 | gcc \ 12 | gcc-c++ \ 13 | which && \ 14 | yum -y install centos-release-scl && \ 15 | yum -y install devtoolset-4-gcc devtoolset-4-gcc-c++ && \ 16 | yum clean all 17 | 18 | # Install Anaconda 19 | WORKDIR / 20 | RUN wget "https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh" -O "miniconda.sh" && \ 21 | bash "miniconda.sh" -b -p "/conda" && \ 22 | rm -f miniconda.sh && \ 23 | echo PATH='/conda/bin:$PATH' >> /root/.bashrc && \ 24 | /conda/bin/conda config --add channels conda-forge && \ 25 | /conda/bin/conda update --yes -n base conda && \ 26 | /conda/bin/conda update --all --yes 27 | 28 | COPY build.sh /build.sh 29 | COPY build2.sh /build2.sh 30 | COPY cuda.sh /cuda.sh 31 | 32 | CMD bash build.sh 33 | -------------------------------------------------------------------------------- /tensorflow/centos-7.4/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | source /root/.bashrc 5 | 6 | if [ "$USE_GPU" -eq "1" ]; then 7 | export CUDA_HOME="/usr/local/cuda" 8 | alias sudo="" 9 | source cuda.sh 10 | cuda.install $CUDA_VERSION $CUDNN_VERSION $NCCL_VERSION 11 | cd / 12 | fi 13 | 14 | # Enable GCC 5 15 | chmod +x /build2.sh 16 | scl enable devtoolset-4 ./build2.sh 17 | -------------------------------------------------------------------------------- /tensorflow/centos-7.4/build2.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -ex 3 | 4 | gcc --version 5 | 6 | # Install an appropriate Python environment 7 | conda config --add channels conda-forge 8 | conda create --yes -n tensorflow python==$PYTHON_VERSION 9 | source activate tensorflow 10 | conda install --yes numpy wheel bazel==$BAZEL_VERSION 11 | conda install --yes git 12 | pip install keras-applications keras-preprocessing 13 | 14 | # Compile TensorFlow 15 | 16 | # Here you can change the TensorFlow version you want to build. 17 | # You can also tweak the optimizations and various parameters for the build compilation. 
18 | # See https://www.tensorflow.org/install/install_sources for more details. 19 | 20 | cd / 21 | rm -fr tensorflow/ 22 | git clone --depth 1 --branch $TF_VERSION_GIT_TAG "https://github.com/tensorflow/tensorflow.git" 23 | 24 | TF_ROOT=/tensorflow 25 | cd $TF_ROOT 26 | 27 | # Python path options 28 | export PYTHON_BIN_PATH=$(which python) 29 | export PYTHON_LIB_PATH="$($PYTHON_BIN_PATH -c 'import site; print(site.getsitepackages()[0])')" 30 | export PYTHONPATH=${TF_ROOT}/lib 31 | export PYTHON_ARG=${TF_ROOT}/lib 32 | 33 | # Compilation parameters 34 | export TF_NEED_CUDA=0 35 | export TF_NEED_GCP=1 36 | export TF_CUDA_COMPUTE_CAPABILITIES=5.2,3.5 37 | export TF_NEED_HDFS=1 38 | export TF_NEED_OPENCL=0 39 | export TF_NEED_JEMALLOC=1 40 | export TF_ENABLE_XLA=0 41 | export TF_NEED_VERBS=0 42 | export TF_CUDA_CLANG=0 43 | export TF_DOWNLOAD_CLANG=0 44 | export TF_NEED_MKL=0 45 | export TF_DOWNLOAD_MKL=0 46 | export TF_NEED_MPI=0 47 | export TF_NEED_S3=1 48 | export TF_NEED_KAFKA=1 49 | export TF_NEED_GDR=0 50 | export TF_NEED_OPENCL_SYCL=0 51 | export TF_SET_ANDROID_WORKSPACE=0 52 | export TF_NEED_AWS=0 53 | export TF_NEED_IGNITE=0 54 | export TF_NEED_ROCM=0 55 | 56 | # Compiler options 57 | export GCC_HOST_COMPILER_PATH=$(which gcc) 58 | 59 | # Here you can edit this variable to set any optimizations you want. 60 | export CC_OPT_FLAGS="-march=native" 61 | 62 | if [ "$USE_GPU" -eq "1" ]; then 63 | # Cuda parameters 64 | export CUDA_TOOLKIT_PATH=/usr/local/cuda 65 | export CUDNN_INSTALL_PATH=/usr/local/cuda 66 | export TF_CUDA_VERSION="$CUDA_VERSION" 67 | export TF_CUDNN_VERSION="$CUDNN_VERSION" 68 | export TF_NEED_CUDA=1 69 | export TF_NEED_TENSORRT=0 70 | export TF_NCCL_VERSION=1.3 71 | 72 | # Those two lines are important for the linking step. 73 | export LD_LIBRARY_PATH="$CUDA_TOOLKIT_PATH/lib64:${LD_LIBRARY_PATH}" 74 | ldconfig 75 | fi 76 | 77 | # Compilation 78 | ./configure 79 | 80 | if [ "$USE_GPU" -eq "1" ]; then 81 | 82 | bazel build --config=opt \ 83 | --config=cuda \ 84 | --linkopt="-lrt" \ 85 | --linkopt="-lm" \ 86 | --host_linkopt="-lrt" \ 87 | --host_linkopt="-lm" \ 88 | --action_env="LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" \ 89 | //tensorflow/tools/pip_package:build_pip_package 90 | 91 | PACKAGE_NAME=tensorflow-gpu 92 | SUBFOLDER_NAME="${TF_VERSION_GIT_TAG}-py${PYTHON_VERSION}-cuda${TF_CUDA_VERSION}-cudnn${TF_CUDNN_VERSION}" 93 | else 94 | 95 | bazel build --config=opt \ 96 | --linkopt="-lrt" \ 97 | --linkopt="-lm" \ 98 | --host_linkopt="-lrt" \ 99 | --host_linkopt="-lm" \ 100 | --action_env="LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" \ 101 | //tensorflow/tools/pip_package:build_pip_package 102 | 103 | PACKAGE_NAME=tensorflow 104 | SUBFOLDER_NAME="${TF_VERSION_GIT_TAG}-py${PYTHON_VERSION}" 105 | fi 106 | 107 | mkdir -p "/wheels/$SUBFOLDER_NAME" 108 | 109 | # Project name can only be set for TF > 1.8 110 | bazel-bin/tensorflow/tools/pip_package/build_pip_package "/wheels/$SUBFOLDER_NAME" --project_name "$PACKAGE_NAME" 111 | 112 | # Use the following for TF <= 1.8 113 | #bazel-bin/tensorflow/tools/pip_package/build_pip_package "/wheels/$SUBFOLDER_NAME" 114 | 115 | # Fix wheel folder permissions 116 | chmod -R 777 /wheels/ 117 | -------------------------------------------------------------------------------- /tensorflow/centos-7.4/cuda.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Cuda and friends installation done right. 
4 | # Switch default Cuda version using symbolic link: cuda.switch 9.2 5 | # Install Cuda: cuda.install.cuda 10.0 6 | # Install cuDNN to CUDA_HOME: cuda.install.cudnn 7.5 7 | # Install NCCL to CUDA_HOME: cuda.install.nccl 2.4 8 | # Install Cuda, cuDNN and NCCL: cuda.install 10.0 7.5 2.4 9 | 10 | # Author: Hadrien Mary 11 | # License: MIT License 12 | # Date: 11/03/2019 13 | 14 | is_cuda_home_set() { 15 | if [ -z "$CUDA_HOME" ]; then 16 | echo "CUDA_HOME is not set. Please set it:" 17 | echo 'export CUDA_HOME="/usr/local/cuda/"' 18 | return 1 19 | fi 20 | return 0 21 | } 22 | 23 | is_cuda_home_symbolic_link() { 24 | if [[ -e "${CUDA_HOME}" && -L "${CUDA_HOME}" ]]; then 25 | return 0 26 | elif [[ ! -d "${CUDA_HOME}" && ! -f "${CUDA_HOME}" ]]; then 27 | return 0 28 | else 29 | echo "CUDA_HOME is not a symbolic link." 30 | echo "Please make it a symbolic link." 31 | return 1 32 | fi 33 | } 34 | 35 | guess_cuda_version() { 36 | if ! is_cuda_home_set; then 37 | return 1 38 | fi 39 | 40 | if ! is_cuda_home_symbolic_link; then 41 | return 1 42 | fi 43 | 44 | POSSIBLE_CUDA_VERSION=$(cat "$CUDA_HOME/version.txt" | cut -d' ' -f 3 | cut -d'.' -f 1-2) 45 | echo $POSSIBLE_CUDA_VERSION 46 | } 47 | 48 | cuda.see() { 49 | if ! is_cuda_home_set; then 50 | return 1 51 | fi 52 | 53 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 54 | ls -l $PARENT_BASE_DIR 55 | return 0 56 | } 57 | 58 | cuda.switch() { 59 | if ! is_cuda_home_set; then 60 | return 1 61 | fi 62 | 63 | if ! is_cuda_home_symbolic_link; then 64 | return 1 65 | fi 66 | 67 | if [ -z "$1" ]; then 68 | echo "Please specify a Cuda version." 69 | echo "Usage: cuda.switch CUDA_VERSION" 70 | echo "Cuda version available: 9.0, 9.1, 9.2, 10.0, 10.1" 71 | return 1 72 | fi 73 | 74 | NEW_CUDA_VERSION="$1" 75 | NEW_CUDA_HOME="$CUDA_HOME-$NEW_CUDA_VERSION" 76 | 77 | if [ ! -d $NEW_CUDA_HOME ]; then 78 | echo "Cuda $NEW_CUDA_VERSION doesn't exist at $NEW_CUDA_HOME." 79 | return 1 80 | fi 81 | 82 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 83 | if [ ! -w "$PARENT_BASE_DIR" ]; then 84 | sudo rm -f $CUDA_HOME 85 | sudo ln -s $NEW_CUDA_HOME $CUDA_HOME 86 | else 87 | rm -f $CUDA_HOME 88 | ln -s $NEW_CUDA_HOME $CUDA_HOME 89 | fi 90 | echo "Default Cuda version is now $NEW_CUDA_VERSION at $NEW_CUDA_HOME" 91 | } 92 | 93 | cuda.install() { 94 | cuda.install.cuda $1 95 | cuda.install.cudnn $2 96 | cuda.install.nccl $3 97 | } 98 | 99 | cuda.install.cuda() { 100 | 101 | CUDA_VERSION="$1" 102 | if [ -z "$CUDA_VERSION" ]; then 103 | echo "Please specify a Cuda version." 104 | echo "Usage: cuda.install.cuda CUDA_VERSION" 105 | echo "Example: cuda.install.cuda 10.0" 106 | echo "Cuda version available: 9.0, 9.1, 9.2, 10.0, 9.2." 107 | return 1 108 | fi 109 | 110 | if ! is_cuda_home_set; then 111 | return 1 112 | fi 113 | 114 | if ! is_cuda_home_symbolic_link; then 115 | return 1 116 | fi 117 | 118 | CUDA_PATH="$CUDA_HOME-$CUDA_VERSION" 119 | if [ -d $CUDA_PATH ]; then 120 | echo "$CUDA_PATH exists. Please remove the previous Cuda folder first." 
121 | return 1 122 | fi 123 | 124 | # Setup Cuda URL 125 | if [ "$CUDA_VERSION" = "9.0" ]; then 126 | CUDA_URL="https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda_9.0.176_384.81_linux-run" 127 | elif [ "$CUDA_VERSION" = "9.1" ]; then 128 | CUDA_URL="https://developer.nvidia.com/compute/cuda/9.1/Prod/local_installers/cuda_9.1.85_387.26_linux" 129 | elif [ "$CUDA_VERSION" = "9.2" ]; then 130 | CUDA_URL="https://developer.nvidia.com/compute/cuda/9.2/Prod/local_installers/cuda_9.2.88_396.26_linux" 131 | elif [ "$CUDA_VERSION" = "10.0" ]; then 132 | CUDA_URL="https://developer.nvidia.com/compute/cuda/10.0/Prod/local_installers/cuda_10.0.130_410.48_linux" 133 | elif [ "$CUDA_VERSION" = "10.1" ]; then 134 | CUDA_URL="https://developer.nvidia.com/compute/cuda/10.1/Prod/local_installers/cuda_10.1.105_418.39_linux.run" 135 | else 136 | echo "Error: You need to set CUDA_VERSION to 9.0, 9.1, 9.2, 10.0 or 10.1." 137 | return 1 138 | fi 139 | 140 | CUDA_INSTALLER_PATH="/tmp/cuda.run" 141 | 142 | echo "Download Cuda $CUDA_VERSION." 143 | wget "$CUDA_URL" -O "$CUDA_INSTALLER_PATH" 144 | 145 | echo "Install Cuda $CUDA_VERSION." 146 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 147 | if [ ! -w "$PARENT_BASE_DIR" ]; then 148 | sudo bash "$CUDA_INSTALLER_PATH" --silent --toolkit --override --toolkitpath="$CUDA_PATH" 149 | else 150 | bash "$CUDA_INSTALLER_PATH" --silent --toolkit --override --toolkitpath="$CUDA_PATH" 151 | fi 152 | rm -f "$CUDA_INSTALLER_PATH" 153 | 154 | # Set the symbolic link. 155 | cuda.switch $CUDA_VERSION 156 | 157 | echo "Cuda $CUDA_VERSION is installed at $CUDA_PATH." 158 | 159 | return 0 160 | } 161 | 162 | cuda.install.cudnn() { 163 | # Install cuDNN in $CUDA_HOME 164 | 165 | if ! is_cuda_home_set; then 166 | return 1 167 | fi 168 | 169 | if ! is_cuda_home_symbolic_link; then 170 | return 1 171 | fi 172 | 173 | CUDA_VERSION="$(guess_cuda_version)" 174 | if [ -z "$CUDA_VERSION" ]; then 175 | echo "Can't guess the Cuda version from $CUDA_HOME." 176 | return 1 177 | fi 178 | 179 | CUDNN_VERSION="$1" 180 | if [ -z "$CUDNN_VERSION" ]; then 181 | echo "Please specify a cuDNN version." 182 | echo "Usage: cuda.install.cudnn CUDNN_VERSION" 183 | echo "Example: cuda.install.cudnn 7.5" 184 | echo "cuDNN version available: 7.0, 7.1, 7.4, 7.5." 185 | return 1 186 | fi 187 | 188 | # cuDNN 7.0 189 | if [ "$CUDNN_VERSION" = "7.0" ]; then 190 | 191 | if [ "$CUDA_VERSION" = "9.0" ]; then 192 | CUDNN_VERSION_DETAILED="7.0.5.15" 193 | elif [ "$CUDA_VERSION" = "9.1" ]; then 194 | CUDNN_VERSION_DETAILED="7.0.5.15" 195 | elif [ -n "$CUDNN_VERSION" ]; then 196 | echo "Error: cuDNN $CUDNN_VERSION is not compatible with Cuda $CUDA_VERSION." 197 | return 1 198 | fi 199 | 200 | # cuDNN 7.1 201 | elif [ "$CUDNN_VERSION" = "7.1" ]; then 202 | 203 | if [ "$CUDA_VERSION" = "9.0" ]; then 204 | CUDNN_VERSION_DETAILED="7.1.4.18" 205 | elif [ "$CUDA_VERSION" = "9.2" ]; then 206 | CUDNN_VERSION_DETAILED="7.1.4.18" 207 | elif [ -n "$CUDNN_VERSION" ]; then 208 | echo "Error: cuDNN $CUDNN_VERSION is not compatible with Cuda $CUDA_VERSION." 
209 | return 1 210 | fi 211 | 212 | # cuDNN 7.4 213 | elif [ "$CUDNN_VERSION" = "7.4" ]; then 214 | 215 | if [ "$CUDA_VERSION" = "9.0" ]; then 216 | CUDNN_VERSION_DETAILED="7.4.2.24" 217 | elif [ "$CUDA_VERSION" = "9.2" ]; then 218 | CUDNN_VERSION_DETAILED="7.4.2.24" 219 | elif [ "$CUDA_VERSION" = "10.0" ]; then 220 | CUDNN_VERSION_DETAILED="7.4.2.24" 221 | elif [ -n "$CUDNN_VERSION" ]; then 222 | echo "Error: cuDNN $CUDNN_VERSION is not compatible with Cuda $CUDA_VERSION." 223 | return 1 224 | fi 225 | 226 | # cuDNN 7.5 227 | elif [ "$CUDNN_VERSION" = "7.5" ]; then 228 | 229 | if [ "$CUDA_VERSION" = "9.0" ]; then 230 | CUDNN_VERSION_DETAILED="7.5.0.56" 231 | elif [ "$CUDA_VERSION" = "9.2" ]; then 232 | CUDNN_VERSION_DETAILED="7.5.0.56" 233 | elif [ "$CUDA_VERSION" = "10.0" ]; then 234 | CUDNN_VERSION_DETAILED="7.5.0.56" 235 | elif [ "$CUDA_VERSION" = "10.1" ]; then 236 | CUDNN_VERSION_DETAILED="7.5.0.56" 237 | elif [ -n "$CUDNN_VERSION" ]; then 238 | echo "Error: cuDNN $CUDNN_VERSION is not compatible with Cuda $CUDA_VERSION." 239 | return 1 240 | fi 241 | 242 | elif [ -n "$CUDNN_VERSION" ]; then 243 | echo "Error: You need to set CUDNN_VERSION to 7.0, 7.1, 7.4 or 7.5." 244 | return 1 245 | fi 246 | 247 | # Setup URLs 248 | CUDNN_URL="https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/libcudnn7_${CUDNN_VERSION_DETAILED}-1+cuda${CUDA_VERSION}_amd64.deb" 249 | CUDNN_URL_DEV="https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/libcudnn7-dev_${CUDNN_VERSION_DETAILED}-1+cuda${CUDA_VERSION}_amd64.deb" 250 | 251 | # Setup temporary paths 252 | CUDNN_TMP_PATH="/tmp/cudnn.deb" 253 | CUDNN_DEV_TMP_PATH="/tmp/cudnn-dev.deb" 254 | 255 | CUDNN_TMP_DIR_PATH="/tmp/cudnn" 256 | CUDNN_DEV_TMP_DIR_PATH="/tmp/cudnn-dev" 257 | 258 | echo "Download binaries." 259 | wget "$CUDNN_URL" -O "$CUDNN_TMP_PATH" 260 | wget "$CUDNN_URL_DEV" -O "$CUDNN_DEV_TMP_PATH" 261 | 262 | mkdir -p "$CUDNN_TMP_DIR_PATH" 263 | mkdir -p "$CUDNN_DEV_TMP_DIR_PATH" 264 | 265 | echo "Extract binaries." 266 | cd "$CUDNN_TMP_DIR_PATH" 267 | ar x "$CUDNN_TMP_PATH" 268 | tar -xJf data.tar.xz 269 | cd "$CUDNN_DEV_TMP_DIR_PATH" 270 | ar x "$CUDNN_DEV_TMP_PATH" 271 | tar -xJf data.tar.xz 272 | 273 | echo "Install cuDNN files." 274 | 275 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 276 | if [ ! 
-w "$PARENT_BASE_DIR" ]; then 277 | sudo mv $CUDNN_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libcudnn* "$CUDA_HOME/lib64/" 278 | sudo mv "$CUDNN_DEV_TMP_DIR_PATH/usr/include/x86_64-linux-gnu/cudnn_v7.h" "$CUDA_HOME/include/" 279 | sudo mv "$CUDNN_DEV_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libcudnn_static_v7.a" "$CUDA_HOME/lib64/" 280 | 281 | sudo rm -f "$CUDA_HOME/include/cudnn.h" 282 | sudo rm -f "$CUDA_HOME/lib64/libcudnn_static.a" 283 | 284 | sudo ln -s "$CUDA_HOME/include/cudnn_v7.h" "$CUDA_HOME/include/cudnn.h" 285 | sudo ln -s "$CUDA_HOME/lib64/libcudnn_static_v7.a" "$CUDA_HOME/lib64/libcudnn_static.a" 286 | else 287 | mv $CUDNN_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libcudnn* "$CUDA_HOME/lib64/" 288 | mv "$CUDNN_DEV_TMP_DIR_PATH/usr/include/x86_64-linux-gnu/cudnn_v7.h" "$CUDA_HOME/include/" 289 | mv "$CUDNN_DEV_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libcudnn_static_v7.a" "$CUDA_HOME/lib64/" 290 | 291 | rm -f "$CUDA_HOME/include/cudnn.h" 292 | rm -f "$CUDA_HOME/lib64/libcudnn_static.a" 293 | 294 | ln -s "$CUDA_HOME/include/cudnn_v7.h" "$CUDA_HOME/include/cudnn.h" 295 | ln -s "$CUDA_HOME/lib64/libcudnn_static_v7.a" "$CUDA_HOME/lib64/libcudnn_static.a" 296 | fi 297 | 298 | echo "Cleanup files." 299 | rm -fr "$CUDNN_TMP_DIR_PATH" 300 | rm -fr "$CUDNN_DEV_TMP_DIR_PATH" 301 | rm -f "$CUDNN_TMP_PATH" 302 | rm -f "$CUDNN_DEV_TMP_PATH" 303 | 304 | echo "cuDNN $CUDNN_VERSION is installed at $CUDA_HOME." 305 | } 306 | 307 | cuda.install.nccl() { 308 | # Install NCCL in $CUDA_HOME 309 | 310 | if ! is_cuda_home_set; then 311 | return 1 312 | fi 313 | 314 | if ! is_cuda_home_symbolic_link; then 315 | return 1 316 | fi 317 | 318 | CUDA_VERSION="$(guess_cuda_version)" 319 | if [ -z "$CUDA_VERSION" ]; then 320 | echo "Can't guess the Cuda version from $CUDA_HOME." 321 | return 1 322 | fi 323 | 324 | NCCL_VERSION="$1" 325 | if [ -z "$NCCL_VERSION" ]; then 326 | # echo "Please specify a NCCL version." 327 | # echo "Usage: cuda.install.nccl NCCL_VERSION" 328 | # echo "Example: cuda.install.nccl 2.4" 329 | # echo "NCCL version available: 2.1, 2.2, 2.3 and 2.4" 330 | # return 1 331 | # Default NCCL version 332 | NCCL_VERSION="2.4" 333 | fi 334 | 335 | # NCCL 2.1 336 | if [ "$NCCL_VERSION" = "2.1" ]; then 337 | 338 | 339 | if [ "$CUDA_VERSION" = "9.0" ]; then 340 | NCCL_VERSION_DETAILED="2.1.15-1" 341 | elif [ "$CUDA_VERSION" = "9.1" ]; then 342 | NCCL_VERSION_DETAILED="2.1.15-1" 343 | elif [ -n "$NCCL_VERSION" ]; then 344 | echo "Error: NCCL $NCCL_VERSION is not compatible with Cuda $CUDA_VERSION." 345 | return 1 346 | fi 347 | 348 | # NCCL 2.3 349 | elif [ "$NCCL_VERSION" = "2.2" ]; then 350 | 351 | # NCCL 2.2 352 | if [ "$CUDA_VERSION" = "9.0" ]; then 353 | NCCL_VERSION_DETAILED="2.2.13-1" 354 | elif [ "$CUDA_VERSION" = "9.2" ]; then 355 | NCCL_VERSION_DETAILED="2.2.13-1" 356 | elif [ -n "$NCCL_VERSION" ]; then 357 | echo "Error: NCCL $NCCL_VERSION is not compatible with Cuda $CUDA_VERSION." 358 | return 1 359 | fi 360 | 361 | # NCCL 2.3 362 | elif [ "$NCCL_VERSION" = "2.3" ]; then 363 | 364 | if [ "$CUDA_VERSION" = "9.0" ]; then 365 | NCCL_VERSION_DETAILED="2.3.7-1" 366 | elif [ "$CUDA_VERSION" = "9.2" ]; then 367 | NCCL_VERSION_DETAILED="2.3.7-1" 368 | elif [ "$CUDA_VERSION" = "10.0" ]; then 369 | NCCL_VERSION_DETAILED="2.3.7-1" 370 | elif [ -n "$NCCL_VERSION" ]; then 371 | echo "Error: NCCL $NCCL_VERSION is not compatible with Cuda $CUDA_VERSION." 
372 | return 1 373 | fi 374 | 375 | # NCCL 2.4 376 | elif [ "$NCCL_VERSION" = "2.4" ]; then 377 | 378 | if [ "$CUDA_VERSION" = "9.0" ]; then 379 | NCCL_VERSION_DETAILED="2.4.2-1" 380 | elif [ "$CUDA_VERSION" = "9.2" ]; then 381 | NCCL_VERSION_DETAILED="2.4.2-1" 382 | elif [ "$CUDA_VERSION" = "10.0" ]; then 383 | NCCL_VERSION_DETAILED="2.4.2-1" 384 | elif [ "$CUDA_VERSION" = "10.1" ]; then 385 | NCCL_VERSION_DETAILED="2.4.2-1" 386 | elif [ -n "$NCCL_VERSION" ]; then 387 | echo "Error: NCCL $NCCL_VERSION is not compatible with Cuda $CUDA_VERSION." 388 | return 1 389 | fi 390 | 391 | elif [ -n "$NCCL_VERSION" ]; then 392 | echo "Error: You need to set NCCL_VERSION to 2.1, 2.2, 2.3 and 2.4." 393 | return 1 394 | fi 395 | 396 | # Setup URLs 397 | NCCL_URL="https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/libnccl2_${NCCL_VERSION_DETAILED}+cuda${CUDA_VERSION}_amd64.deb" 398 | NCCL_URL_DEV="https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/libnccl-dev_${NCCL_VERSION_DETAILED}+cuda${CUDA_VERSION}_amd64.deb" 399 | 400 | # Setup temporary paths 401 | NCCL_TMP_PATH="/tmp/nccl.deb" 402 | NCCL_DEV_TMP_PATH="/tmp/nccl-dev.deb" 403 | 404 | NCCL_TMP_DIR_PATH="/tmp/nccl" 405 | NCCL_DEV_TMP_DIR_PATH="/tmp/nccl-dev" 406 | 407 | echo "Download binaries." 408 | wget "$NCCL_URL" -O "$NCCL_TMP_PATH" 409 | wget "$NCCL_URL_DEV" -O "$NCCL_DEV_TMP_PATH" 410 | 411 | mkdir -p "$NCCL_TMP_DIR_PATH" 412 | mkdir -p "$NCCL_DEV_TMP_DIR_PATH" 413 | 414 | echo "Extract binaries." 415 | cd "$NCCL_TMP_DIR_PATH" 416 | ar x "$NCCL_TMP_PATH" 417 | tar -xJf data.tar.xz 418 | cd "$NCCL_DEV_TMP_DIR_PATH" 419 | ar x "$NCCL_DEV_TMP_PATH" 420 | tar -xJf data.tar.xz 421 | 422 | echo "Install NCCL files." 423 | 424 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 425 | if [ ! -w "$PARENT_BASE_DIR" ]; then 426 | sudo mv $NCCL_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libnccl* "$CUDA_HOME/lib64/" 427 | sudo rm -f "$CUDA_HOME/include/nccl.h" 428 | sudo mv "$NCCL_DEV_TMP_DIR_PATH/usr/include/nccl.h" "$CUDA_HOME/include/nccl.h" 429 | sudo rm -f "$CUDA_HOME/lib64/libnccl_static.a" 430 | sudo mv "$NCCL_DEV_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libnccl_static.a" "$CUDA_HOME/lib64/libnccl_static.a" 431 | else 432 | mv $NCCL_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libnccl* "$CUDA_HOME/lib64/" 433 | rm -f "$CUDA_HOME/include/nccl.h" 434 | mv "$NCCL_DEV_TMP_DIR_PATH/usr/include/nccl.h" "$CUDA_HOME/include/nccl.h" 435 | rm -f "$CUDA_HOME/lib64/libnccl_static.a" 436 | mv "$NCCL_DEV_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libnccl_static.a" "$CUDA_HOME/lib64/libnccl_static.a" 437 | fi 438 | 439 | echo "Cleanup files." 440 | rm -fr "$NCCL_TMP_DIR_PATH" 441 | rm -fr "$NCCL_DEV_TMP_DIR_PATH" 442 | rm -f "$NCCL_TMP_PATH" 443 | rm -f "$NCCL_DEV_TMP_PATH" 444 | 445 | echo "NCCL $NCCL_VERSION is installed at $CUDA_HOME." 446 | } 447 | 448 | cuda.gcc.install() { 449 | 450 | if [ -z "$1" ]; then 451 | echo "Please specify a GCC version." 
452 | return 453 | fi 454 | export GCC_VERSION="$1" 455 | 456 | sudo apt install --yes gcc-$GCC_VERSION g++-$GCC_VERSION 457 | 458 | sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-$GCC_VERSION 10 459 | sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-$GCC_VERSION 10 460 | 461 | sudo update-alternatives --set gcc "/usr/bin/gcc-$GCC_VERSION" 462 | sudo update-alternatives --set g++ "/usr/bin/g++-$GCC_VERSION" 463 | } 464 | -------------------------------------------------------------------------------- /tensorflow/centos-7.4/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | tf: 4 | build: . 5 | stdin_open: true 6 | tty: true 7 | volumes: 8 | - ../../wheels:/wheels 9 | environment: 10 | - TF_VERSION_GIT_TAG=${TF_VERSION_GIT_TAG:?TF_VERSION_GIT_TAG} 11 | - PYTHON_VERSION=${PYTHON_VERSION:?PYTHON_VERSION} 12 | - BAZEL_VERSION=${BAZEL_VERSION:?BAZEL_VERSION} 13 | - USE_GPU=${USE_GPU-0} 14 | - CUDA_VERSION=${CUDA_VERSION-10.0} 15 | - CUDNN_VERSION=${CUDNN_VERSION-7.5} 16 | - NCCL_VERSION=${NCCL_VERSION-2.4} 17 | -------------------------------------------------------------------------------- /tensorflow/ubuntu-16.04/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:16.04 2 | 3 | RUN apt update && apt install -y \ 4 | build-essential \ 5 | curl \ 6 | git \ 7 | wget \ 8 | libjpeg-dev \ 9 | openjdk-8-jdk \ 10 | && rm -rf /var/lib/lists/* 11 | 12 | # Install Anaconda 13 | WORKDIR / 14 | RUN wget "https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh" -O "miniconda.sh" && \ 15 | bash "miniconda.sh" -b -p "/conda" && \ 16 | rm miniconda.sh && \ 17 | echo PATH='/conda/bin:$PATH' >> /root/.bashrc && \ 18 | /conda/bin/conda config --add channels conda-forge && \ 19 | /conda/bin/conda update --yes -n base conda && \ 20 | /conda/bin/conda update --all --yes 21 | 22 | COPY build.sh /build.sh 23 | COPY cuda.sh /cuda.sh 24 | 25 | CMD bash build.sh 26 | -------------------------------------------------------------------------------- /tensorflow/ubuntu-16.04/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -ex 3 | 4 | export PATH="/conda/bin:/usr/bin:$PATH" 5 | 6 | if [ "$USE_GPU" -eq "1" ]; then 7 | export CUDA_HOME="/usr/local/cuda" 8 | alias sudo="" 9 | source cuda.sh 10 | cuda.install $CUDA_VERSION $CUDNN_VERSION $NCCL_VERSION 11 | cd / 12 | fi 13 | 14 | gcc --version 15 | 16 | # Install an appropriate Python environment 17 | conda config --add channels conda-forge 18 | conda create --yes -n tensorflow python==$PYTHON_VERSION 19 | source activate tensorflow 20 | conda install --yes numpy wheel bazel==$BAZEL_VERSION 21 | pip install keras-applications keras-preprocessing 22 | 23 | # Compile TensorFlow 24 | 25 | # Here you can change the TensorFlow version you want to build. 26 | # You can also tweak the optimizations and various parameters for the build compilation. 27 | # See https://www.tensorflow.org/install/install_sources for more details. 
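# The CUDA compute capabilities are hard-coded further down as
# TF_CUDA_COMPUTE_CAPABILITIES=5.2,3.5. If you build for a different GPU
# generation, adjust that value (for example 6.1 for Pascal / GTX 10xx cards or
# 7.0 for Volta / V100); multiple values can be listed, comma-separated.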
28 | 29 | cd / 30 | rm -fr tensorflow/ 31 | git clone --depth 1 --branch $TF_VERSION_GIT_TAG "https://github.com/tensorflow/tensorflow.git" 32 | 33 | TF_ROOT=/tensorflow 34 | cd $TF_ROOT 35 | 36 | # Python path options 37 | export PYTHON_BIN_PATH=$(which python) 38 | export PYTHON_LIB_PATH="$($PYTHON_BIN_PATH -c 'import site; print(site.getsitepackages()[0])')" 39 | export PYTHONPATH=${TF_ROOT}/lib 40 | export PYTHON_ARG=${TF_ROOT}/lib 41 | 42 | # Compilation parameters 43 | export TF_NEED_CUDA=0 44 | export TF_NEED_GCP=1 45 | export TF_CUDA_COMPUTE_CAPABILITIES=5.2,3.5 46 | export TF_NEED_HDFS=1 47 | export TF_NEED_OPENCL=0 48 | export TF_NEED_JEMALLOC=1 # Need to be disabled on CentOS 6.6 49 | export TF_ENABLE_XLA=0 50 | export TF_NEED_VERBS=0 51 | export TF_CUDA_CLANG=0 52 | export TF_DOWNLOAD_CLANG=0 53 | export TF_NEED_MKL=0 54 | export TF_DOWNLOAD_MKL=0 55 | export TF_NEED_MPI=0 56 | export TF_NEED_S3=1 57 | export TF_NEED_KAFKA=1 58 | export TF_NEED_GDR=0 59 | export TF_NEED_OPENCL_SYCL=0 60 | export TF_SET_ANDROID_WORKSPACE=0 61 | export TF_NEED_AWS=0 62 | export TF_NEED_IGNITE=0 63 | export TF_NEED_ROCM=0 64 | 65 | # Compiler options 66 | export GCC_HOST_COMPILER_PATH=$(which gcc) 67 | 68 | # Here you can edit this variable to set any optimizations you want. 69 | export CC_OPT_FLAGS="-march=native" 70 | 71 | if [ "$USE_GPU" -eq "1" ]; then 72 | # Cuda parameters 73 | export CUDA_TOOLKIT_PATH=/usr/local/cuda 74 | export CUDNN_INSTALL_PATH=/usr/local/cuda 75 | export TF_CUDA_VERSION="$CUDA_VERSION" 76 | export TF_CUDNN_VERSION="$CUDNN_VERSION" 77 | export TF_NEED_CUDA=1 78 | export TF_NEED_TENSORRT=0 79 | export TF_NCCL_VERSION=1.3 80 | export NCCL_INSTALL_PATH=$CUDA_HOME 81 | export NCCL_INSTALL_PATH=$CUDA_HOME 82 | 83 | # Those two lines are important for the linking step. 84 | export LD_LIBRARY_PATH="$CUDA_TOOLKIT_PATH/lib64:${LD_LIBRARY_PATH}" 85 | ldconfig 86 | fi 87 | 88 | # Compilation 89 | ./configure 90 | 91 | if [ "$USE_GPU" -eq "1" ]; then 92 | 93 | bazel build --config=opt \ 94 | --config=cuda \ 95 | --linkopt="-lrt" \ 96 | --linkopt="-lm" \ 97 | --host_linkopt="-lrt" \ 98 | --host_linkopt="-lm" \ 99 | --action_env="LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" \ 100 | //tensorflow/tools/pip_package:build_pip_package 101 | 102 | PACKAGE_NAME=tensorflow-gpu 103 | SUBFOLDER_NAME="${TF_VERSION_GIT_TAG}-py${PYTHON_VERSION}-cuda${TF_CUDA_VERSION}-cudnn${TF_CUDNN_VERSION}" 104 | else 105 | 106 | bazel build --config=opt \ 107 | --linkopt="-lrt" \ 108 | --linkopt="-lm" \ 109 | --host_linkopt="-lrt" \ 110 | --host_linkopt="-lm" \ 111 | --action_env="LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" \ 112 | //tensorflow/tools/pip_package:build_pip_package 113 | 114 | PACKAGE_NAME=tensorflow 115 | SUBFOLDER_NAME="${TF_VERSION_GIT_TAG}-py${PYTHON_VERSION}" 116 | fi 117 | 118 | mkdir -p "/wheels/$SUBFOLDER_NAME" 119 | 120 | # Project name can only be set for TF > 1.8 121 | bazel-bin/tensorflow/tools/pip_package/build_pip_package "/wheels/$SUBFOLDER_NAME" --project_name "$PACKAGE_NAME" 122 | 123 | # Use the following for TF <= 1.8 124 | #bazel-bin/tensorflow/tools/pip_package/build_pip_package "/wheels/$SUBFOLDER_NAME" 125 | 126 | # Fix wheel folder permissions 127 | chmod -R 777 /wheels/ 128 | -------------------------------------------------------------------------------- /tensorflow/ubuntu-16.04/cuda.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Cuda and friends installation done right. 
4 | # Switch default Cuda version using symbolic link: cuda.switch 9.2 5 | # Install Cuda: cuda.install.cuda 10.0 6 | # Install cuDNN to CUDA_HOME: cuda.install.cudnn 7.5 7 | # Install NCCL to CUDA_HOME: cuda.install.nccl 2.4 8 | # Install Cuda, cuDNN and NCCL: cuda.install 10.0 7.5 2.4 9 | 10 | # Author: Hadrien Mary 11 | # License: MIT License 12 | # Date: 11/03/2019 13 | 14 | is_cuda_home_set() { 15 | if [ -z "$CUDA_HOME" ]; then 16 | echo "CUDA_HOME is not set. Please set it:" 17 | echo 'export CUDA_HOME="/usr/local/cuda/"' 18 | return 1 19 | fi 20 | return 0 21 | } 22 | 23 | is_cuda_home_symbolic_link() { 24 | if [[ -e "${CUDA_HOME}" && -L "${CUDA_HOME}" ]]; then 25 | return 0 26 | elif [[ ! -d "${CUDA_HOME}" && ! -f "${CUDA_HOME}" ]]; then 27 | return 0 28 | else 29 | echo "CUDA_HOME is not a symbolic link." 30 | echo "Please make it a symbolic link." 31 | return 1 32 | fi 33 | } 34 | 35 | guess_cuda_version() { 36 | if ! is_cuda_home_set; then 37 | return 1 38 | fi 39 | 40 | if ! is_cuda_home_symbolic_link; then 41 | return 1 42 | fi 43 | 44 | POSSIBLE_CUDA_VERSION=$(cat "$CUDA_HOME/version.txt" | cut -d' ' -f 3 | cut -d'.' -f 1-2) 45 | echo $POSSIBLE_CUDA_VERSION 46 | } 47 | 48 | cuda.see() { 49 | if ! is_cuda_home_set; then 50 | return 1 51 | fi 52 | 53 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 54 | ls -l $PARENT_BASE_DIR 55 | return 0 56 | } 57 | 58 | cuda.switch() { 59 | if ! is_cuda_home_set; then 60 | return 1 61 | fi 62 | 63 | if ! is_cuda_home_symbolic_link; then 64 | return 1 65 | fi 66 | 67 | if [ -z "$1" ]; then 68 | echo "Please specify a Cuda version." 69 | echo "Usage: cuda.switch CUDA_VERSION" 70 | echo "Cuda version available: 9.0, 9.1, 9.2, 10.0, 10.1" 71 | return 1 72 | fi 73 | 74 | NEW_CUDA_VERSION="$1" 75 | NEW_CUDA_HOME="$CUDA_HOME-$NEW_CUDA_VERSION" 76 | 77 | if [ ! -d $NEW_CUDA_HOME ]; then 78 | echo "Cuda $NEW_CUDA_VERSION doesn't exist at $NEW_CUDA_HOME." 79 | return 1 80 | fi 81 | 82 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 83 | if [ ! -w "$PARENT_BASE_DIR" ]; then 84 | sudo rm -f $CUDA_HOME 85 | sudo ln -s $NEW_CUDA_HOME $CUDA_HOME 86 | else 87 | rm -f $CUDA_HOME 88 | ln -s $NEW_CUDA_HOME $CUDA_HOME 89 | fi 90 | echo "Default Cuda version is now $NEW_CUDA_VERSION at $NEW_CUDA_HOME" 91 | } 92 | 93 | cuda.install() { 94 | cuda.install.cuda $1 95 | cuda.install.cudnn $2 96 | cuda.install.nccl $3 97 | } 98 | 99 | cuda.install.cuda() { 100 | 101 | CUDA_VERSION="$1" 102 | if [ -z "$CUDA_VERSION" ]; then 103 | echo "Please specify a Cuda version." 104 | echo "Usage: cuda.install.cuda CUDA_VERSION" 105 | echo "Example: cuda.install.cuda 10.0" 106 | echo "Cuda version available: 9.0, 9.1, 9.2, 10.0, 9.2." 107 | return 1 108 | fi 109 | 110 | if ! is_cuda_home_set; then 111 | return 1 112 | fi 113 | 114 | if ! is_cuda_home_symbolic_link; then 115 | return 1 116 | fi 117 | 118 | CUDA_PATH="$CUDA_HOME-$CUDA_VERSION" 119 | if [ -d $CUDA_PATH ]; then 120 | echo "$CUDA_PATH exists. Please remove the previous Cuda folder first." 
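    # Each toolkit lives side by side (e.g. /usr/local/cuda-9.2, /usr/local/cuda-10.0)
    # and $CUDA_HOME is only a symbolic link to the active one, so the old directory
    # has to be removed or renamed by hand before re-running this function.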
121 | return 1 122 | fi 123 | 124 | # Setup Cuda URL 125 | if [ "$CUDA_VERSION" = "9.0" ]; then 126 | CUDA_URL="https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda_9.0.176_384.81_linux-run" 127 | elif [ "$CUDA_VERSION" = "9.1" ]; then 128 | CUDA_URL="https://developer.nvidia.com/compute/cuda/9.1/Prod/local_installers/cuda_9.1.85_387.26_linux" 129 | elif [ "$CUDA_VERSION" = "9.2" ]; then 130 | CUDA_URL="https://developer.nvidia.com/compute/cuda/9.2/Prod/local_installers/cuda_9.2.88_396.26_linux" 131 | elif [ "$CUDA_VERSION" = "10.0" ]; then 132 | CUDA_URL="https://developer.nvidia.com/compute/cuda/10.0/Prod/local_installers/cuda_10.0.130_410.48_linux" 133 | elif [ "$CUDA_VERSION" = "10.1" ]; then 134 | CUDA_URL="https://developer.nvidia.com/compute/cuda/10.1/Prod/local_installers/cuda_10.1.105_418.39_linux.run" 135 | else 136 | echo "Error: You need to set CUDA_VERSION to 9.0, 9.1, 9.2, 10.0 or 10.1." 137 | return 1 138 | fi 139 | 140 | CUDA_INSTALLER_PATH="/tmp/cuda.run" 141 | 142 | echo "Download Cuda $CUDA_VERSION." 143 | wget "$CUDA_URL" -O "$CUDA_INSTALLER_PATH" 144 | 145 | echo "Install Cuda $CUDA_VERSION." 146 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 147 | if [ ! -w "$PARENT_BASE_DIR" ]; then 148 | sudo bash "$CUDA_INSTALLER_PATH" --silent --toolkit --override --toolkitpath="$CUDA_PATH" 149 | else 150 | bash "$CUDA_INSTALLER_PATH" --silent --toolkit --override --toolkitpath="$CUDA_PATH" 151 | fi 152 | rm -f "$CUDA_INSTALLER_PATH" 153 | 154 | # Set the symbolic link. 155 | cuda.switch $CUDA_VERSION 156 | 157 | echo "Cuda $CUDA_VERSION is installed at $CUDA_PATH." 158 | 159 | return 0 160 | } 161 | 162 | cuda.install.cudnn() { 163 | # Install cuDNN in $CUDA_HOME 164 | 165 | if ! is_cuda_home_set; then 166 | return 1 167 | fi 168 | 169 | if ! is_cuda_home_symbolic_link; then 170 | return 1 171 | fi 172 | 173 | CUDA_VERSION="$(guess_cuda_version)" 174 | if [ -z "$CUDA_VERSION" ]; then 175 | echo "Can't guess the Cuda version from $CUDA_HOME." 176 | return 1 177 | fi 178 | 179 | CUDNN_VERSION="$1" 180 | if [ -z "$CUDNN_VERSION" ]; then 181 | echo "Please specify a cuDNN version." 182 | echo "Usage: cuda.install.cudnn CUDNN_VERSION" 183 | echo "Example: cuda.install.cudnn 7.5" 184 | echo "cuDNN version available: 7.0, 7.1, 7.4, 7.5." 185 | return 1 186 | fi 187 | 188 | # cuDNN 7.0 189 | if [ "$CUDNN_VERSION" = "7.0" ]; then 190 | 191 | if [ "$CUDA_VERSION" = "9.0" ]; then 192 | CUDNN_VERSION_DETAILED="7.0.5.15" 193 | elif [ "$CUDA_VERSION" = "9.1" ]; then 194 | CUDNN_VERSION_DETAILED="7.0.5.15" 195 | elif [ -n "$CUDNN_VERSION" ]; then 196 | echo "Error: cuDNN $CUDNN_VERSION is not compatible with Cuda $CUDA_VERSION." 197 | return 1 198 | fi 199 | 200 | # cuDNN 7.1 201 | elif [ "$CUDNN_VERSION" = "7.1" ]; then 202 | 203 | if [ "$CUDA_VERSION" = "9.0" ]; then 204 | CUDNN_VERSION_DETAILED="7.1.4.18" 205 | elif [ "$CUDA_VERSION" = "9.2" ]; then 206 | CUDNN_VERSION_DETAILED="7.1.4.18" 207 | elif [ -n "$CUDNN_VERSION" ]; then 208 | echo "Error: cuDNN $CUDNN_VERSION is not compatible with Cuda $CUDA_VERSION." 
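    # (This script only knows download URLs for the cuDNN 7.1 packages built
    # against Cuda 9.0 and 9.2; other combinations are rejected here.)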
209 | return 1 210 | fi 211 | 212 | # cuDNN 7.4 213 | elif [ "$CUDNN_VERSION" = "7.4" ]; then 214 | 215 | if [ "$CUDA_VERSION" = "9.0" ]; then 216 | CUDNN_VERSION_DETAILED="7.4.2.24" 217 | elif [ "$CUDA_VERSION" = "9.2" ]; then 218 | CUDNN_VERSION_DETAILED="7.4.2.24" 219 | elif [ "$CUDA_VERSION" = "10.0" ]; then 220 | CUDNN_VERSION_DETAILED="7.4.2.24" 221 | elif [ -n "$CUDNN_VERSION" ]; then 222 | echo "Error: cuDNN $CUDNN_VERSION is not compatible with Cuda $CUDA_VERSION." 223 | return 1 224 | fi 225 | 226 | # cuDNN 7.5 227 | elif [ "$CUDNN_VERSION" = "7.5" ]; then 228 | 229 | if [ "$CUDA_VERSION" = "9.0" ]; then 230 | CUDNN_VERSION_DETAILED="7.5.0.56" 231 | elif [ "$CUDA_VERSION" = "9.2" ]; then 232 | CUDNN_VERSION_DETAILED="7.5.0.56" 233 | elif [ "$CUDA_VERSION" = "10.0" ]; then 234 | CUDNN_VERSION_DETAILED="7.5.0.56" 235 | elif [ "$CUDA_VERSION" = "10.1" ]; then 236 | CUDNN_VERSION_DETAILED="7.5.0.56" 237 | elif [ -n "$CUDNN_VERSION" ]; then 238 | echo "Error: cuDNN $CUDNN_VERSION is not compatible with Cuda $CUDA_VERSION." 239 | return 1 240 | fi 241 | 242 | elif [ -n "$CUDNN_VERSION" ]; then 243 | echo "Error: You need to set CUDNN_VERSION to 7.0, 7.1, 7.4 or 7.5." 244 | return 1 245 | fi 246 | 247 | # Setup URLs 248 | CUDNN_URL="https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/libcudnn7_${CUDNN_VERSION_DETAILED}-1+cuda${CUDA_VERSION}_amd64.deb" 249 | CUDNN_URL_DEV="https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/libcudnn7-dev_${CUDNN_VERSION_DETAILED}-1+cuda${CUDA_VERSION}_amd64.deb" 250 | 251 | # Setup temporary paths 252 | CUDNN_TMP_PATH="/tmp/cudnn.deb" 253 | CUDNN_DEV_TMP_PATH="/tmp/cudnn-dev.deb" 254 | 255 | CUDNN_TMP_DIR_PATH="/tmp/cudnn" 256 | CUDNN_DEV_TMP_DIR_PATH="/tmp/cudnn-dev" 257 | 258 | echo "Download binaries." 259 | wget "$CUDNN_URL" -O "$CUDNN_TMP_PATH" 260 | wget "$CUDNN_URL_DEV" -O "$CUDNN_DEV_TMP_PATH" 261 | 262 | mkdir -p "$CUDNN_TMP_DIR_PATH" 263 | mkdir -p "$CUDNN_DEV_TMP_DIR_PATH" 264 | 265 | echo "Extract binaries." 266 | cd "$CUDNN_TMP_DIR_PATH" 267 | ar x "$CUDNN_TMP_PATH" 268 | tar -xJf data.tar.xz 269 | cd "$CUDNN_DEV_TMP_DIR_PATH" 270 | ar x "$CUDNN_DEV_TMP_PATH" 271 | tar -xJf data.tar.xz 272 | 273 | echo "Install cuDNN files." 274 | 275 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 276 | if [ ! 
-w "$PARENT_BASE_DIR" ]; then 277 | sudo mv $CUDNN_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libcudnn* "$CUDA_HOME/lib64/" 278 | sudo mv "$CUDNN_DEV_TMP_DIR_PATH/usr/include/x86_64-linux-gnu/cudnn_v7.h" "$CUDA_HOME/include/" 279 | sudo mv "$CUDNN_DEV_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libcudnn_static_v7.a" "$CUDA_HOME/lib64/" 280 | 281 | sudo rm -f "$CUDA_HOME/include/cudnn.h" 282 | sudo rm -f "$CUDA_HOME/lib64/libcudnn_static.a" 283 | 284 | sudo ln -s "$CUDA_HOME/include/cudnn_v7.h" "$CUDA_HOME/include/cudnn.h" 285 | sudo ln -s "$CUDA_HOME/lib64/libcudnn_static_v7.a" "$CUDA_HOME/lib64/libcudnn_static.a" 286 | else 287 | mv $CUDNN_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libcudnn* "$CUDA_HOME/lib64/" 288 | mv "$CUDNN_DEV_TMP_DIR_PATH/usr/include/x86_64-linux-gnu/cudnn_v7.h" "$CUDA_HOME/include/" 289 | mv "$CUDNN_DEV_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libcudnn_static_v7.a" "$CUDA_HOME/lib64/" 290 | 291 | rm -f "$CUDA_HOME/include/cudnn.h" 292 | rm -f "$CUDA_HOME/lib64/libcudnn_static.a" 293 | 294 | ln -s "$CUDA_HOME/include/cudnn_v7.h" "$CUDA_HOME/include/cudnn.h" 295 | ln -s "$CUDA_HOME/lib64/libcudnn_static_v7.a" "$CUDA_HOME/lib64/libcudnn_static.a" 296 | fi 297 | 298 | echo "Cleanup files." 299 | rm -fr "$CUDNN_TMP_DIR_PATH" 300 | rm -fr "$CUDNN_DEV_TMP_DIR_PATH" 301 | rm -f "$CUDNN_TMP_PATH" 302 | rm -f "$CUDNN_DEV_TMP_PATH" 303 | 304 | echo "cuDNN $CUDNN_VERSION is installed at $CUDA_HOME." 305 | } 306 | 307 | cuda.install.nccl() { 308 | # Install NCCL in $CUDA_HOME 309 | 310 | if ! is_cuda_home_set; then 311 | return 1 312 | fi 313 | 314 | if ! is_cuda_home_symbolic_link; then 315 | return 1 316 | fi 317 | 318 | CUDA_VERSION="$(guess_cuda_version)" 319 | if [ -z "$CUDA_VERSION" ]; then 320 | echo "Can't guess the Cuda version from $CUDA_HOME." 321 | return 1 322 | fi 323 | 324 | NCCL_VERSION="$1" 325 | if [ -z "$NCCL_VERSION" ]; then 326 | # echo "Please specify a NCCL version." 327 | # echo "Usage: cuda.install.nccl NCCL_VERSION" 328 | # echo "Example: cuda.install.nccl 2.4" 329 | # echo "NCCL version available: 2.1, 2.2, 2.3 and 2.4" 330 | # return 1 331 | # Default NCCL version 332 | NCCL_VERSION="2.4" 333 | fi 334 | 335 | # NCCL 2.1 336 | if [ "$NCCL_VERSION" = "2.1" ]; then 337 | 338 | 339 | if [ "$CUDA_VERSION" = "9.0" ]; then 340 | NCCL_VERSION_DETAILED="2.1.15-1" 341 | elif [ "$CUDA_VERSION" = "9.1" ]; then 342 | NCCL_VERSION_DETAILED="2.1.15-1" 343 | elif [ -n "$NCCL_VERSION" ]; then 344 | echo "Error: NCCL $NCCL_VERSION is not compatible with Cuda $CUDA_VERSION." 345 | return 1 346 | fi 347 | 348 | # NCCL 2.3 349 | elif [ "$NCCL_VERSION" = "2.2" ]; then 350 | 351 | # NCCL 2.2 352 | if [ "$CUDA_VERSION" = "9.0" ]; then 353 | NCCL_VERSION_DETAILED="2.2.13-1" 354 | elif [ "$CUDA_VERSION" = "9.2" ]; then 355 | NCCL_VERSION_DETAILED="2.2.13-1" 356 | elif [ -n "$NCCL_VERSION" ]; then 357 | echo "Error: NCCL $NCCL_VERSION is not compatible with Cuda $CUDA_VERSION." 358 | return 1 359 | fi 360 | 361 | # NCCL 2.3 362 | elif [ "$NCCL_VERSION" = "2.3" ]; then 363 | 364 | if [ "$CUDA_VERSION" = "9.0" ]; then 365 | NCCL_VERSION_DETAILED="2.3.7-1" 366 | elif [ "$CUDA_VERSION" = "9.2" ]; then 367 | NCCL_VERSION_DETAILED="2.3.7-1" 368 | elif [ "$CUDA_VERSION" = "10.0" ]; then 369 | NCCL_VERSION_DETAILED="2.3.7-1" 370 | elif [ -n "$NCCL_VERSION" ]; then 371 | echo "Error: NCCL $NCCL_VERSION is not compatible with Cuda $CUDA_VERSION." 
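    # (NCCL 2.3 packages are only handled for Cuda 9.0, 9.2 and 10.0 in this script.)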
372 | return 1 373 | fi 374 | 375 | # NCCL 2.4 376 | elif [ "$NCCL_VERSION" = "2.4" ]; then 377 | 378 | if [ "$CUDA_VERSION" = "9.0" ]; then 379 | NCCL_VERSION_DETAILED="2.4.2-1" 380 | elif [ "$CUDA_VERSION" = "9.2" ]; then 381 | NCCL_VERSION_DETAILED="2.4.2-1" 382 | elif [ "$CUDA_VERSION" = "10.0" ]; then 383 | NCCL_VERSION_DETAILED="2.4.2-1" 384 | elif [ "$CUDA_VERSION" = "10.1" ]; then 385 | NCCL_VERSION_DETAILED="2.4.2-1" 386 | elif [ -n "$NCCL_VERSION" ]; then 387 | echo "Error: NCCL $NCCL_VERSION is not compatible with Cuda $CUDA_VERSION." 388 | return 1 389 | fi 390 | 391 | elif [ -n "$NCCL_VERSION" ]; then 392 | echo "Error: You need to set NCCL_VERSION to 2.1, 2.2, 2.3 and 2.4." 393 | return 1 394 | fi 395 | 396 | # Setup URLs 397 | NCCL_URL="https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/libnccl2_${NCCL_VERSION_DETAILED}+cuda${CUDA_VERSION}_amd64.deb" 398 | NCCL_URL_DEV="https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/libnccl-dev_${NCCL_VERSION_DETAILED}+cuda${CUDA_VERSION}_amd64.deb" 399 | 400 | # Setup temporary paths 401 | NCCL_TMP_PATH="/tmp/nccl.deb" 402 | NCCL_DEV_TMP_PATH="/tmp/nccl-dev.deb" 403 | 404 | NCCL_TMP_DIR_PATH="/tmp/nccl" 405 | NCCL_DEV_TMP_DIR_PATH="/tmp/nccl-dev" 406 | 407 | echo "Download binaries." 408 | wget "$NCCL_URL" -O "$NCCL_TMP_PATH" 409 | wget "$NCCL_URL_DEV" -O "$NCCL_DEV_TMP_PATH" 410 | 411 | mkdir -p "$NCCL_TMP_DIR_PATH" 412 | mkdir -p "$NCCL_DEV_TMP_DIR_PATH" 413 | 414 | echo "Extract binaries." 415 | cd "$NCCL_TMP_DIR_PATH" 416 | ar x "$NCCL_TMP_PATH" 417 | tar -xJf data.tar.xz 418 | cd "$NCCL_DEV_TMP_DIR_PATH" 419 | ar x "$NCCL_DEV_TMP_PATH" 420 | tar -xJf data.tar.xz 421 | 422 | echo "Install NCCL files." 423 | 424 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 425 | if [ ! -w "$PARENT_BASE_DIR" ]; then 426 | sudo mv $NCCL_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libnccl* "$CUDA_HOME/lib64/" 427 | sudo rm -f "$CUDA_HOME/include/nccl.h" 428 | sudo mv "$NCCL_DEV_TMP_DIR_PATH/usr/include/nccl.h" "$CUDA_HOME/include/nccl.h" 429 | sudo rm -f "$CUDA_HOME/lib64/libnccl_static.a" 430 | sudo mv "$NCCL_DEV_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libnccl_static.a" "$CUDA_HOME/lib64/libnccl_static.a" 431 | else 432 | mv $NCCL_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libnccl* "$CUDA_HOME/lib64/" 433 | rm -f "$CUDA_HOME/include/nccl.h" 434 | mv "$NCCL_DEV_TMP_DIR_PATH/usr/include/nccl.h" "$CUDA_HOME/include/nccl.h" 435 | rm -f "$CUDA_HOME/lib64/libnccl_static.a" 436 | mv "$NCCL_DEV_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libnccl_static.a" "$CUDA_HOME/lib64/libnccl_static.a" 437 | fi 438 | 439 | echo "Cleanup files." 440 | rm -fr "$NCCL_TMP_DIR_PATH" 441 | rm -fr "$NCCL_DEV_TMP_DIR_PATH" 442 | rm -f "$NCCL_TMP_PATH" 443 | rm -f "$NCCL_DEV_TMP_PATH" 444 | 445 | echo "NCCL $NCCL_VERSION is installed at $CUDA_HOME." 446 | } 447 | 448 | cuda.gcc.install() { 449 | 450 | if [ -z "$1" ]; then 451 | echo "Please specify a GCC version." 
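    # Usage (illustrative): cuda.gcc.install 7
    # Installs gcc-7/g++-7 via apt and makes them the default through update-alternatives.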
452 | return
453 | fi
454 | export GCC_VERSION="$1"
455 |
456 | sudo apt install --yes gcc-$GCC_VERSION g++-$GCC_VERSION
457 |
458 | sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-$GCC_VERSION 10
459 | sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-$GCC_VERSION 10
460 |
461 | sudo update-alternatives --set gcc "/usr/bin/gcc-$GCC_VERSION"
462 | sudo update-alternatives --set g++ "/usr/bin/g++-$GCC_VERSION"
463 | }
464 |
--------------------------------------------------------------------------------
/tensorflow/ubuntu-16.04/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 |   tf:
4 |     build: .
5 |     stdin_open: true
6 |     tty: true
7 |     volumes:
8 |       - ../../wheels:/wheels
9 |     environment:
10 |       - TF_VERSION_GIT_TAG=${TF_VERSION_GIT_TAG:?TF_VERSION_GIT_TAG}
11 |       - PYTHON_VERSION=${PYTHON_VERSION:?PYTHON_VERSION}
12 |       - BAZEL_VERSION=${BAZEL_VERSION:?BAZEL_VERSION}
13 |       - USE_GPU=${USE_GPU-0}
14 |       - CUDA_VERSION=${CUDA_VERSION-10.0}
15 |       - CUDNN_VERSION=${CUDNN_VERSION-7.5}
16 |       - NCCL_VERSION=${NCCL_VERSION-2.4}
17 |
--------------------------------------------------------------------------------
/tensorflow/ubuntu-18.10/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.10
2 |
3 | RUN apt update && apt install -y \
4 |     build-essential \
5 |     curl \
6 |     git \
7 |     wget \
8 |     libjpeg-dev \
9 |     openjdk-8-jdk \
10 |     gcc-7 \
11 |     g++-7 \
12 |     && rm -rf /var/lib/apt/lists/*
13 |
14 | # Install Anaconda
15 | WORKDIR /
16 | RUN wget "https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh" -O "miniconda.sh" && \
17 |     bash "miniconda.sh" -b -p "/conda" && \
18 |     rm miniconda.sh && \
19 |     echo PATH='/conda/bin:$PATH' >> /root/.bashrc && \
20 |     /conda/bin/conda config --add channels conda-forge && \
21 |     /conda/bin/conda update --yes -n base conda && \
22 |     /conda/bin/conda update --all --yes
23 |
24 | COPY build.sh /build.sh
25 | COPY cuda.sh /cuda.sh
26 |
27 | CMD bash build.sh
28 |
--------------------------------------------------------------------------------
/tensorflow/ubuntu-18.10/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -e
3 |
4 | export PATH="/conda/bin:/usr/bin:$PATH"
5 |
6 | if [ "$USE_GPU" -eq "1" ]; then
7 |     export CUDA_HOME="/usr/local/cuda"
8 |     alias sudo=""
9 |     source cuda.sh
10 |     cuda.install $CUDA_VERSION $CUDNN_VERSION $NCCL_VERSION
11 |     cd /
12 | fi
13 |
14 | # Set correct GCC version
15 | GCC_VERSION="7"
16 | update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-$GCC_VERSION 10
17 | update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-$GCC_VERSION 10
18 | update-alternatives --set gcc "/usr/bin/gcc-$GCC_VERSION"
19 | update-alternatives --set g++ "/usr/bin/g++-$GCC_VERSION"
20 | gcc --version
21 |
22 | # Install an appropriate Python environment
23 | conda config --add channels conda-forge
24 | conda create --yes -n tensorflow python==$PYTHON_VERSION
25 | source activate tensorflow
26 | conda install --yes numpy wheel bazel==$BAZEL_VERSION
27 | pip install keras-applications keras-preprocessing
28 |
29 | # Compile TensorFlow
30 |
31 | # Here you can change the TensorFlow version you want to build.
32 | # You can also tweak the optimizations and various parameters for the build compilation.
33 | # See https://www.tensorflow.org/install/install_sources for more details.
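#
# As in the Ubuntu 16.04 variant: the tag comes from the TF_VERSION_GIT_TAG environment
# variable (passed in through docker-compose.yml) and the optimization flags from
# CC_OPT_FLAGS below. An illustrative, non-default choice would be
# TF_VERSION_GIT_TAG=v2.0.0-alpha0 with CC_OPT_FLAGS="-march=skylake" instead of -march=native.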
34 | 35 | cd / 36 | rm -fr tensorflow/ 37 | git clone --depth 1 --branch $TF_VERSION_GIT_TAG "https://github.com/tensorflow/tensorflow.git" 38 | 39 | TF_ROOT=/tensorflow 40 | cd $TF_ROOT 41 | 42 | # Python path options 43 | export PYTHON_BIN_PATH=$(which python) 44 | export PYTHON_LIB_PATH="$($PYTHON_BIN_PATH -c 'import site; print(site.getsitepackages()[0])')" 45 | export PYTHONPATH=${TF_ROOT}/lib 46 | export PYTHON_ARG=${TF_ROOT}/lib 47 | 48 | # Compilation parameters 49 | export TF_NEED_CUDA=0 50 | export TF_NEED_GCP=1 51 | export TF_CUDA_COMPUTE_CAPABILITIES=5.2,3.5 52 | export TF_NEED_HDFS=1 53 | export TF_NEED_OPENCL=0 54 | export TF_NEED_JEMALLOC=1 # Need to be disabled on CentOS 6.6 55 | export TF_ENABLE_XLA=0 56 | export TF_NEED_VERBS=0 57 | export TF_CUDA_CLANG=0 58 | export TF_DOWNLOAD_CLANG=0 59 | export TF_NEED_MKL=0 60 | export TF_DOWNLOAD_MKL=0 61 | export TF_NEED_MPI=0 62 | export TF_NEED_S3=1 63 | export TF_NEED_KAFKA=1 64 | export TF_NEED_GDR=0 65 | export TF_NEED_OPENCL_SYCL=0 66 | export TF_SET_ANDROID_WORKSPACE=0 67 | export TF_NEED_AWS=0 68 | export TF_NEED_IGNITE=0 69 | export TF_NEED_ROCM=0 70 | 71 | # Compiler options 72 | export GCC_HOST_COMPILER_PATH=$(which gcc) 73 | 74 | # Here you can edit this variable to set any optimizations you want. 75 | export CC_OPT_FLAGS="-march=native" 76 | 77 | if [ "$USE_GPU" -eq "1" ]; then 78 | # Cuda parameters 79 | export CUDA_TOOLKIT_PATH=$CUDA_HOME 80 | export CUDNN_INSTALL_PATH=$CUDA_HOME 81 | export TF_CUDA_VERSION="$CUDA_VERSION" 82 | export TF_CUDNN_VERSION="$CUDNN_VERSION" 83 | export TF_NEED_CUDA=1 84 | export TF_NEED_TENSORRT=0 85 | export TF_NCCL_VERSION=$NCCL_VERSION 86 | export NCCL_INSTALL_PATH=$CUDA_HOME 87 | export NCCL_INSTALL_PATH=$CUDA_HOME 88 | 89 | # Those two lines are important for the linking step. 90 | export LD_LIBRARY_PATH="$CUDA_TOOLKIT_PATH/lib64:${LD_LIBRARY_PATH}" 91 | ldconfig 92 | fi 93 | 94 | # Compilation 95 | ./configure 96 | 97 | if [ "$USE_GPU" -eq "1" ]; then 98 | 99 | bazel build --config=opt \ 100 | --config=cuda \ 101 | --linkopt="-lrt" \ 102 | --linkopt="-lm" \ 103 | --host_linkopt="-lrt" \ 104 | --host_linkopt="-lm" \ 105 | --action_env="LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" \ 106 | //tensorflow/tools/pip_package:build_pip_package 107 | 108 | PACKAGE_NAME=tensorflow-gpu 109 | SUBFOLDER_NAME="${TF_VERSION_GIT_TAG}-py${PYTHON_VERSION}-cuda${TF_CUDA_VERSION}-cudnn${TF_CUDNN_VERSION}" 110 | 111 | else 112 | 113 | bazel build --config=opt \ 114 | --linkopt="-lrt" \ 115 | --linkopt="-lm" \ 116 | --host_linkopt="-lrt" \ 117 | --host_linkopt="-lm" \ 118 | --action_env="LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" \ 119 | //tensorflow/tools/pip_package:build_pip_package 120 | 121 | PACKAGE_NAME=tensorflow 122 | SUBFOLDER_NAME="${TF_VERSION_GIT_TAG}-py${PYTHON_VERSION}" 123 | fi 124 | 125 | mkdir -p "/wheels/$SUBFOLDER_NAME" 126 | 127 | bazel-bin/tensorflow/tools/pip_package/build_pip_package "/wheels/$SUBFOLDER_NAME" --project_name "$PACKAGE_NAME" 128 | 129 | # Use the following for TF <= 1.8 130 | # bazel-bin/tensorflow/tools/pip_package/build_pip_package "/wheels/$SUBFOLDER_NAME" 131 | 132 | # Fix wheel folder permissions 133 | chmod -R 777 /wheels/ 134 | -------------------------------------------------------------------------------- /tensorflow/ubuntu-18.10/cuda.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Cuda and friends installation done right. 
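#
# Sketch of how build.sh above consumes this file (it assumes CUDA_HOME is the
# /usr/local/cuda symbolic link):
#   export CUDA_HOME="/usr/local/cuda"
#   source cuda.sh
#   cuda.install "$CUDA_VERSION" "$CUDNN_VERSION" "$NCCL_VERSION"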
4 | # Switch default Cuda version using symbolic link: cuda.switch 9.2 5 | # Install Cuda: cuda.install.cuda 10.0 6 | # Install cuDNN to CUDA_HOME: cuda.install.cudnn 7.5 7 | # Install NCCL to CUDA_HOME: cuda.install.nccl 2.4 8 | # Install Cuda, cuDNN and NCCL: cuda.install 10.0 7.5 2.4 9 | 10 | # Author: Hadrien Mary 11 | # License: MIT License 12 | # Date: 11/03/2019 13 | 14 | is_cuda_home_set() { 15 | if [ -z "$CUDA_HOME" ]; then 16 | echo "CUDA_HOME is not set. Please set it:" 17 | echo 'export CUDA_HOME="/usr/local/cuda/"' 18 | return 1 19 | fi 20 | return 0 21 | } 22 | 23 | is_cuda_home_symbolic_link() { 24 | if [[ -e "${CUDA_HOME}" && -L "${CUDA_HOME}" ]]; then 25 | return 0 26 | elif [[ ! -d "${CUDA_HOME}" && ! -f "${CUDA_HOME}" ]]; then 27 | return 0 28 | else 29 | echo "CUDA_HOME is not a symbolic link." 30 | echo "Please make it a symbolic link." 31 | return 1 32 | fi 33 | } 34 | 35 | guess_cuda_version() { 36 | if ! is_cuda_home_set; then 37 | return 1 38 | fi 39 | 40 | if ! is_cuda_home_symbolic_link; then 41 | return 1 42 | fi 43 | 44 | POSSIBLE_CUDA_VERSION=$(cat "$CUDA_HOME/version.txt" | cut -d' ' -f 3 | cut -d'.' -f 1-2) 45 | echo $POSSIBLE_CUDA_VERSION 46 | } 47 | 48 | cuda.see() { 49 | if ! is_cuda_home_set; then 50 | return 1 51 | fi 52 | 53 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 54 | ls -l $PARENT_BASE_DIR 55 | return 0 56 | } 57 | 58 | cuda.switch() { 59 | if ! is_cuda_home_set; then 60 | return 1 61 | fi 62 | 63 | if ! is_cuda_home_symbolic_link; then 64 | return 1 65 | fi 66 | 67 | if [ -z "$1" ]; then 68 | echo "Please specify a Cuda version." 69 | echo "Usage: cuda.switch CUDA_VERSION" 70 | echo "Cuda version available: 9.0, 9.1, 9.2, 10.0, 10.1" 71 | return 1 72 | fi 73 | 74 | NEW_CUDA_VERSION="$1" 75 | NEW_CUDA_HOME="$CUDA_HOME-$NEW_CUDA_VERSION" 76 | 77 | if [ ! -d $NEW_CUDA_HOME ]; then 78 | echo "Cuda $NEW_CUDA_VERSION doesn't exist at $NEW_CUDA_HOME." 79 | return 1 80 | fi 81 | 82 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 83 | if [ ! -w "$PARENT_BASE_DIR" ]; then 84 | sudo rm -f $CUDA_HOME 85 | sudo ln -s $NEW_CUDA_HOME $CUDA_HOME 86 | else 87 | rm -f $CUDA_HOME 88 | ln -s $NEW_CUDA_HOME $CUDA_HOME 89 | fi 90 | echo "Default Cuda version is now $NEW_CUDA_VERSION at $NEW_CUDA_HOME" 91 | } 92 | 93 | cuda.install() { 94 | cuda.install.cuda $1 95 | cuda.install.cudnn $2 96 | cuda.install.nccl $3 97 | } 98 | 99 | cuda.install.cuda() { 100 | 101 | CUDA_VERSION="$1" 102 | if [ -z "$CUDA_VERSION" ]; then 103 | echo "Please specify a Cuda version." 104 | echo "Usage: cuda.install.cuda CUDA_VERSION" 105 | echo "Example: cuda.install.cuda 10.0" 106 | echo "Cuda version available: 9.0, 9.1, 9.2, 10.0, 9.2." 107 | return 1 108 | fi 109 | 110 | if ! is_cuda_home_set; then 111 | return 1 112 | fi 113 | 114 | if ! is_cuda_home_symbolic_link; then 115 | return 1 116 | fi 117 | 118 | CUDA_PATH="$CUDA_HOME-$CUDA_VERSION" 119 | if [ -d $CUDA_PATH ]; then 120 | echo "$CUDA_PATH exists. Please remove the previous Cuda folder first." 
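    # As in the Ubuntu 16.04 variant: toolkits are installed side by side
    # (/usr/local/cuda-<version>) with $CUDA_HOME as a symbolic link to the active one,
    # so remove or rename the old directory manually before re-running this function.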
121 | return 1 122 | fi 123 | 124 | # Setup Cuda URL 125 | if [ "$CUDA_VERSION" = "9.0" ]; then 126 | CUDA_URL="https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda_9.0.176_384.81_linux-run" 127 | elif [ "$CUDA_VERSION" = "9.1" ]; then 128 | CUDA_URL="https://developer.nvidia.com/compute/cuda/9.1/Prod/local_installers/cuda_9.1.85_387.26_linux" 129 | elif [ "$CUDA_VERSION" = "9.2" ]; then 130 | CUDA_URL="https://developer.nvidia.com/compute/cuda/9.2/Prod/local_installers/cuda_9.2.88_396.26_linux" 131 | elif [ "$CUDA_VERSION" = "10.0" ]; then 132 | CUDA_URL="https://developer.nvidia.com/compute/cuda/10.0/Prod/local_installers/cuda_10.0.130_410.48_linux" 133 | elif [ "$CUDA_VERSION" = "10.1" ]; then 134 | CUDA_URL="https://developer.nvidia.com/compute/cuda/10.1/Prod/local_installers/cuda_10.1.105_418.39_linux.run" 135 | else 136 | echo "Error: You need to set CUDA_VERSION to 9.0, 9.1, 9.2, 10.0 or 10.1." 137 | return 1 138 | fi 139 | 140 | CUDA_INSTALLER_PATH="/tmp/cuda.run" 141 | 142 | echo "Download Cuda $CUDA_VERSION." 143 | wget "$CUDA_URL" -O "$CUDA_INSTALLER_PATH" 144 | 145 | echo "Install Cuda $CUDA_VERSION." 146 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 147 | if [ ! -w "$PARENT_BASE_DIR" ]; then 148 | sudo bash "$CUDA_INSTALLER_PATH" --silent --toolkit --override --toolkitpath="$CUDA_PATH" 149 | else 150 | bash "$CUDA_INSTALLER_PATH" --silent --toolkit --override --toolkitpath="$CUDA_PATH" 151 | fi 152 | rm -f "$CUDA_INSTALLER_PATH" 153 | 154 | # Set the symbolic link. 155 | cuda.switch $CUDA_VERSION 156 | 157 | echo "Cuda $CUDA_VERSION is installed at $CUDA_PATH." 158 | 159 | return 0 160 | } 161 | 162 | cuda.install.cudnn() { 163 | # Install cuDNN in $CUDA_HOME 164 | 165 | if ! is_cuda_home_set; then 166 | return 1 167 | fi 168 | 169 | if ! is_cuda_home_symbolic_link; then 170 | return 1 171 | fi 172 | 173 | CUDA_VERSION="$(guess_cuda_version)" 174 | if [ -z "$CUDA_VERSION" ]; then 175 | echo "Can't guess the Cuda version from $CUDA_HOME." 176 | return 1 177 | fi 178 | 179 | CUDNN_VERSION="$1" 180 | if [ -z "$CUDNN_VERSION" ]; then 181 | echo "Please specify a cuDNN version." 182 | echo "Usage: cuda.install.cudnn CUDNN_VERSION" 183 | echo "Example: cuda.install.cudnn 7.5" 184 | echo "cuDNN version available: 7.0, 7.1, 7.4, 7.5." 185 | return 1 186 | fi 187 | 188 | # cuDNN 7.0 189 | if [ "$CUDNN_VERSION" = "7.0" ]; then 190 | 191 | if [ "$CUDA_VERSION" = "9.0" ]; then 192 | CUDNN_VERSION_DETAILED="7.0.5.15" 193 | elif [ "$CUDA_VERSION" = "9.1" ]; then 194 | CUDNN_VERSION_DETAILED="7.0.5.15" 195 | elif [ -n "$CUDNN_VERSION" ]; then 196 | echo "Error: cuDNN $CUDNN_VERSION is not compatible with Cuda $CUDA_VERSION." 197 | return 1 198 | fi 199 | 200 | # cuDNN 7.1 201 | elif [ "$CUDNN_VERSION" = "7.1" ]; then 202 | 203 | if [ "$CUDA_VERSION" = "9.0" ]; then 204 | CUDNN_VERSION_DETAILED="7.1.4.18" 205 | elif [ "$CUDA_VERSION" = "9.2" ]; then 206 | CUDNN_VERSION_DETAILED="7.1.4.18" 207 | elif [ -n "$CUDNN_VERSION" ]; then 208 | echo "Error: cuDNN $CUDNN_VERSION is not compatible with Cuda $CUDA_VERSION." 
209 | return 1 210 | fi 211 | 212 | # cuDNN 7.4 213 | elif [ "$CUDNN_VERSION" = "7.4" ]; then 214 | 215 | if [ "$CUDA_VERSION" = "9.0" ]; then 216 | CUDNN_VERSION_DETAILED="7.4.2.24" 217 | elif [ "$CUDA_VERSION" = "9.2" ]; then 218 | CUDNN_VERSION_DETAILED="7.4.2.24" 219 | elif [ "$CUDA_VERSION" = "10.0" ]; then 220 | CUDNN_VERSION_DETAILED="7.4.2.24" 221 | elif [ -n "$CUDNN_VERSION" ]; then 222 | echo "Error: cuDNN $CUDNN_VERSION is not compatible with Cuda $CUDA_VERSION." 223 | return 1 224 | fi 225 | 226 | # cuDNN 7.5 227 | elif [ "$CUDNN_VERSION" = "7.5" ]; then 228 | 229 | if [ "$CUDA_VERSION" = "9.0" ]; then 230 | CUDNN_VERSION_DETAILED="7.5.0.56" 231 | elif [ "$CUDA_VERSION" = "9.2" ]; then 232 | CUDNN_VERSION_DETAILED="7.5.0.56" 233 | elif [ "$CUDA_VERSION" = "10.0" ]; then 234 | CUDNN_VERSION_DETAILED="7.5.0.56" 235 | elif [ "$CUDA_VERSION" = "10.1" ]; then 236 | CUDNN_VERSION_DETAILED="7.5.0.56" 237 | elif [ -n "$CUDNN_VERSION" ]; then 238 | echo "Error: cuDNN $CUDNN_VERSION is not compatible with Cuda $CUDA_VERSION." 239 | return 1 240 | fi 241 | 242 | elif [ -n "$CUDNN_VERSION" ]; then 243 | echo "Error: You need to set CUDNN_VERSION to 7.0, 7.1, 7.4 or 7.5." 244 | return 1 245 | fi 246 | 247 | # Setup URLs 248 | CUDNN_URL="https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/libcudnn7_${CUDNN_VERSION_DETAILED}-1+cuda${CUDA_VERSION}_amd64.deb" 249 | CUDNN_URL_DEV="https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/libcudnn7-dev_${CUDNN_VERSION_DETAILED}-1+cuda${CUDA_VERSION}_amd64.deb" 250 | 251 | # Setup temporary paths 252 | CUDNN_TMP_PATH="/tmp/cudnn.deb" 253 | CUDNN_DEV_TMP_PATH="/tmp/cudnn-dev.deb" 254 | 255 | CUDNN_TMP_DIR_PATH="/tmp/cudnn" 256 | CUDNN_DEV_TMP_DIR_PATH="/tmp/cudnn-dev" 257 | 258 | echo "Download binaries." 259 | wget "$CUDNN_URL" -O "$CUDNN_TMP_PATH" 260 | wget "$CUDNN_URL_DEV" -O "$CUDNN_DEV_TMP_PATH" 261 | 262 | mkdir -p "$CUDNN_TMP_DIR_PATH" 263 | mkdir -p "$CUDNN_DEV_TMP_DIR_PATH" 264 | 265 | echo "Extract binaries." 266 | cd "$CUDNN_TMP_DIR_PATH" 267 | ar x "$CUDNN_TMP_PATH" 268 | tar -xJf data.tar.xz 269 | cd "$CUDNN_DEV_TMP_DIR_PATH" 270 | ar x "$CUDNN_DEV_TMP_PATH" 271 | tar -xJf data.tar.xz 272 | 273 | echo "Install cuDNN files." 274 | 275 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 276 | if [ ! 
-w "$PARENT_BASE_DIR" ]; then 277 | sudo mv $CUDNN_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libcudnn* "$CUDA_HOME/lib64/" 278 | sudo mv "$CUDNN_DEV_TMP_DIR_PATH/usr/include/x86_64-linux-gnu/cudnn_v7.h" "$CUDA_HOME/include/" 279 | sudo mv "$CUDNN_DEV_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libcudnn_static_v7.a" "$CUDA_HOME/lib64/" 280 | 281 | sudo rm -f "$CUDA_HOME/include/cudnn.h" 282 | sudo rm -f "$CUDA_HOME/lib64/libcudnn_static.a" 283 | 284 | sudo ln -s "$CUDA_HOME/include/cudnn_v7.h" "$CUDA_HOME/include/cudnn.h" 285 | sudo ln -s "$CUDA_HOME/lib64/libcudnn_static_v7.a" "$CUDA_HOME/lib64/libcudnn_static.a" 286 | else 287 | mv $CUDNN_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libcudnn* "$CUDA_HOME/lib64/" 288 | mv "$CUDNN_DEV_TMP_DIR_PATH/usr/include/x86_64-linux-gnu/cudnn_v7.h" "$CUDA_HOME/include/" 289 | mv "$CUDNN_DEV_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libcudnn_static_v7.a" "$CUDA_HOME/lib64/" 290 | 291 | rm -f "$CUDA_HOME/include/cudnn.h" 292 | rm -f "$CUDA_HOME/lib64/libcudnn_static.a" 293 | 294 | ln -s "$CUDA_HOME/include/cudnn_v7.h" "$CUDA_HOME/include/cudnn.h" 295 | ln -s "$CUDA_HOME/lib64/libcudnn_static_v7.a" "$CUDA_HOME/lib64/libcudnn_static.a" 296 | fi 297 | 298 | echo "Cleanup files." 299 | rm -fr "$CUDNN_TMP_DIR_PATH" 300 | rm -fr "$CUDNN_DEV_TMP_DIR_PATH" 301 | rm -f "$CUDNN_TMP_PATH" 302 | rm -f "$CUDNN_DEV_TMP_PATH" 303 | 304 | echo "cuDNN $CUDNN_VERSION is installed at $CUDA_HOME." 305 | } 306 | 307 | cuda.install.nccl() { 308 | # Install NCCL in $CUDA_HOME 309 | 310 | if ! is_cuda_home_set; then 311 | return 1 312 | fi 313 | 314 | if ! is_cuda_home_symbolic_link; then 315 | return 1 316 | fi 317 | 318 | CUDA_VERSION="$(guess_cuda_version)" 319 | if [ -z "$CUDA_VERSION" ]; then 320 | echo "Can't guess the Cuda version from $CUDA_HOME." 321 | return 1 322 | fi 323 | 324 | NCCL_VERSION="$1" 325 | if [ -z "$NCCL_VERSION" ]; then 326 | # echo "Please specify a NCCL version." 327 | # echo "Usage: cuda.install.nccl NCCL_VERSION" 328 | # echo "Example: cuda.install.nccl 2.4" 329 | # echo "NCCL version available: 2.1, 2.2, 2.3 and 2.4" 330 | # return 1 331 | # Default NCCL version 332 | NCCL_VERSION="2.4" 333 | fi 334 | 335 | # NCCL 2.1 336 | if [ "$NCCL_VERSION" = "2.1" ]; then 337 | 338 | 339 | if [ "$CUDA_VERSION" = "9.0" ]; then 340 | NCCL_VERSION_DETAILED="2.1.15-1" 341 | elif [ "$CUDA_VERSION" = "9.1" ]; then 342 | NCCL_VERSION_DETAILED="2.1.15-1" 343 | elif [ -n "$NCCL_VERSION" ]; then 344 | echo "Error: NCCL $NCCL_VERSION is not compatible with Cuda $CUDA_VERSION." 345 | return 1 346 | fi 347 | 348 | # NCCL 2.3 349 | elif [ "$NCCL_VERSION" = "2.2" ]; then 350 | 351 | # NCCL 2.2 352 | if [ "$CUDA_VERSION" = "9.0" ]; then 353 | NCCL_VERSION_DETAILED="2.2.13-1" 354 | elif [ "$CUDA_VERSION" = "9.2" ]; then 355 | NCCL_VERSION_DETAILED="2.2.13-1" 356 | elif [ -n "$NCCL_VERSION" ]; then 357 | echo "Error: NCCL $NCCL_VERSION is not compatible with Cuda $CUDA_VERSION." 358 | return 1 359 | fi 360 | 361 | # NCCL 2.3 362 | elif [ "$NCCL_VERSION" = "2.3" ]; then 363 | 364 | if [ "$CUDA_VERSION" = "9.0" ]; then 365 | NCCL_VERSION_DETAILED="2.3.7-1" 366 | elif [ "$CUDA_VERSION" = "9.2" ]; then 367 | NCCL_VERSION_DETAILED="2.3.7-1" 368 | elif [ "$CUDA_VERSION" = "10.0" ]; then 369 | NCCL_VERSION_DETAILED="2.3.7-1" 370 | elif [ -n "$NCCL_VERSION" ]; then 371 | echo "Error: NCCL $NCCL_VERSION is not compatible with Cuda $CUDA_VERSION." 
372 | return 1 373 | fi 374 | 375 | # NCCL 2.4 376 | elif [ "$NCCL_VERSION" = "2.4" ]; then 377 | 378 | if [ "$CUDA_VERSION" = "9.0" ]; then 379 | NCCL_VERSION_DETAILED="2.4.2-1" 380 | elif [ "$CUDA_VERSION" = "9.2" ]; then 381 | NCCL_VERSION_DETAILED="2.4.2-1" 382 | elif [ "$CUDA_VERSION" = "10.0" ]; then 383 | NCCL_VERSION_DETAILED="2.4.2-1" 384 | elif [ "$CUDA_VERSION" = "10.1" ]; then 385 | NCCL_VERSION_DETAILED="2.4.2-1" 386 | elif [ -n "$NCCL_VERSION" ]; then 387 | echo "Error: NCCL $NCCL_VERSION is not compatible with Cuda $CUDA_VERSION." 388 | return 1 389 | fi 390 | 391 | elif [ -n "$NCCL_VERSION" ]; then 392 | echo "Error: You need to set NCCL_VERSION to 2.1, 2.2, 2.3 and 2.4." 393 | return 1 394 | fi 395 | 396 | # Setup URLs 397 | NCCL_URL="https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/libnccl2_${NCCL_VERSION_DETAILED}+cuda${CUDA_VERSION}_amd64.deb" 398 | NCCL_URL_DEV="https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/libnccl-dev_${NCCL_VERSION_DETAILED}+cuda${CUDA_VERSION}_amd64.deb" 399 | 400 | # Setup temporary paths 401 | NCCL_TMP_PATH="/tmp/nccl.deb" 402 | NCCL_DEV_TMP_PATH="/tmp/nccl-dev.deb" 403 | 404 | NCCL_TMP_DIR_PATH="/tmp/nccl" 405 | NCCL_DEV_TMP_DIR_PATH="/tmp/nccl-dev" 406 | 407 | echo "Download binaries." 408 | wget "$NCCL_URL" -O "$NCCL_TMP_PATH" 409 | wget "$NCCL_URL_DEV" -O "$NCCL_DEV_TMP_PATH" 410 | 411 | mkdir -p "$NCCL_TMP_DIR_PATH" 412 | mkdir -p "$NCCL_DEV_TMP_DIR_PATH" 413 | 414 | echo "Extract binaries." 415 | cd "$NCCL_TMP_DIR_PATH" 416 | ar x "$NCCL_TMP_PATH" 417 | tar -xJf data.tar.xz 418 | cd "$NCCL_DEV_TMP_DIR_PATH" 419 | ar x "$NCCL_DEV_TMP_PATH" 420 | tar -xJf data.tar.xz 421 | 422 | echo "Install NCCL files." 423 | 424 | PARENT_BASE_DIR=$(dirname $CUDA_HOME) 425 | if [ ! -w "$PARENT_BASE_DIR" ]; then 426 | sudo mv $NCCL_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libnccl* "$CUDA_HOME/lib64/" 427 | sudo rm -f "$CUDA_HOME/include/nccl.h" 428 | sudo mv "$NCCL_DEV_TMP_DIR_PATH/usr/include/nccl.h" "$CUDA_HOME/include/nccl.h" 429 | sudo rm -f "$CUDA_HOME/lib64/libnccl_static.a" 430 | sudo mv "$NCCL_DEV_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libnccl_static.a" "$CUDA_HOME/lib64/libnccl_static.a" 431 | else 432 | mv $NCCL_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libnccl* "$CUDA_HOME/lib64/" 433 | rm -f "$CUDA_HOME/include/nccl.h" 434 | mv "$NCCL_DEV_TMP_DIR_PATH/usr/include/nccl.h" "$CUDA_HOME/include/nccl.h" 435 | rm -f "$CUDA_HOME/lib64/libnccl_static.a" 436 | mv "$NCCL_DEV_TMP_DIR_PATH/usr/lib/x86_64-linux-gnu/libnccl_static.a" "$CUDA_HOME/lib64/libnccl_static.a" 437 | fi 438 | 439 | echo "Cleanup files." 440 | rm -fr "$NCCL_TMP_DIR_PATH" 441 | rm -fr "$NCCL_DEV_TMP_DIR_PATH" 442 | rm -f "$NCCL_TMP_PATH" 443 | rm -f "$NCCL_DEV_TMP_PATH" 444 | 445 | echo "NCCL $NCCL_VERSION is installed at $CUDA_HOME." 446 | } 447 | 448 | cuda.gcc.install() { 449 | 450 | if [ -z "$1" ]; then 451 | echo "Please specify a GCC version." 
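    # Usage (illustrative): cuda.gcc.install 7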
452 | return 453 | fi 454 | export GCC_VERSION="$1" 455 | 456 | sudo apt install --yes gcc-$GCC_VERSION g++-$GCC_VERSION 457 | 458 | sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-$GCC_VERSION 10 459 | sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-$GCC_VERSION 10 460 | 461 | sudo update-alternatives --set gcc "/usr/bin/gcc-$GCC_VERSION" 462 | sudo update-alternatives --set g++ "/usr/bin/g++-$GCC_VERSION" 463 | } 464 | -------------------------------------------------------------------------------- /tensorflow/ubuntu-18.10/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | tf: 4 | build: . 5 | stdin_open: true 6 | tty: true 7 | volumes: 8 | - ../../wheels:/wheels 9 | environment: 10 | - TF_VERSION_GIT_TAG=${TF_VERSION_GIT_TAG:?TF_VERSION_GIT_TAG} 11 | - PYTHON_VERSION=${PYTHON_VERSION:?PYTHON_VERSION} 12 | - BAZEL_VERSION=${BAZEL_VERSION:?BAZEL_VERSION} 13 | - USE_GPU=${USE_GPU-0} 14 | - CUDA_VERSION=${CUDA_VERSION-10.0} 15 | - CUDNN_VERSION=${CUDNN_VERSION-7.5} 16 | - NCCL_VERSION=${NCCL_VERSION-2.4} 17 | --------------------------------------------------------------------------------