├── .github └── workflows │ ├── build_and_push.yml │ ├── build_r.yml │ ├── build_rstudio.yml │ ├── build_ubuntu.yml │ └── create_readme.yml ├── .gitignore ├── Dockerfile ├── Dockerfile.base ├── LICENSE ├── README.md ├── aspera_connect ├── Dockerfile ├── README.md ├── asperaweb_id_dsa.openssh ├── asperaweb_id_dsa.openssh.pub └── build.sh ├── bioperl ├── Dockerfile ├── build.sh └── test.pl ├── build.sh ├── clean_up_docker.sh ├── create_readme.sh ├── data ├── README.md └── chrI.fa.gz ├── fastq ├── .version ├── Dockerfile ├── build.sh └── run.sh ├── firefox ├── README.md └── run_firefox.sh ├── github_actions ├── Dockerfile └── README.md ├── gitlab └── README.md ├── hla-la ├── Dockerfile ├── README.md └── paths.ini ├── hugo └── README.md ├── igv ├── README.md └── run_docker.sh ├── mkdocs ├── Dockerfile ├── README.md ├── build.sh ├── mkdocs_serve.sh └── requirements.txt ├── mkdocs_site ├── docs │ └── index.md └── mkdocs.yml ├── mysql └── README.md ├── r ├── Dockerfile ├── README.md └── build.sh ├── readme.Rmd ├── rstudio ├── .Rprofile ├── Dockerfile ├── README.md ├── build.sh ├── iris.png ├── notebooks │ ├── example.Rmd │ └── getting_started_with_keras.Rmd ├── rstudio-prefs.json ├── run_docker.sh ├── run_rstudio.sh └── user-settings ├── rstudio_python ├── Dockerfile └── build.sh ├── samtools ├── Dockerfile ├── README.md ├── build.sh └── test.sh ├── script └── docker_build.sh ├── seurat ├── Dockerfile ├── README.md ├── build.sh └── run.sh ├── shiny ├── .gitignore ├── Dockerfile ├── README.md ├── build.sh ├── deseq2_app │ └── app.R ├── run_shiny.sh └── test_app │ └── app.R ├── tensorflow ├── Dockerfile ├── README.md ├── build.sh └── rstudio-prefs.json ├── testing ├── entrypoint │ ├── Dockerfile │ ├── README.md │ ├── entrypoint.sh │ └── run.sh └── user │ ├── Dockerfile │ ├── Dockerfile_user │ ├── README.md │ ├── hello │ ├── run.sh │ ├── test.txt │ └── user.sh ├── ubuntu ├── Dockerfile ├── README.md └── build.sh ├── vim ├── .vimrc ├── Dockerfile ├── README.md └── build.sh 
└── vscode ├── .version ├── README.md ├── get_password.sh └── run_vscode.sh /.github/workflows/build_and_push.yml: -------------------------------------------------------------------------------- 1 | name: Build and push test 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - 'main' 8 | tags: 9 | - 'v*' 10 | paths: 11 | - 'github_actions/Dockerfile' 12 | - '.github/workflows/build_and_push.yml' 13 | 14 | jobs: 15 | docker: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - 19 | name: Checkout 20 | uses: actions/checkout@v3 21 | - 22 | name: Docker meta 23 | id: meta 24 | uses: docker/metadata-action@v4 25 | with: 26 | images: davetang/from_github 27 | - 28 | name: Set up QEMU 29 | uses: docker/setup-qemu-action@v2 30 | - 31 | name: Set up Docker Buildx 32 | uses: docker/setup-buildx-action@v2 33 | - 34 | name: Login to DockerHub 35 | uses: docker/login-action@v2 36 | with: 37 | username: ${{ secrets.DOCKER_USERNAME }} 38 | password: ${{ secrets.DOCKER_TOKEN }} 39 | - 40 | name: Build and push 41 | uses: docker/build-push-action@v3 42 | with: 43 | context: github_actions 44 | push: true 45 | tags: ${{ steps.meta.outputs.tags }} 46 | -------------------------------------------------------------------------------- /.github/workflows/build_r.yml: -------------------------------------------------------------------------------- 1 | name: Build R and push to Docker Hub 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - 'main' 8 | paths: 9 | - 'r/Dockerfile' 10 | - 'script/docker_build.sh' 11 | - '.github/workflows/build_r.yml' 12 | 13 | jobs: 14 | docker: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - 18 | name: Checkout 19 | uses: actions/checkout@v3 20 | - 21 | name: Set up QEMU 22 | uses: docker/setup-qemu-action@v2 23 | - 24 | name: Set up Docker Buildx 25 | uses: docker/setup-buildx-action@v2 26 | - 27 | name: Login to DockerHub 28 | uses: docker/login-action@v2 29 | with: 30 | username: ${{ secrets.DOCKER_USERNAME }} 31 | password: ${{ 
secrets.DOCKER_TOKEN }} 32 | - 33 | name: Build and push 34 | run: | 35 | img=davetang/r_build 36 | df=r/Dockerfile 37 | script/docker_build.sh ${df} ${img} 38 | ver=$(cat ${df} | grep "^FROM" | cut -f2 -d':') 39 | docker push ${img}:${ver} 40 | -------------------------------------------------------------------------------- /.github/workflows/build_rstudio.yml: -------------------------------------------------------------------------------- 1 | name: Build RStudio Server and push to Docker Hub 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - 'main' 8 | paths: 9 | - 'rstudio/Dockerfile' 10 | - 'rstudio/rstudio-prefs.json' 11 | - 'script/docker_build.sh' 12 | - '.github/workflows/build_rstudio.yml' 13 | 14 | jobs: 15 | docker: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - 19 | name: Checkout 20 | uses: actions/checkout@v3 21 | - 22 | name: Set up QEMU 23 | uses: docker/setup-qemu-action@v2 24 | - 25 | name: Set up Docker Buildx 26 | uses: docker/setup-buildx-action@v2 27 | - 28 | name: Login to DockerHub 29 | uses: docker/login-action@v2 30 | with: 31 | username: ${{ secrets.DOCKER_USERNAME }} 32 | password: ${{ secrets.DOCKER_TOKEN }} 33 | - 34 | name: Build and push 35 | run: | 36 | script/docker_build.sh rstudio/Dockerfile davetang/rstudio 37 | ver=$(cat rstudio/Dockerfile | grep "^FROM" | cut -f2 -d':') 38 | docker push davetang/rstudio:${ver} 39 | -------------------------------------------------------------------------------- /.github/workflows/build_ubuntu.yml: -------------------------------------------------------------------------------- 1 | name: Build Ubuntu and push to Docker Hub 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - 'main' 8 | paths: 9 | - 'ubuntu/Dockerfile' 10 | - 'script/docker_build.sh' 11 | - '.github/workflows/build_ubuntu.yml' 12 | 13 | jobs: 14 | docker: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - 18 | name: Checkout 19 | uses: actions/checkout@v3 20 | - 21 | name: Set up QEMU 22 | uses: 
docker/setup-qemu-action@v2 23 | - 24 | name: Set up Docker Buildx 25 | uses: docker/setup-buildx-action@v2 26 | - 27 | name: Login to DockerHub 28 | uses: docker/login-action@v2 29 | with: 30 | username: ${{ secrets.DOCKER_USERNAME }} 31 | password: ${{ secrets.DOCKER_TOKEN }} 32 | - 33 | name: Build and push 34 | run: | 35 | image=davetang/build 36 | ver=23.04 37 | script/docker_build.sh ubuntu/Dockerfile ${image} ${ver} 38 | docker push ${image}:${ver} 39 | -------------------------------------------------------------------------------- /.github/workflows/create_readme.yml: -------------------------------------------------------------------------------- 1 | # name of workflow that will be displayed on the actions page 2 | name: Create README.md 3 | 4 | # execute workflow only when these files are modified 5 | on: 6 | push: 7 | branches: 8 | - 'main' 9 | paths: 10 | - 'create_readme.sh' 11 | - 'readme.Rmd' 12 | - '.github/workflows/create_readme.yml' 13 | 14 | # a list of the jobs that run as part of the workflow 15 | jobs: 16 | make_markdown: 17 | runs-on: ubuntu-latest 18 | 19 | # a list of the steps that will run as part of the job 20 | steps: 21 | - run: echo "The job was automatically triggered by a ${{ github.event_name }} event." 22 | - run: echo "This job is now running on a ${{ runner.os }} server hosted by GitHub!" 23 | - run: echo "The name of your branch is ${{ github.ref }} and your repository is ${{ github.repository }}." 24 | - name: Check out repository code 25 | uses: actions/checkout@v3 26 | - run: echo "The ${{ github.repository }} repository has been cloned to the runner." 27 | - run: echo "The workflow is now ready to test your code on the runner." 
28 | 29 | - uses: r-lib/actions/setup-r@v2 30 | - uses: r-lib/actions/setup-pandoc@v2 31 | 32 | - name: Install rmarkdown 33 | run: Rscript -e 'install.packages("rmarkdown", repos="http://cran.us.r-project.org")' 34 | 35 | - name: Build README 36 | run: ./create_readme.sh 37 | 38 | - name: Commit report 39 | run: | 40 | git config --global user.name 'GitHub Actions' 41 | git config --global user.email 'me@davetang.org' 42 | git add "README.md" 43 | git commit -m "Build README.md" || echo "README.md unchanged; nothing to commit" 44 | git push origin main 45 | 46 | - name: Install MkDocs 47 | run: python -m pip install --upgrade pip && pip install mkdocs pymdown-extensions 48 | 49 | - name: Build MkDocs site 50 | run: | 51 | cd mkdocs_site && mkdocs build 52 | 53 | - name: Deploy MkDocs 54 | run: | 55 | git branch gh-pages || true 56 | git pull 57 | cd mkdocs_site && mkdocs gh-deploy 58 | 59 | - run: echo "This job's status is ${{ job.status }}." 60 | 61 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | rstudio/packages 3 | .Rhistory 4 | mkdocs_site/site 5 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | 3 | MAINTAINER Dave Tang 4 | 5 | LABEL source="https://github.com/davetang/learning_docker/blob/main/Dockerfile" 6 | 7 | RUN apt-get clean all && \ 8 | apt-get update && \ 9 | apt-get upgrade -y && \ 10 | apt-get install -y \ 11 | build-essential \ 12 | wget \ 13 | zlib1g-dev && \ 14 | apt-get clean all && \ 15 | apt-get purge && \ 16 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 17 | 18 | RUN mkdir /src && \ 19 | cd /src && \ 20 | wget https://github.com/lh3/bwa/releases/download/v0.7.17/bwa-0.7.17.tar.bz2 && \ 21 | tar xjf bwa-0.7.17.tar.bz2 && \ 22 | cd bwa-0.7.17 && \ 23 | make && \ 24 | mv bwa /usr/local/bin && \ 25 | cd && rm
-rf /src 26 | 27 | WORKDIR /work 28 | 29 | CMD ["bwa"] 30 | 31 | -------------------------------------------------------------------------------- /Dockerfile.base: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | 3 | MAINTAINER Dave Tang 4 | 5 | RUN apt-get clean all && \ 6 | apt-get update && \ 7 | apt-get upgrade -y && \ 8 | apt-get install -y \ 9 | build-essential \ 10 | git-core \ 11 | wget \ 12 | unzip \ 13 | time \ 14 | vim \ 15 | libhdf5-dev \ 16 | libcurl4-gnutls-dev \ 17 | libssl-dev \ 18 | libxml2-dev \ 19 | libpng-dev \ 20 | zlib1g-dev \ 21 | libbz2-dev \ 22 | liblzma-dev \ 23 | libncurses-dev \ 24 | && apt-get clean all && \ 25 | apt-get purge && \ 26 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 27 | 28 | # Miniconda and dependencies 29 | RUN cd /tmp/ && \ 30 | wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ 31 | bash Miniconda3-latest-Linux-x86_64.sh -b -p $HOME/miniconda3 && \ 32 | /root/miniconda3/condabin/conda install -y python=3.7 33 | ENV PATH=$PATH:/root/miniconda3/bin 34 | 35 | # CMake 36 | RUN cd /tmp/ && \ 37 | wget https://github.com/Kitware/CMake/releases/download/v3.16.5/cmake-3.16.5.tar.gz && \ 38 | tar -zxf cmake-3.16.5.tar.gz && \ 39 | cd cmake-3.16.5 && \ 40 | ./bootstrap && \ 41 | make && \ 42 | make install 43 | 44 | RUN rm -rf /tmp/* 45 | 46 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Dave Tang 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom 
the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /aspera_connect/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.10 2 | 3 | MAINTAINER Dave Tang 4 | 5 | RUN apt-get clean all && \ 6 | apt-get update && \ 7 | apt-get upgrade -y && \ 8 | apt-get install -y \ 9 | openssh-client \ 10 | wget && \ 11 | apt-get clean all && \ 12 | apt-get purge && \ 13 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 14 | 15 | ARG user=parasite 16 | RUN useradd \ 17 | --create-home \ 18 | --home-dir /home/${user} \ 19 | --base-dir /home/${user} \ 20 | --shell /bin/bash ${user} && \ 21 | echo "${user}:password" | chpasswd && \ 22 | usermod -d /home/${user} ${user} 23 | 24 | # get link to tarball from https://www.ibm.com/aspera/connect/ 25 | ARG aspera_ver=4.2.6.393 26 | ARG tarball=ibm-aspera-connect_${aspera_ver}_linux_x86_64.tar.gz 27 | ARG script=ibm-aspera-connect_${aspera_ver}_linux_x86_64.sh 28 | 29 | USER ${user} 30 | RUN cd /tmp && \ 31 | wget --quiet https://d3gcli72yxqn2z.cloudfront.net/downloads/connect/latest/bin/${tarball} && \ 32 | tar -xzf ${tarball} && \ 33 | ./ibm-aspera-connect_4.2.6.393_linux_x86_64.sh && \ 34 | ./${script} && \ 35 | rm /tmp/${tarball} 
/tmp/${script} 36 | 37 | ARG home=/home/${user} 38 | # manually copy files as they are missing from the tarball 39 | COPY --chown=${user}:${user} asperaweb_id_dsa.openssh ${home} 40 | COPY --chown=${user}:${user} asperaweb_id_dsa.openssh.pub ${home} 41 | 42 | RUN echo "export PATH=$PATH:${home}/.aspera/connect/bin/" >> ${home}/.bashrc 43 | RUN chmod -R 777 ${home} 44 | WORKDIR ${home} 45 | ENTRYPOINT ["/home/parasite/.aspera/connect/bin/ascp", "-P33001", "-i", "/home/parasite/asperaweb_id_dsa.openssh"] 46 | CMD ["-QT", "-l 300m"] 47 | -------------------------------------------------------------------------------- /aspera_connect/README.md: -------------------------------------------------------------------------------- 1 | # README 2 | 3 | [IBM Aspera Connect](https://www.ibm.com/aspera/connect/) cannot be installed 4 | as root. If you try, you will get the following message. 5 | 6 | ``` 7 | Installing IBM Aspera Connect 8 | 9 | This script cannot be run as root, IBM Aspera Connect must be installed per user. 10 | ``` 11 | 12 | This is why a user (called `parasite`) is created in the Dockerfile. 13 | 14 | ``` 15 | ARG user=parasite 16 | RUN useradd \ 17 | --create-home \ 18 | --home-dir /home/${user} \ 19 | --base-dir /home/${user} \ 20 | --shell /bin/bash ${user} && \ 21 | echo "${user}:password" | chpasswd && \ 22 | usermod -d /home/${user} ${user} 23 | ``` 24 | 25 | In addition, the SSH keys are missing in the latest versions of Aspera Connect 26 | and are manually copied. 27 | 28 | ``` 29 | arg home=/home/${user} 30 | copy --chown=${user}:${user} asperaweb_id_dsa.openssh ${home} 31 | copy --chown=${user}:${user} asperaweb_id_dsa.openssh.pub ${home} 32 | ``` 33 | 34 | In the Dockerfile, `/home/parasite` is made globally accessible so any user can 35 | run `ascp`. (I did not think this would work when I was implementing it because 36 | I thought the installation had some user specific settings but it does!) 
37 | 38 | ``` 39 | arg home=/home/${user} 40 | RUN chmod -R 777 ${home} 41 | ``` 42 | 43 | Use the image as follows but change the URL to the data you want to download; 44 | also note the period at the end, which specifies where to copy the data. 45 | 46 | ```console 47 | docker run --rm -u $(id -u):$(id -g) -v $(pwd):$(pwd) -w $(pwd) davetang/aspera_connect:4.2.6.393 era-fasp@fasp.sra.ebi.ac.uk:vol1/fastq/SRR390/SRR390728/SRR390728_1.fastq.gz . 48 | ``` 49 | -------------------------------------------------------------------------------- /aspera_connect/asperaweb_id_dsa.openssh: -------------------------------------------------------------------------------- 1 | -----BEGIN DSA PRIVATE KEY----- 2 | MIIBuwIBAAKBgQDkKQHD6m4yIxgjsey6Pny46acZXERsJHy54p/BqXIyYkVOAkEp 3 | KgvT3qTTNmykWWw4ovOP1+Di1c/2FpYcllcTphkWcS8lA7j012mUEecXavXjPPG0 4 | i3t5vtB8xLy33kQ3e9v9/Lwh0xcRfua0d5UfFwopBIAXvJAr3B6raps8+QIVALws 5 | yeqsx3EolCaCVXJf+61ceJppAoGAPoPtEP4yzHG2XtcxCfXab4u9zE6wPz4ePJt0 6 | UTn3fUvnQmJT7i0KVCRr3g2H2OZMWF12y0jUq8QBuZ2so3CHee7W1VmAdbN7Fxc+ 7 | cyV9nE6zURqAaPyt2bE+rgM1pP6LQUYxgD3xKdv1ZG+kDIDEf6U3onjcKbmA6ckx 8 | T6GavoACgYEAobapDv5p2foH+cG5K07sIFD9r0RD7uKJnlqjYAXzFc8U76wXKgu6 9 | WXup2ac0Co+RnZp7Hsa9G+E+iJ6poI9pOR08XTdPly4yDULNST4PwlfrbSFT9FVh 10 | zkWfpOvAUc8fkQAhZqv/PE6VhFQ8w03Z8GpqXx7b3NvBR+EfIx368KoCFEyfl0vH 11 | Ta7g6mGwIMXrdTQQ8fZs 12 | -----END DSA PRIVATE KEY----- 13 | -------------------------------------------------------------------------------- /aspera_connect/asperaweb_id_dsa.openssh.pub: -------------------------------------------------------------------------------- 1 | ssh-dss 
AAAAB3NzaC1kc3MAAACBAOQpAcPqbjIjGCOx7Lo+fLjppxlcRGwkfLnin8GpcjJiRU4CQSkqC9PepNM2bKRZbDii84/X4OLVz/YWlhyWVxOmGRZxLyUDuPTXaZQR5xdq9eM88bSLe3m+0HzEvLfeRDd72/38vCHTFxF+5rR3lR8XCikEgBe8kCvcHqtqmzz5AAAAFQC8LMnqrMdxKJQmglVyX/utXHiaaQAAAIA+g+0Q/jLMcbZe1zEJ9dpvi73MTrA/Ph48m3RROfd9S+dCYlPuLQpUJGveDYfY5kxYXXbLSNSrxAG5nayjcId57tbVWYB1s3sXFz5zJX2cTrNRGoBo/K3ZsT6uAzWk/otBRjGAPfEp2/Vkb6QMgMR/pTeieNwpuYDpyTFPoZq+gAAAAIEAobapDv5p2foH+cG5K07sIFD9r0RD7uKJnlqjYAXzFc8U76wXKgu6WXup2ac0Co+RnZp7Hsa9G+E+iJ6poI9pOR08XTdPly4yDULNST4PwlfrbSFT9FVhzkWfpOvAUc8fkQAhZqv/PE6VhFQ8w03Z8GpqXx7b3NvBR+EfIx368Ko= Aspera public access 2 | 3 | -------------------------------------------------------------------------------- /aspera_connect/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | ver=$(cat Dockerfile | grep "^ARG aspera_ver" | cut -f2 -d'=') 6 | if [[ -z ${ver} ]]; then 7 | >&2 echo Could not get version 8 | exit 1 9 | fi 10 | image=aspera_connect 11 | 12 | docker build -t davetang/${image}:${ver} . 
13 | 14 | >&2 echo Build complete 15 | >&2 echo -e "Run the following to push to Docker Hub:\n" 16 | >&2 echo docker login 17 | >&2 echo docker push davetang/${image}:${ver} 18 | -------------------------------------------------------------------------------- /bioperl/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM bioperl/bioperl 2 | 3 | MAINTAINER Dave Tang 4 | 5 | RUN apt-get clean all && \ 6 | apt-get update && \ 7 | apt-get upgrade -y && \ 8 | apt-get install -y \ 9 | wget \ 10 | git \ 11 | build-essential \ 12 | zlib1g-dev \ 13 | libbz2-dev \ 14 | libcurl4-gnutls-dev \ 15 | liblzma-dev \ 16 | libncurses5-dev \ 17 | libssl-dev \ 18 | libxml2-dev \ 19 | vim \ 20 | && apt-get clean all && \ 21 | apt-get purge && \ 22 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 23 | 24 | # libraries required by Bio::DB::Sam 25 | RUN mkdir /src && \ 26 | cd /src && \ 27 | wget https://sourceforge.net/projects/samtools/files/samtools/0.1.19/samtools-0.1.19.tar.bz2 && \ 28 | tar -xjf samtools-0.1.19.tar.bz2 && \ 29 | cd samtools-0.1.19 && \ 30 | cat Makefile | sed -E 's/^(CFLAGS.*)/\1 -fPIC/' > blah && \ 31 | mv blah Makefile && \ 32 | make 33 | ENV SAMTOOLS=/src/samtools-0.1.19/ 34 | 35 | # Miniconda and dependencies 36 | RUN cd /tmp/ && \ 37 | wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ 38 | bash Miniconda3-latest-Linux-x86_64.sh -b -p $HOME/miniconda3 && \ 39 | /root/miniconda3/condabin/conda install -y -c bioconda bwa samtools picard mummer minimap2 40 | ENV PATH=$PATH:/root/miniconda3/bin 41 | 42 | RUN cpanm Parallel::ForkManager Bio::DB::Sam namespace::clean 43 | 44 | RUN mkdir /root/perllib && \ 45 | cd /root/perllib && \ 46 | wget https://raw.githubusercontent.com/vcftools/vcftools/master/src/perl/Vcf.pm 47 | 48 | RUN cd /root/ && \ 49 | git clone https://github.com/MullinsLab/Bio-Cigar.git && \ 50 | cd Bio-Cigar/ && \ 51 | perl Makefile.PL && \ 52 | make install 53 | 54 | COPY 
test.pl /tmp/ 55 | 56 | -------------------------------------------------------------------------------- /bioperl/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | docker build -t davetang/bioperl . 4 | 5 | # push to Docker Hub 6 | # docker login 7 | # docker push davetang/bioperl 8 | 9 | -------------------------------------------------------------------------------- /bioperl/test.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env perl 2 | 3 | use strict; 4 | use warnings; 5 | use Bio::DB::Sam; 6 | use Getopt::Std; 7 | use Parallel::ForkManager; 8 | # https://github.com/MullinsLab/Bio-Cigar 9 | use Bio::Cigar; 10 | 11 | use lib "$ENV{HOME}/perllib/"; 12 | use Vcf; 13 | 14 | print "OK\n"; 15 | 16 | -------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | ver=0.7.17 6 | 7 | docker build -t davetang/bwa:${ver} . 8 | 9 | -------------------------------------------------------------------------------- /clean_up_docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | exited=`docker ps -a -q -f status=exited` 6 | 7 | if [[ ! -z ${exited} ]]; then 8 | docker rm -v $(docker ps -a -q -f status=exited) 9 | fi 10 | 11 | exit 0 12 | 13 | -------------------------------------------------------------------------------- /create_readme.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | if ! [ -x "$(command -v docker)" ]; then 6 | >&2 echo Could not find Docker 7 | exit 1 8 | fi 9 | 10 | download_url (){ 11 | my_url=$1 12 | outfile=$2 13 | if ! [ -x "$(command -v wget)" ]; then 14 | if ! 
[ -x "$(command -v curl)" ]; then 15 | >&2 echo Could not find a suitable downloader 16 | exit 1 17 | else 18 | curl ${my_url} -L --max-redirs 5 -o ${outfile}; return 0 19 | fi 20 | fi 21 | wget -O ${outfile} ${my_url} 22 | } 23 | 24 | if ! [ -x "$(command -v pandoc)" ]; then 25 | >&2 echo Could not find pandoc 26 | >&2 echo Trying to download pandoc 27 | os=$(uname -s) 28 | arc=$(arch) 29 | 30 | RAN_DIR=$$$RANDOM 31 | mkdir /tmp/${RAN_DIR} && cd /tmp/${RAN_DIR} 32 | pandoc_ver=2.16.2 33 | pandoc_url=https://github.com/jgm/pandoc/releases/download/${pandoc_ver}/pandoc-${pandoc_ver} 34 | 35 | if [[ ${os} == Darwin ]]; then 36 | download_url ${pandoc_url}-macOS.zip pandoc.zip 37 | unzip pandoc.zip 38 | elif [[ ${os} == Linux ]]; then 39 | if [[ ${arc} == x86_64 ]]; then 40 | download_url ${pandoc_url}-amd64.tar.gz pandoc.tar.gz 41 | tar -xzf pandoc.tar.gz 42 | elif [[ ${arc} =~ arm ]]; then 43 | download_url ${pandoc_url}-arm64.tar.gz pandoc.tar.gz 44 | tar -xzf pandoc.tar.gz 45 | fi 46 | else 47 | >&2 echo Unrecognised operating system 48 | exit 1 49 | fi 50 | PATH=$PATH:$(pwd)/pandoc-${pandoc_ver}/bin 51 | cd - 52 | fi 53 | 54 | if ! [ -x "$(command -v Rscript)" ]; then 55 | >&2 echo Could not find Rscript. Please make sure R is installed. 56 | exit 1 57 | fi 58 | 59 | if ! [ -x "$(command -v gh-md-toc)" ]; then 60 | >&2 echo Could not find gh-md-toc 61 | ghmdtoc_ver=0.10.0 62 | ghmdtoc_url=https://github.com/ekalinin/github-markdown-toc/archive/refs/tags/${ghmdtoc_ver}.tar.gz 63 | RAN_DIR=$$$RANDOM 64 | mkdir /tmp/${RAN_DIR} && cd /tmp/${RAN_DIR} 65 | >&2 echo Trying to download gh-md-toc 66 | if ! [ -x "$(command -v wget)" ]; then 67 | if !
[ -x "$(command -v curl)" ]; then 68 | >&2 echo Could not download gh-md-toc; exit 1 69 | else 70 | curl ${ghmdtoc_url} -L --max-redirs 5 -o ghmdtoc-${ghmdtoc_ver}.tar.gz 71 | fi 72 | fi 73 | if [ -x "$(command -v wget)" ]; then wget -O ghmdtoc-${ghmdtoc_ver}.tar.gz ${ghmdtoc_url}; fi 74 | tar -xzf ghmdtoc-${ghmdtoc_ver}.tar.gz 75 | PATH=$PATH:$(pwd)/github-markdown-toc-${ghmdtoc_ver} 76 | cd - 77 | fi 78 | 79 | out_md=tmp.md 80 | Rscript -e "rmarkdown::render('readme.Rmd', output_file=\"${out_md}\")" 81 | 82 | cp -f ${out_md} mkdocs_site/docs/index.md 83 | 84 | gh-md-toc ${out_md} > toc 85 | 86 | cat toc <(echo) <(date) <(echo) ${out_md} > README.md 87 | 88 | rm ${out_md} toc 89 | 90 | >&2 echo Done! 91 | 92 | exit 0 93 | -------------------------------------------------------------------------------- /data/README.md: -------------------------------------------------------------------------------- 1 | ## README 2 | 3 | Chromosome one of the _C. elegans_ genome (ce11, C. elegans Sequencing Consortium WBcel235). 4 | 5 | ```bash 6 | wget -c https://hgdownload.soe.ucsc.edu/goldenPath/ce11/chromosomes/chrI.fa.gz 7 | ``` 8 | 9 | -------------------------------------------------------------------------------- /data/chrI.fa.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davetang/learning_docker/8835c5a8f57f49d4bfd798dbf1d0d14c305cc8fa/data/chrI.fa.gz -------------------------------------------------------------------------------- /fastq/.version: -------------------------------------------------------------------------------- 1 | 0.0.1 2 | -------------------------------------------------------------------------------- /fastq/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM davetang/build:1.1 2 | 3 | MAINTAINER Dave Tang 4 | 5 | LABEL source="https://github.com/davetang/learning_docker/fastq" 6 | 7 | RUN wget https://github.com/angelovangel/faster/releases/download/v0.1.4/x86_64_linux_faster -O
/usr/local/bin/faster && chmod 755 /usr/local/bin/faster 8 | 9 | RUN fq_ver=0.8.0 && cd /tmp && \ 10 | wget https://github.com/stjude-rust-labs/fq/releases/download/v${fq_ver}/fq-${fq_ver}-x86_64-unknown-linux-gnu.tar.gz && \ 11 | tar xzf fq-${fq_ver}-x86_64-unknown-linux-gnu.tar.gz && \ 12 | mv fq-${fq_ver}-x86_64-unknown-linux-gnu/fq /usr/local/bin 13 | 14 | RUN seqkit_ver=2.1.0 && cd /tmp && \ 15 | wget https://github.com/shenwei356/seqkit/releases/download/v${seqkit_ver}/seqkit_linux_amd64.tar.gz && \ 16 | tar xzf seqkit_linux_amd64.tar.gz && \ 17 | mv seqkit /usr/local/bin 18 | 19 | RUN rm -rf /tmp/* 20 | 21 | CMD seqkit 22 | 23 | -------------------------------------------------------------------------------- /fastq/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | ver=0.0.1 6 | 7 | docker build -t davetang/fastq:${ver} . 8 | 9 | # docker login 10 | # docker push davetang/faster:${ver} 11 | 12 | -------------------------------------------------------------------------------- /fastq/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | ver=$(cat .version) 6 | 7 | docker run --rm -it -v $(pwd):$(pwd) -w $(pwd) davetang/fastq:${ver} 8 | 9 | -------------------------------------------------------------------------------- /firefox/README.md: -------------------------------------------------------------------------------- 1 | ## README 2 | 3 | Run [Firefox](https://hub.docker.com/r/jlesage/firefox) in a Docker container. Why? Easier to port forward than X11 forwarding through several hosts. 4 | 5 | ```bash 6 | docker pull jlesage/firefox 7 | ``` 8 | 9 | Run Docker; add more [environment variables](https://github.com/jlesage/docker-firefox#environment-variables) as you see fit. 
10 | 11 | ```bash 12 | image=jlesage/firefox 13 | width=1920 14 | height=1200 15 | 16 | docker run -d \ 17 | --rm \ 18 | --name=firefox \ 19 | -p 5800:5800 \ 20 | --shm-size 8g \ 21 | -e DISPLAY_WIDTH=$width \ 22 | -e DISPLAY_HEIGHT=$height \ 23 | $image 24 | ``` 25 | 26 | Now head to http://localhost:5800/ and that's it! 27 | 28 | When you're done, stop the container. 29 | 30 | ```bash 31 | docker stop firefox 32 | ``` 33 | 34 | -------------------------------------------------------------------------------- /firefox/run_firefox.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | image=jlesage/firefox 4 | width=3000 5 | height=2000 6 | 7 | docker run -d \ 8 | --rm \ 9 | --name=firefox \ 10 | -p 5800:5800 \ 11 | --shm-size 8g \ 12 | -e DISPLAY_WIDTH=$width \ 13 | -e DISPLAY_HEIGHT=$height \ 14 | $image 15 | 16 | -------------------------------------------------------------------------------- /github_actions/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:20.04 2 | 3 | MAINTAINER Dave Tang 4 | 5 | LABEL source="https://github.com/davetang/learning_docker/blob/main/github_actions/Dockerfile" 6 | 7 | RUN apt-get clean all && \ 8 | apt-get update && \ 9 | apt-get upgrade -y && \ 10 | apt-get install -y \ 11 | sl && \ 12 | apt-get clean all && \ 13 | apt-get purge && \ 14 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 15 | 16 | CMD ["/usr/games/sl"] 17 | 18 | -------------------------------------------------------------------------------- /github_actions/README.md: -------------------------------------------------------------------------------- 1 | ## README 2 | 3 | GitHub Actions can be used to automatically build and push Docker images to 4 | Docker Hub using [Build and push Docker 5 | images](https://github.com/marketplace/actions/build-and-push-docker-images) 6 | action. 
This is very nice because you can automatically check whether your 7 | Dockerfile builds and once it finishes building you can upload the image to a 8 | repository such as Docker Hub. 9 | 10 | The following workflow works and the image is pushed to [Docker 11 | Hub](https://hub.docker.com/repository/docker/davetang/from_github). 12 | 13 | ``` 14 | name: Build and push test 15 | 16 | on: 17 | workflow_dispatch: 18 | push: 19 | paths: 20 | - 'github_actions/Dockerfile' 21 | - '.github/workflows/build_and_push.yml' 22 | 23 | jobs: 24 | docker: 25 | runs-on: ubuntu-latest 26 | steps: 27 | - 28 | name: Checkout 29 | uses: actions/checkout@v3 30 | - 31 | name: Docker meta 32 | id: meta 33 | uses: docker/metadata-action@v4 34 | with: 35 | images: davetang/from_github 36 | - 37 | name: Set up QEMU 38 | uses: docker/setup-qemu-action@v2 39 | - 40 | name: Set up Docker Buildx 41 | uses: docker/setup-buildx-action@v2 42 | - 43 | name: Login to DockerHub 44 | uses: docker/login-action@v2 45 | with: 46 | username: ${{ secrets.DOCKER_USERNAME }} 47 | password: ${{ secrets.DOCKER_TOKEN }} 48 | - 49 | name: Build and push 50 | uses: docker/build-push-action@v3 51 | with: 52 | context: github_actions 53 | push: true 54 | tags: ${{ steps.meta.outputs.tags }} 55 | ``` 56 | 57 | However, in order to automatically set a version, i.e. add a tag, for your 58 | Docker image [metadata-action](https://github.com/docker/metadata-action) needs 59 | to be used along with [Git 60 | tags](https://git-scm.com/book/en/v2/Git-Basics-Tagging). The idea is that the 61 | tagged Git commit is associated with the Docker image version. Therefore, when 62 | pushing to Github, include the tag (`git push origin tag`) or else the version 63 | tag will not be populated correctly. This works fine if the GitHub repository 64 | only contains one Docker image to build but not for this repository, which has 65 | multiple Dockerfiles and images to build. 
66 | 67 | A less elegant solution was to simply run a command to get the version from a 68 | Dockerfile. The following workflow uses the setup of the Build and push Docker 69 | action but runs other commands to build, get the version, and push to Docker 70 | Hub. 71 | 72 | ``` 73 | name: Build RStudio Server and push to Docker Hub 74 | 75 | on: 76 | workflow_dispatch: 77 | push: 78 | branches: 79 | - 'main' 80 | paths: 81 | - 'rstudio/Dockerfile' 82 | - 'script/docker_build.sh' 83 | - '.github/workflows/build_rstudio.yml' 84 | 85 | jobs: 86 | docker: 87 | runs-on: ubuntu-latest 88 | steps: 89 | - 90 | name: Checkout 91 | uses: actions/checkout@v3 92 | - 93 | name: Set up QEMU 94 | uses: docker/setup-qemu-action@v2 95 | - 96 | name: Set up Docker Buildx 97 | uses: docker/setup-buildx-action@v2 98 | - 99 | name: Login to DockerHub 100 | uses: docker/login-action@v2 101 | with: 102 | username: ${{ secrets.DOCKER_USERNAME }} 103 | password: ${{ secrets.DOCKER_TOKEN }} 104 | - 105 | name: Build and push 106 | run: | 107 | script/docker_build.sh rstudio/Dockerfile davetang/rstudio 108 | ver=$(cat rstudio/Dockerfile | grep "^FROM" | cut -f2 -d':') 109 | docker push davetang/rstudio:${ver} 110 | ``` 111 | 112 | It's a bit of a hack but it works. 113 | -------------------------------------------------------------------------------- /gitlab/README.md: -------------------------------------------------------------------------------- 1 | # README 2 | 3 | Set up your own GitLab server using Docker! The [GitLab Docker images](https://docs.gitlab.com/ee/install/docker.html) contain all the necessary services in a single container. Official Docker images are at and contain the GitLab Enterprise Edition image based on the Omnibus package. All configurations are done in the unique configuration file `/etc/gitlab/gitlab.rb` for containers using the official Omnibus GitLab package. 4 | 5 | First we'll pull the latest image. (Use a version tag instead of simply using latest.) 
6 | 7 | ```bash 8 | docker pull gitlab/gitlab-ee:14.2.3-ee.0 9 | 10 | docker images gitlab/gitlab-ee:14.2.3-ee.0 11 | # REPOSITORY TAG IMAGE ID CREATED SIZE 12 | # gitlab/gitlab-ee 14.2.3-ee.0 9654fe42c8b2 9 days ago 2.43GB 13 | ``` 14 | 15 | Next we'll run a container in detached mode, expose some ports, and mount some volumes. (The container takes a couple of minutes to start up [especially the first time you run this step]; check `docker ps -a` and make sure the container does not say "health: starting" anymore.) 16 | 17 | ```bash 18 | export GITLAB_HOME=$HOME/gitlab 19 | 20 | docker run \ 21 | --detach \ 22 | --publish 4445:443 \ 23 | --publish 8889:80 \ 24 | --publish 7778:22 \ 25 | --name gitlab \ 26 | --volume $GITLAB_HOME/config:/etc/gitlab \ 27 | --volume $GITLAB_HOME/logs:/var/log/gitlab \ 28 | --volume $GITLAB_HOME/data:/var/opt/gitlab \ 29 | gitlab/gitlab-ee:14.2.3-ee.0 30 | ``` 31 | 32 | If all goes well you should be able to see the GitLab page at <http://localhost:8889/>. 33 | 34 | ## Configuring 35 | 36 | To edit the configuration file use `docker exec` (after you have started the container). 37 | 38 | ```bash 39 | docker exec -it gitlab vi /etc/gitlab/gitlab.rb 40 | ``` 41 | 42 | For your changes to take effect, use `docker restart` to restart container (or run `gitlab-ctl reconfigure`). 43 | 44 | ```bash 45 | docker restart gitlab 46 | 47 | # or 48 | 49 | docker exec -it gitlab /bin/bash 50 | gitlab-ctl reconfigure 51 | exit 52 | ``` 53 | 54 | ## Add user 55 | 56 | To create your own account, visit `localhost:8889` and register a new account; it will say that the account is pending. Next log in with username `root` and the password from the following command. 57 | 58 | ```bash 59 | docker exec -it gitlab grep 'Password:' /etc/gitlab/initial_root_password 60 | ``` 61 | 62 | Click on `Menu` and then `Admin`. Then go to `Users` in the left menu bar and click on the `Pending approval` tab and approve the account you just registered.
Once approved go to the `Active` tab, click on the new user's name, look for the `Edit` button near the top right and click on it, and change the access level to `Admin`. 63 | 64 | ## SSH key 65 | 66 | First create your key pair and save the public SSH key. In your `User Settings` click on `SSH Keys` and paste the public key. 67 | 68 | Next, add the following to `~/.ssh/config` on your local computer. 69 | 70 | ``` 71 | Host localhost 72 | HostName localhost 73 | User git 74 | IdentityFile /location/of/ssh_key 75 | Port 7778 76 | ``` 77 | 78 | Check to see if it works. 79 | 80 | ```bash 81 | ssh -T git@localhost 82 | # Welcome to GitLab, @davetang! 83 | ``` 84 | 85 | ## GitLab Runner 86 | 87 | The [GitLab Runner](https://docs.gitlab.com/runner/) is an application that works with GitLab CI/CD to run jobs in a pipeline. We can also [install the GitLab Runner](https://docs.gitlab.com/runner/install/docker.html) in a container. But first, we need to find the IP address of our GitLab container by running the following. 88 | 89 | ```bash 90 | docker network inspect bridge | grep "gitlab" -A 3 91 | "Name": "gitlab", 92 | "EndpointID": "d2e73801e30fbe79c054749a111e6d7de6a0b3b8badcca33ab027fb074b5e5ba", 93 | "MacAddress": "02:42:ac:11:00:05", 94 | "IPv4Address": "172.17.0.5/16", 95 | ``` 96 | 97 | The IP is `172.17.0.5`. Next set the following in `/etc/gitlab/gitlab.rb`: 98 | 99 | * `external_url 'http://172.17.0.5'`. 100 | * `pages_external_url "http://localhost/"` 101 | 102 | ```bash 103 | docker exec -it gitlab /bin/bash 104 | vi /etc/gitlab/gitlab.rb 105 | gitlab-ctl reconfigure 106 | exit 107 | ``` 108 | 109 | Now pull the `gitlab-runner` image and start a container. 
110 | 111 | ```bash 112 | docker pull gitlab/gitlab-runner:ubuntu-v14.2.0 113 | 114 | docker run \ 115 | -d \ 116 | --name gitlab-runner \ 117 | -v /srv/gitlab-runner/config:/etc/gitlab-runner \ 118 | -v /var/run/docker.sock:/var/run/docker.sock \ 119 | gitlab/gitlab-runner:ubuntu-v14.2.0 120 | ``` 121 | 122 | Log into and go to the `Admin Area` and `Runners` and copy the registration token. When prompted: 123 | 124 | * Use the IP of the container: 172.17.0.5 125 | * Use the registration token from the Admin page of `localhost:8889` 126 | * Enter a description 127 | * Enter tags 128 | * Enter `docker` as your executor 129 | * Enter a default Docker image, such as `ruby:2.7` 130 | 131 | ```bash 132 | docker run --rm -it -v /srv/gitlab-runner/config:/etc/gitlab-runner gitlab/gitlab-runner:ubuntu-v14.2.0 register 133 | 134 | # Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded! 135 | ``` 136 | 137 | In the `Admin Area` and `Runners` section, you should now see the Runner you registered. However the hyperlink for the Runner is the IP address of our Docker container. To change the settings of the Runner, right-click and save link as. Then paste the link but change the IP address to `localhost:8889` (for example: http://localhost:8889/admin/runners/4). Make sure the `Run untagged jobs` is checked/ticked and then `Save changes`. 138 | 139 | ## Clone 140 | 141 | Create a new project from the GUI. 142 | 143 | ```bash 144 | git clone git@localhost:davetang/test_pages.git 145 | # Cloning into 'test_pages'... 146 | # remote: Enumerating objects: 116, done. 147 | # remote: Total 116 (delta 0), reused 0 (delta 0), pack-reused 116 148 | # Receiving objects: 100% (116/116), 1014.88 KiB | 59.70 MiB/s, done. 149 | # Resolving deltas: 100% (6/6), done. 
150 | ``` 151 | 152 | ## GitLab Pages 153 | 154 | To create a new [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) create a new project; I will call my new project `pages-test`. Add a new HTML file with the following. 155 | 156 | ``` 157 | 158 | 159 | Home 160 | 161 | 162 |

It's working!

163 | 164 | 165 | ``` 166 | 167 | Add a new file called `.gitlab-ci.yml` with the following. 168 | 169 | ``` 170 | # This file is a template, and might need editing before it works on your project. 171 | # To contribute improvements to CI/CD templates, please follow the Development guide at: 172 | # https://docs.gitlab.com/ee/development/cicd/templates.html 173 | # This specific template is located at: 174 | # https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Pages/HTML.gitlab-ci.yml 175 | 176 | # Full project: https://gitlab.com/pages/plain-html 177 | pages: 178 | stage: deploy 179 | script: 180 | - mkdir .public 181 | - cp -r * .public 182 | - mv .public public 183 | artifacts: 184 | paths: 185 | - public 186 | rules: 187 | - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH 188 | 189 | ``` 190 | 191 | If you go to `CI/CD` and then `Pipelines`, hopefully the page successfully built and passed. Go to `Settings` and then `Pages` to find the URL of your page site. It should look something like `http://username.localhost/pages-test`. In this guide I have forwarded port `80` to `8889`, so you will have to visit <http://username.localhost:8889/pages-test> to see the site. 192 | 193 | See [GitLab Pages administration](https://docs.gitlab.com/ee/administration/pages/index.html) for information on how to administer GitLab Pages.
194 | 195 | -------------------------------------------------------------------------------- /hla-la/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | 3 | MAINTAINER Dave Tang 4 | 5 | RUN apt-get clean all && \ 6 | apt-get update && \ 7 | apt-get upgrade -y && \ 8 | apt-get install -y \ 9 | autotools-dev \ 10 | build-essential \ 11 | cmake \ 12 | g++ \ 13 | git \ 14 | libbz2-dev \ 15 | libboost-all-dev \ 16 | libcurl4-gnutls-dev \ 17 | libhdf5-dev \ 18 | libicu-dev \ 19 | libjsoncpp-dev \ 20 | libpng-dev \ 21 | libssl-dev \ 22 | libxml2-dev \ 23 | openjdk-8-jre \ 24 | python-dev \ 25 | tree \ 26 | unzip \ 27 | vim \ 28 | wget \ 29 | zlib1g-dev \ 30 | && apt-get clean all && \ 31 | apt-get purge && \ 32 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 33 | 34 | # Boost 35 | RUN cd /tmp && \ 36 | wget https://sourceforge.net/projects/boost/files/boost/1.72.0/boost_1_72_0.tar.gz/download -O boost_1_72_0.tar.gz && \ 37 | tar -xzf boost_1_72_0.tar.gz && cd boost_1_72_0 && \ 38 | ./bootstrap.sh --prefix=/usr/ && \ 39 | ./b2 && ./b2 install 40 | 41 | # BamTools 42 | RUN cd /tmp && mkdir tool && \ 43 | git clone https://github.com/pezmaster31/bamtools && \ 44 | cd bamtools && mkdir build && cd build && \ 45 | cmake -DCMAKE_INSTALL_PREFIX=/tmp/tool/ -DBUILD_SHARED_LIBS=ON .. 
&& \ 46 | make && make install && \ 47 | cd /tmp/tool/ && ln -s lib lib64 48 | 49 | # Miniconda and dependencies 50 | RUN cd /tmp/ && \ 51 | wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ 52 | bash Miniconda3-latest-Linux-x86_64.sh -b -p $HOME/miniconda3 && \ 53 | /root/miniconda3/condabin/conda install -y -c bioconda bwa samtools picard mummer minimap2 54 | ENV PATH=$PATH:/root/miniconda3/bin 55 | 56 | # Picard 57 | RUN cd /tmp/ && \ 58 | wget https://sourceforge.net/projects/picard/files/picard-tools/1.119/picard-tools-1.119.zip/download -O picard-tools-1.119.zip && \ 59 | unzip picard-tools-1.119.zip 60 | 61 | # HLA-LA 62 | RUN cd /tmp/ && \ 63 | mkdir HLA-LA HLA-LA/bin HLA-LA/src HLA-LA/obj HLA-LA/temp HLA-LA/working HLA-LA/graphs && \ 64 | cd HLA-LA/src && git clone https://github.com/DiltheyLab/HLA-LA.git . && \ 65 | make all BOOST_PATH=/usr/ BAMTOOLS_PATH=/tmp/tool/ 66 | ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp/tool/lib64 67 | 68 | COPY paths.ini /tmp/HLA-LA/src/ 69 | 70 | -------------------------------------------------------------------------------- /hla-la/README.md: -------------------------------------------------------------------------------- 1 | ## README 2 | 3 | Installing https://github.com/DiltheyLab/HLA-LA 4 | 5 | ## Installation 6 | 7 | Use Docker. Check out https://github.com/zlskidmore/docker-hla-la/blob/master/Dockerfile. 8 | 9 | docker build -f Dockerfile -t davetang/hla-la . 
10 | docker run --rm -it davetang/hla-la /bin/bash 11 | 12 | Follow instructions from https://stackoverflow.com/questions/12578499/how-to-install-boost-on-ubuntu to install Boost (requires >= 1.59) 13 | 14 | cd /tmp 15 | wget https://sourceforge.net/projects/boost/files/boost/1.72.0/boost_1_72_0.tar.gz/download -O boost_1_72_0.tar.gz 16 | tar -xzf boost_1_72_0.tar.gz && cd boost_1_72_0 17 | ./bootstrap.sh --prefix=/usr/ 18 | ./b2 && ./b2 install 19 | 20 | `CMAKE_INSTALL_PREFIX` is the root of your final installation directory; see https://github.com/pezmaster31/bamtools/wiki/Building-and-installing; 21 | 22 | cd /tmp && mkdir tool 23 | git clone https://github.com/pezmaster31/bamtools 24 | cd bamtools 25 | mkdir build && cd build 26 | cmake -DCMAKE_INSTALL_PREFIX=/tmp/tool/ -DBUILD_SHARED_LIBS=ON .. 27 | make 28 | make install 29 | cd /tmp/tool/ && ln -s lib lib64 30 | 31 | Miniconda 32 | 33 | cd /tmp/ 34 | wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh 35 | bash Miniconda3-latest-Linux-x86_64.sh -b -p $HOME/miniconda3 36 | /root/miniconda3/condabin/conda install -y -c bioconda bwa samtools picard mummer minimap2 37 | 38 | Picard 39 | 40 | cd /tmp/ 41 | wget https://sourceforge.net/projects/picard/files/picard-tools/1.119/picard-tools-1.119.zip/download -O picard-tools-1.119.zip 42 | unzip picard-tools-1.119.zip 43 | 44 | HLA-LA 45 | 46 | cd /tmp/ 47 | mkdir HLA-LA HLA-LA/bin HLA-LA/src HLA-LA/obj HLA-LA/temp HLA-LA/working HLA-LA/graphs 48 | cd HLA-LA/src; git clone https://github.com/DiltheyLab/HLA-LA.git . 49 | make all BOOST_PATH=/usr/ BAMTOOLS_PATH=/tmp/tool/ 50 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp/tool/lib64 51 | ../bin/HLA-LA --action testBinary 52 | 53 | Should see 54 | 55 | HLA*LA binary functional!
56 | 57 | ## Testing 58 | 59 | Download testing data and check integrity (md5sum): 60 | 61 | * PRG_MHC_GRCh38_withIMGT.tar.gz 525a8aa0c7f357bf29fe2c75ef1d477d 62 | * NA12878.mini.cram 45d1769ffed71418571c9a2414465a12 63 | 64 | Download tarball and CRAM file in `graphs` directory. 65 | 66 | cd /tmp/HLA-LA/graphs/ 67 | wget -c http://www.well.ox.ac.uk/downloads/PRG_MHC_GRCh38_withIMGT.tar.gz 68 | md5sum PRG_MHC_GRCh38_withIMGT.tar.gz 69 | tar -xzf PRG_MHC_GRCh38_withIMGT.tar.gz 70 | wget -c https://www.dropbox.com/s/xr99u3vqaimk4vo/NA12878.mini.cram?dl=0 -O NA12878.mini.cram 71 | 72 | Edit `paths.ini`. 73 | 74 | picard_sam2fastq_bin=/tmp/picard-tools-1.119/SamToFastq.jar 75 | samtools_bin=/root/miniconda3/bin/samtools 76 | bwa_bin=/root/miniconda3/bin/bwa 77 | nucmer_bin=/root/miniconda3/bin/nucmer 78 | dnadiff_bin=/root/miniconda3/bin/dnadiff 79 | minimap2_bin=/root/miniconda3/bin/minimap2 80 | workingDir=$HLA-LA-DIR/../working/ 81 | workingDir_HLA_ASM=$HLA-LA-DIR/output_HLA_ASM/ 82 | 83 | -------------------------------------------------------------------------------- /hla-la/paths.ini: -------------------------------------------------------------------------------- 1 | picard_sam2fastq_bin=/tmp/picard-tools-1.119/SamToFastq.jar 2 | samtools_bin=/root/miniconda3/bin/samtools 3 | bwa_bin=/root/miniconda3/bin/bwa 4 | nucmer_bin=/root/miniconda3/bin/nucmer 5 | dnadiff_bin=/root/miniconda3/bin/dnadiff 6 | minimap2_bin=/root/miniconda3/bin/minimap2 7 | workingDir=$HLA-LA-DIR/../working/ 8 | workingDir_HLA_ASM=$HLA-LA-DIR/output_HLA_ASM/ 9 | -------------------------------------------------------------------------------- /hugo/README.md: -------------------------------------------------------------------------------- 1 | ## Hugo 2 | 3 | For [Hugo](https://gohugo.io/) images, use . 
4 | -------------------------------------------------------------------------------- /igv/README.md: -------------------------------------------------------------------------------- 1 | ## README 2 | 3 | [IGV Web App](https://github.com/igvteam/igv-webapp) as a [Docker image](https://hub.docker.com/r/dceoy/igv-webapp). 4 | 5 | ```bash 6 | docker pull dceoy/igv-webapp 7 | ``` 8 | 9 | Run Docker. 10 | 11 | ```bash 12 | docker run --rm \ 13 | -p 8080:8080 \ 14 | dceoy/igv-webapp 15 | ``` 16 | 17 | Now head to http://localhost:8080/ and that's it! 18 | 19 | -------------------------------------------------------------------------------- /igv/run_docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | image=dceoy/igv-webapp 4 | 5 | docker run --rm \ 6 | --name=igv-webapp \ 7 | -d \ 8 | -p 8080:8080 \ 9 | $image 10 | 11 | -------------------------------------------------------------------------------- /mkdocs/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10.1 2 | 3 | MAINTAINER Dave Tang 4 | 5 | LABEL source="https://github.com/davetang/learning_docker/mkdocs" 6 | 7 | COPY requirements.txt /tmp/requirements.txt 8 | 9 | RUN python -m pip install --upgrade pip && pip install -r /tmp/requirements.txt 10 | 11 | RUN rm -rf /tmp/* 12 | 13 | WORKDIR /work 14 | 15 | CMD ["mkdocs", "--version"] 16 | -------------------------------------------------------------------------------- /mkdocs/README.md: -------------------------------------------------------------------------------- 1 | ## README 2 | 3 | Start new project. 4 | 5 | ```bash 6 | docker run \ 7 | --rm \ 8 | -u $(stat -c "%u:%g" README.md) \ 9 | -v $(pwd):/work \ 10 | davetang/mkdocs:0.0.1 \ 11 | mkdocs new test 12 | ``` 13 | 14 | Build. 
15 | 16 | ```bash 17 | docker run \ 18 | --rm \ 19 | -u $(stat -c "%u:%g" README.md) \ 20 | -v $(pwd)/test:/work \ 21 | davetang/mkdocs:0.0.1 \ 22 | mkdocs build 23 | ``` 24 | 25 | Serve. 26 | 27 | ```bash 28 | ./mkdocs_serve.sh 29 | ``` 30 | 31 | -------------------------------------------------------------------------------- /mkdocs/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | ver=0.0.3 6 | 7 | docker build -t davetang/mkdocs:${ver} . 8 | 9 | # docker login 10 | # docker push davetang/mkdocs:${ver} 11 | -------------------------------------------------------------------------------- /mkdocs/mkdocs_serve.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | # set project/directory name 6 | proj=test 7 | 8 | ver=0.0.3 9 | docker_image=davetang/mkdocs:${ver} 10 | container_name=mkdocs_dtang 11 | port=5555 12 | 13 | dir=$(pwd)/${proj}/site 14 | if [[ ! -x $(command -v docker) ]]; then 15 | >&2 echo Could not find docker 16 | exit 1 17 | fi 18 | 19 | check_image=$(docker image inspect ${docker_image}) 20 | 21 | if [[ ! -d ${proj} ]]; then 22 | >&2 echo Creating ${proj} 23 | docker run \ 24 | --rm \ 25 | -u $(stat -c "%u:%g" README.md) \ 26 | -v $(pwd):/work \ 27 | ${docker_image} \ 28 | mkdocs new ${proj} 29 | fi 30 | 31 | if [[ ! 
-d ${proj}/site ]]; then 32 | >&2 echo Building ${proj} 33 | docker run \ 34 | --rm \ 35 | -u $(stat -c "%u:%g" README.md) \ 36 | -v $(pwd)/${proj}:/work \ 37 | ${docker_image} \ 38 | mkdocs build 39 | fi 40 | 41 | docker run \ 42 | --rm \ 43 | -d \ 44 | -p ${port}:${port} \ 45 | --name ${container_name} \ 46 | -v ${dir}:/work \ 47 | ${docker_image} \ 48 | python -m http.server ${port} 49 | 50 | >&2 echo ${container_name} listening on port $port 51 | >&2 echo Copy and paste http://localhost:$port into your browser 52 | >&2 echo To stop container run: docker stop ${container_name} 53 | >&2 echo Done 54 | 55 | exit 0 56 | -------------------------------------------------------------------------------- /mkdocs/requirements.txt: -------------------------------------------------------------------------------- 1 | # Documentation static site generator & deployment tool 2 | mkdocs>=1.1.2 3 | 4 | # Add your custom theme if not inside a theme_dir 5 | # (https://github.com/mkdocs/mkdocs/wiki/MkDocs-Themes) 6 | # mkdocs-material>=5.4.0 7 | 8 | python-markdown-math 9 | pymdown-extensions 10 | markdown-exec[ansi] 11 | -------------------------------------------------------------------------------- /mkdocs_site/docs/index.md: -------------------------------------------------------------------------------- 1 | Learning Docker 2 | ================ 3 | 4 | ## Introduction 5 | 6 | Docker is an open source project that allows one to pack, ship, and run any application as a lightweight container. An analogy of Docker containers are shipping containers, which provide a standard and consistent way of shipping just about anything. The container includes everything that is needed for an application to run including the code, system tools, and the necessary dependencies. If you wanted to test an application, all you need to do is to download the Docker image and run it in a new container. No more compiling and installing missing dependencies! 
7 | 8 | The [overview](https://docs.docker.com/get-started/overview/) provides more information. For a more hands-on approach, check out [Know Enough Docker to be Dangerous](https://docs.docker.com/) and [this short workshop](https://davetang.github.io/reproducible_bioinformatics/docker.html) that I prepared for BioC Asia 2019. 9 | 10 | This README was generated from the R Markdown file `readme.Rmd`, which can be executed via the `create_readme.sh` script. 11 | 12 | ## Installing the Docker Engine 13 | 14 | To get started, you will need to install the Docker Engine; check out [this guide](https://docs.docker.com/engine/install/). 15 | 16 | ## Checking your installation 17 | 18 | To see if everything is working, try to obtain the Docker version. 19 | 20 | ``` bash 21 | docker --version 22 | ``` 23 | 24 | ## Docker version 20.10.7, build f0df350 25 | 26 | And run the `hello-world` image. (The `--rm` parameter is used to automatically remove the container when it exits.) 27 | 28 | ``` bash 29 | docker run --rm hello-world 30 | ``` 31 | 32 | ## 33 | ## Hello from Docker! 34 | ## This message shows that your installation appears to be working correctly. 35 | ## 36 | ## To generate this message, Docker took the following steps: 37 | ## 1. The Docker client contacted the Docker daemon. 38 | ## 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. 39 | ## (amd64) 40 | ## 3. The Docker daemon created a new container from that image which runs the 41 | ## executable that produces the output you are currently reading. 42 | ## 4. The Docker daemon streamed that output to the Docker client, which sent it 43 | ## to your terminal.
44 | ## 45 | ## To try something more ambitious, you can run an Ubuntu container with: 46 | ## $ docker run -it ubuntu bash 47 | ## 48 | ## Share images, automate workflows, and more with a free Docker ID: 49 | ## https://hub.docker.com/ 50 | ## 51 | ## For more examples and ideas, visit: 52 | ## https://docs.docker.com/engine/userguide/ 53 | 54 | ## Basics 55 | 56 | The two guides linked in the introduction section provide some information on the basic commands but I'll include some here as well. One of the main reasons I use Docker is for building tools. For this purpose, I use Docker like a virtual machine, where I can install whatever I want. This is important because I can do my testing in an isolated environment and not worry about affecting the main server. I like to use Ubuntu because it's a popular Linux distribution and therefore whenever I run into a problem, chances are higher that someone else has had the same problem, asked a question on a forum, and received a solution. 57 | 58 | Before we can run Ubuntu using Docker, we need an image. We can obtain an Ubuntu image from the [official Ubuntu image repository](https://hub.docker.com/_/ubuntu/) from Docker Hub by running `docker pull`. 59 | 60 | ``` bash 61 | docker pull ubuntu:18.04 62 | ``` 63 | 64 | ## 18.04: Pulling from library/ubuntu 65 | ## Digest: sha256:7bd7a9ca99f868bf69c4b6212f64f2af8e243f97ba13abb3e641e03a7ceb59e8 66 | ## Status: Image is up to date for ubuntu:18.04 67 | ## docker.io/library/ubuntu:18.04 68 | 69 | To run Ubuntu using Docker, we use `docker run`. 
70 | 71 | ``` bash 72 | docker run --rm ubuntu:18.04 cat /etc/os-release 73 | ``` 74 | 75 | ## NAME="Ubuntu" 76 | ## VERSION="18.04.5 LTS (Bionic Beaver)" 77 | ## ID=ubuntu 78 | ## ID_LIKE=debian 79 | ## PRETTY_NAME="Ubuntu 18.04.5 LTS" 80 | ## VERSION_ID="18.04" 81 | ## HOME_URL="https://www.ubuntu.com/" 82 | ## SUPPORT_URL="https://help.ubuntu.com/" 83 | ## BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" 84 | ## PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" 85 | ## VERSION_CODENAME=bionic 86 | ## UBUNTU_CODENAME=bionic 87 | 88 | You can work interactively with the Ubuntu image by specifying the `-it` option. 89 | 90 | ``` bash 91 | docker run --rm -it ubuntu:18.04 /bin/bash 92 | ``` 93 | 94 | You may have noticed that I keep using the `--rm` option, which removes the container once you quit. If you don't use this option, the container is saved up until the point that you exit; all changes you made, files you created, etc. are saved. Why am I deleting all my changes? Because there is a better (and more reproducible) way to make changes to the system and that is by using a Dockerfile. 95 | 96 | ## Dockerfile 97 | 98 | A Dockerfile is a text file that contains instructions for building Docker images. A Dockerfile adheres to a specific format and set of instructions, which you can find at [Dockerfile reference](https://docs.docker.com/engine/reference/builder/). There is also a [Best practices guide](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/) for writing Dockerfiles. 99 | 100 | I have an example Dockerfile that uses the Ubuntu 18.04 image to build [BWA](https://github.com/lh3/bwa), a popular short read alignment tool used in bioinformatics.
101 | 102 | ``` bash 103 | cat Dockerfile 104 | ``` 105 | 106 | ## FROM ubuntu:18.04 107 | ## 108 | ## MAINTAINER Dave Tang 109 | ## LABEL source="https://github.com/davetang/learning_docker/blob/master/Dockerfile" 110 | ## 111 | ## RUN apt-get clean all && \ 112 | ## apt-get update && \ 113 | ## apt-get upgrade -y && \ 114 | ## apt-get install -y \ 115 | ## build-essential \ 116 | ## wget \ 117 | ## zlib1g-dev && \ 118 | ## apt-get clean all && \ 119 | ## apt-get purge && \ 120 | ## rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 121 | ## 122 | ## RUN mkdir /src && \ 123 | ## cd /src && \ 124 | ## wget https://github.com/lh3/bwa/releases/download/v0.7.17/bwa-0.7.17.tar.bz2 && \ 125 | ## tar xjf bwa-0.7.17.tar.bz2 && \ 126 | ## cd bwa-0.7.17 && \ 127 | ## make && \ 128 | ## mv bwa /usr/local/bin && \ 129 | ## cd && rm -rf /src 130 | ## 131 | ## WORKDIR /work 132 | ## 133 | ## CMD ["bwa"] 134 | 135 | ## CMD 136 | 137 | The [CMD](https://docs.docker.com/engine/reference/builder/#cmd) instruction in a Dockerfile does not execute anything at build time but specifies the intended command for the image; there can only be one CMD instruction in a Dockerfile and if you list more than one CMD then only the last CMD will take effect. The main purpose of a CMD is to provide defaults for an executing container. 138 | 139 | ## ENTRYPOINT 140 | 141 | An [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint) allows you to configure a container that will run as an executable. ENTRYPOINT has two forms: 142 | 143 | - ENTRYPOINT \["executable", "param1", "param2"\] (exec form, preferred) 144 | - ENTRYPOINT command param1 param2 (shell form) 145 | 146 | ``` bash 147 | FROM ubuntu 148 | ENTRYPOINT ["top", "-b"] 149 | CMD ["-c"] 150 | ``` 151 | 152 | Use `--entrypoint` to override ENTRYPOINT instruction. 
153 | 154 | ``` bash 155 | docker run --entrypoint 156 | ``` 157 | 158 | ## Building an image 159 | 160 | Use the `build` subcommand to build Docker images and use the `-f` parameter if your Dockerfile is named as something else otherwise Docker will look for a file named `Dockerfile`. The period at the end, tells Docker to look in the current directory. 161 | 162 | ``` bash 163 | cat build.sh 164 | ``` 165 | 166 | ## #!/usr/bin/env bash 167 | ## 168 | ## set -euo pipefail 169 | ## 170 | ## ver=0.7.17 171 | ## 172 | ## docker build -t davetang/bwa:${ver} . 173 | 174 | You can push the built image to [Docker Hub](https://hub.docker.com/) if you have an account. I have used my Docker Hub account name to name my Docker image. 175 | 176 | ``` bash 177 | # use -f to specify the Dockerfile to use 178 | # the period indicates that the Dockerfile is in the current directory 179 | docker build -f Dockerfile.base -t davetang/base . 180 | 181 | # log into Docker Hub 182 | docker login 183 | 184 | # push to Docker Hub 185 | docker push davetang/base 186 | ``` 187 | 188 | ## Renaming an image 189 | 190 | Use `docker image tag`. 191 | 192 | ``` bash 193 | docker image tag old_image_name:latest new_image_name:latest 194 | ``` 195 | 196 | ## Running an image 197 | 198 | [Docker run documentation](https://docs.docker.com/engine/reference/run/). 
199 | 200 | ``` bash 201 | docker run --rm davetang/bwa:0.7.17 202 | ``` 203 | 204 | ## 205 | ## Program: bwa (alignment via Burrows-Wheeler transformation) 206 | ## Version: 0.7.17-r1188 207 | ## Contact: Heng Li 208 | ## 209 | ## Usage: bwa [options] 210 | ## 211 | ## Command: index index sequences in the FASTA format 212 | ## mem BWA-MEM algorithm 213 | ## fastmap identify super-maximal exact matches 214 | ## pemerge merge overlapping paired ends (EXPERIMENTAL) 215 | ## aln gapped/ungapped alignment 216 | ## samse generate alignment (single ended) 217 | ## sampe generate alignment (paired ended) 218 | ## bwasw BWA-SW for long queries 219 | ## 220 | ## shm manage indices in shared memory 221 | ## fa2pac convert FASTA to PAC format 222 | ## pac2bwt generate BWT from PAC 223 | ## pac2bwtgen alternative algorithm for generating BWT 224 | ## bwtupdate update .bwt to the new format 225 | ## bwt2sa generate SA from BWT and Occ 226 | ## 227 | ## Note: To use BWA, you need to first index the genome with `bwa index'. 228 | ## There are three alignment algorithms in BWA: `mem', `bwasw', and 229 | ## `aln/samse/sampe'. If you are not sure which to use, try `bwa mem' 230 | ## first. Please `man ./bwa.1' for the manual. 231 | 232 | ## Resource usage 233 | 234 | To [restrict](https://docs.docker.com/config/containers/resource_constraints/) CPU usage use `--cpus=n` and use `--memory=` to restrict the maximum amount of memory the container can use. 235 | 236 | We can confirm the limited CPU usage by running an endless while loop and using `docker stats` to confirm the CPU usage. *Remember to use `docker stop` to stop the container after confirming the usage!* 237 | 238 | Restrict to 1 CPU. 
239 | 240 | ``` bash 241 | # run in detached mode 242 | docker run --rm -d --cpus=1 davetang/bwa:0.7.17 perl -le 'while(1){ }' 243 | 244 | # check stats and use control+c to exit 245 | docker stats 246 | CONTAINER ID NAME CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O PIDS 247 | 8cc20bcfa4f4 vigorous_khorana 100.59% 572KiB / 1.941GiB 0.03% 736B / 0B 0B / 0B 1 248 | 249 | docker stop 8cc20bcfa4f4 250 | ``` 251 | 252 | Restrict to 1/2 CPU. 253 | 254 | ``` bash 255 | # run in detached mode 256 | docker run --rm -d --cpus=0.5 davetang/bwa:0.7.17 perl -le 'while(1){ }' 257 | 258 | # check stats and use control+c to exit 259 | docker stats 260 | 261 | CONTAINER ID NAME CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O PIDS 262 | af6e812a94da unruffled_liskov 50.49% 584KiB / 1.941GiB 0.03% 736B / 0B 0B / 0B 1 263 | 264 | docker stop af6e812a94da 265 | ``` 266 | 267 | ## Copying files between host and container 268 | 269 | Use `docker cp` but I recommend mounting a volume to a Docker container (see next section). 270 | 271 | ``` bash 272 | docker cp --help 273 | 274 | Usage: docker cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- 275 | docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH 276 | 277 | Copy files/folders between a container and the local filesystem 278 | 279 | Options: 280 | -L, --follow-link Always follow symbol link in SRC_PATH 281 | --help Print usage 282 | 283 | # find container name 284 | docker ps -a 285 | 286 | # create file to transfer 287 | echo hi > hi.txt 288 | 289 | docker cp hi.txt fee424ef6bf0:/root/ 290 | 291 | # start container 292 | docker start -ai fee424ef6bf0 293 | 294 | # inside container 295 | cat /root/hi.txt 296 | hi 297 | 298 | # create file inside container 299 | echo bye > /root/bye.txt 300 | exit 301 | 302 | # transfer file from container to host 303 | docker cp fee424ef6bf0:/root/bye.txt . 
304 | 305 | cat bye.txt 306 | bye 307 | ``` 308 | 309 | ## Sharing between host and container 310 | 311 | Use the `-v` flag to mount directories to a container so that you can share files between the host and container. 312 | 313 | In the example below, I am mounting `data` from the current directory (using the Unix command `pwd`) to `/work` in the container. I am working from the root directory of this GitHub repository, which contains the `data` directory. 314 | 315 | ``` bash 316 | ls data 317 | ``` 318 | 319 | ## README.md 320 | ## chrI.fa.gz 321 | 322 | Any output written to `/work` inside the container, will be accessible inside `data` on the host. The command below will create BWA index files for `data/chrI.fa.gz`. 323 | 324 | ``` bash 325 | docker run --rm -v $(pwd)/data:/work davetang/bwa:0.7.17 bwa index chrI.fa.gz 326 | ``` 327 | 328 | ## [bwa_index] Pack FASTA... 0.21 sec 329 | ## [bwa_index] Construct BWT for the packed sequence... 330 | ## [bwa_index] 10.72 seconds elapse. 331 | ## [bwa_index] Update BWT... 0.09 sec 332 | ## [bwa_index] Pack forward-only FASTA... 0.14 sec 333 | ## [bwa_index] Construct SA from BWT and Occ... 3.92 sec 334 | ## [main] Version: 0.7.17-r1188 335 | ## [main] CMD: bwa index chrI.fa.gz 336 | ## [main] Real time: 15.589 sec; CPU: 15.124 sec 337 | 338 | We can see the newly created index files. 
339 | 340 | ``` bash 341 | ls -lrt data 342 | ``` 343 | 344 | ## total 63416 345 | ## -rw-r--r--@ 1 dtang staff 4772981 12 Sep 2015 chrI.fa.gz 346 | ## -rw-r--r-- 1 dtang staff 194 14 Aug 11:50 README.md 347 | ## -rw-r--r-- 1 dtang staff 15072516 14 Aug 21:56 chrI.fa.gz.bwt 348 | ## -rw-r--r-- 1 dtang staff 3768110 14 Aug 21:56 chrI.fa.gz.pac 349 | ## -rw-r--r-- 1 dtang staff 41 14 Aug 21:56 chrI.fa.gz.ann 350 | ## -rw-r--r-- 1 dtang staff 13 14 Aug 21:56 chrI.fa.gz.amb 351 | ## -rw-r--r-- 1 dtang staff 7536272 14 Aug 21:56 chrI.fa.gz.sa 352 | 353 | Remove the index files, since we no longer need them 354 | 355 | ``` bash 356 | rm data/chrI.fa.gz.* 357 | ``` 358 | 359 | ### File permissions 360 | 361 | On newer version of Docker, you no longer have to worry about this. However, if you find that the file created inside your container on a mounted volume are owned by `root`, read on. 362 | 363 | The files created inside the Docker container will be owned by root; inside the Docker container, you are `root` and the files you produce will have `root` permissions. 364 | 365 | ``` bash 366 | ls -lrt 367 | total 2816 368 | -rw-r--r-- 1 1211 1211 1000015 Apr 27 02:00 ref.fa 369 | -rw-r--r-- 1 1211 1211 21478 Apr 27 02:00 l100_n100_d400_31_2.fq 370 | -rw-r--r-- 1 1211 1211 21478 Apr 27 02:00 l100_n100_d400_31_1.fq 371 | -rw-r--r-- 1 1211 1211 119 Apr 27 02:01 run.sh 372 | -rw-r--r-- 1 root root 1000072 Apr 27 02:03 ref.fa.bwt 373 | -rw-r--r-- 1 root root 250002 Apr 27 02:03 ref.fa.pac 374 | -rw-r--r-- 1 root root 40 Apr 27 02:03 ref.fa.ann 375 | -rw-r--r-- 1 root root 12 Apr 27 02:03 ref.fa.amb 376 | -rw-r--r-- 1 root root 500056 Apr 27 02:03 ref.fa.sa 377 | -rw-r--r-- 1 root root 56824 Apr 27 02:04 aln.sam 378 | ``` 379 | 380 | This is problematic because when you're back in the host environment, you can't modify these files. To circumvent this, create a user that matches the host user by passing three environmental variables from the host to the container. 
381 | 382 | ``` bash 383 | docker run -it \ 384 | -v ~/my_data:/data \ 385 | -e MYUID=`id -u` \ 386 | -e MYGID=`id -g` \ 387 | -e ME=`whoami` \ 388 | bwa /bin/bash 389 | ``` 390 | 391 | Use the steps below to create an identical user inside the container. 392 | 393 | ``` bash 394 | adduser --quiet --home /home/san/$ME --no-create-home --gecos "" --shell /bin/bash --disabled-password $ME 395 | 396 | # optional: give yourself admin privileges 397 | echo "%$ME ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers 398 | 399 | # update the IDs to those passed into Docker via environment variable 400 | sed -i -e "s/1000:1000/$MYUID:$MYGID/g" /etc/passwd 401 | sed -i -e "s/$ME:x:1000/$ME:x:$MYGID/" /etc/group 402 | 403 | # su - as the user 404 | exec su - $ME 405 | 406 | # run BWA again, after you have deleted the old files as root 407 | bwa index ref.fa 408 | bwa mem ref.fa l100_n100_d400_31_1.fq l100_n100_d400_31_2.fq > aln.sam 409 | 410 | # check output 411 | ls -lrt 412 | total 2816 413 | -rw-r--r-- 1 dtang dtang 1000015 Apr 27 02:00 ref.fa 414 | -rw-r--r-- 1 dtang dtang 21478 Apr 27 02:00 l100_n100_d400_31_2.fq 415 | -rw-r--r-- 1 dtang dtang 21478 Apr 27 02:00 l100_n100_d400_31_1.fq 416 | -rw-r--r-- 1 dtang dtang 119 Apr 27 02:01 run.sh 417 | -rw-rw-r-- 1 dtang dtang 1000072 Apr 27 02:12 ref.fa.bwt 418 | -rw-rw-r-- 1 dtang dtang 250002 Apr 27 02:12 ref.fa.pac 419 | -rw-rw-r-- 1 dtang dtang 40 Apr 27 02:12 ref.fa.ann 420 | -rw-rw-r-- 1 dtang dtang 12 Apr 27 02:12 ref.fa.amb 421 | -rw-rw-r-- 1 dtang dtang 500056 Apr 27 02:12 ref.fa.sa 422 | -rw-rw-r-- 1 dtang dtang 56824 Apr 27 02:12 aln.sam 423 | 424 | # exit container 425 | exit 426 | ``` 427 | 428 | The files will be saved in `~/my_data` on the host. 
429 | 430 | ``` bash 431 | ls -lrt ~/my_data 432 | total 2816 433 | -rw-r--r-- 1 dtang dtang 1000015 Apr 27 10:00 ref.fa 434 | -rw-r--r-- 1 dtang dtang 21478 Apr 27 10:00 l100_n100_d400_31_2.fq 435 | -rw-r--r-- 1 dtang dtang 21478 Apr 27 10:00 l100_n100_d400_31_1.fq 436 | -rw-r--r-- 1 dtang dtang 119 Apr 27 10:01 run.sh 437 | -rw-rw-r-- 1 dtang dtang 1000072 Apr 27 10:12 ref.fa.bwt 438 | -rw-rw-r-- 1 dtang dtang 250002 Apr 27 10:12 ref.fa.pac 439 | -rw-rw-r-- 1 dtang dtang 40 Apr 27 10:12 ref.fa.ann 440 | -rw-rw-r-- 1 dtang dtang 12 Apr 27 10:12 ref.fa.amb 441 | -rw-rw-r-- 1 dtang dtang 500056 Apr 27 10:12 ref.fa.sa 442 | -rw-rw-r-- 1 dtang dtang 56824 Apr 27 10:12 aln.sam 443 | ``` 444 | 445 | ### File Permissions 2 446 | 447 | An easier way to set file permissions is to use the `-u` parameter. 448 | 449 | ``` bash 450 | # assuming blah.fa exists in /local/data/ 451 | docker run -v /local/data:/data -u `stat -c "%u:%g" /local/data` bwa bwa index /data/blah.fa 452 | ``` 453 | 454 | ### Read only 455 | 456 | To mount a volume but with read-only permissions, append `:ro` at the end. 457 | 458 | ``` bash 459 | docker run --rm -v $(pwd):/work:ro davetang/bwa:0.7.17 touch test.txt 460 | ``` 461 | 462 | ## touch: cannot touch 'test.txt': Read-only file system 463 | 464 | ## Removing the image 465 | 466 | Use `docker rmi` to remove an image. You will need to remove any stopped containers first before you can remove an image. Use `docker ps -a` to find stopped containers and `docker rm` to remove these containers. 467 | 468 | Let's pull the `busybox` image. 
469 | 470 | ``` bash 471 | docker pull busybox 472 | ``` 473 | 474 | ## Using default tag: latest 475 | ## latest: Pulling from library/busybox 476 | ## b71f96345d44: Pulling fs layer 477 | ## b71f96345d44: Download complete 478 | ## b71f96345d44: Pull complete 479 | ## Digest: sha256:0f354ec1728d9ff32edcd7d1b8bbdfc798277ad36120dc3dc683be44524c8b60 480 | ## Status: Downloaded newer image for busybox:latest 481 | ## docker.io/library/busybox:latest 482 | 483 | Check out `busybox`. 484 | 485 | ``` bash 486 | docker images busybox 487 | ``` 488 | 489 | ## REPOSITORY TAG IMAGE ID CREATED SIZE 490 | ## busybox latest 69593048aa3a 2 months ago 1.24MB 491 | 492 | Remove `busybox`. 493 | 494 | ``` bash 495 | docker rmi busybox 496 | ``` 497 | 498 | ## Untagged: busybox:latest 499 | ## Untagged: busybox@sha256:0f354ec1728d9ff32edcd7d1b8bbdfc798277ad36120dc3dc683be44524c8b60 500 | ## Deleted: sha256:69593048aa3acfee0f75f20b77acb549de2472063053f6730c4091b53f2dfb02 501 | ## Deleted: sha256:5b8c72934dfc08c7d2bd707e93197550f06c0751023dabb3a045b723c5e7b373 502 | 503 | ## Committing changes 504 | 505 | Generally, it is better to use a Dockerfile to manage your images in a documented and maintainable way but if you still want to [commit changes](https://docs.docker.com/engine/reference/commandline/commit/) to your container (like you would for Git), read on. 506 | 507 | When you log out of a container, the changes made are still stored; type `docker ps -a` to see all containers and the latest changes. Use `docker commit` to commit your changes. 
508 | 509 | ``` bash 510 | docker ps -a 511 | 512 | # git style commit 513 | # -a, --author= Author (e.g., "John Hannibal Smith ") 514 | # -m, --message= Commit message 515 | docker commit -m 'Made change to blah' -a 'Dave Tang' 516 | 517 | # use docker history to check history 518 | docker history 519 | ``` 520 | 521 | ## Access running container 522 | 523 | To access a container that is already running, perhaps in the background (using detached mode: `docker run` with `-d`) use `docker ps` to find the name of the container and then use `docker exec`. 524 | 525 | In the example below, my container name is `rstudio_dtang`. 526 | 527 | ``` bash 528 | docker exec -it rstudio_dtang /bin/bash 529 | ``` 530 | 531 | ## Cleaning up exited containers 532 | 533 | I typically use the `--rm` flag with `docker run` so that containers are automatically removed after I exit them. However, if you don't use `--rm`, by default a container's file system persists even after the container exits. For example: 534 | 535 | ``` bash 536 | docker run hello-world 537 | ``` 538 | 539 | ## 540 | ## Hello from Docker! 541 | ## This message shows that your installation appears to be working correctly. 542 | ## 543 | ## To generate this message, Docker took the following steps: 544 | ## 1. The Docker client contacted the Docker daemon. 545 | ## 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. 546 | ## (amd64) 547 | ## 3. The Docker daemon created a new container from that image which runs the 548 | ## executable that produces the output you are currently reading. 549 | ## 4. The Docker daemon streamed that output to the Docker client, which sent it 550 | ## to your terminal. 
551 | ## 552 | ## To try something more ambitious, you can run an Ubuntu container with: 553 | ## $ docker run -it ubuntu bash 554 | ## 555 | ## Share images, automate workflows, and more with a free Docker ID: 556 | ## https://hub.docker.com/ 557 | ## 558 | ## For more examples and ideas, visit: 559 | ## https://docs.docker.com/engine/userguide/ 560 | 561 | Show all containers. 562 | 563 | ``` bash 564 | docker ps -a 565 | ``` 566 | 567 | ## CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 568 | ## 4f204ca3b3c2 hello-world "/hello" 1 second ago Exited (0) Less than a second ago keen_carver 569 | 570 | We can use a sub-shell to get all (`-a`) container IDs (`-q`) that have exited (`-f status=exited`) and then remove them (`docker rm -v`). 571 | 572 | ``` bash 573 | docker rm -v $(docker ps -a -q -f status=exited) 574 | ``` 575 | 576 | ## 4f204ca3b3c2 577 | 578 | Check to see if the container still exists. 579 | 580 | ``` bash 581 | docker ps -a 582 | ``` 583 | 584 | ## CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 585 | 586 | We can set this up as a Bash script so that we can easily remove exited containers. In the Bash script `-z` returns true if `$exited` is empty, i.e. no exited containers, so we will only run the command when `$exited` is not true. 587 | 588 | ``` bash 589 | cat clean_up_docker.sh 590 | ``` 591 | 592 | ## #!/usr/bin/env bash 593 | ## 594 | ## set -euo pipefail 595 | ## 596 | ## exited=`docker ps -a -q -f status=exited` 597 | ## 598 | ## if [[ ! -z ${exited} ]]; then 599 | ## docker rm -v $(docker ps -a -q -f status=exited) 600 | ## fi 601 | ## 602 | ## exit 0 603 | 604 | As I have mentioned, you can use the [--rm](https://docs.docker.com/engine/reference/run/#clean-up---rm) parameter to automatically clean up the container and remove the file system when the container exits. 605 | 606 | ``` bash 607 | docker run --rm hello-world 608 | ``` 609 | 610 | ## 611 | ## Hello from Docker! 
612 | ## This message shows that your installation appears to be working correctly. 613 | ## 614 | ## To generate this message, Docker took the following steps: 615 | ## 1. The Docker client contacted the Docker daemon. 616 | ## 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. 617 | ## (amd64) 618 | ## 3. The Docker daemon created a new container from that image which runs the 619 | ## executable that produces the output you are currently reading. 620 | ## 4. The Docker daemon streamed that output to the Docker client, which sent it 621 | ## to your terminal. 622 | ## 623 | ## To try something more ambitious, you can run an Ubuntu container with: 624 | ## $ docker run -it ubuntu bash 625 | ## 626 | ## Share images, automate workflows, and more with a free Docker ID: 627 | ## https://hub.docker.com/ 628 | ## 629 | ## For more examples and ideas, visit: 630 | ## https://docs.docker.com/engine/userguide/ 631 | 632 | No containers. 633 | 634 | ``` bash 635 | docker ps -a 636 | ``` 637 | 638 | ## CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 639 | 640 | ## Installing Perl modules 641 | 642 | Use `cpanminus`. 643 | 644 | ``` bash 645 | apt-get install -y cpanminus 646 | 647 | # install some Perl modules 648 | cpanm Archive::Extract Archive::Zip DBD::mysql 649 | ``` 650 | 651 | ## Creating a data container 652 | 653 | This [guide on working with Docker data volumes](https://www.digitalocean.com/community/tutorials/how-to-work-with-docker-data-volumes-on-ubuntu-14-04) provides a really nice introduction. Use `docker create` to create a data container; the `-v` indicates the directory for the data container; the `--name data_container` indicates the name of the data container; and `ubuntu` is the image to be used for the container. 
654 | 655 | ``` bash 656 | docker create -v /tmp --name data_container ubuntu 657 | ``` 658 | 659 | If we run a new Ubuntu container with the `--volumes-from` flag, output written to the `/tmp` directory will be saved to the `/tmp` directory of the `data_container` container. 660 | 661 | ``` bash 662 | docker run -it --volumes-from data_container ubuntu /bin/bash 663 | ``` 664 | 665 | ## R 666 | 667 | Use images from [The Rocker Project](https://www.rocker-project.org/), for example `rocker/r-ver:4.1.0`. 668 | 669 | ``` bash 670 | docker run --rm rocker/r-ver:4.1.0 671 | ``` 672 | 673 | ## 674 | ## R version 4.1.0 (2021-05-18) -- "Camp Pontanezen" 675 | ## Copyright (C) 2021 The R Foundation for Statistical Computing 676 | ## Platform: x86_64-pc-linux-gnu (64-bit) 677 | ## 678 | ## R is free software and comes with ABSOLUTELY NO WARRANTY. 679 | ## You are welcome to redistribute it under certain conditions. 680 | ## Type 'license()' or 'licence()' for distribution details. 681 | ## 682 | ## R is a collaborative project with many contributors. 683 | ## Type 'contributors()' for more information and 684 | ## 'citation()' on how to cite R or R packages in publications. 685 | ## 686 | ## Type 'demo()' for some demos, 'help()' for on-line help, or 687 | ## 'help.start()' for an HTML browser interface to help. 688 | ## Type 'q()' to quit R. 689 | ## 690 | ## > 691 | 692 | ## Saving and transferring a Docker image 693 | 694 | You should just share the Dockerfile used to create your image but if you need another way to save and share an image, see [this post](http://stackoverflow.com/questions/23935141/how-to-copy-docker-images-from-one-host-to-another-without-via-repository) on Stack Overflow. 695 | 696 | ``` bash 697 | docker save -o 698 | docker load -i 699 | ``` 700 | 701 | Here's an example. 
702 | 703 | ``` bash 704 | # save on Unix server 705 | docker save -o davebox.tar davebox 706 | 707 | # copy file to MacBook Pro 708 | scp davetang@192.168.0.31:/home/davetang/davebox.tar . 709 | 710 | docker load -i davebox.tar 711 | 93c22f563196: Loading layer [==================================================>] 134.6 MB/134.6 MB 712 | ... 713 | 714 | docker images 715 | REPOSITORY TAG IMAGE ID CREATED SIZE 716 | davebox latest d38f27446445 10 days ago 3.46 GB 717 | 718 | docker run davebox samtools 719 | 720 | Program: samtools (Tools for alignments in the SAM format) 721 | Version: 1.3 (using htslib 1.3) 722 | 723 | Usage: samtools [options] 724 | ... 725 | ``` 726 | 727 | ## Pushing to Docker Hub 728 | 729 | Create an account on [Docker Hub](https://hub.docker.com/); my account is `davetang`. Use `docker login` to login and use `docker push` to push to Docker Hub (run `docker tag` first if you didn't name your image in the format of `yourhubusername/newrepo`). 730 | 731 | ``` bash 732 | docker login 733 | 734 | # create repo on Docker Hub then tag your image 735 | docker tag bb38976d03cf yourhubusername/newrepo 736 | 737 | # push 738 | docker push yourhubusername/newrepo 739 | ``` 740 | 741 | ## Tips 742 | 743 | Tip: each RUN, COPY, and ADD command in a Dockerfile generates another layer in the container thus increasing its size; use multi-line commands and clean up package manager caches to minimise image size: 744 | 745 | ``` bash 746 | RUN apt-get update \ 747 | && apt-get install -y \ 748 | autoconf \ 749 | automake \ 750 | gcc \ 751 | g++ \ 752 | python \ 753 | python-dev \ 754 | && apt-get clean all \ 755 | && rm -rf /var/lib/apt/lists/* 756 | ``` 757 | 758 | ## Useful links 759 | 760 | - [A quick introduction to Docker](http://blog.scottlowe.org/2014/03/11/a-quick-introduction-to-docker/) 761 | - [The BioDocker project](https://github.com/BioDocker/biodocker); check out their [Wiki](https://github.com/BioDocker/biodocker/wiki), which has a lot of 
useful information 762 | - [The impact of Docker containers on the performance of genomic pipelines](http://www.ncbi.nlm.nih.gov/pubmed/26421241) 763 | - [Learn enough Docker to be useful](https://towardsdatascience.com/learn-enough-docker-to-be-useful-b0b44222eef5) 764 | - [10 things to avoid in Docker containers](http://developers.redhat.com/blog/2016/02/24/10-things-to-avoid-in-docker-containers/) 765 | - The [Play with Docker classroom](https://training.play-with-docker.com/) brings you labs and tutorials that help you get hands-on experience using Docker 766 | - [Shifter](https://github.com/NERSC/shifter) enables container images for HPC 767 | - 768 | -------------------------------------------------------------------------------- /mkdocs_site/mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: Learning how to use Docker 2 | theme: readthedocs 3 | -------------------------------------------------------------------------------- /mysql/README.md: -------------------------------------------------------------------------------- 1 | ## MySQL 2 | 3 | Use https://hub.docker.com/_/mysql/?tab=description 4 | 5 | docker pull mysql:8 6 | 7 | Starting a MySQL instance; 8 | 9 | docker run -p 3306:3306 --name docker_mysql -v ~/mysql:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=password -d mysql:8 10 | docker port docker_mysql 3306 11 | 12 | where `--name` is the name you want to assign to your container and password is the password to be set for the MySQL root user. The -v `~/mysql:/var/lib/mysql` part of the command mounts `~/mysql` directory from the underlying host system as `/var/lib/mysql` inside the container, where MySQL by default will write its data files. 
13 | 14 | You can get the IP address by using `docker network` (see https://docs.docker.com/network/network-tutorial-standalone/): 15 | 16 | docker network inspect bridge 17 | 18 | You can use the same image to connect: 19 | 20 | docker run -it --network bridge --rm mysql:8 mysql -h 172.17.0.2 -u root -p 21 | 22 | If you have the `mysql` client installed, you can use the client too: 23 | 24 | mysql -h 0.0.0.0 -u root -p 25 | 26 | -------------------------------------------------------------------------------- /r/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rocker/r-ver:4.4.0 2 | 3 | MAINTAINER Dave Tang 4 | 5 | LABEL source="https://github.com/davetang/learning_docker/r" 6 | 7 | RUN apt-get clean all && \ 8 | apt-get update && \ 9 | apt-get upgrade -y && \ 10 | apt-get install -y \ 11 | autoconf \ 12 | build-essential \ 13 | default-jre \ 14 | gettext \ 15 | git-core \ 16 | libhdf5-dev \ 17 | libcurl4-gnutls-dev \ 18 | libssl-dev \ 19 | libxml2-dev \ 20 | libpng-dev \ 21 | libbz2-dev \ 22 | liblzma-dev \ 23 | libncurses-dev \ 24 | ncurses-term \ 25 | time \ 26 | unzip \ 27 | vim \ 28 | wget \ 29 | curl \ 30 | zlib1g-dev \ 31 | python3-pip \ 32 | pandoc \ 33 | && apt-get clean all && \ 34 | apt-get purge && \ 35 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 36 | 37 | RUN cmake_ver=3.23.1 && \ 38 | cd /tmp/ && \ 39 | wget https://github.com/Kitware/CMake/releases/download/v${cmake_ver}/cmake-${cmake_ver}.tar.gz && \ 40 | tar -zxf cmake-${cmake_ver}.tar.gz && \ 41 | cd cmake-${cmake_ver} && \ 42 | ./bootstrap && \ 43 | make && \ 44 | make install && \ 45 | rm -rf /tmp/* 46 | 47 | RUN Rscript -e "install.packages(c('rmarkdown', 'tidyverse', 'tidymodels'));" 48 | RUN pip install --upgrade pip && pip install mkdocs 49 | 50 | WORKDIR /work 51 | 52 | CMD ["R", "--version"] 53 | -------------------------------------------------------------------------------- /r/README.md: 
-------------------------------------------------------------------------------- 1 | ## README 2 | 3 | ![Build Dockerfile](https://github.com/davetang/learning_docker/actions/workflows/build_r.yml/badge.svg) 4 | 5 | [The Rocker Project](https://rocker-project.org/) contains Docker containers 6 | for the R environment, which are available on [Docker 7 | Hub](https://hub.docker.com/u/rocker). Use 8 | [rocker/r-ver](https://hub.docker.com/r/rocker/r-ver) for reproducible builds 9 | to fixed version of R. 10 | 11 | ```bash 12 | docker run --rm -it rocker/r-ver:4.2.2 /bin/bash 13 | ``` 14 | 15 | `r-ver:4.2.2` uses Ubuntu:22.04. 16 | 17 | ```bash 18 | cat /etc/os-release 19 | # PRETTY_NAME="Ubuntu 22.04.1 LTS" 20 | # NAME="Ubuntu" 21 | # VERSION_ID="22.04" 22 | # VERSION="22.04.1 LTS (Jammy Jellyfish)" 23 | # VERSION_CODENAME=jammy 24 | # ID=ubuntu 25 | # ID_LIKE=debian 26 | # HOME_URL="https://www.ubuntu.com/" 27 | # SUPPORT_URL="https://help.ubuntu.com/" 28 | # BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" 29 | # PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" 30 | # UBUNTU_CODENAME=jammy 31 | ``` 32 | 33 | The installed version of R (4.2.2) matches the Docker tag. 34 | 35 | ```bash 36 | R --version 37 | # R version 4.2.2 (2022-10-31) -- "Innocent and Trusting" 38 | # Copyright (C) 2022 The R Foundation for Statistical Computing 39 | # Platform: x86_64-pc-linux-gnu (64-bit) 40 | # 41 | # R is free software and comes with ABSOLUTELY NO WARRANTY. 42 | # You are welcome to redistribute it under the terms of the 43 | # GNU General Public License versions 2 or 3. 44 | # For more information about these matters see 45 | # https://www.gnu.org/licenses/. 
46 | ``` 47 | -------------------------------------------------------------------------------- /r/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | ver=$(cat Dockerfile | grep "^FROM" | cut -f2 -d':') 6 | image=r_build 7 | 8 | docker build -t davetang/${image}:${ver} . 9 | 10 | >&2 echo Build complete 11 | >&2 echo -e "Run the following to push to Docker Hub:\n" 12 | >&2 echo docker login 13 | >&2 echo docker push davetang/${image}:${ver} 14 | 15 | -------------------------------------------------------------------------------- /readme.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Learning Docker" 3 | output: github_document 4 | --- 5 | 6 | ```{r setup, include=FALSE} 7 | Sys.setenv(PATH=paste0(Sys.getenv("PATH"), ":", getwd())) 8 | knitr::opts_chunk$set(echo = TRUE) 9 | knitr::opts_chunk$set(error = TRUE) 10 | ``` 11 | 12 | ## Introduction 13 | 14 | ![](https://github.com/davetang/learning_docker/actions/workflows/create_readme.yml/badge.svg) 15 | 16 | Docker is an open source project that allows one to pack, ship, and run any 17 | application as a lightweight container. An analogy of Docker containers are 18 | shipping containers, which provide a standard and consistent way of shipping 19 | just about anything. The container includes everything that is needed for an 20 | application to run including the code, system tools, and the necessary 21 | dependencies. If you wanted to test an application, all you need to do is to 22 | download the Docker image and run it in a new container. No more compiling and 23 | installing missing dependencies! 24 | 25 | The [overview](https://docs.docker.com/get-started/overview/) at 26 | https://docs.docker.com/ provides more information. 
For a more hands-on 27 | approach, check out [Know Enough Docker to be 28 | Dangerous](https://docs.docker.com/) and [this short 29 | workshop](https://davetang.github.io/reproducible_bioinformatics/docker.html) 30 | that I prepared for BioC Asia 2019. 31 | 32 | This README was generated by GitHub Actions using the R Markdown file 33 | `readme.Rmd`, which was executed via the `create_readme.sh` script. 34 | 35 | ## Installing the Docker Engine 36 | 37 | To get started, you will need to install the Docker Engine; check out [this 38 | guide](https://docs.docker.com/engine/install/). 39 | 40 | ## Checking your installation 41 | 42 | To see if everything is working, try to obtain the Docker version. 43 | 44 | ```{bash engine.opts='-l'} 45 | docker --version 46 | ``` 47 | 48 | And run the `hello-world` image. (The `--rm` parameter is used to automatically 49 | remove the container when it exits.) 50 | 51 | ```{bash engine.opts='-l'} 52 | docker run --rm hello-world 53 | ``` 54 | 55 | ## Docker information 56 | 57 | Get more version information. 58 | 59 | ```{bash engine.opts='-l'} 60 | docker version 61 | ``` 62 | 63 | Even more information. 64 | 65 | ```{bash engine.opts='-l'} 66 | docker info 67 | ``` 68 | 69 | ## Basics 70 | 71 | The two guides linked in the introduction section provide some information on 72 | the basic commands but I'll include some here as well. One of the main reasons 73 | I use Docker is for building tools. For this purpose, I use Docker like a 74 | virtual machine, where I can install whatever I want. This is important because 75 | I can do my testing in an isolated environment and not worry about affecting 76 | the main server. I like to use Ubuntu because it's a popular Linux distribution 77 | and therefore whenever I run into a problem, chances are higher that someone 78 | else has had the same problem, asked a question on a forum, and received a 79 | solution. 80 | 81 | Before we can run Ubuntu using Docker, we need an image. 
We can obtain an 82 | Ubuntu image from the [official Ubuntu image 83 | repository](https://hub.docker.com/_/ubuntu/) from Docker Hub by running 84 | `docker pull`. 85 | 86 | ```{bash engine.opts='-l'} 87 | docker pull ubuntu:18.04 88 | ``` 89 | 90 | To run Ubuntu using Docker, we use `docker run`. 91 | 92 | ```{bash engine.opts='-l'} 93 | docker run --rm ubuntu:18.04 cat /etc/os-release 94 | ``` 95 | 96 | You can work interactively with the Ubuntu image by specifying the `-it` 97 | option. 98 | 99 | ```console 100 | docker run --rm -it ubuntu:18.04 /bin/bash 101 | ``` 102 | 103 | You may have noticed that I keep using the `--rm` option, which removes the 104 | container once you quit. If you don't use this option, the container is saved 105 | up until the point that you exit; all changes you made, files you created, etc. 106 | are saved. Why am I deleting all my changes? Because there is a better (and 107 | more reproducible) way to make changes to the system and that is by using a 108 | Dockerfile. 109 | 110 | ## Start containers automatically 111 | 112 | When hosting a service using Docker (such as running [RStudio 113 | Server](https://davetang.org/muse/2021/04/24/running-rstudio-server-with-docker/)), 114 | it would be nice if the container automatically starts up again when the server 115 | (and Docker) restarts. If you use the `--restart` flag with `docker run`, Docker 116 | will [restart your 117 | container](https://docs.docker.com/config/containers/start-containers-automatically/) 118 | when your container has exited or when Docker restarts. 
The value of the 119 | `--restart` flag can be the following: 120 | 121 | * `no` - do not automatically restart (default) 122 | * `on-failure[:max-retries]` - restarts if it exits due to an error (non-zero 123 | exit code) and the number of attempts is limited using the `max-retries` 124 | option 125 | * `always` - always restarts the container; if it is manually stopped, it is 126 | restarted only when the Docker daemon restarts (or when the container is 127 | manually restarted) 128 | * `unless-stopped` - similar to `always` but when the container is stopped, it 129 | is not restarted even after the Docker daemon restarts. 130 | 131 | ```console 132 | docker run -d \ 133 | --restart always \ 134 | -p 8888:8787 \ 135 | -e PASSWORD=password \ 136 | -e USERID=$(id -u) \ 137 | -e GROUPID=$(id -g) \ 138 | rocker/rstudio:4.1.2 139 | ``` 140 | 141 | ## Dockerfile 142 | 143 | A Dockerfile is a text file that contains instructions for building Docker 144 | images. A Dockerfile adheres to a specific format and set of instructions, 145 | which you can find at [Dockerfile 146 | reference](https://docs.docker.com/engine/reference/builder/). There is also a 147 | [Best practices 148 | guide](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/) 149 | for writing Dockerfiles. 150 | 151 | A Docker image is made up of different layers and they act like snapshots. Each 152 | layer, or intermediate image, is created each time an instruction in the 153 | Dockerfile is executed. Each layer is assigned a unique hash and are cached by 154 | default. This means that you do not need to rebuild a layer again from scratch 155 | if it has not changed. Keep this in mind when creating a Dockerfile. 156 | 157 | Some commonly used instructions include: 158 | 159 | * `FROM` - Specifies the parent or base image to use for building an image and 160 | must be the first command in the file. 
161 | * `COPY` - Copies files from the current directory (of where the Dockerfile is) 162 | to the image filesystem. 163 | * `RUN` - Executes a command inside the image. 164 | * `ADD` - Adds new files or directories from a source or URL to the image 165 | filesystem. 166 | * `ENTRYPOINT` - Makes the container run like an executable. 167 | * `CMD` - The default command or parameter/s for the container and can be used 168 | with `ENTRYPOINT`. 169 | * `WORKDIR` - Sets the working directory for the image. Any `CMD`, `RUN`, 170 | `COPY`, or `ENTRYPOINT` instruction after the `WORKDIR` declaration will be 171 | executed in the context of the working directory. 172 | * `USER` - Changes the user 173 | 174 | I have an example Dockerfile that uses the Ubuntu 18.04 image to build 175 | [BWA](https://github.com/lh3/bwa), a popular short read alignment tool used in 176 | bioinformatics. 177 | 178 | ```{bash engine.opts='-l'} 179 | cat Dockerfile 180 | ``` 181 | 182 | ### ARG 183 | 184 | To define variables in your Dockerfile use `ARG name=value`. For example, you 185 | can use `ARG` to create a new variable that stores a version number of a 186 | program. When a new version of the program is released, you can simply change 187 | the `ARG` and re-build your Dockerfile. 188 | 189 | ``` 190 | ARG star_ver=2.7.10a 191 | RUN cd /usr/src && \ 192 | wget https://github.com/alexdobin/STAR/archive/refs/tags/${star_ver}.tar.gz && \ 193 | tar xzf ${star_ver}.tar.gz && \ 194 | rm ${star_ver}.tar.gz && \ 195 | cd STAR-${star_ver}/source && \ 196 | make STAR && \ 197 | cd /usr/local/bin && \ 198 | ln -s /usr/src/STAR-${star_ver}/source/STAR . 
199 | ``` 200 | 201 | ### CMD 202 | 203 | The [CMD](https://docs.docker.com/engine/reference/builder/#cmd) instruction in 204 | a Dockerfile does not execute anything at build time but specifies the intended 205 | command for the image; there can only be one CMD instruction in a Dockerfile 206 | and if you list more than one CMD then only the last CMD will take effect. The 207 | main purpose of a CMD is to provide defaults for an executing container. 208 | 209 | ### COPY 210 | 211 | The [COPY](https://docs.docker.com/engine/reference/builder/#copy) instruction 212 | copies new files or directories from `` and adds them to the filesystem of 213 | the container at the path ``. It has two forms: 214 | 215 | ``` 216 | COPY [--chown=:] [--chmod=] ... 217 | COPY [--chown=:] [--chmod=] ["",... ""] 218 | ``` 219 | 220 | Note the `--chown` parameter, which can be used to set the ownership of the 221 | copied files/directories. If this is not specified, the default ownership is 222 | `root`, which can be a problem. 223 | 224 | For example in the RStudio Server 225 | [Dockerfile](https://github.com/davetang/learning_docker/blob/main/rstudio/Dockerfile), 226 | there are two `COPY` instructions that set the ownership to the `rstudio` user. 227 | 228 | ``` 229 | COPY --chown=rstudio:rstudio rstudio/rstudio-prefs.json /home/rstudio/.config/rstudio 230 | COPY --chown=rstudio:rstudio rstudio/.Rprofile /home/rstudio/ 231 | ``` 232 | 233 | The two files that are copied are config files and therefore need to be 234 | writable by `rstudio` if settings are changed in RStudio Server. 235 | 236 | Usually the root path of `` is set to the directory where the Dockerfile 237 | exists. The example above is different because the RStudio Server image is 238 | built by GitHub Actions, and the root path of `` is the GitHub repository. 
239 | 240 | ### ENTRYPOINT 241 | 242 | An [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint) 243 | allows you to configure a container that will run as an executable. ENTRYPOINT 244 | has two forms: 245 | 246 | * ENTRYPOINT ["executable", "param1", "param2"] (exec form, preferred) 247 | * ENTRYPOINT command param1 param2 (shell form) 248 | 249 | ```console 250 | FROM ubuntu 251 | ENTRYPOINT ["top", "-b"] 252 | CMD ["-c"] 253 | ``` 254 | 255 | Use `--entrypoint` to override ENTRYPOINT instruction. 256 | 257 | ```console 258 | docker run --entrypoint 259 | ``` 260 | 261 | ## Building an image 262 | 263 | Use the `build` subcommand to build Docker images and use the `-f` parameter if 264 | your Dockerfile is named as something else otherwise Docker will look for a 265 | file named `Dockerfile`. The period at the end, tells Docker to look in the 266 | current directory. 267 | 268 | ```{bash engine.opts='-l'} 269 | cat build.sh 270 | ``` 271 | 272 | You can push the built image to [Docker Hub](https://hub.docker.com/) if you 273 | have an account. I have used my Docker Hub account name to name my Docker 274 | image. 275 | 276 | ```console 277 | # use -f to specify the Dockerfile to use 278 | # the period indicates that the Dockerfile is in the current directory 279 | docker build -f Dockerfile.base -t davetang/base . 280 | 281 | # log into Docker Hub 282 | docker login 283 | 284 | # push to Docker Hub 285 | docker push davetang/base 286 | ``` 287 | 288 | ## Renaming an image 289 | 290 | The `docker image tag` command will create a new tag, i.e. new image name, that 291 | refers to an old image. It is not quite renaming but can be considered renaming 292 | since you will have a new name for your image. 293 | 294 | The usage is: 295 | 296 | Usage: docker image tag SOURCE_IMAGE[:TAG] TARGET_IMAGE[:TAG] 297 | 298 | For example I have created a new tag for my RStudio Server image, so that I can 299 | easily push it to Quay.io. 
300 | 301 | ```console 302 | docker image tag davetang/rstudio:4.2.2 quay.io/davetang31/rstudio:4.2.2 303 | ``` 304 | 305 | The original image `davetang/rstudio:4.2.2` still exists, which is why tagging 306 | is not quite renaming. 307 | 308 | ## Running an image 309 | 310 | [Docker run documentation](https://docs.docker.com/engine/reference/run/). 311 | 312 | ```{bash engine.opts='-l'} 313 | docker run --rm davetang/bwa:0.7.17 314 | ``` 315 | 316 | ## Setting environment variables 317 | 318 | Create a new environment variable (ENV) using `--env`. 319 | 320 | ```{bash engine.opts='-l'} 321 | docker run --rm --env YEAR=1984 busybox env 322 | ``` 323 | 324 | Two ENVs. 325 | 326 | ```{bash engine.opts='-l'} 327 | docker run --rm --env YEAR=1984 --env SEED=2049 busybox env 328 | ``` 329 | 330 | Or `-e` for less typing. 331 | 332 | ```{bash engine.opts='-l'} 333 | docker run --rm -e YEAR=1984 -e SEED=2049 busybox env 334 | ``` 335 | 336 | ## Resource usage 337 | 338 | To [restrict](https://docs.docker.com/config/containers/resource_constraints/) 339 | CPU usage use `--cpus=n` and use `--memory=` to restrict the maximum amount of 340 | memory the container can use. 341 | 342 | We can confirm the limited CPU usage by running an endless while loop and using 343 | `docker stats` to confirm the CPU usage. *Remember to use `docker stop` to stop 344 | the container after confirming the usage!* 345 | 346 | Restrict to 1 CPU. 347 | 348 | ```console 349 | # run in detached mode 350 | docker run --rm -d --cpus=1 davetang/bwa:0.7.17 perl -le 'while(1){ }' 351 | 352 | # check stats and use control+c to exit 353 | docker stats 354 | CONTAINER ID NAME CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O PIDS 355 | 8cc20bcfa4f4 vigorous_khorana 100.59% 572KiB / 1.941GiB 0.03% 736B / 0B 0B / 0B 1 356 | 357 | docker stop 8cc20bcfa4f4 358 | ``` 359 | 360 | Restrict to 1/2 CPU. 
361 | 362 | ```console 363 | # run in detached mode 364 | docker run --rm -d --cpus=0.5 davetang/bwa:0.7.17 perl -le 'while(1){ }' 365 | 366 | # check stats and use control+c to exit 367 | docker stats 368 | 369 | CONTAINER ID NAME CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O PIDS 370 | af6e812a94da unruffled_liskov 50.49% 584KiB / 1.941GiB 0.03% 736B / 0B 0B / 0B 1 371 | 372 | docker stop af6e812a94da 373 | ``` 374 | 375 | ## Copying files between host and container 376 | 377 | Use `docker cp` but I recommend mounting a volume to a Docker container (see 378 | next section). 379 | 380 | ```console 381 | docker cp --help 382 | 383 | Usage: docker cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- 384 | docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH 385 | 386 | Copy files/folders between a container and the local filesystem 387 | 388 | Options: 389 | -L, --follow-link Always follow symbol link in SRC_PATH 390 | --help Print usage 391 | 392 | # find container name 393 | docker ps -a 394 | 395 | # create file to transfer 396 | echo hi > hi.txt 397 | 398 | docker cp hi.txt fee424ef6bf0:/root/ 399 | 400 | # start container 401 | docker start -ai fee424ef6bf0 402 | 403 | # inside container 404 | cat /root/hi.txt 405 | hi 406 | 407 | # create file inside container 408 | echo bye > /root/bye.txt 409 | exit 410 | 411 | # transfer file from container to host 412 | docker cp fee424ef6bf0:/root/bye.txt . 413 | 414 | cat bye.txt 415 | bye 416 | ``` 417 | 418 | ## Sharing between host and container 419 | 420 | Use the `-v` flag to mount directories to a container so that you can share 421 | files between the host and container. 422 | 423 | In the example below, I am mounting `data` from the current directory (using 424 | the Unix command `pwd`) to `/work` in the container. I am working from the root 425 | directory of this GitHub repository, which contains the `data` directory. 
426 | 427 | ```{bash engine.opts='-l'} 428 | ls data 429 | ``` 430 | 431 | Any output written to `/work` inside the container, will be accessible inside 432 | `data` on the host. The command below will create BWA index files for 433 | `data/chrI.fa.gz`. 434 | 435 | ```{bash engine.opts='-l'} 436 | docker run --rm -v $(pwd)/data:/work davetang/bwa:0.7.17 bwa index chrI.fa.gz 437 | ``` 438 | 439 | We can see the newly created index files. 440 | 441 | ```{bash engine.opts='-l'} 442 | ls -lrt data 443 | ``` 444 | 445 | However note that the generated files are owned by `root`, which is slightly 446 | annoying because unless we have root access, we need to start a Docker 447 | container with the volume re-mounted to alter/delete the files. 448 | 449 | ### File permissions 450 | 451 | As seen above, files generated inside the container on a mounted volume are 452 | owned by `root`. This is because the default user inside a Docker container is 453 | `root`. In Linux, there is typically a `root` user with the UID and GID of 0; 454 | this user exists in the host Linux environment (where the Docker engine is 455 | running) as well as inside the Docker container. 456 | 457 | In the example below, the mounted volume is owned by UID 1211 and GID 1211 (in 458 | the host environment). This UID and GID does not exist in the Docker container, 459 | thus the UID and GID are shown instead of a name like `root`. This is important 460 | to understand because to circumvent this file permission issue, we need to 461 | create a user that matches the UID and GID in the host environment. 
462 | 463 | ```console 464 | ls -lrt 465 | # total 2816 466 | # -rw-r--r-- 1 1211 1211 1000015 Apr 27 02:00 ref.fa 467 | # -rw-r--r-- 1 1211 1211 21478 Apr 27 02:00 l100_n100_d400_31_2.fq 468 | # -rw-r--r-- 1 1211 1211 21478 Apr 27 02:00 l100_n100_d400_31_1.fq 469 | # -rw-r--r-- 1 1211 1211 119 Apr 27 02:01 run.sh 470 | # -rw-r--r-- 1 root root 1000072 Apr 27 02:03 ref.fa.bwt 471 | # -rw-r--r-- 1 root root 250002 Apr 27 02:03 ref.fa.pac 472 | # -rw-r--r-- 1 root root 40 Apr 27 02:03 ref.fa.ann 473 | # -rw-r--r-- 1 root root 12 Apr 27 02:03 ref.fa.amb 474 | # -rw-r--r-- 1 root root 500056 Apr 27 02:03 ref.fa.sa 475 | # -rw-r--r-- 1 root root 56824 Apr 27 02:04 aln.sam 476 | ``` 477 | 478 | As mentioned already, having `root` ownership is problematic because when we 479 | are back in the host environment, we can't modify these files. To circumvent 480 | this, we can create a user that matches the host user by passing three 481 | environmental variables from the host to the container. 482 | 483 | ```console 484 | docker run -it \ 485 | -v ~/my_data:/data \ 486 | -e MYUID=$(id -u) \ 487 | -e MYGID=$(id -g) \ 488 | -e ME=$(whoami) \ 489 | bwa /bin/bash 490 | ``` 491 | 492 | We use the environment variables and the following steps to create an identical 493 | user inside the container. 
494 | 495 | ```console 496 | adduser --quiet --home /home/san/$ME --no-create-home --gecos "" --shell /bin/bash --disabled-password $ME 497 | 498 | # optional: give yourself admin privileges 499 | echo "%$ME ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers 500 | 501 | # update the IDs to those passed into Docker via environment variable 502 | sed -i -e "s/1000:1000/$MYUID:$MYGID/g" /etc/passwd 503 | sed -i -e "s/$ME:x:1000/$ME:x:$MYGID/" /etc/group 504 | 505 | # su - as the user 506 | exec su - $ME 507 | 508 | # run BWA again, after you have deleted the old files as root 509 | bwa index ref.fa 510 | bwa mem ref.fa l100_n100_d400_31_1.fq l100_n100_d400_31_2.fq > aln.sam 511 | 512 | # check output 513 | ls -lrt 514 | # total 2816 515 | # -rw-r--r-- 1 dtang dtang 1000015 Apr 27 02:00 ref.fa 516 | # -rw-r--r-- 1 dtang dtang 21478 Apr 27 02:00 l100_n100_d400_31_2.fq 517 | # -rw-r--r-- 1 dtang dtang 21478 Apr 27 02:00 l100_n100_d400_31_1.fq 518 | # -rw-r--r-- 1 dtang dtang 119 Apr 27 02:01 run.sh 519 | # -rw-rw-r-- 1 dtang dtang 1000072 Apr 27 02:12 ref.fa.bwt 520 | # -rw-rw-r-- 1 dtang dtang 250002 Apr 27 02:12 ref.fa.pac 521 | # -rw-rw-r-- 1 dtang dtang 40 Apr 27 02:12 ref.fa.ann 522 | # -rw-rw-r-- 1 dtang dtang 12 Apr 27 02:12 ref.fa.amb 523 | # -rw-rw-r-- 1 dtang dtang 500056 Apr 27 02:12 ref.fa.sa 524 | # -rw-rw-r-- 1 dtang dtang 56824 Apr 27 02:12 aln.sam 525 | 526 | # exit container 527 | exit 528 | ``` 529 | 530 | This time when you check the file permissions in the host environment, they 531 | should match your username. 
532 | 533 | ```console 534 | ls -lrt ~/my_data 535 | # total 2816 536 | # -rw-r--r-- 1 dtang dtang 1000015 Apr 27 10:00 ref.fa 537 | # -rw-r--r-- 1 dtang dtang 21478 Apr 27 10:00 l100_n100_d400_31_2.fq 538 | # -rw-r--r-- 1 dtang dtang 21478 Apr 27 10:00 l100_n100_d400_31_1.fq 539 | # -rw-r--r-- 1 dtang dtang 119 Apr 27 10:01 run.sh 540 | # -rw-rw-r-- 1 dtang dtang 1000072 Apr 27 10:12 ref.fa.bwt 541 | # -rw-rw-r-- 1 dtang dtang 250002 Apr 27 10:12 ref.fa.pac 542 | # -rw-rw-r-- 1 dtang dtang 40 Apr 27 10:12 ref.fa.ann 543 | # -rw-rw-r-- 1 dtang dtang 12 Apr 27 10:12 ref.fa.amb 544 | # -rw-rw-r-- 1 dtang dtang 500056 Apr 27 10:12 ref.fa.sa 545 | # -rw-rw-r-- 1 dtang dtang 56824 Apr 27 10:12 aln.sam 546 | ``` 547 | 548 | ### File Permissions 2 549 | 550 | There is a `-u` or `--user` parameter that can be used with `docker run` to run 551 | a container using a specific user. This is easier than creating a new user. 552 | 553 | In this example we run the `touch` command as `root`. 554 | 555 | ```{bash engine.opts='-l'} 556 | docker run -v $(pwd):/$(pwd) ubuntu:22.10 touch $(pwd)/test_root.txt 557 | ls -lrt $(pwd)/test_root.txt 558 | ``` 559 | 560 | In this example, we run the command as a user with the same UID and GID; the 561 | `stat` command is used to get the UID and GID. 562 | 563 | ```{bash engine.opts='-l'} 564 | docker run -v $(pwd):/$(pwd) -u $(stat -c "%u:%g" $HOME) ubuntu:22.10 touch $(pwd)/test_mine.txt 565 | ls -lrt $(pwd)/test_mine.txt 566 | ``` 567 | 568 | One issue with this method is that you may encounter the following warning (if 569 | running interactively): 570 | 571 | ``` 572 | groups: cannot find name for group ID 1000 573 | I have no name!@ed9e8b6b7622:/$ 574 | ``` 575 | 576 | This is because the user in your host environment does not exist in the 577 | container environment. As far as I am aware, this is not a problem; we just 578 | want to create files/directories with matching user and group IDs. 
579 | 580 | ### Read only 581 | 582 | To mount a volume but with read-only permissions, append `:ro` at the end. 583 | 584 | ```{bash engine.opts='-l'} 585 | docker run --rm -v $(pwd):/work:ro davetang/bwa:0.7.17 touch test.txt 586 | ``` 587 | 588 | ## Removing the image 589 | 590 | Use `docker rmi` to remove an image. You will need to remove any stopped 591 | containers first before you can remove an image. Use `docker ps -a` to find 592 | stopped containers and `docker rm` to remove these containers. 593 | 594 | Let's pull the `busybox` image. 595 | 596 | ```{bash engine.opts='-l'} 597 | docker pull busybox 598 | ``` 599 | 600 | Check out `busybox`. 601 | 602 | ```{bash engine.opts='-l'} 603 | docker images busybox 604 | ``` 605 | 606 | Remove `busybox`. 607 | 608 | ```{bash engine.opts='-l'} 609 | docker rmi busybox 610 | ``` 611 | 612 | ## Committing changes 613 | 614 | Generally, it is better to use a Dockerfile to manage your images in a 615 | documented and maintainable way but if you still want to [commit 616 | changes](https://docs.docker.com/engine/reference/commandline/commit/) to your 617 | container (like you would for Git), read on. 618 | 619 | When you log out of a container, the changes made are still stored; type 620 | `docker ps -a` to see all containers and the latest changes. Use `docker 621 | commit` to commit your changes. 622 | 623 | ```console 624 | docker ps -a 625 | 626 | # git style commit 627 | # -a, --author= Author (e.g., "John Hannibal Smith ") 628 | # -m, --message= Commit message 629 | docker commit -m 'Made change to blah' -a 'Dave Tang' 630 | 631 | # use docker history to check history 632 | docker history 633 | ``` 634 | 635 | ## Access running container 636 | 637 | To access a container that is already running, perhaps in the background (using 638 | detached mode: `docker run` with `-d`) use `docker ps` to find the name of the 639 | container and then use `docker exec`. 
640 | 641 | In the example below, my container name is `rstudio_dtang`. 642 | 643 | ```console 644 | docker exec -it rstudio_dtang /bin/bash 645 | ``` 646 | 647 | ## Cleaning up exited containers 648 | 649 | I typically use the `--rm` flag with `docker run` so that containers are 650 | automatically removed after I exit them. However, if you don't use `--rm`, by 651 | default a container's file system persists even after the container exits. For 652 | example: 653 | 654 | ```{bash engine.opts='-l'} 655 | docker run hello-world 656 | ``` 657 | 658 | Show all containers. 659 | 660 | ```{bash engine.opts='-l'} 661 | docker ps -a 662 | ``` 663 | 664 | We can use a sub-shell to get all (`-a`) container IDs (`-q`) that have exited 665 | (`-f status=exited`) and then remove them (`docker rm -v`). 666 | 667 | ```{bash engine.opts='-l'} 668 | docker rm -v $(docker ps -a -q -f status=exited) 669 | ``` 670 | 671 | Check to see if the container still exists. 672 | 673 | ```{bash engine.opts='-l'} 674 | docker ps -a 675 | ``` 676 | 677 | We can set this up as a Bash script so that we can easily remove exited 678 | containers. In the Bash script `-z` returns true if `$exited` is empty, i.e. no 679 | exited containers, so we will only run the command when `$exited` is not true. 680 | 681 | ```{bash engine.opts='-l'} 682 | cat clean_up_docker.sh 683 | ``` 684 | 685 | As I have mentioned, you can use the 686 | [--rm](https://docs.docker.com/engine/reference/run/#clean-up---rm) parameter 687 | to automatically clean up the container and remove the file system when the 688 | container exits. 689 | 690 | ```{bash engine.opts='-l'} 691 | docker run --rm hello-world 692 | ``` 693 | 694 | No containers. 695 | 696 | ```{bash engine.opts='-l'} 697 | docker ps -a 698 | ``` 699 | 700 | ## Installing Perl modules 701 | 702 | Use `cpanminus`. 
703 | 704 | ```console 705 | apt-get install -y cpanminus 706 | 707 | # install some Perl modules 708 | cpanm Archive::Extract Archive::Zip DBD::mysql 709 | ``` 710 | 711 | ## Creating a data container 712 | 713 | This [guide on working with Docker data 714 | volumes](https://www.digitalocean.com/community/tutorials/how-to-work-with-docker-data-volumes-on-ubuntu-14-04) 715 | provides a really nice introduction. Use `docker create` to create a data 716 | container; the `-v` indicates the directory for the data container; the `--name 717 | data_container` indicates the name of the data container; and `ubuntu` is the 718 | image to be used for the container. 719 | 720 | ```console 721 | docker create -v /tmp --name data_container ubuntu 722 | ``` 723 | 724 | If we run a new Ubuntu container with the `--volumes-from` flag, output written 725 | to the `/tmp` directory will be saved to the `/tmp` directory of the 726 | `data_container` container. 727 | 728 | ```console 729 | docker run -it --volumes-from data_container ubuntu /bin/bash 730 | ``` 731 | 732 | ## R 733 | 734 | Use images from [The Rocker Project](https://www.rocker-project.org/), for 735 | example `rocker/r-ver:4.3.0`. 736 | 737 | ```{bash engine.opts='-l'} 738 | docker run --rm rocker/r-ver:4.3.0 739 | ``` 740 | 741 | ## Saving and transferring a Docker image 742 | 743 | You should just share the Dockerfile used to create your image but if you need 744 | another way to save and share an image, see [this 745 | post](http://stackoverflow.com/questions/23935141/how-to-copy-docker-images-from-one-host-to-another-without-via-repository) 746 | on Stack Overflow. 747 | 748 | ```console 749 | docker save -o <file.tar> <image> 750 | docker load -i <file.tar> 751 | ``` 752 | 753 | Here's an example. 754 | 755 | ```console 756 | # save on Unix server 757 | docker save -o davebox.tar davebox 758 | 759 | # copy file to MacBook Pro 760 | scp davetang@192.168.0.31:/home/davetang/davebox.tar . 
761 | 762 | docker load -i davebox.tar 763 | 93c22f563196: Loading layer [==================================================>] 134.6 MB/134.6 MB 764 | ... 765 | 766 | docker images 767 | REPOSITORY TAG IMAGE ID CREATED SIZE 768 | davebox latest d38f27446445 10 days ago 3.46 GB 769 | 770 | docker run davebox samtools 771 | 772 | Program: samtools (Tools for alignments in the SAM format) 773 | Version: 1.3 (using htslib 1.3) 774 | 775 | Usage: samtools [options] 776 | ... 777 | ``` 778 | 779 | ## Sharing your image 780 | 781 | ### Docker Hub 782 | 783 | Create an account on [Docker Hub](https://hub.docker.com/); my account is 784 | `davetang`. Use `docker login` to login and use `docker push` to push to Docker 785 | Hub (run `docker tag` first if you didn't name your image in the format of 786 | `yourhubusername/newrepo`). 787 | 788 | ```console 789 | docker login 790 | 791 | # create repo on Docker Hub then tag your image 792 | docker tag bb38976d03cf yourhubusername/newrepo 793 | 794 | # push 795 | docker push yourhubusername/newrepo 796 | ``` 797 | 798 | ### Quay.io 799 | 800 | Create an account on [Quay.io](https://quay.io/); you can use Quay.io for free as 801 | stated in their [plans](https://quay.io/plans/): 802 | 803 | > Can I use Quay for free? 804 | > Yes! We offer unlimited storage and serving of public repositories. We 805 | > strongly believe in the open source community and will do what we can to 806 | > help! 807 | 808 | Use `docker login` to [login](https://docs.quay.io/guides/login.html) and use 809 | the credentials you set up when you created an account on Quay.io. 810 | 811 | ```console 812 | docker login quay.io 813 | ``` 814 | 815 | Quay.io images are prefixed with `quay.io`, so I used `docker image tag` to create 816 | a new tag of my RStudio Server image. (Unfortunately, the username `davetang` 817 | was taken on RedHat [possibly by me a long time ago], so I have to use 818 | `davetang31` on Quay.io.) 
819 | 820 | ```console 821 | docker image tag davetang/rstudio:4.2.2 quay.io/davetang31/rstudio:4.2.2 822 | ``` 823 | 824 | Push to Quay.io. 825 | 826 | ```console 827 | docker push quay.io/davetang31/rstudio:4.2.2 828 | ``` 829 | 830 | ### GitHub Actions 831 | 832 | [login-action](https://github.com/docker/login-action) is used to automatically 833 | login to [Docker Hub](https://github.com/docker/login-action#docker-hub) when 834 | using GitHub Actions. This allows images to be automatically built and pushed 835 | to Docker Hub. There is also support for 836 | [Quay.io](https://github.com/docker/login-action#quayio). 837 | 838 | ## Tips 839 | 840 | Tip from https://support.pawsey.org.au/documentation/display/US/Containers: 841 | each RUN, COPY, and ADD command in a Dockerfile generates another layer in the 842 | container thus increasing its size; use multi-line commands and clean up 843 | package manager caches to minimise image size: 844 | 845 | ```console 846 | RUN apt-get update \ 847 | && apt-get install -y \ 848 | autoconf \ 849 | automake \ 850 | gcc \ 851 | g++ \ 852 | python \ 853 | python-dev \ 854 | && apt-get clean all \ 855 | && rm -rf /var/lib/apt/lists/* 856 | ``` 857 | 858 | I have found it handy to mount my current directory to the same path inside a 859 | Docker container and to [set it as the working 860 | directory](https://docs.docker.com/engine/reference/commandline/run/#set-working-directory--w); 861 | the directory will be automatically created inside the container if it does not 862 | already exist. When the container starts up, I will conveniently be in my 863 | current directory. In the command below I have also added the `-u` option, 864 | which sets the user to `<name|uid>[:<group|gid>]`. 865 | 866 | ```console 867 | docker run --rm -it -u $(stat -c "%u:%g" ${HOME}) -v $(pwd):$(pwd) -w $(pwd) davetang/build:1.1 /bin/bash 868 | ``` 869 | 870 | If you do not want to preface `docker` with `sudo`, create a Unix group called 871 | `docker` and add users to it. 
On some Linux distributions, the system 872 | automatically creates this group when installing Docker Engine using a package 873 | manager. In that case, there is no need for you to manually create the group. 874 | Check `/etc/group` to see if the `docker` group exists. 875 | 876 | ```console 877 | cat /etc/group | grep docker 878 | ``` 879 | 880 | If the `docker` group does not exist, create the group: 881 | 882 | ```console 883 | sudo groupadd docker 884 | ``` 885 | 886 | Add users to the group. 887 | 888 | ```console 889 | sudo usermod -aG docker $USER 890 | ``` 891 | 892 | The user will need to log out and log back in, before the changes take effect. 893 | 894 | On Linux, Docker is installed in `/var/lib/docker`. 895 | 896 | ```console 897 | docker info -f '{{ .DockerRootDir }}' 898 | # /var/lib/docker 899 | ``` 900 | 901 | This may not be ideal depending on your partitioning. To change the default 902 | root directory update the daemon configuration file; the default location on 903 | Linux is `/etc/docker/daemon.json`. This file may not exist, so you need to 904 | create it. 905 | 906 | The example below makes `/home/docker` the Docker root directory; you can use 907 | any directory you want but just make sure it exists. 908 | 909 | ```console 910 | cat /etc/docker/daemon.json 911 | ``` 912 | ``` 913 | { 914 | "data-root": "/home/docker" 915 | } 916 | ``` 917 | 918 | Restart the Docker server (this will take a little time, since all the files 919 | will be copied to the new location) and then check the Docker root directory. 920 | 921 | ```console 922 | sudo systemctl restart docker 923 | docker info -f '{{ .DockerRootDir}}' 924 | ``` 925 | ``` 926 | /home/docker 927 | ``` 928 | 929 | Check out the new home! 
930 | 931 | ```console 932 | sudo ls -1 /home/docker 933 | ``` 934 | ``` 935 | buildkit 936 | containers 937 | engine-id 938 | image 939 | network 940 | overlay2 941 | plugins 942 | runtimes 943 | swarm 944 | tmp 945 | volumes 946 | ``` 947 | 948 | Use `--progress=plain` to show container output, which is useful for debugging! 949 | 950 | ```console 951 | docker build --progress=plain -t davetang/scanpy:3.11 . 952 | ``` 953 | 954 | For Apple laptops using the M[123] chips, use `--platform linux/amd64` if that's the architecture of the image. 955 | 956 | ``` 957 | docker run --rm --platform linux/amd64 -p 8787:8787 rocker/verse:4.4.1 958 | ``` 959 | 960 | ## Useful links 961 | 962 | * [Post installation steps](https://docs.docker.com/engine/install/linux-postinstall/) 963 | * [A quick introduction to 964 | Docker](http://blog.scottlowe.org/2014/03/11/a-quick-introduction-to-docker/) 965 | * [The BioDocker project](https://github.com/BioDocker/biodocker); check out 966 | their [Wiki](https://github.com/BioDocker/biodocker/wiki), which has a lot of 967 | useful information 968 | * [The impact of Docker containers on the performance of genomic 969 | pipelines](http://www.ncbi.nlm.nih.gov/pubmed/26421241) 970 | * [Learn enough Docker to be 971 | useful](https://towardsdatascience.com/learn-enough-docker-to-be-useful-b0b44222eef5) 972 | * [10 things to avoid in Docker 973 | containers](http://developers.redhat.com/blog/2016/02/24/10-things-to-avoid-in-docker-containers/) 974 | * The [Play with Docker classroom](https://training.play-with-docker.com/) 975 | brings you labs and tutorials that help you get hands-on experience using 976 | Docker 977 | * [Shifter](https://github.com/NERSC/shifter) enables container images for HPC 978 | * http://biocworkshops2019.bioconductor.org.s3-website-us-east-1.amazonaws.com/page/BioconductorOnContainers__Bioconductor_Containers_Workshop/ 979 | * Run the Docker daemon as a non-root user ([Rootless 
mode](https://docs.docker.com/engine/security/rootless/)) 980 | -------------------------------------------------------------------------------- /rstudio/.Rprofile: -------------------------------------------------------------------------------- 1 | .libPaths("/packages/") 2 | -------------------------------------------------------------------------------- /rstudio/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rocker/rstudio:4.5.0 2 | 3 | LABEL source="https://github.com/davetang/learning_docker/blob/main/rstudio/Dockerfile" 4 | 5 | MAINTAINER Dave Tang 6 | 7 | ARG bioc_ver=3.21 8 | 9 | RUN apt-get clean all && \ 10 | apt-get update && \ 11 | apt-get upgrade -y && \ 12 | apt-get install -y \ 13 | git \ 14 | cmake \ 15 | libhdf5-dev \ 16 | libcurl4-openssl-dev \ 17 | libssl-dev \ 18 | libxml2-dev \ 19 | libpng-dev \ 20 | libxt-dev \ 21 | zlib1g-dev \ 22 | libbz2-dev \ 23 | liblzma-dev \ 24 | libglpk40 \ 25 | libgit2-dev \ 26 | libgsl-dev \ 27 | patch \ 28 | libmagick++-dev \ 29 | && apt-get clean all && \ 30 | apt-get purge && \ 31 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 32 | 33 | RUN Rscript -e "install.packages(c('rmarkdown', 'tidyverse', 'tidymodels', 'workflowr', 'BiocManager', 'quarto', 'crew', 'targets'));" 34 | RUN Rscript -e "BiocManager::install(version = '${bioc_ver}')" 35 | 36 | # the rstudio/ path is set for building with GitHub Actions 37 | COPY --chown=rstudio:rstudio rstudio/rstudio-prefs.json /home/rstudio/.config/rstudio 38 | COPY --chown=rstudio:rstudio rstudio/.Rprofile /home/rstudio/ 39 | 40 | WORKDIR /home/rstudio 41 | -------------------------------------------------------------------------------- /rstudio/README.md: -------------------------------------------------------------------------------- 1 | ## Running RStudio Server from Docker 2 | 3 | ![Build Dockerfile](https://github.com/davetang/learning_docker/actions/workflows/build_rstudio.yml/badge.svg) 4 | 5 | The [Rocker 
project](https://www.rocker-project.org/) provides various Docker images for the R environment. Here's one way of using the [RStudio Server image](https://hub.docker.com/r/rocker/rstudio/) to enable reproducibility. 6 | 7 | First use `docker` to pull the RStudio Server image; remember to specify a version to promote reproducibility. 8 | 9 | ```bash 10 | rstudio_image=rocker/rstudio:4.0.1 11 | docker pull $rstudio_image 12 | ``` 13 | 14 | Once you have successfully pulled the image, try running the command below. The output indicates the operating system used to build the image. 15 | 16 | ```bash 17 | docker run --rm -it $rstudio_image cat /etc/os-release 18 | NAME="Ubuntu" 19 | VERSION="20.04 LTS (Focal Fossa)" 20 | ID=ubuntu 21 | ID_LIKE=debian 22 | PRETTY_NAME="Ubuntu 20.04 LTS" 23 | VERSION_ID="20.04" 24 | HOME_URL="https://www.ubuntu.com/" 25 | SUPPORT_URL="https://help.ubuntu.com/" 26 | BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" 27 | PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" 28 | VERSION_CODENAME=focal 29 | UBUNTU_CODENAME=focal 30 | ``` 31 | 32 | For this example, I have created a `packages` directory for installing R packages into. We will use the `-v` parameter to share the `packages` directory; this directory will be accessible inside the container as `/packages`. 33 | 34 | ```bash 35 | docker run --rm \ 36 | -p 8888:8787 \ 37 | -v ~/github/learning_docker/rstudio/packages:/packages \ 38 | -e PASSWORD=password \ 39 | -e USERID=$(id -u) \ 40 | -e GROUPID=$(id -g) \ 41 | $rstudio_image 42 | ``` 43 | 44 | NOTE: for the Docker installation on my Linux box (Docker Engine - Community 19.03.11), I had a file permission problem when using RStudio Server. This was because the `rstudio` user does not have permission to write to the mounted directory. This was [solved](https://github.com/rocker-org/rocker/issues/324#issuecomment-454715753) by setting `USERID` and `GROUPID` to the same IDs. 
45 | 46 | If all went well, you can access the RStudio Server at http://localhost:8888/ via your favourite web browser. The username is `rstudio` and the password is `password`. 47 | 48 | Once logged in, we need to set `.libPaths()` to `/packages`, so that we can save installed packages and don't have to re-install everything again. 49 | 50 | ```r 51 | # check the default library paths 52 | .libPaths() 53 | [1] "/usr/local/lib/R/site-library" "/usr/local/lib/R/library" 54 | 55 | # add a new library path 56 | .libPaths(new = "/packages") 57 | 58 | # check to see if the new library path was added 59 | .libPaths() 60 | [1] "/packages" "/usr/local/lib/R/site-library" "/usr/local/lib/R/library" 61 | 62 | # install the pheatmap package 63 | install.packages("pheatmap") 64 | 65 | # load package 66 | library(pheatmap) 67 | 68 | # create an example heatmap 69 | pheatmap(as.matrix(iris[, -5])) 70 | ``` 71 | 72 | ![](iris.png) 73 | 74 | The next time you run RStudio Server, you just need to add the packages directory. 75 | 76 | ```r 77 | .libPaths(new = "/packages") 78 | library(pheatmap) 79 | ``` 80 | 81 | You can mount other volumes too, such as a notebooks directory, so that you can save your work. However, note that if you want to create R Markdown documents you will need to install additional packages, so make sure you have added `/packages` via `.libPaths()` first before installing the additional packages. 82 | 83 | ```bash 84 | docker run --rm \ 85 | -p 8888:8787 \ 86 | -v /Users/dtang/github/learning_docker/rstudio/packages:/packages \ 87 | -v /Users/dtang/github/learning_docker/rstudio/notebooks:/notebooks \ 88 | -v /Users/dtang/github/learning_docker/rstudio:/data \ 89 | -e PASSWORD=password \ 90 | $rstudio_image 91 | ``` 92 | 93 | When you're done use CONTROL+C to stop the container. 94 | 95 | ### System libraries 96 | 97 | Some packages will require libraries that are not installed on the default Debian installation. 
For example, the `Seurat` package will fail to install because it will be missing the `png` library. We can "log in" to the running container and install the missing libraries. First find out the container ID by running `docker ps`. 98 | 99 | ```bash 100 | docker ps 101 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 102 | 215d041c976b rocker/rstudio "/init" 58 minutes ago Up 58 minutes 0.0.0.0:8888->8787/tcp interesting_turing 103 | ``` 104 | 105 | Now we can "log in" to `215d041c976b`. 106 | 107 | ```bash 108 | docker exec -it 215d041c976b /bin/bash 109 | 110 | # once inside 111 | apt update 112 | apt install libpng-dev 113 | ``` 114 | 115 | You can create another Docker image from `rocker/rstudio` so that you don't have to do this manually each time. 116 | 117 | ### RStudio Server preferences 118 | 119 | I have some specific preferences for RStudio Server that are absolutely necessary, such as using Vim key bindings. These preferences are set via the `Tools` menu bar and then selecting `Global Options...`. Each time we start a new container, we will lose our preferences and I don't want to manually change them each time. Luckily, the settings are saved in a specific file, which we can use to save our settings; the `user-settings` file is stored in the location below: 120 | 121 | /home/rstudio/.rstudio/monitored/user-settings/user-settings 122 | 123 | In newer versions of RStudio Server, the settings are now saved in `rstudio-prefs.json` located in: 124 | 125 | /home/rstudio/.config/rstudio 126 | 127 | Once you have made all your settings, save this file back to your local computer and use it to rewrite the default file next time you start a new instance. 
For example: 128 | 129 | ``` 130 | # once you have the container running in the background, log into Docker container 131 | # I have mounted this directory to /data 132 | cp /data/user-settings /home/rstudio/.rstudio/monitored/user-settings/user-settings 133 | 134 | # for newer version of RStudio Server 135 | cp /data/rstudio-prefs.json /home/rstudio/.config/rstudio 136 | ``` 137 | 138 | Now you can have persistent RStudio Server preferences! 139 | 140 | ## Dockerfile 141 | 142 | I created a new Docker image (see `Dockerfile`) from the `rstudio` image to set `.libPaths("/packages/")`, to install the `png` library, and to copy over my `user-settings` file. I can now run this image instead of the base `rstudio` image so that new packages are installed in `packages` and my user settings are preserved. In addition, I installed the `rmarkdown` and `tidyverse` packages in this new image. 143 | 144 | ## Windows 145 | 146 | This works very nicely with Windows! Just make sure you have WSL 2, a Linux distro, and follow [this guide](https://docs.docker.com/docker-for-windows/wsl/). 147 | 148 | ```bash 149 | wsl -l -v 150 | # convert to WSL 2 if you were using WSL 1 151 | wsl --set-version Ubuntu-20.04 2 152 | 153 | ./run_docker.sh 154 | ``` 155 | 156 | Now open your favourite browser and head to localhost:8888! 157 | 158 | ## Configuration files 159 | 160 | [Managing R with .Rprofile, .Renviron, Rprofile.site, Renviron.site, rsession.conf, and repos.conf](https://support.posit.co/hc/en-us/articles/360047157094-Managing-R-with-Rprofile-Renviron-Rprofile-site-Renviron-site-rsession-conf-and-repos-conf). 161 | 162 | Upon startup, R and RStudio IDE look for a few different files you can use to control the behaviour of your R session, for example by setting options or environment variables. Below is a summary of how to control R options and environment variables on startup. 163 | 164 | * `.Rprofile` - sourced as R code. 165 | * `.Renviron` - set environment variables only. 
166 | * `Rprofile.site` - sourced as R code. 167 | * `Renviron.site` - set environment variables only. 168 | * `rsession.conf` - only RStudio IDE settings, only single repository. 169 | * `repos.conf` - only for setting repositories. 170 | 171 | ### .Rprofile 172 | 173 | `.Rprofile` files are user-controllable files to set options and environment variables. `.Rprofile` files can be either at the user or project level. User level `.Rprofile` files live in the base of the user's home directory, and project level `.Rprofile` files live in the base of the project directory. 174 | 175 | R will source only one `.Rprofile` file. So if you have both a project specific `.Rprofile` file and a user `.Rprofile` file that you want to use, you must explicitly source the user level `.Rprofile` at the top of your project level `.Rprofile` with `source("~/.Rprofile")`. 176 | 177 | `.Rprofile` files are sourced as regular R code, so setting environment variables must be done inside a `Sys.setenv(key = "value")` call. 178 | 179 | One easy way to edit your `.Rprofile` file is to use the `usethis::edit_r_profile()` function from within an R session. You can specify whether you want to edit the user or project level `.Rprofile`. 180 | 181 | ### .Renviron 182 | 183 | `.Renviron` is a user-controllable file that can be used to create environment variables. This is especially useful to avoid including credentials like API keys inside R scripts. This file is written in a key-value format, so environment variables are created in the format: 184 | 185 | ``` 186 | Key1=value1 187 | Key2=value2 188 | ``` 189 | 190 | And then `Sys.getenv("Key1")` will return "value1" in an R session. 191 | 192 | Like with `.Rprofile`, `.Renviron` files can be at either the user or project level. If there is a project level `.Renviron`, the user level file **will not be sourced**. The {usethis} package includes a helper function for editing `.Renviron` files from an R session with `usethis::edit_r_environ()`.
193 | 194 | ### Rprofile.site and Renviron.site 195 | 196 | Both `.Rprofile` and `.Renviron` files have equivalents that apply server wide. `Rprofile.site` and `Renviron.site` (no leading dot) files are managed by RStudio Server admins, and are specific to a particular version of R. The most common settings for these files involve access to package repositories. For example, using the shared-baseline package management strategy is generally done from an `Rprofile.site`. 197 | 198 | Users can override settings in these files with their individual `.Rprofile` files. 199 | 200 | These files are set for each version of R and should be located in `R_HOME/etc/`. You can find `R_HOME` by running the command `R.home(component = "home")` in a session of that version of R. So, for example, if you find that `R_HOME` is `/opt/R/3.6.2/lib/R`, the `Rprofile.site` for R 3.6.2 would go in `/opt/R/3.6.2/lib/R/etc/Rprofile.site`. 201 | 202 | ### rsession.conf and repos.conf 203 | 204 | RStudio Server allows server admins to configure particular server-wide R package repositories via the `rsession.conf` and `repos.conf` files. Only one repository can be configured in `rsession.conf`. If multiple repositories are needed, `repos.conf` should be used. 205 | -------------------------------------------------------------------------------- /rstudio/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ver=$(cat Dockerfile | grep "^FROM" | cut -f2 -d':') 4 | 5 | docker build -t davetang/rstudio:${ver} . 
6 | 7 | # docker login 8 | # docker push davetang/rstudio:${ver} 9 | 10 | -------------------------------------------------------------------------------- /rstudio/iris.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davetang/learning_docker/8835c5a8f57f49d4bfd798dbf1d0d14c305cc8fa/rstudio/iris.png -------------------------------------------------------------------------------- /rstudio/notebooks/example.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Example" 3 | output: html_document 4 | --- 5 | 6 | ```{r setup, include=FALSE} 7 | knitr::opts_chunk$set(echo = TRUE) 8 | ``` 9 | 10 | ## Heatmap 11 | 12 | Load `/packages` directory. 13 | 14 | ```{r add_lib_path} 15 | .libPaths(new = "/packages") 16 | ``` 17 | 18 | Load library. 19 | 20 | ```{r load_lib} 21 | library(pheatmap) 22 | ``` 23 | 24 | Generate example heatmap. 25 | 26 | ```{r heatmap} 27 | pheatmap(as.matrix(iris[, -5])) 28 | ``` 29 | -------------------------------------------------------------------------------- /rstudio/notebooks/getting_started_with_keras.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Getting started with Keras" 3 | author: "Dave Tang" 4 | date: "`r Sys.Date()`" 5 | output: html_document 6 | --- 7 | 8 | ```{r setup, include=FALSE} 9 | knitr::opts_chunk$set(echo = TRUE) 10 | ``` 11 | 12 | Following this [basic image classification tutorial](https://tensorflow.rstudio.com/tutorials/beginners/basic-ml/tutorial_basic_classification/). 13 | 14 | Install if necessary. 15 | 16 | ```{r install_keras} 17 | if (!"keras" %in% installed.packages()){ 18 | install.packages("keras") 19 | } 20 | ``` 21 | 22 | Load `keras`. 23 | 24 | ```{r load_keras} 25 | library(keras) 26 | ``` 27 | 28 | Prepare data. 
29 | 30 | ```{r prepare data} 31 | fashion_mnist <- dataset_fashion_mnist() 32 | 33 | c(train_images, train_labels) %<-% fashion_mnist$train 34 | c(test_images, test_labels) %<-% fashion_mnist$test 35 | ``` 36 | 37 | ```{r} 38 | class_names = c('T-shirt/top', 39 | 'Trouser', 40 | 'Pullover', 41 | 'Dress', 42 | 'Coat', 43 | 'Sandal', 44 | 'Shirt', 45 | 'Sneaker', 46 | 'Bag', 47 | 'Ankle boot') 48 | ``` 49 | 50 | 51 | ```{r} 52 | dim(train_images) 53 | ``` 54 | 55 | ```{r} 56 | library(tidyr) 57 | library(ggplot2) 58 | 59 | image_1 <- as.data.frame(train_images[1, , ]) 60 | colnames(image_1) <- seq_len(ncol(image_1)) 61 | image_1$y <- seq_len(nrow(image_1)) 62 | image_1 <- gather(image_1, "x", "value", -y) 63 | image_1$x <- as.integer(image_1$x) 64 | 65 | ggplot(image_1, aes(x = x, y = y, fill = value)) + 66 | geom_tile() + 67 | scale_fill_gradient(low = "white", high = "black", na.value = NA) + 68 | scale_y_reverse() + 69 | theme_minimal() + 70 | theme(panel.grid = element_blank()) + 71 | theme(aspect.ratio = 1) + 72 | xlab("") + 73 | ylab("") 74 | ``` 75 | 76 | ```{r} 77 | train_images <- train_images / 255 78 | test_images <- test_images / 255 79 | ``` 80 | 81 | ```{r} 82 | par(mfcol=c(5,5)) 83 | par(mar=c(0, 0, 1.5, 0), xaxs='i', yaxs='i') 84 | for (i in 1:25) { 85 | img <- train_images[i, , ] 86 | img <- t(apply(img, 2, rev)) 87 | image(1:28, 1:28, img, col = gray((0:255)/255), xaxt = 'n', yaxt = 'n', 88 | main = paste(class_names[train_labels[i] + 1])) 89 | } 90 | ``` 91 | 92 | ```{r} 93 | model <- keras_model_sequential() 94 | model %>% 95 | layer_flatten(input_shape = c(28, 28)) %>% 96 | layer_dense(units = 128, activation = 'relu') %>% 97 | layer_dense(units = 10, activation = 'softmax') 98 | ``` 99 | 100 | 101 | ```{r} 102 | model %>% compile( 103 | optimizer = 'adam', 104 | loss = 'sparse_categorical_crossentropy', 105 | metrics = c('accuracy') 106 | ) 107 | ``` 108 | 109 | ```{r} 110 | model %>% fit(train_images, train_labels, epochs = 5, verbose = 2) 111 
| 112 | ``` 113 | 114 | ```{r} 115 | score <- model %>% evaluate(test_images, test_labels, verbose = 0) 116 | score 117 | ``` 118 | 119 | ```{r} 120 | predictions <- model %>% predict(test_images) 121 | predictions[1, ] 122 | which.max(predictions[1, ]) 123 | class_pred <- model %>% predict_classes(test_images) 124 | class_pred[1:20] 125 | test_labels[1] 126 | 127 | ``` 128 | 129 | ```{r} 130 | par(mfcol=c(5,5)) 131 | par(mar=c(0, 0, 1.5, 0), xaxs='i', yaxs='i') 132 | for (i in 1:25) { 133 | img <- test_images[i, , ] 134 | img <- t(apply(img, 2, rev)) 135 | # subtract 1 as labels go from 0 to 9 136 | predicted_label <- which.max(predictions[i, ]) - 1 137 | true_label <- test_labels[i] 138 | if (predicted_label == true_label) { 139 | color <- '#008800' 140 | } else { 141 | color <- '#bb0000' 142 | } 143 | image(1:28, 1:28, img, col = gray((0:255)/255), xaxt = 'n', yaxt = 'n', 144 | main = paste0(class_names[predicted_label + 1], " (", 145 | class_names[true_label + 1], ")"), 146 | col.main = color) 147 | } 148 | ``` 149 | 150 | ```{r} 151 | img <- test_images[1, , , drop = FALSE] 152 | dim(img) 153 | predictions <- model %>% predict(img) 154 | predictions 155 | prediction <- predictions[1, ] - 1 156 | which.max(prediction) 157 | class_pred <- model %>% predict_classes(img) 158 | class_pred 159 | ``` 160 | 161 | -------------------------------------------------------------------------------- /rstudio/rstudio-prefs.json: -------------------------------------------------------------------------------- 1 | { 2 | "save_workspace": "never", 3 | "always_save_history": false, 4 | "reuse_sessions_for_project_links": true, 5 | "posix_terminal_shell": "bash", 6 | "initial_working_directory": "~", 7 | "panes": { 8 | "quadrants": [ 9 | "Source", 10 | "TabSet1", 11 | "Console", 12 | "TabSet2" 13 | ], 14 | "tabSet1": [ 15 | "Environment", 16 | "History", 17 | "Connections", 18 | "Build", 19 | "VCS", 20 | "Tutorial", 21 | "Presentation" 22 | ], 23 | "tabSet2": [ 24 | "Files", 25 | 
"Plots", 26 | "Packages", 27 | "Help", 28 | "Viewer", 29 | "Presentations" 30 | ], 31 | "hiddenTabSet": [], 32 | "console_left_on_top": false, 33 | "console_right_on_top": true, 34 | "additional_source_columns": 0 35 | }, 36 | "editor_theme": "Clouds Midnight", 37 | "load_workspace": false, 38 | "insert_native_pipe_operator": true, 39 | "editor_keybindings": "vim" 40 | } 41 | -------------------------------------------------------------------------------- /rstudio/run_docker.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | if [[ ! -x $(command -v docker) ]]; then 6 | >&2 echo Could not find docker 7 | exit 1 8 | fi 9 | 10 | usage() { 11 | >&2 echo "Usage: $0 [ -v rstudio_dtang version ] [ -p port ] < dirs_to_mount >" 12 | exit 1 13 | } 14 | 15 | while getopts ":v:p:" options; do 16 | case "${options}" in 17 | v) 18 | ver=${OPTARG} 19 | ;; 20 | p) 21 | port=${OPTARG} 22 | ;; 23 | :) 24 | echo "Error: -${OPTARG} requires an argument." 25 | exit 1 26 | ;; 27 | *) 28 | usage ;; 29 | esac 30 | done 31 | 32 | if [[ ${OPTIND} -lt 5 ]]; then 33 | usage 34 | fi 35 | 36 | rstudio_image=davetang/rstudio:${ver} 37 | check_image=$(docker image inspect ${rstudio_image}) 38 | container_name=rstudio_dtang 39 | 40 | to_mount= 41 | if [[ $#-4 -gt 0 ]]; then 42 | for ((i=0; i<$#-4; i++)); do 43 | d=${@:$OPTIND+$i:1} 44 | full_d=$(readlink -f ${d}) 45 | if [[ ! -d ${full_d} ]]; then 46 | >&2 echo Directory ${full_d} does not exist 47 | exit 1 48 | fi 49 | base_d=$(basename ${d}) 50 | to_mount+="-v ${full_d}:/data/${base_d} " 51 | >&2 echo ${full_d} will be mounted to /data/${base_d} 52 | done 53 | fi 54 | 55 | r_package_dir=${HOME}/r_${ver}_packages 56 | if [[ ! 
-d ${r_package_dir} ]]; then 57 | >&2 echo Creating ${r_package_dir} 58 | mkdir ${r_package_dir} 59 | fi 60 | 61 | docker run --rm \ 62 | -p $port:8787 \ 63 | -d \ 64 | --name $container_name \ 65 | -v ${r_package_dir}:/packages \ 66 | ${to_mount} \ 67 | -e PASSWORD=password \ 68 | -e USERID=$(id -u) \ 69 | -e GROUPID=$(id -g) \ 70 | $rstudio_image 71 | 72 | >&2 echo $container_name listening on port $port 73 | >&2 echo Copy and paste http://localhost:$port into your browser 74 | >&2 echo Username is rstudio and password is password 75 | >&2 echo To stop container run: docker stop ${container_name} 76 | >&2 echo Done 77 | 78 | -------------------------------------------------------------------------------- /rstudio/run_rstudio.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | RVER=4.5.0 6 | IMAGE=davetang/rstudio:${RVER} 7 | NAME=rstudio_server_${RVER} 8 | PORT=8889 9 | LIB=${HOME}/r_packages_${RVER} 10 | 11 | if [[ ! 
-d ${LIB} ]]; then 12 | mkdir ${LIB} 13 | fi 14 | 15 | docker run \ 16 | --name ${NAME} \ 17 | -d \ 18 | --restart always \ 19 | -p ${PORT}:8787 \ 20 | -v ${LIB}:/packages \ 21 | -v ${HOME}/github/:/home/rstudio/work \ 22 | -v ${HOME}/gitlab/:/home/rstudio/gitlab \ 23 | -v ${HOME}/analysis/:/analysis \ 24 | -v ${HOME}:/data \ 25 | -e PASSWORD=password \ 26 | -e USERID=$(id -u) \ 27 | -e GROUPID=$(id -g) \ 28 | ${IMAGE} 29 | 30 | >&2 echo ${NAME} listening on port ${PORT} 31 | 32 | exit 0 33 | -------------------------------------------------------------------------------- /rstudio/user-settings: -------------------------------------------------------------------------------- 1 | alwaysSaveHistory="1" 2 | cleanTexi2DviOutput="1" 3 | cleanupAfterRCmdCheck="1" 4 | contextIdentifier="98F3F70F" 5 | cranMirrorChanged="1" 6 | cranMirrorCountry="" 7 | cranMirrorHost="RStudio" 8 | cranMirrorName="Global (CDN)" 9 | cranMirrorRepos="" 10 | cranMirrorUrl="https://cran.rstudio.com/" 11 | customShellCommand="" 12 | customShellOptions="" 13 | defaultTerminalShell="7" 14 | enableLaTeXShellEscape="0" 15 | errorHandlerType="1" 16 | hideObjectFiles="1" 17 | lineEndingConversion="2" 18 | loadRData="1" 19 | newlineInMakefiles="1" 20 | removeHistoryDuplicates="0" 21 | restoreLastProject="1" 22 | reuseSessionsForProjectLinks="1" 23 | rprofileOnResume="0" 24 | saveAction="-1" 25 | securePackageDownload="1" 26 | showLastDotValue="0" 27 | showUserHomePage="sessions" 28 | uiPrefs="{\n \"always_complete_characters\" : 3,\n \"always_complete_console\" : true,\n \"always_complete_delay\" : 250,\n \"always_enable_concordance\" : true,\n \"ansi_console_mode\" : 1,\n \"auto_append_newline\" : false,\n \"auto_detect_indentation\" : true,\n \"auto_discover_package_dependencies\" : true,\n \"auto_expand_error_tracebacks\" : false,\n \"auto_run_setup_chunk\" : true,\n \"background_diagnostics_delay_ms\" : 2000,\n \"blinking_cursor\" : true,\n \"busy_detection\" : 0,\n \"busy_whitelist\" : [\n 
\"tmux\",\n \"screen\"\n ],\n \"check_arguments_to_r_function_calls\" : false,\n \"clear_hidden\" : false,\n \"code_complete\" : \"always\",\n \"code_complete_other\" : \"always\",\n \"continue_comments_on_newline\" : false,\n \"default_encoding\" : \"\",\n \"default_latex_program\" : \"pdfLaTeX\",\n \"default_project_location\" : \"~\",\n \"default_sweave_engine\" : \"Sweave\",\n \"diagnostics_in_function_calls\" : true,\n \"diagnostics_on_save\" : true,\n \"doc_outline_show\" : \"show_sections_only\",\n \"enable_background_diagnostics\" : true,\n \"enable_emacs_keybindings\" : false,\n \"enable_rsconnect_publish_ui\" : true,\n \"enable_snippets\" : true,\n \"enable_style_diagnostics\" : false,\n \"enable_text_drag\" : true,\n \"execution_behavior\" : \"statement\",\n \"flat_theme\" : \"default\",\n \"focus_console_after_exec\" : false,\n \"fold_style\" : \"markbegin\",\n \"font_size_points\" : 10,\n \"git_diff_ignore_whitespace\" : false,\n \"handle_errors_in_user_code_only\" : true,\n \"hide_console_on_chunk_execute\" : true,\n \"highlight_code_chunks\" : true,\n \"highlight_r_function_calls\" : false,\n \"highlight_selected_line\" : false,\n \"highlight_selected_word\" : true,\n \"ignore_uppercase_words\" : true,\n \"ignore_words_with_numbers\" : true,\n \"insert_matching\" : true,\n \"insert_numbered_latex_sections\" : false,\n \"insert_parens_after_function_completion\" : true,\n \"insert_spaces_around_equals\" : true,\n \"knit_working_dir\" : \"default\",\n \"latex_preview_on_cursor_idle\" : \"always\",\n \"navigate_to_build_error\" : true,\n \"num_spaces_for_tab\" : 2,\n \"packages_pane_enabled\" : true,\n \"pane_config\" : {\n \"consoleLeftOnTop\" : false,\n \"consoleRightOnTop\" : true,\n \"panes\" : [\n \"Source\",\n \"TabSet1\",\n \"Console\",\n \"TabSet2\"\n ],\n \"tabSet1\" : [\n \"Environment\",\n \"History\",\n \"Connections\",\n \"Build\",\n \"VCS\",\n \"Presentation\"\n ],\n \"tabSet2\" : [\n \"Files\",\n \"Plots\",\n \"Packages\",\n \"Help\",\n 
\"Viewer\"\n ]\n },\n \"pdf_previewer\" : \"rstudio\",\n \"preferred_document_outline_width\" : 110,\n \"print_margin_column\" : 80,\n \"publish_ca_bundle\" : \"\",\n \"publish_check_certificates\" : true,\n \"reindent_on_paste\" : true,\n \"restore_source_document_cursor_position\" : true,\n \"restore_source_documents\" : true,\n \"rmd_chunk_output_inline\" : true,\n \"rmd_preferred_template_path\" : \"\",\n \"rmd_viewer_type\" : 0,\n \"root_document\" : \"\",\n \"rstheme\" : {\n \"isDark\" : true,\n \"name\" : \"Clouds Midnight\",\n \"url\" : \"theme/default/clouds_midnight.rstheme\"\n },\n \"save_before_sourcing\" : true,\n \"save_files_before_build\" : false,\n \"scroll_past_end_of_document\" : false,\n \"show_diagnostics_cpp\" : true,\n \"show_diagnostics_other\" : true,\n \"show_diagnostics_r\" : true,\n \"show_doc_outline_rmd\" : false,\n \"show_help_tooltip_on_idle\" : false,\n \"show_indent_guides\" : false,\n \"show_inline_toolbar_for_r_code_chunks\" : true,\n \"show_invisibles\" : false,\n \"show_line_numbers\" : true,\n \"show_margin\" : false,\n \"show_publish_diagnostics\" : false,\n \"show_publish_ui\" : true,\n \"show_rmd_render_command\" : false,\n \"show_signature_tooltips\" : true,\n \"show_terminal_tab\" : false,\n \"soft_wrap_r_files\" : false,\n \"source_with_echo\" : false,\n \"spelling_dictionary_language\" : \"en_US\",\n \"strip_trailing_whitespace\" : false,\n \"surround_selection\" : \"quotes_and_brackets\",\n \"syntax_color_console\" : false,\n \"tab_multiline_completion\" : false,\n \"terminal_autoclose\" : true,\n \"terminal_local_echo\" : true,\n \"terminal_track_env\" : true,\n \"terminal_websockets\" : true,\n \"theme\" : \"Clouds Midnight\",\n \"toolbar_visible\" : true,\n \"truncate_long_lines_in_console\" : 1000,\n \"use_dataimport\" : true,\n \"use_publish_ca_bundle\" : false,\n \"use_rcpp_template\" : true,\n \"use_roxygen\" : false,\n \"use_spaces_for_tab\" : true,\n \"use_vim_mode\" : true,\n \"valign_argument_indent\" : 
true,\n \"warn_if_no_such_variable_in_scope\" : false,\n \"warn_if_variable_defined_but_not_used\" : false,\n \"wrap_tab_navigation\" : false\n}" 29 | useDevtools="1" 30 | useInternet2="1" 31 | vcsEnabled="1" 32 | vcsGitExePath="" 33 | vcsSvnExePath="" 34 | vcsTerminalPath="" 35 | viewDirAfterRCmdCheck="0" 36 | -------------------------------------------------------------------------------- /rstudio_python/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM davetang/rstudio:4.2.0 2 | 3 | LABEL source="https://github.com/davetang/learning_docker/blob/main/rstudio_python/Dockerfile" 4 | 5 | MAINTAINER Dave Tang 6 | 7 | RUN apt-get clean all && \ 8 | apt-get update && \ 9 | apt-get upgrade -y && \ 10 | apt-get install -y \ 11 | python3 \ 12 | python-is-python3 \ 13 | python3-pip \ 14 | && apt-get clean all && \ 15 | apt-get purge && \ 16 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 17 | 18 | RUN pip install numpy pandas scikit-learn matplotlib 19 | 20 | WORKDIR /home/rstudio 21 | 22 | -------------------------------------------------------------------------------- /rstudio_python/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | image=rstudio_python 6 | ver=$(cat Dockerfile | grep "^FROM" | cut -f2 -d':') 7 | 8 | docker build -t davetang/${image}:${ver} . 
9 | 10 | >&2 echo Build complete 11 | >&2 echo -e "Run the following to push to Docker Hub:\n" 12 | >&2 echo docker login 13 | >&2 echo docker push davetang/${image}:${ver} 14 | 15 | exit 0 16 | 17 | -------------------------------------------------------------------------------- /samtools/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | # Build 4 | FROM davetang/build:23.04 as build 5 | ARG samtools_ver=1.17 6 | RUN cd /tmp && \ 7 | wget https://github.com/samtools/samtools/releases/download/${samtools_ver}/samtools-${samtools_ver}.tar.bz2 && \ 8 | tar xjf samtools-${samtools_ver}.tar.bz2 && \ 9 | cd samtools-${samtools_ver} && \ 10 | make 11 | 12 | # Install 13 | # unable to use samtools_ver below 14 | FROM ubuntu:23.04 as install 15 | COPY --from=build /lib/x86_64-linux-gnu /lib/x86_64-linux-gnu 16 | WORKDIR /root/src 17 | COPY --from=build /tmp/samtools-1.17 ./ 18 | RUN apt-get clean all && \ 19 | apt-get update && \ 20 | apt-get install -y \ 21 | make \ 22 | && apt-get clean all && \ 23 | apt-get purge && \ 24 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \ 25 | make install && \ 26 | cd /root/ && rm -rf src/* 27 | 28 | ENTRYPOINT ["samtools"] 29 | -------------------------------------------------------------------------------- /samtools/README.md: -------------------------------------------------------------------------------- 1 | # README 2 | 3 | Use 4 | [biocontainers/samtools](https://quay.io/repository/biocontainers/samtools?tab=tags&tag=latest) 5 | if you want a Docker image for `samtools`. It is only 61.8MB in size. 
6 | 7 | ```console 8 | docker pull quay.io/biocontainers/samtools:1.17--h00cdaf9_0 9 | docker images quay.io/biocontainers/samtools:1.17--h00cdaf9_0 10 | # REPOSITORY TAG IMAGE ID CREATED SIZE 11 | # quay.io/biocontainers/samtools 1.17--h00cdaf9_0 57a71725cb8a 3 weeks ago 61.8MB 12 | ``` 13 | 14 | Build a Docker image for [samtools](https://github.com/samtools/samtools) using 15 | a [multistage build](https://docs.docker.com/build/building/multi-stage/) for 16 | testing purposes only. Do not use this image for any other reason because it 17 | includes a lot of unnecessary libraries. 18 | 19 | Ideally the image should only contain these dependencies: 20 | 21 | Samtools: 22 | * zlib 23 | * curses or GNU ncurses (optional, for the 'tview' command) 24 | 25 | HTSlib: 26 | * zlib 27 | * libbz2 28 | * liblzma 29 | * libcurl (optional but strongly recommended, for network access) 30 | * libcrypto (optional, for Amazon S3 support; not needed on MacOS) 31 | 32 | Or is there a way to build `samtools` such that all libraries are statically 33 | linked, where a multistage build makes more sense. 34 | 35 | ## Build 36 | 37 | Use Ubuntu build image to build `samtools`. 38 | 39 | ```console 40 | docker pull davetang/build:23.04 41 | ``` 42 | 43 | Run `build.sh`. 44 | 45 | ```console 46 | ./build.sh 47 | ``` 48 | 49 | ## Testing 50 | 51 | Run `test.sh`. 
52 | 53 | ```console 54 | ./test.sh 55 | # 1176360 + 0 in total (QC-passed reads + QC-failed reads) 56 | # 1160084 + 0 primary 57 | # 16276 + 0 secondary 58 | # 0 + 0 supplementary 59 | # 0 + 0 duplicates 60 | # 0 + 0 primary duplicates 61 | # 1126961 + 0 mapped (95.80% : N/A) 62 | # 1110685 + 0 primary mapped (95.74% : N/A) 63 | # 1160084 + 0 paired in sequencing 64 | # 580042 + 0 read1 65 | # 580042 + 0 read2 66 | # 1060858 + 0 properly paired (91.45% : N/A) 67 | # 1065618 + 0 with itself and mate mapped 68 | # 45067 + 0 singletons (3.88% : N/A) 69 | # 0 + 0 with mate mapped to a different chr 70 | # 0 + 0 with mate mapped to a different chr (mapQ>=5) 71 | # Done 72 | ``` 73 | -------------------------------------------------------------------------------- /samtools/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | image=samtools 6 | script_dir=$(realpath $(dirname $0)) 7 | ver=$(cat ${script_dir}/Dockerfile | grep "ARG samtools_ver=" | cut -f2 -d'=') 8 | 9 | docker build -t davetang/${image}:${ver} . 10 | 11 | >&2 echo Build complete 12 | >&2 echo -e "Run the following to push to Docker Hub:\n" 13 | >&2 echo docker login 14 | >&2 echo docker push davetang/${image}:${ver} 15 | -------------------------------------------------------------------------------- /samtools/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | script_dir=$(realpath $(dirname $0)) 6 | ver=$(cat ${script_dir}/Dockerfile | grep "ARG samtools_ver=" | cut -f2 -d'=') 7 | 8 | if [[ ! -e ${script_dir}/ERR188273_chrX.bam ]]; then 9 | wget https://github.com/davetang/learning_bam_file/raw/main/eg/ERR188273_chrX.bam 10 | fi 11 | docker run --rm -v $(pwd):/data davetang/samtools:${ver} flagstat /data/ERR188273_chrX.bam 12 | 13 | if [[ $? 
== 0 ]]; then 14 | >&2 echo Done 15 | exit 0 16 | else 17 | >&2 echo docker run error 18 | exit 1 19 | fi 20 | -------------------------------------------------------------------------------- /script/docker_build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | usage(){ 6 | echo "Usage: $0 Dockerfile image_name [ver]" 7 | exit 1 8 | } 9 | [[ $# -lt 2 ]] && usage 10 | 11 | infile=$1 12 | iname=$2 13 | 14 | if [[ $# -eq 3 ]]; then 15 | ver=$3 16 | else 17 | ver=$(cat ${infile} | grep "^FROM" | cut -f2 -d':' || true) 18 | if [[ -z ${ver} ]]; then 19 | >&2 echo Could not get version from ${infile} 20 | >&2 echo Please re-run script with the version number 21 | exit 1 22 | fi 23 | fi 24 | 25 | docker build -t ${iname}:${ver} -f ${infile} . 26 | 27 | >&2 cat < 4 | 5 | ARG SEURATVER=5.1.0 6 | 7 | RUN apt-get clean all && \ 8 | apt-get update && \ 9 | apt-get upgrade -y && \ 10 | apt-get install -y \ 11 | cmake \ 12 | libssl-dev \ 13 | libclang-dev \ 14 | libxml2-dev \ 15 | libcurl4-openssl-dev \ 16 | libssl-dev \ 17 | libfftw3-dev \ 18 | libtiff-dev \ 19 | libgsl-dev \ 20 | libfontconfig1-dev \ 21 | libharfbuzz-dev \ 22 | libfribidi-dev \ 23 | libproj-dev \ 24 | libboost-all-dev \ 25 | libmagick++-dev \ 26 | libv8-dev \ 27 | libudunits2-dev \ 28 | libgdal-dev \ 29 | libmpfr-dev \ 30 | glpk-utils \ 31 | libglpk-dev \ 32 | libicu-dev \ 33 | libhdf5-dev \ 34 | python3-pip \ 35 | patch \ 36 | && apt-get clean all && \ 37 | apt-get purge && \ 38 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 39 | 40 | RUN R -q -e 'install.packages(c("BiocManager", "remotes", "optparse"))' 41 | RUN R -q -e 'remotes::install_version("SeuratObject", version = "5.0.2", repos = "https://cran.ism.ac.jp/", quiet = FALSE)' 42 | RUN R -q -e "remotes::install_version('Seurat', version = '${SEURATVER}', repos = 'https://cran.ism.ac.jp/', quiet = FALSE)" 43 | RUN R -q -e 'library(Seurat)' 44 | RUN R -q -e 
'install.packages("hdf5r")' 45 | -------------------------------------------------------------------------------- /seurat/README.md: -------------------------------------------------------------------------------- 1 | ## README 2 | 3 | Install Seurat 5.1.0 on R-4.4.0. Available on [Docker Hub](https://hub.docker.com/r/davetang/seurat/tags). 4 | -------------------------------------------------------------------------------- /seurat/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | SEURATVER=$(cat Dockerfile| grep SEURATVER= | cut -f2 -d'=') 6 | 7 | docker build -t davetang/seurat:${SEURATVER} . 8 | 9 | cat <&2 echo ${container_name} listening on port ${port} 24 | exit 0 25 | -------------------------------------------------------------------------------- /shiny/.gitignore: -------------------------------------------------------------------------------- 1 | shinylog 2 | deseq2_app/Rplots.pdf 3 | -------------------------------------------------------------------------------- /shiny/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rocker/shiny:4.3.0 2 | 3 | LABEL source="https://github.com/davetang/learning_docker/blob/main/shiny/Dockerfile" 4 | 5 | MAINTAINER Dave Tang 6 | 7 | RUN apt-get clean all && \ 8 | apt-get update && \ 9 | apt-get upgrade -y && \ 10 | apt-get install -y \ 11 | libhdf5-dev \ 12 | libcurl4-gnutls-dev \ 13 | libssl-dev \ 14 | libxml2-dev \ 15 | libpng-dev \ 16 | libxt-dev \ 17 | zlib1g-dev \ 18 | libbz2-dev \ 19 | liblzma-dev \ 20 | libglpk40 \ 21 | locales \ 22 | && apt-get clean all && \ 23 | apt-get purge && \ 24 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 25 | 26 | # from https://leimao.github.io/blog/Docker-Locale/ 27 | RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && locale-gen 28 | ENV LANGUAGE en_US:en 29 | ENV LC_ALL en_US.UTF-8 30 | 31 | RUN Rscript -e 
"install.packages(c('tidyverse', 'circlize', 'shinydashboard', 'DT', 'GetoptLong', 'BiocManager'));" 32 | RUN Rscript -e "BiocManager::install(c('airway', 'DESeq2', 'ComplexHeatmap', 'InteractiveComplexHeatmap'), version = '3.17')" 33 | 34 | WORKDIR /home/shiny 35 | -------------------------------------------------------------------------------- /shiny/README.md: -------------------------------------------------------------------------------- 1 | ## README 2 | 3 | Image based on [rocker/shiny](https://hub.docker.com/r/rocker/shiny) and app 4 | based on [A Shiny app for visualising DESeq2 5 | results](https://jokergoo.github.io/InteractiveComplexHeatmap/articles/deseq2_app.html). 6 | 7 | The `run_shiny.sh` script will start a Docker container named 8 | shiny_(your_userid). 9 | 10 | Once the container is running, open your browser and navigate to 11 | `localhost:3838` (default port). 12 | 13 | ## Troubleshooting 14 | 15 | Check log in `shinylog`. 16 | 17 | ```bash 18 | cat shinylog/shiny-server-shiny-20220310-001302-45127.log 19 | ``` 20 | 21 | "Log" into container and check other logs. 22 | 23 | ```bash 24 | docker exec -it shiny_dtang /bin/bash 25 | ``` 26 | -------------------------------------------------------------------------------- /shiny/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | image=shiny 6 | ver=$(cat Dockerfile | grep "^FROM" | cut -f2 -d':') 7 | 8 | docker build -t davetang/${image}:${ver} . 
9 | >&2 echo Done 10 | exit 0 11 | 12 | # docker login 13 | # docker push davetang/${image}:${ver} 14 | 15 | -------------------------------------------------------------------------------- /shiny/deseq2_app/app.R: -------------------------------------------------------------------------------- 1 | library(shiny) 2 | library(shinydashboard) 3 | library(airway) 4 | library(DT) 5 | library(GetoptLong) # for qq() function 6 | library(DESeq2) 7 | library(InteractiveComplexHeatmap) 8 | library(ComplexHeatmap) 9 | library(circlize) 10 | library(GetoptLong) 11 | 12 | data(airway) 13 | se = airway 14 | dds = DESeqDataSet(se, design = ~ dex) 15 | keep = rowSums(counts(dds)) >= 10 16 | dds = dds[keep, ] 17 | dds$dex = relevel(dds$dex, ref = "untrt") 18 | dds = DESeq(dds) 19 | res = results(dds) 20 | res = as.data.frame(res) 21 | 22 | env = new.env() 23 | 24 | make_heatmap = function(fdr = 0.01, base_mean = 0, log2fc = 0) { 25 | l = res$padj <= fdr & res$baseMean >= base_mean & 26 | abs(res$log2FoldChange) >= log2fc; l[is.na(l)] = FALSE 27 | 28 | if(sum(l) == 0) return(NULL) 29 | 30 | m = counts(dds, normalized = TRUE) 31 | m = m[l, ] 32 | 33 | env$row_index = which(l) 34 | 35 | ht = Heatmap(t(scale(t(m))), name = "z-score", 36 | top_annotation = HeatmapAnnotation( 37 | dex = colData(dds)$dex, 38 | sizeFactor = anno_points(colData(dds)$sizeFactor) 39 | ), 40 | show_row_names = FALSE, show_column_names = FALSE, row_km = 2, 41 | column_title = paste0(sum(l), " significant genes with FDR < ", fdr), 42 | show_row_dend = FALSE) + 43 | Heatmap(log10(res$baseMean[l]+1), show_row_names = FALSE, width = unit(5, "mm"), 44 | name = "log10(baseMean+1)", show_column_names = FALSE) + 45 | Heatmap(res$log2FoldChange[l], show_row_names = FALSE, width = unit(5, "mm"), 46 | name = "log2FoldChange", show_column_names = FALSE, 47 | col = colorRamp2(c(-2, 0, 2), c("green", "white", "red"))) 48 | ht = draw(ht, merge_legend = TRUE) 49 | ht 50 | } 51 | 52 | # make the MA-plot with some genes 
highlighted 53 | make_maplot = function(res, highlight = NULL) { 54 | col = rep("#00000020", nrow(res)) 55 | cex = rep(0.5, nrow(res)) 56 | names(col) = rownames(res) 57 | names(cex) = rownames(res) 58 | if(!is.null(highlight)) { 59 | col[highlight] = "red" 60 | cex[highlight] = 1 61 | } 62 | x = res$baseMean 63 | y = res$log2FoldChange 64 | y[y > 2] = 2 65 | y[y < -2] = -2 66 | col[col == "red" & y < 0] = "darkgreen" 67 | par(mar = c(4, 4, 1, 1)) 68 | 69 | suppressWarnings( 70 | plot(x, y, col = col, 71 | pch = ifelse(res$log2FoldChange > 2 | res$log2FoldChange < -2, 1, 16), 72 | cex = cex, log = "x", 73 | xlab = "baseMean", ylab = "log2 fold change") 74 | ) 75 | } 76 | 77 | # make the volcano plot with some genes highlited 78 | make_volcano = function(res, highlight = NULL) { 79 | col = rep("#00000020", nrow(res)) 80 | cex = rep(0.5, nrow(res)) 81 | names(col) = rownames(res) 82 | names(cex) = rownames(res) 83 | if(!is.null(highlight)) { 84 | col[highlight] = "red" 85 | cex[highlight] = 1 86 | } 87 | x = res$log2FoldChange 88 | y = -log10(res$padj) 89 | col[col == "red" & x < 0] = "darkgreen" 90 | par(mar = c(4, 4, 1, 1)) 91 | 92 | suppressWarnings( 93 | plot(x, y, col = col, 94 | pch = 16, 95 | cex = cex, 96 | xlab = "log2 fold change", ylab = "-log10(FDR)") 97 | ) 98 | } 99 | 100 | body = dashboardBody( 101 | fluidRow( 102 | column(width = 4, 103 | box(title = "Differential heatmap", width = NULL, solidHeader = TRUE, status = "primary", 104 | originalHeatmapOutput("ht", height = 800, containment = TRUE) 105 | ) 106 | ), 107 | column(width = 4, 108 | id = "column2", 109 | box(title = "Sub-heatmap", width = NULL, solidHeader = TRUE, status = "primary", 110 | subHeatmapOutput("ht", title = NULL, containment = TRUE) 111 | ), 112 | box(title = "Output", width = NULL, solidHeader = TRUE, status = "primary", 113 | HeatmapInfoOutput("ht", title = NULL) 114 | ), 115 | box(title = "Note", width = NULL, solidHeader = TRUE, status = "primary", 116 | htmlOutput("note") 117 
| ), 118 | ), 119 | column(width = 4, 120 | box(title = "MA-plot", width = NULL, solidHeader = TRUE, status = "primary", 121 | plotOutput("ma_plot") 122 | ), 123 | box(title = "Volcano plot", width = NULL, solidHeader = TRUE, status = "primary", 124 | plotOutput("volcano_plot") 125 | ), 126 | box(title = "Result table of the selected genes", width = NULL, solidHeader = TRUE, status = "primary", 127 | DTOutput("res_table") 128 | ) 129 | ), 130 | tags$style(" 131 | .content-wrapper, .right-side { 132 | overflow-x: auto; 133 | } 134 | .content { 135 | min-width:1500px; 136 | } 137 | ") 138 | ) 139 | ) 140 | 141 | brush_action = function(df, input, output, session) { 142 | 143 | row_index = unique(unlist(df$row_index)) 144 | selected = env$row_index[row_index] 145 | 146 | output[["ma_plot"]] = renderPlot({ 147 | make_maplot(res, selected) 148 | }) 149 | 150 | output[["volcano_plot"]] = renderPlot({ 151 | make_volcano(res, selected) 152 | }) 153 | 154 | output[["res_table"]] = renderDT( 155 | formatRound(datatable(res[selected, c("baseMean", "log2FoldChange", "padj")], rownames = TRUE), columns = 1:3, digits = 3) 156 | ) 157 | 158 | output[["note"]] = renderUI({ 159 | if(!is.null(df)) { 160 | HTML(qq("

Row indices captured in Output only correspond to the matrix of the differential genes. To get the row indices in the original matrix, you need to perform:

161 |
162 | l = res$padj <= @{input$fdr} &
163 |     res$baseMean >= @{input$base_mean} &
164 |     abs(res$log2FoldChange) >= @{input$log2fc}
165 | l[is.na(l)] = FALSE
166 | which(l)[row_index]
167 | 
168 |

where res is the complete data frame from DESeq2 analysis and row_index is the row_index column captured from the code in Output.

")) 169 | } 170 | }) 171 | } 172 | 173 | ui = dashboardPage( 174 | dashboardHeader(title = "DESeq2 results"), 175 | dashboardSidebar( 176 | selectInput("fdr", label = "Cutoff for FDRs:", c("0.001" = 0.001, "0.01" = 0.01, "0.05" = 0.05)), 177 | numericInput("base_mean", label = "Minimal base mean:", value = 0), 178 | numericInput("log2fc", label = "Minimal abs(log2 fold change):", value = 0), 179 | actionButton("filter", label = "Generate heatmap") 180 | ), 181 | body 182 | ) 183 | 184 | server = function(input, output, session) { 185 | observeEvent(input$filter, { 186 | ht = make_heatmap(fdr = as.numeric(input$fdr), base_mean = input$base_mean, log2fc = input$log2fc) 187 | if(!is.null(ht)) { 188 | makeInteractiveComplexHeatmap(input, output, session, ht, "ht", 189 | brush_action = brush_action) 190 | } else { 191 | # The ID for the heatmap plot is encoded as @{heatmap_id}_heatmap, thus, it is ht_heatmap here. 192 | output$ht_heatmap = renderPlot({ 193 | grid.newpage() 194 | grid.text("No row exists after filtering.") 195 | }) 196 | } 197 | }, ignoreNULL = FALSE) 198 | } 199 | 200 | shinyApp(ui, server) 201 | 202 | -------------------------------------------------------------------------------- /shiny/run_shiny.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | version=4.3.0 6 | me=$(whoami) 7 | tool=shiny 8 | image=davetang/${tool}:${version} 9 | container_name=${tool}_${me} 10 | port=3838 11 | shinyapp=$(pwd)/test_app 12 | # shinyapp=$(pwd)/deseq2_app 13 | shinylog=$(pwd)/shinylog 14 | 15 | if [[ ! 
-d ${shinylog} ]]; then 16 | mkdir ${shinylog} 17 | fi 18 | 19 | docker run -d \ 20 | -p ${port}:3838 \ 21 | --restart always \ 22 | --name ${container_name} \ 23 | -v ${shinyapp}:/srv/shiny-server/ \ 24 | -v ${shinylog}:/var/log/shiny-server/ \ 25 | ${image} 26 | 27 | >&2 echo ${container_name} listening on port ${port} 28 | >&2 echo To stop and remove the container run: 29 | >&2 echo "docker stop ${container_name} && docker rm ${container_name}" 30 | 31 | exit 0 32 | -------------------------------------------------------------------------------- /shiny/test_app/app.R: -------------------------------------------------------------------------------- 1 | # 2 | # This is a Shiny web application. You can run the application by clicking 3 | # the 'Run App' button above. 4 | # 5 | # Find out more about building applications with Shiny here: 6 | # 7 | # http://shiny.rstudio.com/ 8 | # 9 | 10 | library(shiny) 11 | 12 | # Define UI for application that draws a histogram 13 | ui <- fluidPage( 14 | 15 | # Application title 16 | titlePanel("Old Faithful Geyser Data"), 17 | 18 | # Sidebar with a slider input for number of bins 19 | sidebarLayout( 20 | sidebarPanel( 21 | sliderInput("bins", 22 | "Number of bins:", 23 | min = 1, 24 | max = 50, 25 | value = 30) 26 | ), 27 | 28 | # Show a plot of the generated distribution 29 | mainPanel( 30 | plotOutput("distPlot") 31 | ) 32 | ) 33 | ) 34 | 35 | # Define server logic required to draw a histogram 36 | server <- function(input, output) { 37 | 38 | output$distPlot <- renderPlot({ 39 | # generate bins based on input$bins from ui.R 40 | x <- faithful[, 2] 41 | bins <- seq(min(x), max(x), length.out = input$bins + 1) 42 | 43 | # draw the histogram with the specified number of bins 44 | hist(x, breaks = bins, col = 'darkgray', border = 'white') 45 | }) 46 | } 47 | 48 | # Run the application 49 | shinyApp(ui = ui, server = server) 50 | -------------------------------------------------------------------------------- 
/tensorflow/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rocker/verse:4.4.0 2 | 3 | MAINTAINER Dave Tang 4 | 5 | LABEL source="https://github.com/davetang/learning_docker/tensorflow" 6 | 7 | RUN apt-get clean all && \ 8 | apt-get update && \ 9 | apt-get upgrade -y && \ 10 | apt-get install -y \ 11 | autoconf \ 12 | build-essential \ 13 | default-jre \ 14 | gettext \ 15 | git-core \ 16 | libhdf5-dev \ 17 | libcurl4-gnutls-dev \ 18 | libssl-dev \ 19 | libxml2-dev \ 20 | libpng-dev \ 21 | libbz2-dev \ 22 | liblzma-dev \ 23 | libncurses-dev \ 24 | ncurses-term \ 25 | time \ 26 | unzip \ 27 | vim \ 28 | wget \ 29 | curl \ 30 | zlib1g-dev \ 31 | python3-pip \ 32 | python-is-python3 \ 33 | pandoc \ 34 | graphviz \ 35 | && apt-get clean all && \ 36 | apt-get purge && \ 37 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 38 | 39 | RUN cmake_ver=3.23.1 && \ 40 | cd /tmp/ && \ 41 | wget https://github.com/Kitware/CMake/releases/download/v${cmake_ver}/cmake-${cmake_ver}.tar.gz && \ 42 | tar -zxf cmake-${cmake_ver}.tar.gz && \ 43 | cd cmake-${cmake_ver} && \ 44 | ./bootstrap && \ 45 | make && \ 46 | make install && \ 47 | rm -rf /tmp/* 48 | 49 | RUN Rscript -e "install.packages(c('rmarkdown', 'tidymodels'));" 50 | RUN pip install --upgrade pip && pip install tensorflow 51 | RUN pip install pydot 52 | 53 | WORKDIR /work 54 | COPY --chown=rstudio:rstudio rstudio-prefs.json /home/rstudio/.config/rstudio 55 | -------------------------------------------------------------------------------- /tensorflow/README.md: -------------------------------------------------------------------------------- 1 | # TensorFlow 2 | 3 | [Install](https://www.tensorflow.org/install) using `pip`. 
4 | 5 | ```console 6 | pip install --upgrade pip 7 | pip install tensorflow 8 | ``` 9 | 10 | TensorFlow is tested and supported on the following 64-bit systems: 11 | 12 | * Python 3.8–3.11 13 | * Ubuntu 16.04 or later 14 | -------------------------------------------------------------------------------- /tensorflow/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | VER=$(cat Dockerfile | grep "^FROM" | cut -f2 -d':') 6 | IMG=r_tensorflow 7 | 8 | docker build -t davetang/${IMG}:${VER} . 9 | 10 | >&2 echo Build complete 11 | >&2 echo -e "Run the following to push to Docker Hub:\n" 12 | >&2 echo docker login 13 | >&2 echo docker push davetang/${IMG}:${VER} 14 | -------------------------------------------------------------------------------- /tensorflow/rstudio-prefs.json: -------------------------------------------------------------------------------- 1 | { 2 | "save_workspace": "never", 3 | "always_save_history": false, 4 | "reuse_sessions_for_project_links": true, 5 | "posix_terminal_shell": "bash", 6 | "initial_working_directory": "~", 7 | "editor_keybindings": "vim", 8 | "panes": { 9 | "quadrants": [ 10 | "Source", 11 | "TabSet1", 12 | "Console", 13 | "TabSet2" 14 | ], 15 | "tabSet1": [ 16 | "Environment", 17 | "History", 18 | "Connections", 19 | "Build", 20 | "VCS", 21 | "Tutorial", 22 | "Presentation" 23 | ], 24 | "tabSet2": [ 25 | "Files", 26 | "Plots", 27 | "Packages", 28 | "Help", 29 | "Viewer", 30 | "Presentations" 31 | ], 32 | "hiddenTabSet": [], 33 | "console_left_on_top": false, 34 | "console_right_on_top": true, 35 | "additional_source_columns": 0 36 | }, 37 | "editor_theme": "Clouds Midnight" 38 | } 39 | -------------------------------------------------------------------------------- /testing/entrypoint/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.18.3 2 | COPY ./entrypoint.sh / 3 | ENTRYPOINT 
["/entrypoint.sh"] 4 | -------------------------------------------------------------------------------- /testing/entrypoint/README.md: -------------------------------------------------------------------------------- 1 | # README 2 | 3 | The ENTRYPOINT instruction can be used in combination with a helper script. 4 | When a user is specified with `docker run`, the entry point command will be run 5 | as the user. 6 | -------------------------------------------------------------------------------- /testing/entrypoint/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | id 4 | 5 | if [ -z "${MY_EXPORT}" ]; then 6 | echo \$MY_EXPORT is not defined 7 | else 8 | echo MY_EXPORT is ${MY_EXPORT} 9 | fi 10 | -------------------------------------------------------------------------------- /testing/entrypoint/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | image=entrypoint 6 | ver=3.18.3 7 | 8 | docker build -q -t davetang/${image}:${ver} . 
9 | 10 | docker run --rm -u 1011:1023 -e MY_EXPORT=1000 davetang/${image}:${ver} 11 | 12 | docker rmi davetang/${image}:${ver} > /dev/null 2> /dev/null 13 | 14 | exit 0 15 | -------------------------------------------------------------------------------- /testing/user/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.10 2 | 3 | ENV DEBIAN_FRONTEND=noninteractive 4 | RUN apt-get clean all && \ 5 | apt-get update && \ 6 | apt-get upgrade -y && \ 7 | apt-get install -y \ 8 | wget \ 9 | && apt-get clean all && \ 10 | apt-get purge && \ 11 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 12 | 13 | ARG user=voltaire 14 | RUN useradd --create-home --shell /bin/bash ${user} && \ 15 | echo "${user}:freedom" | chpasswd 16 | 17 | RUN wget --quiet -O /usr/local/bin/gosu https://github.com/tianon/gosu/releases/download/1.16/gosu-amd64 && \ 18 | chmod 755 /usr/local/bin/gosu 19 | 20 | USER ${user} 21 | 22 | # COPY does not run as ${user}; need to use --chown 23 | COPY ./test.txt / 24 | RUN id && ls -al /test.txt 25 | 26 | USER root 27 | ENTRYPOINT ["gosu"] 28 | -------------------------------------------------------------------------------- /testing/user/Dockerfile_user: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.10 2 | 3 | ARG user=voltaire 4 | RUN useradd \ 5 | --create-home \ 6 | --home-dir /home/${user} \ 7 | --base-dir /home/${user} \ 8 | --shell /bin/bash ${user} && \ 9 | echo "${user}:freedom" | chpasswd && \ 10 | usermod -d /home/${user} ${user} 11 | 12 | COPY --chown=${user}:${user} ./hello /home/${user}/bin/ 13 | 14 | # this will not set the path for a user specified by -u 15 | # ENV PATH=$PATH:/home/${user}/bin 16 | 17 | # hack but need to source /etc/environment 18 | RUN echo "PATH=\$PATH:/home/${user}/bin" >> /etc/environment 19 | 20 | USER ${user} 21 | -------------------------------------------------------------------------------- 
/testing/user/README.md: -------------------------------------------------------------------------------- 1 | # README 2 | 3 | The [USER](https://docs.docker.com/engine/reference/builder/#user) instruction 4 | sets the user name or (UID) and optionally the user group (or GID) to use as 5 | the default user and group for the remainder of the current stage. The 6 | specified user is used for `RUN` instructions and at runtime, runs the relevant 7 | `ENTRYPOINT` and `CMD` commands. 8 | -------------------------------------------------------------------------------- /testing/user/hello: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | echo Hi there 3 | -------------------------------------------------------------------------------- /testing/user/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | image=usertest 6 | ver=22.10 7 | 8 | docker build -t davetang/${image}:${ver} . 9 | 10 | # docker run --rm davetang/${image}:${ver} 11 | # docker rmi davetang/${image}:${ver} > /dev/null 2> /dev/null 12 | 13 | exit 0 14 | -------------------------------------------------------------------------------- /testing/user/test.txt: -------------------------------------------------------------------------------- 1 | 123 2 | -------------------------------------------------------------------------------- /testing/user/user.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | image=user 6 | ver=22.10 7 | 8 | docker build -f Dockerfile_user -t davetang/${image}:${ver} . 
9 | docker run --rm -u 1004:1006 davetang/${image}:${ver} env | grep PATH 10 | docker rmi davetang/${image}:${ver} > /dev/null 2> /dev/null 11 | 12 | exit 0 13 | -------------------------------------------------------------------------------- /ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:23.04 2 | 3 | MAINTAINER Dave Tang 4 | LABEL source="https://github.com/davetang/learning_docker/blob/main/ubuntu/Dockerfile" 5 | 6 | ENV DEBIAN_FRONTEND=noninteractive 7 | 8 | RUN apt-get clean all && \ 9 | apt-get update && \ 10 | apt-get upgrade -y && \ 11 | apt-get install -y \ 12 | autoconf \ 13 | build-essential \ 14 | default-jre \ 15 | gettext \ 16 | git-core \ 17 | libhdf5-dev \ 18 | libcurl4-gnutls-dev \ 19 | libssl-dev \ 20 | libxml2-dev \ 21 | libpng-dev \ 22 | libbz2-dev \ 23 | liblzma-dev \ 24 | libncurses-dev \ 25 | ncurses-term \ 26 | time \ 27 | unzip \ 28 | vim \ 29 | pigz \ 30 | wget \ 31 | curl \ 32 | nodejs \ 33 | zlib1g-dev \ 34 | libboost-all-dev \ 35 | pkg-config \ 36 | libgit2-dev \ 37 | && apt-get clean all && \ 38 | apt-get purge && \ 39 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 40 | 41 | # CMake 42 | RUN cmake_ver=3.29.2 && cd /tmp/ && \ 43 | wget https://github.com/Kitware/CMake/releases/download/v${cmake_ver}/cmake-${cmake_ver}.tar.gz && \ 44 | tar -zxf cmake-${cmake_ver}.tar.gz && \ 45 | cd cmake-${cmake_ver} && \ 46 | ./bootstrap && \ 47 | make && \ 48 | make install && \ 49 | rm -rf /tmp/* 50 | 51 | # Miniconda3 52 | RUN cd /tmp/ && \ 53 | wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \ 54 | bash Miniconda3-latest-Linux-x86_64.sh -b -p ${HOME}/miniconda3 && \ 55 | ${HOME}/miniconda3/condabin/conda install -y -c conda-forge mamba 56 | 57 | # not sure why I cannot use ${HOME} with ENV 58 | ENV PATH=/root/miniconda3/bin:${PATH} 59 | RUN rm /tmp/Miniconda3-latest-Linux-x86_64.sh 60 | 61 | WORKDIR /work 62 | 
-------------------------------------------------------------------------------- /ubuntu/README.md: -------------------------------------------------------------------------------- 1 | ## Ubuntu 2 | 3 | ![Build Dockerfile](https://github.com/davetang/learning_docker/actions/workflows/build_ubuntu.yml/badge.svg) 4 | 5 | An image with many requisite tools/libraries included for building tools. 6 | -------------------------------------------------------------------------------- /ubuntu/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | image=build 6 | ver=1.2.5 7 | 8 | docker build -t davetang/${image}:${ver} . 9 | 10 | >&2 echo Build complete 11 | >&2 echo -e "Run the following to push to Docker Hub:\n" 12 | >&2 echo docker login 13 | >&2 echo docker push davetang/${image}:${ver} 14 | 15 | exit 0 16 | 17 | -------------------------------------------------------------------------------- /vim/.vimrc: -------------------------------------------------------------------------------- 1 | call plug#begin(has('nvim') ? stdpath('data') . '/plugged' : '~/.vim/plugged') 2 | 3 | Plug 'neoclide/coc.nvim', {'branch': 'release'} 4 | Plug 'bronson/vim-trailing-whitespace' 5 | Plug 'altercation/vim-colors-solarized' 6 | 7 | call plug#end() 8 | 9 | inoremap coc#pum#visible() ? coc#pum#confirm() : "\" 10 | inoremap coc#pum#visible() ? coc#pum#next(1) : "\" 11 | inoremap coc#pum#visible() ? 
coc#pum#prev(1) : "\" 12 | 13 | set background=dark 14 | try 15 | colorscheme solarized 16 | catch 17 | endtry 18 | -------------------------------------------------------------------------------- /vim/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM thinca/vim:v9.0.1946-full-ubuntu 2 | 3 | MAINTAINER Dave Tang 4 | LABEL source="https://github.com/davetang/learning_docker/blob/main/vim/Dockerfile" 5 | 6 | ENV DEBIAN_FRONTEND=noninteractive 7 | 8 | RUN apt-get clean all && \ 9 | apt-get update && \ 10 | apt-get upgrade -y && \ 11 | apt-get install -y \ 12 | curl \ 13 | git-core \ 14 | && apt-get clean all && \ 15 | apt-get purge && \ 16 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 17 | 18 | COPY .vimrc /root 19 | 20 | # install Rust 21 | RUN curl --proto '=https' --tlsv1.2 https://sh.rustup.rs -sSf | sh -s -- -y 22 | 23 | # https://github.com/junegunn/vim-plug 24 | RUN curl -fLo ~/.vim/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim 25 | # https://github.com/junegunn/vim-plug/issues/675 26 | RUN vim +'PlugInstall --sync' +qall 27 | 28 | # install Node.js 29 | RUN curl -sL install-node.vercel.app/lts | bash -s -- -y 30 | 31 | # https://github.com/fannheyward/coc-rust-analyzer 32 | RUN vim +'CocInstall -sync coc-rust-analyzer' +qall 33 | 34 | ARG rust_ver=2023-09-25 35 | # no interpolation, type version manually 36 | RUN printf '{\n "coc-rust-analyzer|global": {\n "release": "2023-09-25"\n }\n}' > /root/.config/coc/memos.json 37 | 38 | # install rust-analyzer 39 | RUN cd /tmp && \ 40 | curl -L -o rust-analyzer.gz https://github.com/rust-lang/rust-analyzer/releases/download/${rust_ver}/rust-analyzer-x86_64-unknown-linux-gnu.gz && \ 41 | gunzip rust-analyzer.gz && \ 42 | chmod 755 rust-analyzer && \ 43 | mkdir -p /root/.config/coc/extensions/coc-rust-analyzer-data/ && \ 44 | mv rust-analyzer /root/.config/coc/extensions/coc-rust-analyzer-data/ 45 | 46 | WORKDIR 
/work 47 | ENTRYPOINT ["/bin/bash"] 48 | -------------------------------------------------------------------------------- /vim/README.md: -------------------------------------------------------------------------------- 1 | ## Vim 2 | 3 | Vim with [rust-analyzer](https://rust-analyzer.github.io/). 4 | 5 | ```console 6 | docker run --rm -it -v $(pwd):/work davetang/vim:v9.0.1946-full-ubuntu 7 | ``` 8 | -------------------------------------------------------------------------------- /vim/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | image=vim 6 | ver=$(cat Dockerfile | grep "^FROM" | cut -f2 -d':') 7 | 8 | docker build -t davetang/${image}:${ver} . 9 | 10 | >&2 echo Build complete 11 | >&2 echo -e "Run the following to push to Docker Hub:\n" 12 | >&2 echo docker login 13 | >&2 echo docker push davetang/${image}:${ver} 14 | 15 | exit 0 16 | -------------------------------------------------------------------------------- /vscode/.version: -------------------------------------------------------------------------------- 1 | version=4.16.1 2 | -------------------------------------------------------------------------------- /vscode/README.md: -------------------------------------------------------------------------------- 1 | ## README 2 | 3 | Run [VS Code](https://github.com/Microsoft/vscode) on any machine anywhere and 4 | access it through the browser using 5 | [Docker](https://hub.docker.com/r/codercom/code-server) and the [code-server 6 | image](https://hub.docker.com/r/codercom/code-server). 7 | 8 | ```bash 9 | #!/usr/bin/env bash 10 | 11 | set -euo pipefail 12 | 13 | source .version 14 | image=codercom/code-server:${version} 15 | container_name=vscode_server 16 | port=8883 17 | config_dir=${HOME}/.config 18 | 19 | if [[ ! 
-d ${config_dir} ]]; then 20 | mkdir ${config_dir} 21 | fi 22 | 23 | docker run \ 24 | --name $container_name \ 25 | --rm \ 26 | -d \ 27 | -p $port:8080 \ 28 | -v "${config_dir}:/home/coder/.config" \ 29 | -v ~/github/:/home/coder/project \ 30 | -u "$(id -u):$(id -g)" \ 31 | -e "DOCKER_USER=$USER" \ 32 | $image 33 | 34 | >&2 echo $container_name listening on port $port 35 | exit 0 36 | ``` 37 | 38 | Visit `localhost:8883` and check the config file at 39 | `${HOME}/.config/code-server/config.yaml` for the password. 40 | 41 | ```bash 42 | cat ${HOME}/.config/code-server/config.yaml | grep ^password | awk '{print $2}' 43 | ``` 44 | -------------------------------------------------------------------------------- /vscode/get_password.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | config=${HOME}/.config/code-server/config.yaml 4 | 5 | if [[ -f ${config} ]]; then 6 | cat ${HOME}/.config/code-server/config.yaml | grep ^password | awk '{print $2}' 7 | exit 0 8 | else 9 | >&2 echo ${config} not found 10 | exit 1 11 | fi 12 | -------------------------------------------------------------------------------- /vscode/run_vscode.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | source .version 6 | image=codercom/code-server:${version} 7 | container_name=vscode_server 8 | port=8883 9 | config_dir=${HOME}/.config 10 | 11 | if [[ ! -d ${config_dir} ]]; then 12 | mkdir ${config_dir} 13 | fi 14 | 15 | docker run \ 16 | --name $container_name \ 17 | --rm \ 18 | -d \ 19 | -p $port:8080 \ 20 | -v "${config_dir}:/home/coder/.config" \ 21 | -v ~/github/:/home/coder/project \ 22 | -u "$(id -u):$(id -g)" \ 23 | -e "DOCKER_USER=$USER" \ 24 | $image 25 | 26 | >&2 echo $container_name listening on port $port 27 | exit 0 28 | --------------------------------------------------------------------------------