├── .github
│   ├── ISSUE_TEMPLATE
│   │   └── bug_report.md
│   ├── scripts
│   │   ├── devel_or_patched_rversion.sh
│   │   └── rocker_prep.sh
│   └── workflows
│       ├── build_containers.yaml
│       ├── build_extensions.yaml
│       ├── build_singularity.yaml
│       ├── full-rstudio-build.yml
│       ├── image-scan.yml
│       ├── rocker-builds.yaml
│       ├── update_latest.yaml
│       └── weekly-release-bump.yaml
├── Dockerfile
├── LICENSE
├── README.md
├── Singularity
├── best_practices.md
├── bioc_scripts
│   └── install_bioc_sysdeps.sh
├── docker-compose.yml
├── docs
│   └── release_process.md
└── extensions
    ├── anvil
    │   ├── Dockerfile
    │   ├── bioc-extension.yaml
    │   ├── rserver.conf
    │   ├── rstudio-prefs.json
    │   └── set_up_package_dir.sh
    └── galaxy
        ├── Dockerfile
        ├── bioc-extension.yaml
        ├── proxy.conf
        ├── service-nginx-start
        └── shutdown.R
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Desktop (please complete the following information):**
27 | - OS: [e.g. iOS]
28 | - Browser [e.g. chrome, safari]
29 | - Version [e.g. 22]
30 |
31 |
32 | **Additional context**
33 | Add any other context about the problem here.
34 |
--------------------------------------------------------------------------------
/.github/scripts/devel_or_patched_rversion.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | BIOCVER=$1
3 | FILETOPATCH=$2
4 |
5 | DEVEL_R_VER=$(curl https://bioconductor.org/config.yaml | grep '_devel:' | awk '{print $2}' | sed 's/"//g')
6 | REL_VER=$(curl https://cran.r-project.org/src/base/VERSION-INFO.dcf | grep "$DEVEL_R_VER" | awk -F':' '{print $1}')
7 | # if the matching version is not under devel, use patched pre-release rather than devel pre-release
8 | if [ "$REL_VER" != "Devel" ]; then
9 | sed -i 's#\(R_VERSION=\)\(["]\?\)devel\2#\1\2patched\2#g' "$FILETOPATCH"
10 | fi
11 |
12 | cat "$FILETOPATCH"
13 |
--------------------------------------------------------------------------------
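
Example invocation (a sketch mirroring how rocker_prep.sh below calls this script; it assumes the rocker Dockerfile pins the pre-release R build with a line such as ENV R_VERSION="devel", which the sed above rewrites to "patched" whenever CRAN's VERSION-INFO.dcf no longer lists that version under Devel):

    bash .github/scripts/devel_or_patched_rversion.sh 3.21 \
        rocker-versioned2/dockerfiles/r-ver_devel.Dockerfile
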
/.github/scripts/rocker_prep.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | RVER=$1
3 | BIOCVER=$2
4 | ROCKERPREF=$3
5 | ARCH=$4
6 |
7 | git clone --depth 1 https://github.com/rocker-org/rocker-versioned2
8 | sed -i "s#rocker/r-ver:$RVER#$ROCKERPREF-r-ver:$RVER-$ARCH#g" rocker-versioned2/dockerfiles/rstudio_$RVER.Dockerfile
9 | sed -i "s#rocker/rstudio:$RVER#$ROCKERPREF-rstudio:$RVER-$ARCH#g" rocker-versioned2/dockerfiles/tidyverse_$RVER.Dockerfile
10 | sed -i "s#RUN /rocker_scripts/install_quarto.sh#RUN /rocker_scripts/install_quarto.sh || true#g" rocker-versioned2/dockerfiles/rstudio_$RVER.Dockerfile
11 | # Get latest version of rstudio to use
12 | source /etc/os-release
13 | LATEST_RSTUDIO_VERSION=$(curl https://dailies.rstudio.com/rstudio/latest/index.json | grep -A300 '"server"' | grep -A15 '"noble-amd64"' | grep '"version"' | sed -n 's/.*: "\(.*\)".*/\1/p' | head -1)
14 | sed -i "/^ENV RSTUDIO_VERSION=/c\ENV RSTUDIO_VERSION=\"$LATEST_RSTUDIO_VERSION\"" rocker-versioned2/dockerfiles/rstudio_devel.Dockerfile
15 |
16 | echo "Bioconductor Version: $BIOCVER"
17 | if [ "$RVER" == "devel" ]; then
18 | bash .github/scripts/devel_or_patched_rversion.sh "$BIOCVER" "rocker-versioned2/dockerfiles/r-ver_$RVER.Dockerfile"
19 | bash .github/scripts/devel_or_patched_rversion.sh "$BIOCVER" "rocker-versioned2/dockerfiles/rstudio_$RVER.Dockerfile"
20 | bash .github/scripts/devel_or_patched_rversion.sh "$BIOCVER" "rocker-versioned2/dockerfiles/tidyverse_$RVER.Dockerfile"
21 | fi
22 |
--------------------------------------------------------------------------------
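
The workflows below invoke this script with the R version, Bioconductor version, intermediate image prefix, and target architecture, for example:

    # Mirrors the "Prep rocker files" steps in rocker-builds.yaml and
    # full-rstudio-build.yml (prefix shown for the bioconductor org).
    bash .github/scripts/rocker_prep.sh devel 3.21 ghcr.io/bioconductor/rocker amd64
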
/.github/workflows/build_containers.yaml:
--------------------------------------------------------------------------------
1 | name: Build container images for GHCR and Dockerhub
2 | on:
3 | push:
4 | branches:
5 | - devel
6 | - RELEASE_*
7 | paths-ignore:
8 | - 'extensions/**'
9 | workflow_dispatch:
10 | schedule:
11 | - cron: '0 18 * * 5'
12 |
13 | jobs:
14 | build-amd64:
15 | runs-on: ubuntu-latest
16 | strategy:
17 | fail-fast: false
18 | matrix:
19 | base:
20 | - {image: 'rocker/r-ver', amdtag: '4.5.0', outname: 'r-ver'}
21 | - {image: 'rocker/rstudio', amdtag: '4.5.0', outname: 'bioconductor_docker'}
22 | - {image: 'rocker/tidyverse', amdtag: '4.5.0', outname: 'tidyverse'}
23 | # - {image: 'ghcr.io/bioconductor/rocker-cuda', amdtag: 'devel-amd64', outname: 'cuda'}
24 | # - {image: 'ghcr.io/bioconductor/rocker-ml', amdtag: 'devel-amd64', outname: 'ml'}
25 | # - {image: 'ghcr.io/bioconductor/rocker-ml-verse', amdtag: 'devel-amd64', outname: 'ml-verse'}
26 | - {image: 'rocker/shiny', amdtag: '4.5.0', outname: 'shiny'}
27 | steps:
28 | - uses: actions/checkout@v4
29 |
30 | - name: Free root space
31 | uses: almahmoud/free-root-space@main
32 | with:
33 | verbose: true
34 |
35 | - name: Set up QEMU
36 | uses: docker/setup-qemu-action@v3
37 |
38 | - name: Set up Docker Buildx
39 | uses: docker/setup-buildx-action@v3
40 |
41 | - name: Login to GHCR
42 | uses: docker/login-action@v3
43 | with:
44 | registry: ghcr.io
45 | username: ${{ github.actor }}
46 | password: ${{ secrets.GITHUB_TOKEN }}
47 |
48 | - name: Login to Dockerhub
49 | uses: docker/login-action@v3
50 | with:
51 | username: ${{ secrets.DOCKER_USERNAME }}
52 | password: ${{ secrets.DOCKER_PASSWORD }}
53 |
54 | - name: Extract metadata for container image
55 | id: meta
56 | uses: docker/metadata-action@v5
57 | with:
58 | images: ghcr.io/${{ github.repository_owner }}/${{ matrix.base.outname }}
59 | tags: |
60 | type=raw,value={{branch}}
61 |
62 | - name: Extract container name without tag
63 | id: vars
64 | run: |
65 | echo container=$(echo '${{ steps.meta.outputs.tags }}' | awk -F':' '{print $1}') >> $GITHUB_OUTPUT
66 |
67 | - name: Build and push AMD64 by digest
68 | id: build
69 | uses: docker/build-push-action@v6
70 | with:
71 | build-args: |
72 | BASE_IMAGE=${{ matrix.base.image }}
73 | amd64_tag=${{ matrix.base.amdtag }}
74 | file: Dockerfile
75 | platforms: linux/amd64
76 | labels: ${{ steps.meta.outputs.labels }}
77 | outputs: type=image,name=${{ steps.vars.outputs.container }},push-by-digest=true,name-canonical=true,push=true
78 |
79 | - name: Export digest
80 | run: |
81 | mkdir -p /tmp/digests
82 | digest="${{ steps.build.outputs.digest }}"
83 | touch "/tmp/digests/${digest#sha256:}"
84 |
85 | - name: Upload digest
86 | uses: actions/upload-artifact@v4
87 | with:
88 | name: digests-${{ matrix.base.outname }}-amd64
89 | path: /tmp/digests/*
90 | if-no-files-found: error
91 | retention-days: 1
92 |
93 | build-arm64:
94 | runs-on: ubuntu-latest-arm64
95 | strategy:
96 | fail-fast: false
97 | matrix:
98 | base:
99 | - {image: 'rocker/r-ver', armtag: '4.5.0', outname: 'r-ver'}
100 | - {image: 'rocker/rstudio', armtag: '4.5.0', outname: 'bioconductor_docker'}
101 | steps:
102 | - uses: actions/checkout@v3
103 |
104 | - name: Free root space
105 | uses: almahmoud/free-root-space@main
106 | with:
107 | verbose: true
108 |
109 | - name: Set up QEMU
110 | uses: docker/setup-qemu-action@v3
111 |
112 | - name: Set up Docker Buildx
113 | uses: docker/setup-buildx-action@v3
114 |
115 | - name: Login to GHCR
116 | uses: docker/login-action@v3
117 | with:
118 | registry: ghcr.io
119 | username: ${{ github.actor }}
120 | password: ${{ secrets.GITHUB_TOKEN }}
121 |
122 | - name: Login to Dockerhub
123 | uses: docker/login-action@v3
124 | with:
125 | username: ${{ secrets.DOCKER_USERNAME }}
126 | password: ${{ secrets.DOCKER_PASSWORD }}
127 |
128 | - name: Extract metadata
129 | id: meta
130 | uses: docker/metadata-action@v5
131 | with:
132 | images: ghcr.io/${{ github.repository_owner }}/${{ matrix.base.outname }}
133 | tags: |
134 | type=raw,value={{branch}}
135 |
136 | - name: Extract container name without tag
137 | id: vars
138 | run: |
139 | echo container=$(echo '${{ steps.meta.outputs.tags }}' | awk -F':' '{print $1}') >> $GITHUB_OUTPUT
140 |
141 | - name: Build and push ARM64 by digest
142 | id: build
143 | uses: docker/build-push-action@v6
144 | with:
145 | build-args: |
146 | BASE_IMAGE=${{ matrix.base.image }}
147 | arm64_tag=${{ matrix.base.armtag }}
148 | file: Dockerfile
149 | platforms: linux/arm64
150 | labels: ${{ steps.meta.outputs.labels }}
151 | outputs: type=image,name=${{ steps.vars.outputs.container }},push-by-digest=true,name-canonical=true,push=true
152 |
153 | - name: Export digest
154 | run: |
155 | mkdir -p /tmp/digests
156 | digest="${{ steps.build.outputs.digest }}"
157 | touch "/tmp/digests/${digest#sha256:}"
158 |
159 | - name: Upload digest
160 | uses: actions/upload-artifact@v4
161 | with:
162 | name: digests-${{ matrix.base.outname }}-arm64
163 | path: /tmp/digests/*
164 | if-no-files-found: error
165 | retention-days: 1
166 |
167 | merge:
168 | needs: [build-amd64, build-arm64]
169 | runs-on: ubuntu-latest
170 | if: always()
171 | strategy:
172 | fail-fast: false
173 | matrix:
174 | base:
175 | - {image: 'rocker/r-ver', amdtag: '4.5.0', outname: 'r-ver', platforms: 'amd64,arm64'}
176 | - {image: 'rocker/rstudio', amdtag: '4.5.0', outname: 'bioconductor_docker', platforms: 'amd64,arm64'}
177 | - {image: 'rocker/tidyverse', amdtag: '4.5.0', outname: 'tidyverse', platforms: 'amd64'}
178 | #- {image: 'ghcr.io/bioconductor/rocker-cuda', amdtag: 'devel-amd64', outname: 'cuda', platforms: 'amd64'}
179 | #- {image: 'ghcr.io/bioconductor/rocker-ml', amdtag: 'devel-amd64', outname: 'ml', platforms: 'amd64'}
180 | #- {image: 'ghcr.io/bioconductor/rocker-ml-verse', amdtag: 'devel-amd64', outname: 'ml-verse', platforms: 'amd64'}
181 | - {image: 'rocker/shiny', amdtag: '4.5.0', outname: 'shiny', platforms: 'amd64'}
182 | steps:
183 | - name: Set up Docker Buildx
184 | uses: docker/setup-buildx-action@v3
185 |
186 | - name: Login to GHCR
187 | uses: docker/login-action@v3
188 | with:
189 | registry: ghcr.io
190 | username: ${{ github.actor }}
191 | password: ${{ secrets.GITHUB_TOKEN }}
192 |
193 | - name: Login to Dockerhub
194 | uses: docker/login-action@v3
195 | with:
196 | username: ${{ secrets.DOCKER_USERNAME }}
197 | password: ${{ secrets.DOCKER_PASSWORD }}
198 |
199 | - name: Echo platforms to build
200 | id: buildlist
201 | shell: bash
202 | run: |
203 | PLATFORMLIST="${{matrix.base.platforms}}"
204 | IFS=','; for item in $PLATFORMLIST; do echo "$item=build" >> $GITHUB_OUTPUT; done
205 |
206 | - name: Download AMD64 digests
207 | if: steps.buildlist.outputs.amd64 == 'build'
208 | uses: actions/download-artifact@v4
209 | with:
210 | name: digests-${{ matrix.base.outname }}-amd64
211 | path: /tmp/digests/amd64
212 |
213 | - name: Download ARM64 digests
214 | if: steps.buildlist.outputs.arm64 == 'build'
215 | uses: actions/download-artifact@v4
216 | with:
217 | name: digests-${{ matrix.base.outname }}-arm64
218 | path: /tmp/digests/arm64
219 |
220 | - name: Set image tags
221 | id: meta1
222 | uses: docker/metadata-action@v5
223 | with:
224 | images: |
225 | ghcr.io/${{ github.repository_owner }}/${{ matrix.base.outname }}
226 | tags: |
227 | type=raw,value={{branch}}
228 |
229 | - name: Set image tags
230 | id: meta2
231 | uses: docker/metadata-action@v5
232 | with:
233 | images: |
234 | docker.io/${{ github.repository_owner }}/${{ matrix.base.outname }}
235 | ghcr.io/${{ github.repository_owner }}/${{ matrix.base.outname }}
236 | tags: |
237 | type=raw,value={{branch}}
238 |
239 | - name: Create manifest list and push with retries
240 | uses: nick-fields/retry@v3
241 | with:
242 | timeout_minutes: 30
243 | max_attempts: 10
244 | shell: bash
245 | command: |
246 | set -x
247 | # Prepare tags
248 | echo '${{ steps.meta2.outputs.tags }}' > /tmp/tags
249 | rm /tmp/tagargs || true
250 | cat /tmp/tags | xargs -i bash -c 'printf "%s" "-t {} " >> /tmp/tagargs'
251 | TAG_ARGS="$(cat /tmp/tagargs)"
252 | TAGS="$(cat /tmp/tags)"
253 |
254 | R_VER=$(docker pull ${{ matrix.base.image }}:${{ matrix.base.amdtag }} 2>&1 > /dev/null && \
255 | docker inspect ${{ matrix.base.image }}:${{ matrix.base.amdtag }} | \
256 | jq -r '.[].Config.Env[]|select(match("^R_VERSION"))|.[index("=")+1:]')
257 | if [ ! -z "$R_VER" ]; then
258 | for tag in $TAGS; do
259 | TAG_ARGS="$TAG_ARGS -t ${tag}-R-${R_VER} -t ${tag}-r-${R_VER}"
260 | done
261 | fi
262 |
263 | # Add alternative tags without _docker in name
264 | if [[ "${{ matrix.base.outname }}" == *"_docker"* ]]; then
265 | ALT_TAG=$(echo $TAG_ARGS | sed 's/_docker//g')
266 | TAG_ARGS="$TAG_ARGS $ALT_TAG"
267 | fi
268 |
269 | # Add alternative tags without RELEASE_ in the tag
270 | if grep -q "RELEASE_" <<< "$TAG_ARGS"; then
271 | ALT_TAG=$(echo $TAG_ARGS | sed -E 's/RELEASE_([0-9]+)_([0-9]+)/\1.\2/g')
272 | TAG_ARGS="$TAG_ARGS $ALT_TAG"
273 | fi
274 |
275 | CONTAINERNAME=$(echo '${{ steps.meta1.outputs.tags }}' | awk -F':' '{print $1}')
276 |
277 | # Create manifest list
278 | DIGESTS=""
279 | for eachdir in $(ls /tmp/digests); do
280 | DIGESTS_ARCH=$(cd /tmp/digests/$eachdir && find . -type f -exec echo "$CONTAINERNAME@sha256{}" \; | sed 's/\.\//:/')
281 | DIGESTS="$DIGESTS $DIGESTS_ARCH"
282 | done
283 | echo "$TAG_ARGS"
284 | echo "$DIGESTS"
285 | docker buildx imagetools create $TAG_ARGS $DIGESTS
286 |
287 | - name: Inspect images
288 | run: |
289 | cat /tmp/tags | xargs -i bash -c 'docker buildx imagetools inspect {}'
290 |
291 | trigger-extensions:
292 | needs: [merge]
293 | runs-on: ubuntu-latest
294 | if: success()
295 | steps:
296 | - name: Trigger extensions workflow
297 | uses: actions/github-script@v7
298 | with:
299 | github-token: ${{ secrets.GITHUB_TOKEN }}
300 | script: |
301 | const branch = context.ref.replace('refs/heads/', '');
302 | await github.rest.actions.createWorkflowDispatch({
303 | owner: context.repo.owner,
304 | repo: context.repo.repo,
305 | workflow_id: 'build_extensions.yaml',
306 | ref: branch
307 | });
308 | console.log(`Triggered build_extensions.yaml workflow on branch ${branch}`)
309 |
--------------------------------------------------------------------------------
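
The merge job assembles one multi-arch image from the per-architecture builds: each build job pushes by digest only and uploads that digest as an artifact, and the manifest list is then created from those digests under the branch tag. A simplified sketch of the final imagetools call (digests are placeholders):

    docker buildx imagetools create \
        -t ghcr.io/bioconductor/bioconductor_docker:devel \
        -t docker.io/bioconductor/bioconductor_docker:devel \
        ghcr.io/bioconductor/bioconductor_docker@sha256:<amd64-digest> \
        ghcr.io/bioconductor/bioconductor_docker@sha256:<arm64-digest>
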
/.github/workflows/build_extensions.yaml:
--------------------------------------------------------------------------------
1 | name: Build Bioconductor Extension Images
2 | on:
3 | push:
4 | branches:
5 | - devel
6 | - RELEASE_*
7 | paths:
8 | - 'extensions/**'
9 | workflow_dispatch:
10 | inputs:
11 | specific_extension:
12 | description: 'Build only a specific extension (folder name in extensions/)'
13 | required: false
14 | type: string
15 | schedule:
16 | - cron: '0 20 * * 5' # Run every Friday at 8PM UTC
17 |
18 | jobs:
19 | build-extensions:
20 | runs-on: ubuntu-latest
21 | steps:
22 | - uses: actions/checkout@v4
23 |
24 | - name: Free root space
25 | uses: almahmoud/free-root-space@main
26 | with:
27 | verbose: true
28 |
29 | - name: Set up QEMU
30 | uses: docker/setup-qemu-action@v3
31 |
32 | - name: Set up Docker Buildx
33 | uses: docker/setup-buildx-action@v3
34 |
35 | - name: Login to GHCR
36 | uses: docker/login-action@v3
37 | with:
38 | registry: ghcr.io
39 | username: ${{ github.actor }}
40 | password: ${{ secrets.GITHUB_TOKEN }}
41 |
42 | - name: Login to Dockerhub
43 | uses: docker/login-action@v3
44 | with:
45 | username: ${{ secrets.DOCKER_USERNAME }}
46 | password: ${{ secrets.DOCKER_PASSWORD }}
47 |
48 | - name: Install dependencies
49 | run: |
50 | # Install yq for YAML parsing
51 | wget -qO /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64
52 | chmod +x /usr/local/bin/yq
53 |
54 | - name: Process and build extensions
55 | run: |
56 | # Get branch name
57 | branch="${GITHUB_REF##*/}"
58 |
59 | # Convert repository owner to lowercase for Docker compatibility
60 | REPO_OWNER="${{ github.repository_owner }}"
61 | REPO_OWNER_LOWER=$(echo "$REPO_OWNER" | tr '[:upper:]' '[:lower:]')
62 |
63 | # Find extensions based on input or search all
64 | if [[ -n "${{ github.event.inputs.specific_extension }}" ]]; then
65 | if [[ ! -d "extensions/${{ github.event.inputs.specific_extension }}" ]]; then
66 | echo "::error::Extension directory not found: extensions/${{ github.event.inputs.specific_extension }}"
67 | exit 1
68 | fi
69 | extension_files=$(find extensions/${{ github.event.inputs.specific_extension }} -name 'bioc-extension.yaml')
70 | echo "Building specific extension: ${{ github.event.inputs.specific_extension }}"
71 | else
72 | extension_files=$(find extensions -name 'bioc-extension.yaml' | sort)
73 | echo "Building all available extensions"
74 | fi
75 |
76 | # Process each extension file directly
77 | for ext_file in $extension_files; do
78 | ext_dir=$(dirname "$ext_file")
79 | ext_name=$(basename "$ext_dir")
80 |
81 | echo "Processing extension: $ext_name from $ext_file"
82 |
83 | # Check if Dockerfile exists
84 | if [[ ! -f "$ext_dir/Dockerfile" ]]; then
85 | echo "::warning::No Dockerfile found for extension $ext_name, skipping"
86 | continue
87 | fi
88 |
89 | # Parse YAML directly using yq
90 | outname=$(yq '.container.outname' "$ext_file" | tr '[:upper:]' '[:lower:]')
91 | base_image=$(yq '.container.base.image' "$ext_file" | tr '[:upper:]' '[:lower:]')
92 |
93 | # Handle tags
94 | tags_count=$(yq '.container.base.tag | length' "$ext_file")
95 |
96 | # Build for each tag (or branch name if no tags specified)
97 | if [[ $tags_count -eq 0 ]]; then
98 | tags=("$branch")
99 | else
100 | # Get all tags as an array
101 | tags=()
102 | for (( i=0; i<$tags_count; i++ )); do
103 | tags+=("$(yq ".container.base.tag[$i]" "$ext_file")")
104 | done
105 | fi
106 |
107 | # Build for each tag
108 | for tag in "${tags[@]}"; do
109 | echo "Building $outname:$tag from $base_image:$tag using Dockerfile in $ext_dir"
110 |
111 | # Build and push with lowercase names
112 | docker buildx build --platform linux/amd64 \
113 | -t "ghcr.io/$REPO_OWNER_LOWER/$outname:$tag" \
114 | -t "docker.io/$REPO_OWNER_LOWER/$outname:$tag" \
115 | --build-arg BASE_IMAGE=$base_image \
116 | --build-arg TAG=$tag \
117 | --push \
118 | "$ext_dir"
119 |
120 | build_result=$?
121 | if [ $build_result -ne 0 ]; then
122 | echo "::error::Failed to build $outname:$tag"
123 | exit $build_result
124 | fi
125 |
126 | echo "Successfully built and pushed $outname:$tag"
127 | done
128 | done
--------------------------------------------------------------------------------
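
The build step only reads container.outname, container.base.image, and container.base.tag from each descriptor, so a minimal bioc-extension.yaml looks roughly like this (illustrative values; the real descriptors live under extensions/anvil and extensions/galaxy):

    container:
      outname: bioconductor-anvil
      base:
        image: bioconductor/bioconductor_docker
        tag:
          - devel
          - RELEASE_3_21

Each resulting image is pushed to ghcr.io/<owner>/<outname>:<tag> and docker.io/<owner>/<outname>:<tag>.
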
/.github/workflows/build_singularity.yaml:
--------------------------------------------------------------------------------
1 | name: Build Apptainer Images
2 |
3 | on:
4 | workflow_dispatch:
5 | workflow_run:
6 | workflows: ["Build container images for GHCR and Dockerhub"]
7 | types:
8 | - completed
9 |
10 | jobs:
11 | build-apptainer:
12 | runs-on: ubuntu-latest
13 | strategy:
14 | matrix:
15 | base:
16 | - {image: 'ghcr.io/bioconductor/r-ver', outname: 'r-ver'}
17 | - {image: 'ghcr.io/bioconductor/bioconductor', outname: 'bioconductor'}
18 | - {image: 'ghcr.io/bioconductor/tidyverse', outname: 'tidyverse'}
19 | - {image: 'ghcr.io/bioconductor/shiny', outname: 'shiny'}
20 | steps:
21 | - name: Checkout code
22 | uses: actions/checkout@v4
23 |
24 | - name: Install Apptainer dependencies
25 | run: |
26 | sudo apt-get update
27 | sudo apt-get install -y \
28 | autoconf \
29 | automake \
30 | cryptsetup \
31 | fuse2fs \
32 | git \
33 | fuse \
34 | libfuse-dev \
35 | libseccomp-dev \
36 | libtool \
37 | pkg-config \
38 | runc \
39 | squashfs-tools \
40 | squashfs-tools-ng \
41 | uidmap \
42 | wget \
43 | zlib1g-dev \
44 | libsubid-dev
45 |
46 | - name: Setup Go
47 | uses: actions/setup-go@v5
48 | with:
49 | go-version: 1.24.1
50 |
51 | - name: Install Apptainer
52 | run: |
53 | export VERSION=1.3.6 # Pinned Apptainer version
54 | wget https://github.com/apptainer/apptainer/releases/download/v${VERSION}/apptainer-${VERSION}.tar.gz
55 | tar -xzf apptainer-${VERSION}.tar.gz
56 | cd apptainer-${VERSION}
57 | ./mconfig
58 | make -C ./builddir
59 | sudo make -C ./builddir install
60 |
61 | - name: Verify Apptainer config
62 | run: apptainer buildcfg
63 |
64 | - name: Get branch name
65 | id: vars
66 | shell: bash
67 | run: |
68 | BRANCH_NAME=${GITHUB_REF#refs/heads/}
69 | PROCESSED_NAME="$BRANCH_NAME"
70 |
71 | if [[ $BRANCH_NAME == RELEASE_* ]]; then
72 | PROCESSED_NAME=${BRANCH_NAME#RELEASE_}
73 | PROCESSED_NAME=${PROCESSED_NAME//_/.}
74 | fi
75 |
76 | echo "origbranch=$BRANCH_NAME" >> $GITHUB_OUTPUT
77 | echo "procbranch=$PROCESSED_NAME" >> $GITHUB_OUTPUT
78 |
79 | - name: Build Apptainer Image
80 | run: |
81 | apptainer build \
82 | "${{matrix.base.outname}}-$(date +%s).sif" \
83 | docker://${{ matrix.base.image }}:${{steps.vars.outputs.origbranch}}
84 |
85 | - name: Push to GitHub Packages
86 | run: |
87 | echo ${{ secrets.GITHUB_TOKEN }} | apptainer remote login -u ${{ github.actor }} --password-stdin oras://ghcr.io
88 | apptainer push *.sif oras://ghcr.io/${{github.repository_owner}}/${{matrix.base.outname}}:${{steps.vars.outputs.origbranch}}
89 | apptainer push *.sif oras://ghcr.io/${{github.repository_owner}}/${{matrix.base.outname}}:${{steps.vars.outputs.procbranch}}
90 |
--------------------------------------------------------------------------------
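
SIF files pushed over ORAS can be fetched back with Apptainer directly, for example (tag shown for illustration):

    apptainer pull bioconductor.sif oras://ghcr.io/bioconductor/bioconductor:RELEASE_3_21
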
/.github/workflows/full-rstudio-build.yml:
--------------------------------------------------------------------------------
1 | # This is a basic workflow to help you get started with Actions
2 | name: Full rocker & bioc build
3 |
4 | # Controls when the action will run. This workflow is triggered manually
5 | # via workflow_dispatch, with optional rver, biocver, and outname inputs
6 | on:
7 | workflow_dispatch:
8 | inputs:
9 | rver:
10 | default: "devel"
11 | biocver:
12 | default: "3.21"
13 | outname:
14 | default: "bioconductor_docker"
15 | # schedule:
16 | # - cron: '0 18 * * 5'
17 |
18 | jobs:
19 | bump:
20 | runs-on: ubuntu-latest
21 | steps:
22 | - uses: actions/checkout@v3
23 | - name: Push patch bump
24 | uses: nick-fields/retry@v2
25 | with:
26 | timeout_minutes: 10
27 | max_attempts: 50
28 | shell: bash
29 | command: |
30 | set -x
31 | git config --global --add safe.directory "$GITHUB_WORKSPACE"
32 | git config user.name github-actions
33 | git config user.email github-actions@github.com
34 | git pull origin main || git reset --hard origin/main
35 | sed -r -i 's/(^ARG BIOCONDUCTOR_PATCH=)([0-9]+)$/echo "\1$((\2+1))"/ge' Dockerfile
36 | git add Dockerfile
37 | git commit -m "Bump BIOCONDUCTOR_PATCH"
38 | git push
39 |
40 | build:
41 | outputs:
42 | registryuser: ${{ steps.defs.outputs.registryuser }}
43 | outname: ${{ steps.defs.outputs.outname }}
44 | rver: ${{ steps.defs.outputs.rver }}
45 | strategy:
46 | matrix:
47 | arch: [amd64, arm64]
48 | runs-on: ubuntu-latest
49 | needs: bump
50 | steps:
51 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
52 | - uses: actions/checkout@v3
53 |
54 | - name: Free root space
55 | uses: almahmoud/free-root-space@main
56 | with:
57 | verbose: true
58 |
59 | - name: Set defaults for schedule
60 | id: defs
61 | run: |
62 | echo outname=$(echo ${{ github.event.inputs.outname || 'bioconductor_docker' }}) >> $GITHUB_OUTPUT
63 | echo rver=$(echo ${{ github.event.inputs.rver || 'devel' }}) >> $GITHUB_OUTPUT
64 | echo biocver=$(echo ${{ github.event.inputs.biocver || '3.21' }}) >> $GITHUB_OUTPUT
65 | echo registryuser=$(echo ${{ github.repository_owner }} | awk '{print tolower($0)}') >> $GITHUB_OUTPUT
66 | echo rockerintermediateprefix=$(echo "ghcr.io/${{ github.repository_owner }}/rocker" | awk '{print tolower($0)}') >> $GITHUB_OUTPUT
67 |
68 | - name: Set up QEMU
69 | uses: docker/setup-qemu-action@v2
70 | with:
71 | platforms: arm64
72 | if: matrix.arch == 'arm64'
73 |
74 | - name: Login to GHCR
75 | uses: docker/login-action@v2
76 | with:
77 | registry: ghcr.io
78 | username: ${{ github.actor }}
79 | password: ${{ secrets.GITHUB_TOKEN }}
80 |
81 | - name: Login to Dockerhub
82 | uses: docker/login-action@v2
83 | with:
84 | username: ${{ secrets.DOCKER_USERNAME }}
85 | password: ${{ secrets.DOCKER_PASSWORD }}
86 |
87 | - name: Extract metadata for container image
88 | id: meta
89 | uses: docker/metadata-action@v4
90 | with:
91 | images: ${{ steps.defs.outputs.registryuser }}/${{ steps.defs.outputs.outname }}
92 | tags: |
93 | type=raw,value=${{ steps.defs.outputs.rver }}-${{ matrix.arch }}
94 |
95 | - name: Set comma-separated list with all repository names
96 | id: images
97 | run: |
98 | IMG=${{ steps.meta.outputs.tags }}
99 | REPOLIST="docker.io/$IMG,ghcr.io/$IMG"
100 | echo ghcr=$(echo "ghcr.io/$IMG") >> $GITHUB_OUTPUT
101 | SUB="_docker"
102 | # Also add alternative without _docker when in name
103 | echo list=$(if [[ $REPOLIST == *$SUB* ]]; then echo "$REPOLIST,$(echo $REPOLIST | sed 's/_docker//g')"; else echo $REPOLIST; fi) >> $GITHUB_OUTPUT
104 |
105 | bash .github/scripts/rocker_prep.sh "${{ steps.defs.outputs.rver }}" "${{ steps.defs.outputs.biocver }}" "${{ steps.defs.outputs.rockerintermediateprefix }}" "${{ matrix.arch }}"
106 |
107 | - name: Set up Docker Buildx
108 | uses: docker/setup-buildx-action@v2
109 | with:
110 | platforms: linux/${{ matrix.arch }}
111 |
112 | - name: Build and load r-ver
113 | uses: docker/build-push-action@v3
114 | with:
115 | file: rocker-versioned2/dockerfiles/r-ver_${{ steps.defs.outputs.rver }}.Dockerfile
116 | context: rocker-versioned2
117 | push: true
118 | tags: ${{ steps.defs.outputs.rockerintermediateprefix }}-r-ver:${{ steps.defs.outputs.rver }}-${{ matrix.arch }}
119 | platforms: linux/${{ matrix.arch }}
120 |
121 | - name: Build and load rstudio
122 | uses: docker/build-push-action@v3
123 | with:
124 | file: rocker-versioned2/dockerfiles/rstudio_${{ steps.defs.outputs.rver }}.Dockerfile
125 | context: rocker-versioned2
126 | push: true
127 | tags: ${{ steps.defs.outputs.rockerintermediateprefix }}-rstudio:${{ steps.defs.outputs.rver }}-${{ matrix.arch }}
128 | platforms: linux/${{ matrix.arch }}
129 |
130 | - name: Build and push container image to all repos both arm64 and amd64
131 | uses: docker/build-push-action@v3
132 | with:
133 | build-args: |
134 | BASE_IMAGE=${{ steps.defs.outputs.rockerintermediateprefix }}-rstudio
135 | arm64_tag=${{ steps.defs.outputs.rver }}-${{ matrix.arch }}
136 | amd64_tag=${{ steps.defs.outputs.rver }}-${{ matrix.arch }}
137 | BIOCONDUCTOR_VERSION=${{ steps.defs.outputs.biocver }}
138 | file: Dockerfile
139 | context: .
140 | push: true
141 | tags: ${{ steps.images.outputs.list }}
142 | labels: ${{ steps.meta.outputs.labels }}
143 | platforms: linux/${{ matrix.arch }}
144 |
145 | test-built-containers:
146 | strategy:
147 | matrix:
148 | arch: [arm64, amd64]
149 | # The type of runner that the job will run on
150 | runs-on: ubuntu-latest
151 | needs: build
152 | steps:
153 | - name: Get image name
154 | id: image
155 | run: |
156 | IMG=$(echo "${{needs.build.outputs.registryuser}}/${{ needs.build.outputs.outname }}:${{ needs.build.outputs.rver }}-${{ matrix.arch }}")
157 | echo name=$IMG >> $GITHUB_OUTPUT
158 | cat << EOF > test.Dockerfile
159 | FROM $IMG as test
160 | USER root
161 | RUN mkdir -p /tmp/bioc_test && \
162 | apt list --installed | tee /tmp/bioc_test/aptlist && \
163 | Rscript -e "BiocManager::install(c('SummarizedExperiment','usethis','data.table','igraph','GEOquery'))" 2>&1 | tee /tmp/bioc_test/packages_install && \
164 | Rscript -e "BiocManager::install('gert'); require('gert')" 2>&1 | tee /tmp/bioc_test/gert_install
165 | FROM scratch as export
166 | COPY --from=test /tmp/bioc_test /tmp
167 | EOF
168 |
169 | - name: Set up QEMU
170 | uses: docker/setup-qemu-action@v2
171 | with:
172 | platforms: arm64
173 | if: matrix.arch == 'arm64'
174 |
175 | - name: Set up Docker Buildx
176 | uses: docker/setup-buildx-action@v2
177 | with:
178 | platforms: linux/${{ matrix.arch }}
179 |
180 | - name: Login to GHCR
181 | uses: docker/login-action@v2
182 | with:
183 | registry: ghcr.io
184 | username: ${{ github.actor }}
185 | password: ${{ secrets.GITHUB_TOKEN }}
186 |
187 | - name: Test via build
188 | id: dockerbuild
189 | uses: docker/build-push-action@v4
190 | with:
191 | file: test.Dockerfile
192 | context: .
193 | push: false
194 | load: false
195 | outputs: type=tar,dest=/tmp/image.tar
196 | tags: ${{ steps.image.outputs.name }}
197 |
198 | - run: |
199 | mkdir -p /tmp/contents
200 | cd /tmp/contents
201 | tar -xvf /tmp/image.tar
202 | sudo ls tmp/* | xargs -i bash -c "echo 'cat {}' && cat {}"
203 |
204 |
--------------------------------------------------------------------------------
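
The test job never loads the image into the local daemon; it exports the scratch "export" stage as a tar and reads the captured logs out of it. The same check can be reproduced locally along these lines (a sketch):

    docker buildx build -f test.Dockerfile -o type=tar,dest=/tmp/image.tar .
    mkdir -p /tmp/contents && tar -xf /tmp/image.tar -C /tmp/contents
    cat /tmp/contents/tmp/packages_install /tmp/contents/tmp/gert_install
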
/.github/workflows/image-scan.yml:
--------------------------------------------------------------------------------
1 | name: Docker Vulnerability Scan
2 |
3 | on:
4 | workflow_dispatch:
5 | schedule:
6 | - cron: "0 0 * * *"
7 | jobs:
8 | scan-images:
9 | strategy:
10 | fail-fast: false
11 | matrix:
12 | image: ['r-ver', 'bioconductor_docker', 'tidyverse', 'ml-verse', 'shiny']
13 |
14 | name: Scan container images
15 | runs-on: ubuntu-latest
16 | steps:
17 | - name: Extract metadata for container image
18 | id: meta
19 | uses: docker/metadata-action@v4
20 | with:
21 | images: ${{ github.repository_owner }}/${{ matrix.image }}
22 | tags: |
23 | type=raw,value={{branch}}
24 |
25 | - name: Run Trivy vulnerability scanner
26 | uses: aquasecurity/trivy-action@master
27 | with:
28 | image-ref: '${{ steps.meta.outputs.tags }}'
29 | format: 'table'
30 | exit-code: '0'
31 | vuln-type: 'os,library'
32 | severity: 'CRITICAL,HIGH'
33 |
--------------------------------------------------------------------------------
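
The equivalent scan can be run locally with the Trivy CLI, e.g.:

    trivy image --severity CRITICAL,HIGH bioconductor/bioconductor_docker:devel
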
/.github/workflows/rocker-builds.yaml:
--------------------------------------------------------------------------------
1 | name: Extra rocker builds
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | rver:
7 | default: "devel"
8 | biocver:
9 | default: "3.21"
10 | check-rocker-image:
11 | default: "rocker/rstudio"
12 | schedule:
13 | - cron: '0 18 */4 * *'
14 |
15 | jobs:
16 | check_versions:
17 | runs-on: ubuntu-latest
18 | steps:
19 | - uses: actions/checkout@v4
20 | - name: Set defaults for schedule
21 | id: defs
22 | run: |
23 | echo rver=$(echo ${{ github.event.inputs.rver || 'devel' }}) >> $GITHUB_OUTPUT
24 | BIOCVER=$(echo ${{ github.event.inputs.biocver || '3.21' }})
25 | echo "develbiocver=$BIOCVER" >> $GITHUB_OUTPUT
26 | echo check-rocker-image=$(echo ${{ github.event.inputs.check-rocker-image || 'rocker/rstudio' }}) >> $GITHUB_OUTPUT
27 | RELEASE_BIOC_VER=$(echo "${BIOCVER%.*}.$((${BIOCVER##*.}-1))")
28 | echo "releasebiocver=$RELEASE_BIOC_VER" >> $GITHUB_OUTPUT
29 | echo release-tag=$(echo "RELEASE_${RELEASE_BIOC_VER}" | sed 's/\./_/g') >> $GITHUB_OUTPUT
30 |
31 | - name: Bump R version
32 | id: rbump
33 | run: |
34 | curl https://hub.docker.com/v2/repositories/${{steps.defs.outputs.check-rocker-image}}/tags?page_size=1000 | jq '.results[].name' | tr -d '"' > /tmp/taglist
35 | curl https://raw.githubusercontent.com/${{ github.repository }}/${{steps.defs.outputs.release-tag}}/.github/workflows/build_containers.yaml | grep 'amdtag' | awk -F"'" '/amdtag:/ {print $4}' | uniq > /tmp/currtag
36 | RELEASE_R_VER=$(curl https://bioconductor.org/config.yaml | yq e '.r_ver_for_bioc_ver."${{steps.defs.outputs.releasebiocver}}"')
37 | LATEST_TAG=$(cat /tmp/taglist | grep "$RELEASE_R_VER" | sort -n | tail -n 1)
38 | CURR_TAG=$(cat /tmp/currtag | sort -n | tail -n 1 | cut -d'-' -f1)
39 | echo latest-tag=$LATEST_TAG >> $GITHUB_OUTPUT
40 | if [ "$LATEST_TAG" == "$CURR_TAG" ]; then
41 | echo "Detected '$LATEST_TAG' == '$CURR_TAG' as latest available tag"
42 | echo verdict="no" >> $GITHUB_OUTPUT
43 | else
44 | echo "Detected mismatching versions latest '$LATEST_TAG' != '$CURR_TAG' current tag"
45 | mkdir -p ${{github.workspace}}/tmp/${{github.repository}}
46 | git clone https://github.com/${{github.repository}} -b ${{steps.defs.outputs.release-tag}} ${{github.workspace}}/tmp/${{github.repository}}
47 | cd ${{github.workspace}}/tmp/${{github.repository}}
48 | AUTO_BRANCH="auto-bump-${{steps.defs.outputs.releasebiocver}}-for-${LATEST_TAG}"
49 | sed -i "s/$CURR_TAG/$LATEST_TAG/g" .github/workflows/build_containers.yaml
50 | sed -r -i 's/(^ARG BIOCONDUCTOR_PATCH=)([0-9]+)$/echo "\1$((\2+1))"/ge' Dockerfile
51 | echo verdict="yes" >> $GITHUB_OUTPUT
52 | fi
53 |
54 | - name: Open pull request
55 | id: cpr
56 | uses: peter-evans/create-pull-request@v6
57 | if: steps.rbump.outputs.verdict == 'yes'
58 | with:
59 | token: ${{secrets.PAT}}
60 | commit-message: Auto-bump ${{steps.rbump.outputs.latest-tag}}
61 | committer: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
62 | author: ${{ github.actor }} <${{ github.actor_id }}+${{ github.actor }}@users.noreply.github.com>
63 | base: ${{steps.defs.outputs.release-tag}}
64 | path: ${{github.workspace}}/tmp/${{github.repository}}
65 | branch: auto-bump-bioc-${{steps.defs.outputs.release-tag}}-r-${{steps.rbump.outputs.latest-tag}}
66 | title: '[Auto] Bumping ${{steps.defs.outputs.release-tag}} branch to R ${{steps.rbump.outputs.latest-tag}}'
67 | body: |
68 | Bumping R version.
69 | Note that the Bioconductor automatic PR bot is experimental, please make sure to check the changes manually before merging.
70 | labels: |
71 | auto-bump
72 |
73 | buildrver:
74 | strategy:
75 | matrix:
76 | build:
77 | - { arch: 'amd64', runner: 'ubuntu-latest' }
78 | - { arch: 'arm64', runner: 'ubuntu-latest-arm64' }
79 | runs-on: ${{ matrix.build.runner }}
80 | steps:
81 | - uses: actions/checkout@v3
82 |
83 | - name: Free root space
84 | uses: almahmoud/free-root-space@main
85 | with:
86 | verbose: true
87 |
88 | - name: Set defaults for schedule
89 | id: defs
90 | run: |
91 | echo rver=$(echo ${{ github.event.inputs.rver || 'devel' }}) >> $GITHUB_OUTPUT
92 | echo biocver=$(echo ${{ github.event.inputs.biocver || '3.21' }}) >> $GITHUB_OUTPUT
93 | echo registryuser=$(echo ${{ github.repository_owner }} | awk '{print tolower($0)}') >> $GITHUB_OUTPUT
94 | echo rockerintermediateprefix=$(echo "ghcr.io/${{ github.repository_owner }}/rocker" | awk '{print tolower($0)}') >> $GITHUB_OUTPUT
95 |
96 | - name: Login to GHCR
97 | uses: docker/login-action@v2
98 | with:
99 | registry: ghcr.io
100 | username: ${{ github.actor }}
101 | password: ${{ secrets.GITHUB_TOKEN }}
102 |
103 | - name: Prep rocker files
104 | run: bash .github/scripts/rocker_prep.sh ${{ steps.defs.outputs.rver }} ${{ steps.defs.outputs.biocver }} ${{ steps.defs.outputs.rockerintermediateprefix }} ${{ matrix.build.arch }}
105 |
106 | - name: Set up Docker Buildx
107 | uses: docker/setup-buildx-action@v2
108 | with:
109 | platforms: linux/${{ matrix.build.arch }}
110 |
111 | - name: Build and push r-ver
112 | uses: docker/build-push-action@v3
113 | with:
114 | file: rocker-versioned2/dockerfiles/r-ver_${{ steps.defs.outputs.rver }}.Dockerfile
115 | context: rocker-versioned2
116 | push: true
117 | tags: ${{ steps.defs.outputs.rockerintermediateprefix }}-r-ver:${{ steps.defs.outputs.rver }}-${{ matrix.build.arch }}
118 | platforms: linux/${{ matrix.build.arch }}
119 |
120 | buildrstudio:
121 | strategy:
122 | matrix:
123 | build:
124 | - { arch: 'amd64', runner: 'ubuntu-latest' }
125 | - { arch: 'arm64', runner: 'ubuntu-latest-arm64' }
126 | runs-on: ${{ matrix.build.runner }}
127 | needs: buildrver
128 | steps:
129 | - uses: actions/checkout@v3
130 |
131 | - name: Free root space
132 | uses: almahmoud/free-root-space@main
133 | with:
134 | verbose: true
135 |
136 | - name: Set defaults for schedule
137 | id: defs
138 | run: |
139 | echo rver=$(echo ${{ github.event.inputs.rver || 'devel' }}) >> $GITHUB_OUTPUT
140 | echo biocver=$(echo ${{ github.event.inputs.biocver || '3.21' }}) >> $GITHUB_OUTPUT
141 | echo registryuser=$(echo ${{ github.repository_owner }} | awk '{print tolower($0)}') >> $GITHUB_OUTPUT
142 | echo rockerintermediateprefix=$(echo "ghcr.io/${{ github.repository_owner }}/rocker" | awk '{print tolower($0)}') >> $GITHUB_OUTPUT
143 |
144 | - name: Login to GHCR
145 | uses: docker/login-action@v2
146 | with:
147 | registry: ghcr.io
148 | username: ${{ github.actor }}
149 | password: ${{ secrets.GITHUB_TOKEN }}
150 |
151 | - name: Prep rocker files
152 | run: |
153 | bash .github/scripts/rocker_prep.sh ${{ steps.defs.outputs.rver }} ${{ steps.defs.outputs.biocver }} ${{ steps.defs.outputs.rockerintermediateprefix }} ${{ matrix.build.arch }}
154 |
155 |
156 | - name: Fold part of the tidyverse install into the arm64 rstudio build to avoid timeout
157 | run: |
158 | head -n44 rocker-versioned2/scripts/install_tidyverse.sh >> rocker-versioned2/scripts/install_rstudio.sh
159 | sed -i "\|RUN /rocker_scripts/install_rstudio.sh|i COPY scripts /rocker_scripts" rocker-versioned2/dockerfiles/rstudio_${{ steps.defs.outputs.rver }}.Dockerfile
160 | if: matrix.build.arch == 'arm64'
161 |
162 | - name: Set up Docker Buildx
163 | uses: docker/setup-buildx-action@v2
164 | with:
165 | platforms: linux/${{ matrix.build.arch }}
166 |
167 | - name: Build and push rstudio
168 | uses: docker/build-push-action@v3
169 | with:
170 | file: rocker-versioned2/dockerfiles/rstudio_${{ steps.defs.outputs.rver }}.Dockerfile
171 | context: rocker-versioned2
172 | push: true
173 | tags: ${{ steps.defs.outputs.rockerintermediateprefix }}-rstudio:${{ steps.defs.outputs.rver }}-${{ matrix.build.arch }}
174 | platforms: linux/${{ matrix.build.arch }}
175 |
176 | buildtidyverse:
177 | strategy:
178 | matrix:
179 | build:
180 | - { arch: 'amd64', runner: 'ubuntu-latest' }
181 | runs-on: ${{ matrix.build.runner }}
182 | needs: buildrstudio
183 | steps:
184 | - uses: actions/checkout@v3
185 |
186 | - name: Free root space
187 | uses: almahmoud/free-root-space@main
188 | with:
189 | verbose: true
190 |
191 | - name: Set defaults for schedule
192 | id: defs
193 | run: |
194 | echo rver=$(echo ${{ github.event.inputs.rver || 'devel' }}) >> $GITHUB_OUTPUT
195 | echo biocver=$(echo ${{ github.event.inputs.biocver || '3.21' }}) >> $GITHUB_OUTPUT
196 | echo registryuser=$(echo ${{ github.repository_owner }} | awk '{print tolower($0)}') >> $GITHUB_OUTPUT
197 | echo rockerintermediateprefix=$(echo "ghcr.io/${{ github.repository_owner }}/rocker" | awk '{print tolower($0)}') >> $GITHUB_OUTPUT
198 |
199 | - name: Login to GHCR
200 | uses: docker/login-action@v2
201 | with:
202 | registry: ghcr.io
203 | username: ${{ github.actor }}
204 | password: ${{ secrets.GITHUB_TOKEN }}
205 |
206 | - name: Prep rocker files
207 | run: bash .github/scripts/rocker_prep.sh ${{ steps.defs.outputs.rver }} ${{ steps.defs.outputs.biocver }} ${{ steps.defs.outputs.rockerintermediateprefix }} ${{ matrix.build.arch }}
208 |
209 | - name: Set up Docker Buildx
210 | uses: docker/setup-buildx-action@v2
211 | with:
212 | platforms: linux/${{ matrix.build.arch }}
213 |
214 | - name: Build and push tidyverse
215 | uses: docker/build-push-action@v3
216 | with:
217 | file: rocker-versioned2/dockerfiles/tidyverse_${{ steps.defs.outputs.rver }}.Dockerfile
218 | context: rocker-versioned2
219 | push: true
220 | tags: ${{ steps.defs.outputs.rockerintermediateprefix }}-tidyverse:${{ steps.defs.outputs.rver }}-${{ matrix.build.arch }}
221 | platforms: linux/${{ matrix.build.arch }}
222 |
223 | mlbuild:
224 | strategy:
225 | matrix:
226 | build:
227 | - { arch: 'amd64', runner: 'ubuntu-latest' }
228 | runs-on: ${{ matrix.build.runner }}
229 | steps:
230 | - uses: actions/checkout@v3
231 |
232 | - name: Free root space
233 | uses: almahmoud/free-root-space@main
234 | with:
235 | verbose: true
236 |
237 | - name: Set defaults for schedule
238 | id: defs
239 | run: |
240 | echo rver=$(echo ${{ github.event.inputs.rver || 'devel' }}) >> $GITHUB_OUTPUT
241 | echo biocver=$(echo ${{ github.event.inputs.biocver || '3.21' }}) >> $GITHUB_OUTPUT
242 | echo registryuser=$(echo ${{ github.repository_owner }} | awk '{print tolower($0)}') >> $GITHUB_OUTPUT
243 | echo rockerintermediateprefix=$(echo "ghcr.io/${{ github.repository_owner }}/rocker" | awk '{print tolower($0)}') >> $GITHUB_OUTPUT
244 |
245 | - name: Login to GHCR
246 | uses: docker/login-action@v2
247 | with:
248 | registry: ghcr.io
249 | username: ${{ github.actor }}
250 | password: ${{ secrets.GITHUB_TOKEN }}
251 |
252 | - name: Prep rocker files
253 | run: |
254 | ## git clone rocker
255 | git clone --depth 1 https://github.com/rocker-org/rocker-versioned2
256 | sed -i 's#11.8.0-cudnn8-devel-ubuntu22.04#12.6.2-cudnn-devel-ubuntu24.04#g' rocker-versioned2/dockerfiles/cuda_${{ steps.defs.outputs.rver }}.Dockerfile
257 | sed -i 's#11.8.0-cudnn8-devel-ubuntu22.04#12.6.2-cudnn-devel-ubuntu24.04#g' rocker-versioned2/dockerfiles/ml_${{ steps.defs.outputs.rver }}.Dockerfile
258 | sed -i 's#11.8.0-cudnn8-devel-ubuntu22.04#12.6.2-cudnn-devel-ubuntu24.04#g' rocker-versioned2/dockerfiles/ml-verse_${{ steps.defs.outputs.rver }}.Dockerfile
259 |
260 | # Package archived from CRAN
261 | sed -i '/rmdshower/d' rocker-versioned2/scripts/install_verse.sh
262 |
263 | sed -i 's#rocker/cuda:${{ steps.defs.outputs.rver }}#${{ steps.defs.outputs.rockerintermediateprefix }}-cuda:${{ steps.defs.outputs.rver }}-${{ matrix.build.arch }}#g' rocker-versioned2/dockerfiles/ml_${{ steps.defs.outputs.rver }}.Dockerfile
264 | sed -i 's#rocker/ml:${{ steps.defs.outputs.rver }}#${{ steps.defs.outputs.rockerintermediateprefix }}-ml:${{ steps.defs.outputs.rver }}-${{ matrix.build.arch }}#g' rocker-versioned2/dockerfiles/ml-verse_${{ steps.defs.outputs.rver }}.Dockerfile
265 |
266 | echo "Bioconductor Version: ${{ steps.defs.outputs.biocver }}"
267 | if [ "${{ steps.defs.outputs.rver }}" == "devel" ]; then
268 | bash .github/scripts/devel_or_patched_rversion.sh "${{ steps.defs.outputs.biocver }}" "rocker-versioned2/dockerfiles/cuda_${{ steps.defs.outputs.rver }}.Dockerfile"
269 | bash .github/scripts/devel_or_patched_rversion.sh "${{ steps.defs.outputs.biocver }}" "rocker-versioned2/dockerfiles/ml_${{ steps.defs.outputs.rver }}.Dockerfile"
270 | bash .github/scripts/devel_or_patched_rversion.sh "${{ steps.defs.outputs.biocver }}" "rocker-versioned2/dockerfiles/ml-verse_${{ steps.defs.outputs.rver }}.Dockerfile"
271 | fi
272 |
273 | - name: Set up Docker Buildx
274 | uses: docker/setup-buildx-action@v2
275 | with:
276 | platforms: linux/${{ matrix.build.arch }}
277 |
278 | - name: Build and load cuda
279 | uses: docker/build-push-action@v3
280 | with:
281 | file: rocker-versioned2/dockerfiles/cuda_${{ steps.defs.outputs.rver }}.Dockerfile
282 | context: rocker-versioned2
283 | push: true
284 | tags: ${{ steps.defs.outputs.rockerintermediateprefix }}-cuda:${{ steps.defs.outputs.rver }}-${{ matrix.build.arch }}
285 | platforms: linux/${{ matrix.build.arch }}
286 |
287 | - name: Build and load ml
288 | uses: docker/build-push-action@v3
289 | with:
290 | file: rocker-versioned2/dockerfiles/ml_${{ steps.defs.outputs.rver }}.Dockerfile
291 | context: rocker-versioned2
292 | push: true
293 | tags: ${{ steps.defs.outputs.rockerintermediateprefix }}-ml:${{ steps.defs.outputs.rver }}-${{ matrix.build.arch }}
294 | platforms: linux/${{ matrix.build.arch }}
295 |
296 | - name: Build and load ml-verse
297 | uses: docker/build-push-action@v3
298 | with:
299 | file: rocker-versioned2/dockerfiles/ml-verse_${{ steps.defs.outputs.rver }}.Dockerfile
300 | context: rocker-versioned2
301 | push: true
302 | tags: ${{ steps.defs.outputs.rockerintermediateprefix }}-ml-verse:${{ steps.defs.outputs.rver }}-${{ matrix.build.arch }}
303 | platforms: linux/${{ matrix.build.arch }}
304 |
305 |
--------------------------------------------------------------------------------
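
The release Bioconductor version in check_versions is derived from the devel version with shell parameter expansion (the minor component is decremented by one):

    BIOCVER=3.21
    echo "${BIOCVER%.*}.$((${BIOCVER##*.}-1))"   # prints 3.20
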
/.github/workflows/update_latest.yaml:
--------------------------------------------------------------------------------
1 | name: Update latest tag to latest release
2 |
3 | on:
4 | workflow_dispatch:
5 | schedule:
6 | - cron: '0 18 * * *'
7 |
8 | jobs:
9 | update_latest:
10 | name: "Update latest tag"
11 | strategy:
12 | fail-fast: false
13 | matrix:
14 | image: ['r-ver', 'bioconductor', 'bioconductor_docker', 'tidyverse', 'ml-verse', 'shiny']
15 | runs-on: ubuntu-latest
16 | steps:
17 | - name: Set defaults for schedule
18 | id: defs
19 | run: |
20 | OWNER_REPO="bioconductor"
21 | echo repo="$OWNER_REPO" >> $GITHUB_OUTPUT
22 | echo releasetag=$(curl https://hub.docker.com/v2/repositories/$OWNER_REPO/${{matrix.image}}/tags?page_size=1000 | jq '.results[].name' | tr -d '"' | grep -v "-" | sort -n | grep RELEASE | tail -n 1) >> $GITHUB_OUTPUT
23 |
24 | - name: Login to GHCR
25 | uses: docker/login-action@v2
26 | with:
27 | registry: ghcr.io
28 | username: ${{ github.actor }}
29 | password: ${{ secrets.GITHUB_TOKEN }}
30 |
31 | - name: Login to Dockerhub
32 | uses: docker/login-action@v2
33 | with:
34 | username: ${{ secrets.DOCKER_USERNAME }}
35 | password: ${{ secrets.DOCKER_PASSWORD }}
36 |
37 | - name: Update latest tag
38 | run: |
39 | OWNER_REPO="${{steps.defs.outputs.repo}}"
40 | docker pull $OWNER_REPO/${{matrix.image}}:${{steps.defs.outputs.releasetag}}
41 | docker pull $OWNER_REPO/${{matrix.image}}:latest
42 | IMAGEDIFF=$(diff <(docker inspect $OWNER_REPO/${{matrix.image}}:latest) <(docker inspect $OWNER_REPO/${{matrix.image}}:${{steps.defs.outputs.releasetag}}) || true)
43 | if [ -z "$IMAGEDIFF" ]; then
44 | echo '"latest" tag is already "${{steps.defs.outputs.releasetag}}"'
45 | else
46 | docker tag $OWNER_REPO/${{matrix.image}}:${{steps.defs.outputs.releasetag}} $OWNER_REPO/${{matrix.image}}:latest
47 | docker push $OWNER_REPO/${{matrix.image}}:latest
48 | docker tag $OWNER_REPO/${{matrix.image}}:${{steps.defs.outputs.releasetag}} ghcr.io/$OWNER_REPO/${{matrix.image}}:latest
49 | docker push ghcr.io/$OWNER_REPO/${{matrix.image}}:latest
50 | fi
51 |
--------------------------------------------------------------------------------
/.github/workflows/weekly-release-bump.yaml:
--------------------------------------------------------------------------------
1 | name: Weekly Release Patch Bump
2 |
3 | on:
4 | schedule:
5 | - cron: '0 6 * * 1' # 6 AM UTC (2 AM EDT) on Monday mornings
6 | workflow_dispatch:
7 |
8 | jobs:
9 | bump-patch:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@v4
13 | with:
14 | fetch-depth: 0
15 |
16 | - name: Find Latest Release Branch
17 | id: find-branch
18 | run: |
19 | latest_version=$(git branch -r | grep 'origin/RELEASE_' | sed 's|origin/||' | grep -oP 'RELEASE_\K\d+_\d+' | sort -Vr | head -n1)
20 | latest_branch=$(echo "RELEASE_${latest_version}" | sed 's/\./_/g')
21 | echo "Detected latest release branch: $latest_branch"
22 | echo "branch=$latest_branch" >> $GITHUB_OUTPUT
23 |
24 | - uses: actions/checkout@v4
25 | with:
26 | ref: ${{ steps.find-branch.outputs.branch }}
27 | token: ${{ secrets.PAT }}
28 |
29 | - name: Run patch bump script and push
30 | run: |
31 | git checkout ${{ steps.find-branch.outputs.branch }}
32 |
33 | sed -r -i 's/(^ARG BIOCONDUCTOR_PATCH=)([0-9]+)$/echo "\1$((\2+1))"/ge' Dockerfile
34 |
35 | git config user.name github-actions
36 | git config user.email github-actions@github.com
37 | git add Dockerfile
38 |
39 | git commit -m "Weekly auto-bump Dockerfile patch version for latest release" || exit 0
40 | git push origin ${{ steps.find-branch.outputs.branch }}
--------------------------------------------------------------------------------
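
The bump relies on GNU sed's "e" substitution flag, which runs the substituted pattern space as a shell command and keeps its output, so the patch number is incremented arithmetically in place:

    # Minimal illustration; every matching ARG line in the Dockerfile is bumped.
    # before: ARG BIOCONDUCTOR_PATCH=0
    sed -r -i 's/(^ARG BIOCONDUCTOR_PATCH=)([0-9]+)$/echo "\1$((\2+1))"/ge' Dockerfile
    # after:  ARG BIOCONDUCTOR_PATCH=1
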
/Dockerfile:
--------------------------------------------------------------------------------
1 | # The suggested name for this image is: bioconductor/bioconductor_docker:devel
2 | ARG BASE_IMAGE=rocker/rstudio
3 | ARG arm64_tag=latest
4 | ARG amd64_tag=latest
5 | FROM ${BASE_IMAGE}:${arm64_tag} AS base-arm64
6 | # This will persist in final image
7 | ENV BIOCONDUCTOR_USE_CONTAINER_REPOSITORY=FALSE
8 |
9 | FROM ${BASE_IMAGE}:${amd64_tag} AS base-amd64
10 | # This will persist in final image
11 | ENV BIOCONDUCTOR_USE_CONTAINER_REPOSITORY=TRUE
12 |
13 | # Set automatically when building with --platform
14 | ARG TARGETARCH
15 | ENV TARGETARCH=${TARGETARCH:-amd64}
16 | FROM base-$TARGETARCH AS base
17 |
18 | ## Set Dockerfile version number
19 | ARG BIOCONDUCTOR_VERSION=3.22
20 |
21 | ##### IMPORTANT ########
22 | ## The PATCH version number should be incremented each time
23 | ## there is a change in the Dockerfile.
24 | ARG BIOCONDUCTOR_PATCH=0
25 |
26 | ARG BIOCONDUCTOR_DOCKER_VERSION=${BIOCONDUCTOR_VERSION}.${BIOCONDUCTOR_PATCH}
27 |
28 | ## Do not use binary repositories during container creation
29 | ## Avoid using binaries produced for older version of same container
30 | ENV BIOCONDUCTOR_USE_CONTAINER_REPOSITORY=FALSE
31 |
32 | # Add Bioconductor system dependencies
33 | # Add host-site-library. DEVEL: Add sys env variables to DEVEL image
34 | # Variables in Renviron.site are made available inside of R.
35 | # Add libsbml CFLAGS
36 | ADD bioc_scripts/install_bioc_sysdeps.sh /tmp/
37 | RUN bash /tmp/install_bioc_sysdeps.sh $BIOCONDUCTOR_VERSION \
38 | && echo "R_LIBS=/usr/local/lib/R/host-site-library:\${R_LIBS}" > /usr/local/lib/R/etc/Renviron.site \
39 | && curl -OL http://bioconductor.org/checkResults/devel/bioc-LATEST/Renviron.bioc \
40 | && sed -i '/^IS_BIOC_BUILD_MACHINE/d' Renviron.bioc \
41 | && cat Renviron.bioc | grep -o '^[^#]*' | sed 's/export //g' >>/etc/environment \
42 | && cat Renviron.bioc >> /usr/local/lib/R/etc/Renviron.site \
43 | && echo BIOCONDUCTOR_VERSION=${BIOCONDUCTOR_VERSION} >> /usr/local/lib/R/etc/Renviron.site \
44 | && echo BIOCONDUCTOR_DOCKER_VERSION=${BIOCONDUCTOR_DOCKER_VERSION} >> /usr/local/lib/R/etc/Renviron.site \
45 | && echo 'LIBSBML_CFLAGS="-I/usr/include"' >> /usr/local/lib/R/etc/Renviron.site \
46 | && echo 'LIBSBML_LIBS="-lsbml"' >> /usr/local/lib/R/etc/Renviron.site \
47 | && rm -rf Renviron.bioc
48 |
49 | ARG TARGETARCH
50 | ENV TARGETARCH=${TARGETARCH:-amd64}
51 |
52 | FROM base-$TARGETARCH AS final
53 | COPY --from=base / /
54 |
55 | LABEL name="bioconductor/bioconductor_docker" \
56 | version=$BIOCONDUCTOR_DOCKER_VERSION \
57 | url="https://github.com/Bioconductor/bioconductor_docker" \
58 | vendor="Bioconductor Project" \
59 | maintainer="maintainer@bioconductor.org" \
60 | description="Bioconductor docker image with system dependencies to install all packages." \
61 | license="Artistic-2.0"
62 |
63 | # Reset args in last layer
64 | ARG BIOCONDUCTOR_VERSION=3.22
65 | ARG BIOCONDUCTOR_PATCH=0
66 | ARG BIOCONDUCTOR_DOCKER_VERSION=${BIOCONDUCTOR_VERSION}.${BIOCONDUCTOR_PATCH}
67 |
68 | # Set automatically when building with --platform
69 | ARG TARGETPLATFORM
70 | ENV TARGETPLATFORM=${TARGETPLATFORM:-linux/amd64}
71 |
72 | ## Set env variables
73 | ENV PLATFORM=${TARGETPLATFORM}
74 | ENV LIBSBML_CFLAGS="-I/usr/include"
75 | ENV LIBSBML_LIBS="-lsbml"
76 | ENV BIOCONDUCTOR_DOCKER_VERSION=$BIOCONDUCTOR_DOCKER_VERSION
77 | ENV BIOCONDUCTOR_VERSION=$BIOCONDUCTOR_VERSION
78 | ENV BIOCONDUCTOR_NAME="bioconductor_docker"
79 |
80 | # Init command for s6-overlay
81 | CMD ["/init"]
82 |
--------------------------------------------------------------------------------
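
TARGETARCH is set by BuildKit from --platform, so the platform flag selects the arm64 or amd64 base stage (and with it the BIOCONDUCTOR_USE_CONTAINER_REPOSITORY default). A local build matching the workflow's build args might look like:

    docker buildx build --platform linux/amd64 \
        --build-arg BASE_IMAGE=rocker/rstudio \
        --build-arg amd64_tag=4.5.0 \
        -t bioconductor/bioconductor_docker:devel .
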
/LICENSE:
--------------------------------------------------------------------------------
1 | The Artistic License 2.0
2 |
3 | Copyright (c) 2020 Bioconductor
4 |
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | This license establishes the terms under which a given free software
11 | Package may be copied, modified, distributed, and/or redistributed.
12 | The intent is that the Copyright Holder maintains some artistic
13 | control over the development of that Package while still keeping the
14 | Package available as open source and free software.
15 |
16 | You are always permitted to make arrangements wholly outside of this
17 | license directly with the Copyright Holder of a given Package. If the
18 | terms of this license do not permit the full use that you propose to
19 | make of the Package, you should contact the Copyright Holder and seek
20 | a different licensing arrangement.
21 |
22 | Definitions
23 |
24 | "Copyright Holder" means the individual(s) or organization(s)
25 | named in the copyright notice for the entire Package.
26 |
27 | "Contributor" means any party that has contributed code or other
28 | material to the Package, in accordance with the Copyright Holder's
29 | procedures.
30 |
31 | "You" and "your" means any person who would like to copy,
32 | distribute, or modify the Package.
33 |
34 | "Package" means the collection of files distributed by the
35 | Copyright Holder, and derivatives of that collection and/or of
36 | those files. A given Package may consist of either the Standard
37 | Version, or a Modified Version.
38 |
39 | "Distribute" means providing a copy of the Package or making it
40 | accessible to anyone else, or in the case of a company or
41 | organization, to others outside of your company or organization.
42 |
43 | "Distributor Fee" means any fee that you charge for Distributing
44 | this Package or providing support for this Package to another
45 | party. It does not mean licensing fees.
46 |
47 | "Standard Version" refers to the Package if it has not been
48 | modified, or has been modified only in ways explicitly requested
49 | by the Copyright Holder.
50 |
51 | "Modified Version" means the Package, if it has been changed, and
52 | such changes were not explicitly requested by the Copyright
53 | Holder.
54 |
55 | "Original License" means this Artistic License as Distributed with
56 | the Standard Version of the Package, in its current version or as
57 | it may be modified by The Perl Foundation in the future.
58 |
59 | "Source" form means the source code, documentation source, and
60 | configuration files for the Package.
61 |
62 | "Compiled" form means the compiled bytecode, object code, binary,
63 | or any other form resulting from mechanical transformation or
64 | translation of the Source form.
65 |
66 |
67 | Permission for Use and Modification Without Distribution
68 |
69 | (1) You are permitted to use the Standard Version and create and use
70 | Modified Versions for any purpose without restriction, provided that
71 | you do not Distribute the Modified Version.
72 |
73 |
74 | Permissions for Redistribution of the Standard Version
75 |
76 | (2) You may Distribute verbatim copies of the Source form of the
77 | Standard Version of this Package in any medium without restriction,
78 | either gratis or for a Distributor Fee, provided that you duplicate
79 | all of the original copyright notices and associated disclaimers. At
80 | your discretion, such verbatim copies may or may not include a
81 | Compiled form of the Package.
82 |
83 | (3) You may apply any bug fixes, portability changes, and other
84 | modifications made available from the Copyright Holder. The resulting
85 | Package will still be considered the Standard Version, and as such
86 | will be subject to the Original License.
87 |
88 |
89 | Distribution of Modified Versions of the Package as Source
90 |
91 | (4) You may Distribute your Modified Version as Source (either gratis
92 | or for a Distributor Fee, and with or without a Compiled form of the
93 | Modified Version) provided that you clearly document how it differs
94 | from the Standard Version, including, but not limited to, documenting
95 | any non-standard features, executables, or modules, and provided that
96 | you do at least ONE of the following:
97 |
98 | (a) make the Modified Version available to the Copyright Holder
99 | of the Standard Version, under the Original License, so that the
100 | Copyright Holder may include your modifications in the Standard
101 | Version.
102 |
103 | (b) ensure that installation of your Modified Version does not
104 | prevent the user installing or running the Standard Version. In
105 | addition, the Modified Version must bear a name that is different
106 | from the name of the Standard Version.
107 |
108 | (c) allow anyone who receives a copy of the Modified Version to
109 | make the Source form of the Modified Version available to others
110 | under
111 |
112 | (i) the Original License or
113 |
114 | (ii) a license that permits the licensee to freely copy,
115 | modify and redistribute the Modified Version using the same
116 | licensing terms that apply to the copy that the licensee
117 | received, and requires that the Source form of the Modified
118 | Version, and of any works derived from it, be made freely
119 | available in that license fees are prohibited but Distributor
120 | Fees are allowed.
121 |
122 |
123 | Distribution of Compiled Forms of the Standard Version
124 | or Modified Versions without the Source
125 |
126 | (5) You may Distribute Compiled forms of the Standard Version without
127 | the Source, provided that you include complete instructions on how to
128 | get the Source of the Standard Version. Such instructions must be
129 | valid at the time of your distribution. If these instructions, at any
130 | time while you are carrying out such distribution, become invalid, you
131 | must provide new instructions on demand or cease further distribution.
132 | If you provide valid instructions or cease distribution within thirty
133 | days after you become aware that the instructions are invalid, then
134 | you do not forfeit any of your rights under this license.
135 |
136 | (6) You may Distribute a Modified Version in Compiled form without
137 | the Source, provided that you comply with Section 4 with respect to
138 | the Source of the Modified Version.
139 |
140 |
141 | Aggregating or Linking the Package
142 |
143 | (7) You may aggregate the Package (either the Standard Version or
144 | Modified Version) with other packages and Distribute the resulting
145 | aggregation provided that you do not charge a licensing fee for the
146 | Package. Distributor Fees are permitted, and licensing fees for other
147 | components in the aggregation are permitted. The terms of this license
148 | apply to the use and Distribution of the Standard or Modified Versions
149 | as included in the aggregation.
150 |
151 | (8) You are permitted to link Modified and Standard Versions with
152 | other works, to embed the Package in a larger work of your own, or to
153 | build stand-alone binary or bytecode versions of applications that
154 | include the Package, and Distribute the result without restriction,
155 | provided the result does not expose a direct interface to the Package.
156 |
157 |
158 | Items That are Not Considered Part of a Modified Version
159 |
160 | (9) Works (including, but not limited to, modules and scripts) that
161 | merely extend or make use of the Package, do not, by themselves, cause
162 | the Package to be a Modified Version. In addition, such works are not
163 | considered parts of the Package itself, and are not subject to the
164 | terms of this license.
165 |
166 |
167 | General Provisions
168 |
169 | (10) Any use, modification, and distribution of the Standard or
170 | Modified Versions is governed by this Artistic License. By using,
171 | modifying or distributing the Package, you accept this license. Do not
172 | use, modify, or distribute the Package, if you do not accept this
173 | license.
174 |
175 | (11) If your Modified Version has been derived from a Modified
176 | Version made by someone other than you, you are nevertheless required
177 | to ensure that your Modified Version complies with the requirements of
178 | this license.
179 |
180 | (12) This license does not grant you the right to use any trademark,
181 | service mark, tradename, or logo of the Copyright Holder.
182 |
183 | (13) This license includes the non-exclusive, worldwide,
184 | free-of-charge patent license to make, have made, use, offer to sell,
185 | sell, import and otherwise transfer the Package with respect to any
186 | patent claims licensable by the Copyright Holder that are necessarily
187 | infringed by the Package. If you institute patent litigation
188 | (including a cross-claim or counterclaim) against any party alleging
189 | that the Package constitutes direct or contributory patent
190 | infringement, then this Artistic License to you shall terminate on the
191 | date that such litigation is filed.
192 |
193 | (14) Disclaimer of Warranty:
194 | THE PACKAGE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS
195 | IS' AND WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES. THE IMPLIED
196 | WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
197 | NON-INFRINGEMENT ARE DISCLAIMED TO THE EXTENT PERMITTED BY YOUR LOCAL
198 | LAW. UNLESS REQUIRED BY LAW, NO COPYRIGHT HOLDER OR CONTRIBUTOR WILL
199 | BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
200 | DAMAGES ARISING IN ANY WAY OUT OF THE USE OF THE PACKAGE, EVEN IF
201 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://opensource.org/licenses/Artistic-2.0)
2 | [](https://www.repostatus.org/#active)
3 | [](https://github.com/Bioconductor/bioconductor_docker/actions/workflows/weekly-devel-builder.yml)
4 |
5 | # Docker containers for Bioconductor
6 |
7 | [Docker](https://docs.docker.com/engine/docker-overview/) packages software
8 | into self-contained environments, called containers, that include necessary
9 | dependencies to run. Containers can run on any operating system including
10 | Windows and Mac (using modern Linux kernels) via the
11 | [Docker engine](https://docs.docker.com/engine/).
12 |
13 | Containers can also be deployed in the cloud using
14 | [Amazon Elastic Container Service](https://aws.amazon.com/ecs/),
15 | [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/)
16 | or [Microsoft Azure Container Instances](https://azure.microsoft.com/en-us/services/container-instances/)
17 |
18 |
19 |
20 | - [Quick start](#quickstart)
21 | - [Why Use Containers](#intro)
22 | * [Goals for new containers](#goals)
23 | - [Current Containers](#current)
24 | - [Using Containers](#usage)
25 | * [Running Containers](#running)
26 | * [Mounting Additional Volume](#mounting)
27 | * [Using docker-compose](#dockercompose)
28 | - [Modifying the images](#modify)
29 | - [Singularity](#singularity)
30 | - [Microsoft Azure Container Instances](#msft)
31 | * [Using containers hosted on Microsoft Container Registry](#mcr)
32 | * [Use Azure Container Instances to run bioconductor images on-demand on Azure](#aci)
33 | - [How to contribute](#contribute)
34 | - [Deprecation Notice](#deprecation)
35 | * [Legacy Containers](#legacy)
36 | * [Reason for deprecation](#reason)
37 | * [Reporting issues](#issues)
38 | - [Acknowledgements](#acknowledgements)
39 |
40 |
41 | ## Quick start
42 |
43 | 1. Install Docker
44 |
45 | 1. Run container with Bioconductor and RStudio
46 |
47 | docker run \
48 | -e PASSWORD=bioc \
49 | -p 8787:8787 \
50 | bioconductor/bioconductor_docker:devel
51 |
52 | This command will run the docker container
53 | `bioconductor/bioconductor_docker:devel` on your local machine.
54 |
55 | RStudio will be available in your web browser at
56 | `http://localhost:8787`. The username is always `rstudio`. The
57 | password in the command above is set to `bioc`, but it can be set to
58 | anything except `rstudio`. `8787` is the port being mapped between
59 | the docker container and your host machine.
60 |
61 | You are logged in as the `rstudio` user by default.
63 |
64 |
65 | ## Why use Containers
66 |
67 | With Bioconductor containers, we hope to enhance
68 |
69 | * **Reproducibility**: If you run some code in a container today,
70 | you can run it again in the same container (with the same
71 | [tag](https://docs.docker.com/userguide/dockerimages/#setting-tags-on-an-image))
72 | years later and know that nothing in the container has changed.
73 | You should always take note of the tag you used if you think
74 | you might want to reproduce some work later.
75 |
76 | * **Ease of use**: With one command, you can be running the
77 | latest release or devel Bioconductor. No need to worry
78 | about whether packages and system dependencies are
79 | installed.
80 |
81 | * **Convenience**: Easily start a fresh R session with
82 | no packages installed for testing. Quickly run an analysis with package
83 | dependencies not typical of your workflow. Containers make this easy.
84 |
85 | Our aim is to provide up-to-date containers for the current release
86 | and devel versions of Bioconductor, and some older
87 | versions. Bioconductor’s Docker images are stored in Docker Hub; the
88 | source Dockerfile(s) are on Github.
89 |
90 | Our release and devel images are based on the [Rocker Project](https://www.rocker-project.org/)'s
91 | [rocker/rstudio](https://github.com/rocker-org/rocker/tree/master/rstudio)
92 | image and are built when a Bioconductor release occurs.
93 |
94 |
95 | ### Goals for new container architecture
96 |
97 | A few of our key goals in migrating to a new set of Docker containers are:
98 |
99 | - keep the images shipped by the Bioconductor team at a manageable size.
100 |
101 | - make the images easy to extend, so developers can inherit from a
102 |   single image to build their own docker image.
103 |
104 | - make the images easy to maintain, by streamlining the docker
105 |   inheritance chain.
106 |
107 | - Adopt a "best practices" outline so that new community-contributed
108 |   docker images get reviewed and follow standards.
109 |
110 | - Adopt a deprecation policy and life cycle for images similar to
111 | Bioconductor packages.
112 |
113 | - Replicate the Linux build machines (_malbec2_) on the
114 | `bioconductor/bioconductor_docker:devel` image as closely as
115 | possible. While this is not fully possible just yet, this image can
116 | be used by maintainers who wish to reproduce errors seen on the
117 | Bioconductor Linux build machine and as a helpful debugging tool.
118 |
119 | - Make Bioconductor package binaries available to all users of
120 |   this container. Users can now install Bioconductor packages as
121 |   binaries simply by calling `BiocManager::install()`, which speeds up
122 |   installation by avoiding compilation (see the example after this list).
123 |
124 |   To see the latest status of the Bioconductor binary repository, check with
125 |   `BiocPkgTools::repositoryStats()`.
126 |
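For example, inside the container's R session you can install a package and
inspect the binary repository (a minimal sketch; `DESeq2` is only an
illustrative package, and `BiocPkgTools` may need to be installed first):

    ## Resolves to a pre-built binary when one is available in the
    ## Bioconductor binary repository, otherwise installs from source
    BiocManager::install("DESeq2")

    ## Summary of the current state of the binary repository
    BiocPkgTools::repositoryStats()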
127 |
128 | ## Current Containers
129 |
130 | For each supported version of Bioconductor, we provide
131 |
132 | - **bioconductor/bioconductor_docker:RELEASE_X_Y**
133 |
134 | - **bioconductor/bioconductor_docker:devel**
135 |
136 | Bioconductor's Docker images are stored in [Docker Hub](https://hub.docker.com/u/bioconductor/);
137 | the source Dockerfile(s) are in [Github](https://github.com/Bioconductor/bioconductor_docker).
138 |
139 |
140 | ## Using the containers
141 |
142 | A well organized guide to popular docker commands can be found
143 | [here](https://github.com/wsargent/docker-cheat-sheet). For
144 | convenience, below are some commands to get you started. The following
145 | examples use the `bioconductor/bioconductor_docker:devel` image.
146 |
147 | **Note:** you may need to prepend `sudo` to all `docker`
148 | commands, but try them without it first.
149 |
150 | **Prerequisites**: On Linux, you need Docker
151 | [installed](https://docs.docker.com/installation/) and on
152 | [Mac](http://docs.docker.com/installation/mac/) or
153 | [Windows](http://docs.docker.com/installation/windows/) you need
154 | Docker Toolbox installed and running.
155 |
156 | ##### List which docker images are available locally
157 |
158 | docker images
159 |
160 | ##### List running containers
161 |
162 | docker ps
163 |
164 | ##### List all containers
165 |
166 | docker ps -a
167 |
168 | ##### Resume a stopped container
169 |
170 |     docker start <container-id>
171 |
172 | ##### Shell into a running container
173 |
174 |     docker exec -it <container-id> /bin/bash
175 |
176 | ##### Shutdown container
177 |
178 |     docker stop <container-id>
179 |
180 | ##### Delete container
181 |
182 |     docker rm <container-id>
183 |
184 | ##### Delete image
185 |
186 | docker rmi bioconductor/bioconductor_docker:devel
187 |
188 |
189 | ### Running the container
190 |
191 | The above commands can be helpful but the real basics of running a
192 | Bioconductor Docker involves pulling the public image and running the
193 | container.
194 |
195 | ##### Get a copy of public docker image
196 |
197 | docker pull bioconductor/bioconductor_docker:devel
198 |
199 | ##### To run RStudio Server:
200 |
201 |     docker run -e PASSWORD=<password> \
202 | -p 8787:8787 \
203 | bioconductor/bioconductor_docker:devel
204 |
205 | You can then open a web browser pointing to your docker host on
206 | port 8787. If you're on Linux and using default settings, the docker
207 | host is `127.0.0.1` (or `localhost`), so the full URL to RStudio would
208 | be `http://localhost:8787`. If you are on Mac or Windows and running
209 | `Docker Toolbox`, you can determine the docker host with the
210 | `docker-machine ip default` command.
211 |
212 | In the above command, `-e PASSWORD=<password>` sets the RStudio password,
213 | which is required by the RStudio Docker image. It can be whatever you
214 | like, except it cannot be `rstudio`. Log in to RStudio with the
215 | username `rstudio` and whatever password was specified.
216 |
217 | If you want to run RStudio as a user on your host machine, in order to
218 | read/write files in a host directory, please [read this](https://github.com/rocker-org/rocker/wiki/Sharing-files-with-host-machine).
219 |
220 | NOTE: If you forget to add the tag `devel` or `RELEASE_X_Y` while
221 | using the `bioconductor/bioconductor_docker` image, it will
222 | automatically use the `latest` tag which points to the latest RELEASE
223 | version of Bioconductor.
224 |
225 | ##### To run R from the command line:
226 |
227 | docker run -it --user rstudio bioconductor/bioconductor_docker:devel R
228 |
229 | ##### To open a Bash shell on the container:
230 |
231 | docker run -it --user rstudio bioconductor/bioconductor_docker:devel bash
232 |
233 | **Note**: The `docker run` command is very powerful and versatile.
234 | For full documentation, type `docker run --help` or visit
235 | the [help page](https://docs.docker.com/reference/run/).
236 |
237 | [ Back to top ]
238 |
239 |
240 | ### Mounting Additional Volume
241 |
242 | One such option for `docker run` is `-v` to mount an additional volume
243 | to the docker image. This might be useful for say mounting a local R
244 | install directory for use on the docker. The path on the docker image
245 | that should be mapped to a local R library directory is
246 | `/usr/local/lib/R/host-site-library`.
247 |
248 | The following example mounts my locally installed packages into this
249 | docker directory. In turn, that path is automatically included in the R
250 | `.libPaths()` on the docker image, and all of my locally installed
251 | packages would be available for use.
252 |
253 | * Running it interactively,
254 |
255 | docker run \
256 | -v /home/my-devel-library:/usr/local/lib/R/host-site-library \
257 | -it \
258 | --user rstudio \
259 | bioconductor/bioconductor_docker:devel
260 |
261 | Without the `--user rstudio` option, the container is started and
262 | logged in as the `root` user.
263 |
264 | The `-it` flag gives you an interactive tty (shell/terminal) to the
265 | docker container.
266 |
267 | * Running it with RStudio interface
268 |
269 | docker run \
270 | -v /home/my-devel-library:/usr/local/lib/R/host-site-library \
271 | -e PASSWORD=password \
272 | -p 8787:8787 \
273 | bioconductor/bioconductor_docker:devel
274 |
275 |
276 |
277 | ### Using docker-compose
278 |
279 | To run the docker-compose file `docker-compose.yml` from the same
280 | directory,
281 |
282 | ```
283 | docker-compose up
284 | ```
285 |
286 | Using `docker-compose`, the user can launch the image with a single
287 | command. The RStudio image is launched at `http://localhost:8787`.
288 |
289 | The `docker-compose.yml` includes settings so that the user doesn't
290 | have to worry about setting the port, password (default is `bioc`), or
291 | the volume used to save libraries.
292 |
293 | The library path, where all the packages are installed, is
294 | automatically configured to use the volume
295 | `$HOME/R/bioconductor_docker/`; in the case of
296 | Bioconductor version 3.14, it would be
297 | `$HOME/R/bioconductor_docker/3.14`. This location is mounted onto the
298 | path `/usr/local/lib/R/host-site-library`, which is the first value
299 | in your package search path if you check `.libPaths()`.
300 |
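To confirm this inside a running container, inspect the library search path
from R; the output below is only illustrative and depends on the image
version:

    > .libPaths()
    [1] "/usr/local/lib/R/host-site-library"
    [2] "/usr/local/lib/R/site-library"
    [3] "/usr/local/lib/R/library"
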
301 | When the user starts the docker image using `docker-compose`, it will
302 | recognize previously mounted libraries with the appropriate
303 | Bioconductor version, saving users the time of reinstalling the
304 | previously installed packages.
305 |
306 | To add another volume for data, modify the
307 | `docker-compose.yml` to include another volume, so that all the data
308 | is stored in the same location as well.
309 |
310 | ```
311 | volumes:
312 | - ${HOME}/R/bioconductor_docker/3.14:/usr/local/lib/R/host-site-library
313 | - ${HOME}/R/data:/home/rstudio
314 | ```
315 |
316 |
317 | To run in the background, use the `-d` or `--detach` flag,
318 |
319 | ```
320 | docker-compose up -d
321 | ```
322 |
323 | If the image is run in a detached state, the `container_name` can be
324 | used to exec into a terminal as the `root` user, without using
325 | RStudio.
326 |
327 | As the `root` user, additional system dependencies can be
328 | installed to make the image fit the needs of the user.
329 |
330 | ```
331 | docker exec -it bioc-3.14 bash
332 | ```
333 |
334 | For more information on how to use `docker-compose`, use the
335 | [official docker-compose reference](https://docs.docker.com/compose/reference/up/).
336 |
337 | [ Back to top ]
338 |
339 |
340 | ## Modifying the images
341 |
342 | There are two ways to modify these images:
343 |
344 | 1. Making changes in a running container and then committing them
345 | using the `docker commit` command.
346 |
347 |         docker commit <container-id> <new-image-name>
348 |
349 | 2. Using a Dockerfile to declare the changes you want to make.
350 |
351 | The second way is the recommended way. Both ways are
352 | [documented here](https://docs.docker.com/userguide/dockerimages/#creating-our-own-images).
353 |
354 | Example 1:
355 |
356 | My goal is to add the python package 'tensorflow' and to install a
357 | Bioconductor package called 'scAlign' on top of the base docker
358 | image, i.e., bioconductor/bioconductor_docker:devel.
359 |
360 | As a first step, my Dockerfile should inherit from the
361 | `bioconductor/bioconductor_docker:devel` image and build from
362 | there. Since all docker images are Linux environments, and this
363 | container is specifically Ubuntu-based, I need some knowledge of how to
364 | install libraries on Linux machines.
365 |
366 | In your new `Dockerfile`, you can have the following commands
367 |
368 | # Docker inheritance
369 | FROM bioconductor/bioconductor_docker:devel
370 |
371 | # Update apt-get
372 | RUN apt-get update \
373 | ## Install the python package tensorflow
374 | && pip install tensorflow \
375 | ## Remove packages in '/var/cache/' and 'var/lib'
376 | ## to remove side-effects of apt-get update
377 | && apt-get clean \
378 | && rm -rf /var/lib/apt/lists/*
379 |
380 | # Install required Bioconductor package
381 | RUN R -e 'BiocManager::install("scAlign")'
382 |
383 | This `Dockerfile` can be built with the following command (note: you
384 | can name the image however you want):
385 |
386 | docker build -t bioconductor_docker_tensorflow:devel .
387 |
388 | This will let you use the docker image with 'tensorflow' and the
389 | `scAlign` package installed.
390 |
391 | docker run -p 8787:8787 -e PASSWORD=bioc bioconductor_docker_tensorflow:devel
392 |
393 | Example 2:
394 |
395 | My goal is to add all the required infrastructure to be able to
396 | compile vignettes and knit documents into pdf files. My `Dockerfile`
397 | will look like the following for this requirement,
398 |
399 | # This docker image has LaTeX to build the vignettes
400 | FROM bioconductor/bioconductor_docker:devel
401 |
402 | # Update apt-get
403 | RUN apt-get update \
404 | && apt-get install -y --no-install-recommends apt-utils \
405 | && apt-get install -y --no-install-recommends \
406 | texlive \
407 | texlive-latex-extra \
408 | texlive-fonts-extra \
409 | texlive-bibtex-extra \
410 | texlive-science \
411 | texi2html \
412 | texinfo \
413 | && apt-get clean \
414 | && rm -rf /var/lib/apt/lists/*
415 |
416 | ## Install BiocStyle
417 | RUN R -e 'BiocManager::install("BiocStyle")'
418 |
419 | This `Dockerfile` can be built with the command,
420 |
421 | docker build -t bioconductor_docker_latex:devel .
422 |
423 | This will let you use the docker image as needed to build and
424 | compile vignettes for packages.
425 |
426 | docker run -p 8787:8787 -e PASSWORD=bioc bioconductor_docker_latex:devel
427 |
428 | [ Back to top ]
429 |
430 |
431 | ## Singularity
432 |
433 | The latest `bioconductor/bioconductor_docker` images are available on
434 | Singularity Hub as well. Singularity is a container runtime just like
435 | Docker, and Singularity Hub is the host registry for Singularity
436 | containers.
437 |
438 | You can find the Singularity containers collection at
439 | https://singularity-hub.org/collections/3955.
440 |
441 | These images are particularly useful on compute clusters where you
442 | don't need admin access. You need to have the module `singularity`
443 | installed. See https://singularity.lbl.gov/docs-installation (contact your
444 | IT department when in doubt).
445 |
446 | If you have Singularity installed on your machine or cluster, the steps are:
447 |
448 | Inspect available modules
449 |
450 | module available
451 |
452 | If Singularity is available,
453 |
454 | module load singularity
455 |
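As a minimal sketch (the output filename `bioconductor_devel.sif` is just a
chosen name), you can then pull the Docker image and start R from the
resulting Singularity image:

    singularity pull bioconductor_devel.sif docker://bioconductor/bioconductor_docker:devel
    singularity exec bioconductor_devel.sif R
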
456 | Please check https://www.rocker-project.org/use/singularity/ for specific
457 | usage instructions relevant to Singularity containers.
458 |
459 |
460 | ## Microsoft Azure Container Instances
461 |
462 | If you are a Microsoft Azure user, you have an option to run your
463 | containers using images hosted on [Microsoft Container Registry](https://github.com/microsoft/ContainerRegistry).
464 |
465 | > Microsoft Container Registry (MCR) is the primary Registry for all Microsoft Published docker images that offers a reliable and trustworthy delivery of container images with a syndicated catalog
466 |
467 |
468 | ### Using containers hosted on Microsoft Container Registry
469 |
470 | You can learn more about the `bioconductor_docker` image hosted on
471 | Microsoft Container Registry
472 | [here](https://hub.docker.com/_/microsoft-bioconductor/).
473 |
474 | Pull the `bioconductor_docker` image from Microsoft Container
475 | Registry, specifying your `tag` of choice. Check
476 | [here](https://hub.docker.com/_/microsoft-bioconductor-bioconductor-docker)
477 | for the list of tags under "Full Tag Listing":
478 |
479 |     docker pull mcr.microsoft.com/bioconductor/bioconductor_docker:<tag>
480 |
481 | To pull the latest image:
482 |
483 | docker pull mcr.microsoft.com/bioconductor/bioconductor_docker:latest
484 |
485 | **Example: Run RStudio interactively from your docker container**
486 |
487 | To run RStudio in a web browser session, run the following and access
488 | it at `127.0.0.1:8787`. The default user name is "rstudio" and you
489 | can specify your password as in the example below (here, it is set to
490 | 'bioc'):
491 |
492 | docker run --name bioconductor_docker_rstudio \
493 | -v ~/host-site-library:/usr/local/lib/R/host-site-library \
494 | -e PASSWORD='bioc' \
495 | -p 8787:8787 \
496 | mcr.microsoft.com/bioconductor/bioconductor_docker:latest
497 |
498 | To run R interactively in your terminal:
499 |
500 | docker run --name bioconductor_docker_rstudio \
501 | -it \
502 | -v ~/host-site-library:/usr/local/lib/R/host-site-library \
503 | -e PASSWORD='bioc' \
504 | -p 8787:8787 \
505 | mcr.microsoft.com/bioconductor/bioconductor_docker:latest R
506 |
507 | [ Back to top ]
508 |
509 |
510 | ### Use Azure Container Instances to run bioconductor images on-demand on Azure
511 |
512 | [Azure Container Instances or ACI](https://azure.microsoft.com/en-us/services/container-instances/#features)
513 | provide a way to run Docker containers on-demand in a managed,
514 | serverless Azure environment. To learn more, check out the
515 | documentation
516 | [here](https://docs.microsoft.com/en-us/azure/container-instances/container-instances-overview).
517 |
518 | ### Run bioconductor images using ACI
519 |
520 | **Prerequisites**:
521 | 1. [An Azure account and a
522 | subscription](https://docs.microsoft.com/en-us/azure/guides/developer/azure-developer-guide#understanding-accounts-subscriptions-and-billing)
523 | you can create resources in
524 |
525 | 2. [Azure
526 | CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest)
527 |
528 | 3. Create a [resource
529 | group](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/manage-resource-groups-portal)
530 | within your subscription
531 |
532 | You can run [Azure CLI or "az cli"
533 | commands](https://docs.microsoft.com/en-us/cli/azure/?view=azure-cli-latest)
534 | to create, stop, restart or delete container instances running any
535 | bioconductor image - either official images by bioconductor or images
536 | available on [Microsoft Container
537 | Registry](https://hub.docker.com/_/microsoft-bioconductor). To get
538 | started, ensure you have an Azure account and a subscription or
539 | [create a free account](https://azure.microsoft.com/en-us/free/).
540 |
541 | Follow [this
542 | tutorial](https://docs.microsoft.com/en-us/azure/container-instances/container-instances-quickstart)
543 | to get familiar with Azure Container Instances.
544 |
545 | To run the bioconductor image hosted on Microsoft Container Registry
546 | or MCR, create a new resource group in your Azure subscription. Then
547 | run the following command using Azure CLI. You can customize any or
548 | all of the inputs. This command is adapted to run on an Ubuntu
549 | machine:
550 |
551 | az container create \
552 | --resource-group resourceGroupName \
553 | --name mcr-bioconductor \
554 | --image mcr.microsoft.com/bioconductor/bioconductor_docker \
555 | --cpu 2 \
556 | --memory 4 \
557 | --dns-name-label mcr-bioconductor \
558 | --ports 8787 \
559 | --environment-variables 'PASSWORD'='bioc'
560 |
561 | When completed, run this command to get the fully qualified domain name (FQDN):
562 |
563 | az container show \
564 | --resource-group resourceGroupName \
565 | --name mcr-bioconductor \
566 | --query "{FQDN:ipAddress.fqdn,ProvisioningState:provisioningState}" \
567 | --out table
568 |
569 | Here we expose port `8787` on this publicly accessible FQDN. You may
570 | have to choose a different "dns-name-label" to avoid conflicts. By
571 | default, the username for RStudio is "rstudio" (similar to the
572 | official bioconductor docker image). Here we set the password for
573 | RStudio to 'bioc' in the environment variable configuration. The
574 | `--cpu` and `--memory` (in GB) configurations can also be customized
575 | to your needs. By default, ACIs are assigned 1 CPU core and 1.5 GB
576 | of memory.
577 |
578 | To learn more about what you can configure and customize when creating
579 | an ACI, run:
580 |
581 | az container create --help
582 |
583 | #### Mount Azure File Share to persist analysis data between sessions
584 |
585 | To ensure that the data persists between different analysis sessions
586 | when using Azure Container Instances, you can use the feature to
587 | [mount Azure file share to your container instance](https://docs.microsoft.com/en-us/azure/container-instances/container-instances-volume-azure-files). In this example, we will create
588 | an ACI that mounts the "/home/rstudio" directory in RStudio to an
589 | [Azure File Share](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction).
590 |
591 | **Prerequisites**:
592 |
593 | 1. [An Azure account and a subscription](https://docs.microsoft.com/en-us/azure/guides/developer/azure-developer-guide#understanding-accounts-subscriptions-and-billing) you can create resources in
594 |
595 | 2. [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest)
596 |
597 | 3. Create a [resource group](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/manage-resource-groups-portal)
598 | within your subscription
599 |
600 | Now, run the following Azure CLI commands to:
601 |
602 | 1. Create an [Azure
603 | Storage](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-create?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&tabs=azure-cli)
604 | account
605 |
606 | 2. Create an [Azure file
607 | share](https://docs.microsoft.com/en-us/azure/storage/files/storage-how-to-use-files-cli)
608 |
609 | 3. Get the [storage account
610 | key](https://docs.microsoft.com/en-us/cli/azure/storage/account/keys?view=azure-cli-latest)
611 |
612 | ```
613 | # Change these four parameters as needed
614 | ACI_PERS_RESOURCE_GROUP=resourceGroupName
615 | ACI_PERS_STORAGE_ACCOUNT_NAME=storageAccountName
616 | ACI_PERS_LOCATION=eastus
617 | ACI_PERS_SHARE_NAME=fileShareName
618 |
619 | # Step1: Create the storage account with the parameters
620 | az storage account create \
621 | --resource-group $ACI_PERS_RESOURCE_GROUP \
622 | --name $ACI_PERS_STORAGE_ACCOUNT_NAME \
623 | --location $ACI_PERS_LOCATION \
624 | --sku Standard_LRS
625 |
626 | # Step2: Create the file share
627 | az storage share create \
628 | --name $ACI_PERS_SHARE_NAME \
629 | --account-name $ACI_PERS_STORAGE_ACCOUNT_NAME
630 |
631 | # Step3: Get the storage account key
632 | STORAGE_KEY=$(az storage account keys list \
633 | --resource-group $ACI_PERS_RESOURCE_GROUP \
634 | --account-name $ACI_PERS_STORAGE_ACCOUNT_NAME \
635 | --query "[0].value" --output tsv)
636 | echo $STORAGE_KEY
637 | ```
638 |
639 | Here is an example command to mount an Azure file share to an ACI running bioconductor. This command is adapted to run on an Ubuntu machine:
640 |
641 | az container create \
642 | --resource-group resourceGroupName \
643 | --name mcr-bioconductor-fs \
644 | --image mcr.microsoft.com/bioconductor/bioconductor_docker \
645 | --dns-name-label mcr-bioconductor-fs \
646 | --cpu 2 \
647 | --memory 4 \
648 | --ports 8787 \
649 | --environment-variables 'PASSWORD'='bioc' \
650 | --azure-file-volume-account-name storageAccountName \
651 | --azure-file-volume-account-key $STORAGE_KEY \
652 | --azure-file-volume-share-name fileShareName \
653 | --azure-file-volume-mount-path /home/rstudio
654 |
655 | When completed, run this command to get the fully qualified domain name or FQDN:
656 |
657 | az container show \
658 | --resource-group resourceGroupName \
659 | --name mcr-bioconductor-fs \
660 | --query "{FQDN:ipAddress.fqdn,ProvisioningState:provisioningState}" \
661 | --out table
662 |
663 | Here we expose port 8787 on this publicly accessible FQDN. You may
664 | have to choose a different "dns-name-label" to avoid conflicts. By
665 | default, the username for RStudio is "rstudio" (similar to the
666 | official bioconductor docker image). Here we set the password for
667 | RStudio to 'bioc' in the environment variable configuration. The
668 | "--cpu" and "--memory" (in GB) configurations can also be customized
669 | to your needs. By default, ACIs are assigned 1 CPU core and 1.5 GB of
670 | memory. Here, we also mount the RStudio "/home/rstudio" directory to a
671 | persistent Azure file share named "fileShareName" in the storage
672 | account specified. When you stop or restart an ACI, this data will not
673 | be lost.
674 |
675 | #### Stop, Start, Restart or Delete containers running on ACI
676 |
677 | You can run Azure CLI commands to [stop, start,
678 | restart](https://docs.microsoft.com/en-us/azure/container-instances/container-instances-stop-start)
679 | or
680 | [delete](https://docs.microsoft.com/en-us/azure/container-instances/container-instances-quickstart#clean-up-resources)
681 | container instances on Azure. You can find all the commands and
682 | options
683 | [here](https://docs.microsoft.com/en-us/cli/azure/container?view=azure-cli-latest#commands).
684 |
685 | Replace `containerName` and `resourceGroupName` in the following CLI commands.
686 |
687 | ##### Stop the container instance
688 |
689 | az container stop -n containerName -g resourceGroupName
690 |
691 |
692 | ##### Start the container instance
693 |
694 | az container start -n containerName -g resourceGroupName
695 |
696 | ##### Restart the container instance
697 |
698 | az container restart -n containerName -g resourceGroupName
699 |
700 | ##### Delete the container instance
701 |
702 | az container delete -n containerName -g resourceGroupName
703 |
704 | To delete the ACI without being prompted for confirmation:
705 |
706 | az container delete -n containerName -g resourceGroupName -y
707 |
708 | To troubleshoot any issues when using Azure Container Instances, try
709 | out the recommendations
710 | [here](https://docs.microsoft.com/en-us/azure/container-instances/container-instances-troubleshooting). For
711 | feedback or further issues, contact us via
712 | [email](mailto:genomics@microsoft.com).
713 |
714 | [ Back to top ]
715 |
716 |
717 | ## How to Contribute
718 |
719 | There is a comprehensive list of best practices and standards on how
720 | community members can contribute images
721 | [here](https://github.com/Bioconductor/bioconductor_docker/blob/master/best_practices.md).
722 |
724 |
725 |
726 | ## Deprecation Notice
727 |
728 | For previous users of docker containers for Bioconductor, please note
729 | that we are deprecating the following images. These images were
730 | maintained by the Bioconductor core team and by the community.
731 |
732 |
733 | ### Legacy Containers
734 |
735 | These images are NO LONGER MAINTAINED or updated. They will, however,
736 | remain available should a user choose to use them. They are no longer
737 | supported by the Bioconductor Core team.
738 |
739 | Bioconductor Core Team: bioc-issue-bot@bioconductor.org
740 |
741 | * [bioconductor/devel_base2](https://hub.docker.com/r/bioconductor/devel_base2/)
742 | * [bioconductor/devel_core2](https://hub.docker.com/r/bioconductor/devel_core2/)
743 | * [bioconductor/release_base2](https://hub.docker.com/r/bioconductor/release_base2/)
744 | * [bioconductor/release_core2](https://hub.docker.com/r/bioconductor/release_core2/)
745 |
746 | Steffen Neumann: sneumann@ipb-halle.de, maintained as part of PhenoMeNal, funded by the Horizon 2020 grant 654241
747 |
748 | * [bioconductor/devel_protmetcore2](https://hub.docker.com/r/bioconductor/devel_protmetcore2/)
749 | * [bioconductor/devel_metabolomics2](https://hub.docker.com/r/bioconductor/devel_metabolomics2/)
750 | * [bioconductor/release_protmetcore2](https://hub.docker.com/r/bioconductor/release_protmetcore2/)
751 | * [bioconductor/release_metabolomics2](https://hub.docker.com/r/bioconductor/release_metabolomics2/)
752 |
753 | Laurent Gatto: lg390@cam.ac.uk
754 |
755 | * [bioconductor/devel_mscore2](https://hub.docker.com/r/bioconductor/devel_mscore2/)
756 | * [bioconductor/devel_protcore2](https://hub.docker.com/r/bioconductor/devel_protcore2/)
757 | * [bioconductor/devel_proteomics2](https://hub.docker.com/r/bioconductor/devel_proteomics2/)
758 | * [bioconductor/release_mscore2](https://hub.docker.com/r/bioconductor/release_mscore2/)
759 | * [bioconductor/release_protcore2](https://hub.docker.com/r/bioconductor/release_protcore2/)
760 | * [bioconductor/release_proteomics2](https://hub.docker.com/r/bioconductor/release_proteomics2/)
761 |
762 | RGLab: wjiang2@fredhutch.org
763 |
764 | * [bioconductor/devel_cytometry2](https://hub.docker.com/r/bioconductor/devel_cytometry2/)
765 | * [bioconductor/release_cytometry2](https://hub.docker.com/r/bioconductor/release_cytometry2/)
766 |
767 | First iteration containers
768 |
769 | * bioconductor/devel_base
770 | * bioconductor/devel_core
771 | * bioconductor/devel_flow
772 | * bioconductor/devel_microarray
773 | * bioconductor/devel_proteomics
774 | * bioconductor/devel_sequencing
775 | * bioconductor/devel_metabolomics
776 | * bioconductor/release_base
777 | * bioconductor/release_core
778 | * bioconductor/release_flow
779 | * bioconductor/release_microarray
780 | * bioconductor/release_proteomics
781 | * bioconductor/release_sequencing
782 | * bioconductor/release_metabolomics
783 |
784 |
785 | ### Reason for deprecation
786 |
787 | The new Bioconductor Docker image `bioconductor/bioconductor_docker`
788 | makes it possible to easily install any package the user chooses since
789 | all the system dependencies are built into this new image. The
790 | previous images did not have all the system dependencies built into
791 | the image. Installation of packages can now be done with,
792 |
793 | BiocManager::install(c("package_name", "package_name"))
794 |
795 | Other reasons for deprecation:
796 |
797 | - the chain of inheritance of Docker images was too complex and hard
798 | to maintain.
799 |
800 | - Hard to extend because there were multiple flavors of images.
801 |
802 | - Naming convention was making things harder to use.
803 |
804 | - Images which were not maintained were not deprecated.
805 |
806 |
807 | ### Reporting Issues
808 |
809 | Please report issues with the new set of images on [GitHub Issues](https://github.com/Bioconductor/bioconductor_docker/issues) or
810 | the [Bioc-devel](mailto:bioc-devel@r-project.org) mailing list.
811 |
812 | These issues can be questions about anything related to this piece of
813 | software, such as usage, extending Docker images, enhancements, and
814 | bug reports.
815 |
816 |
817 | ## Acknowledgements
818 |
819 | Thanks to the [rocker](https://github.com/rocker-org/rocker) project
820 | for providing the R/RStudio Server containers upon which ours are
821 | based.
822 |
--------------------------------------------------------------------------------
/Singularity:
--------------------------------------------------------------------------------
1 | Bootstrap: docker
2 | From: bioconductor/bioconductor_docker:devel
--------------------------------------------------------------------------------
/best_practices.md:
--------------------------------------------------------------------------------
1 | # Best practices and standards for Bioconductor Docker Images
2 |
3 | This document aims to outline some of the policies we have set to
4 | better maintain the Bioconductor Docker images and also community
5 | contributions.
6 |
7 |
8 |
9 |
10 | **Table of Contents**
11 |
12 | - [How to contribute a new Docker image](#how-to-contribute-a-new-docker-image)
13 | - [Best Practices and Standards for a Docker image and Dockerfile](#best-practices-and-standards-for-a-docker-image-and-dockerfile)
14 | - [Deprecation policy](#deprecation-policy)
15 | - [Criteria for image deprecation](#criteria-for-image-deprecation)
16 | - [End of Life process](#end-of-life-process)
17 | - [Reversing End of Life](#reversing-end-of-life)
18 |
19 |
20 | ## How to contribute a new Docker image
21 |
22 | 1. The Docker image should be on a GitHub repository owned by the
23 |    author. The name of the repository should follow the convention
24 |    `bioconductor_docker_<name_of_image>`. All image names should be
25 |    lowercase.
26 |
27 | 1. Once the image is ready to be submitted, submit the repository as
28 |    an [issue](https://github.com/Bioconductor/Contributions/issues/)
29 |    through the contributions page. For more instructions on how to submit,
30 |    see https://github.com/Bioconductor/Contributions.
31 |
32 | 1. The first iteration of the contribution should be developed from
33 |    the "devel" branch (the master branch) of the base image, i.e.,
34 |    `bioconductor/bioconductor_docker:devel`.
35 |
36 | 1. Once accepted, the repository will be cloned under the Bioconductor
37 | organization with full read/write access to the Github
38 | repository (https://github.com/Bioconductor).
39 |
40 | 1. The Docker Hub organization for Bioconductor will be controlled by
41 | the Core team, and builds will be triggered through a push to the
42 | GitHub repository which the maintainer will have access to. A
43 | new tag for a branch will be added by the Core team at the time of
44 | release to enable builds on Docker Hub. We hope to automate some of
45 | this in the future.
46 |
47 | 1. The author should demonstrate, through Docker Hub builds before
48 | acceptance, that the image builds successfully. This guide on
49 | Docker Hub is very helpful,
50 | https://docs.docker.com/docker-hub/builds/link-source/.
51 |
52 | 1. The repository should have an open source license in the
53 | LICENSE.txt file. Core Bioconductor packages are typically licensed
54 | under Artistic-2.0.
55 |
56 | 1. The DESCRIPTION file should contain a field `BiocType: Docker` (see the minimal example after this list).
57 |
58 | 1. At the time of RELEASE, `bioconductor/bioconductor_docker` will
59 | provide a new RELEASE_X_Y branch. The core team will
60 | create a new branch in the Github repository with the tag
61 | `RELEASE_X_Y`. If the contributed image depends on `bioconductor/bioconductor_docker`
62 | the tag will be updated to
63 | `bioconductor/bioconductor_docker:RELEASE_X_Y`.
64 |
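A minimal `DESCRIPTION` sketch for a contributed image repository is shown
below; the package name and field values are placeholders, and the
Contributions repository remains the authoritative reference for what is
required.

```
Package: BioconductorDockerExample
Title: Example community-contributed Bioconductor Docker image
Version: 0.99.0
Description: Extends bioconductor/bioconductor_docker:devel with additional
    tools for an example workflow.
License: Artistic-2.0
BiocType: Docker
```
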
65 | [ Back to top ]
66 |
67 | ## Best Practices and Standards for a Docker image and Dockerfile
68 |
69 | 1. For every repository submitted the Dockerfile is in the top level
70 | directory.
71 |
72 | 1. Docker images should be based on the
73 | `bioconductor/bioconductor_docker:devel` images, so that they can inherit
74 | all the installed packages and customization, but
75 | that is not strictly required.
76 |
77 | 1. Images should have a Dockerfile which is clearly defined and
78 | documented.
79 |
80 | 1. Images should clean up extraneous files created as part of the
81 |    installation process, to optimize the space used by the image
82 |    layers. Avoid reinstalling existing software, as this adds another
83 |    layer without removing the original installation layer (see the
84 |    sketch after this list). Please check https://www.fromlatest.io if
85 |    you have any questions regarding how to optimize the Dockerfile.
86 |
87 | 1. The README.md file will act as the vignette for the images. The
88 |    README.md should clearly explain *what* this Dockerfile
89 |    provides, i.e., additional tools or packages on top of
90 |    bioconductor/bioconductor_docker. Remember there is very little
91 |    value in providing the same image with a couple of extra
92 |    packages on top of it, as users can install these packages
93 |    themselves.
94 |
95 | 1. Add metadata fields identifying the image in the Dockerfile like
96 | below,
97 |
98 | ```
99 | LABEL name="bioconductor/bioconductor_docker_<name_of_image>" \
100 |       version="0.99.0" \
101 |       url="https://github.com/Bioconductor/bioconductor_docker_<name_of_image>" \
102 |       maintainer="myname@email.com" \
103 |       description="Description of my image" \
104 |       license="Artistic-2.0"
105 | ```
106 |
107 | These metadata fields allow the image to be identified by the
108 | user.
109 |
110 | 1. Add build badges in your README.md file for the image. This can be
111 | easily generated by https://shields.io/category/build, like below
112 |
113 | [](https://hub.docker.com/r/bioconductor/bioconductor_docker/builds/)
114 |
115 | 1. The `RELEASE_X_Y` image should be maintained without a build
116 | failure. Similar to packages, changes to the RELEASE_X_Y images should
117 | be for bug fixes only.
118 |
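For example, a single `RUN` instruction that installs a system dependency
and cleans up after itself could look like the sketch below (`samtools` is
only an illustrative package):

```
FROM bioconductor/bioconductor_docker:devel

## Install and clean up in the same layer so the apt caches are not
## baked into the image
RUN apt-get update \
    && apt-get install -y --no-install-recommends samtools \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
```
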
119 | [ Back to top ]
120 |
121 | ## Deprecation policy
122 |
123 | We expect Docker images to follow the same deprecation policy as
124 | [Bioconductor packages](http://bioconductor.org/developers/package-end-of-life/).
125 |
126 | ### Criteria for image deprecation
127 |
128 | 1. `docker build` fails.
129 |
130 |    The image must build successfully at each Bioconductor release. All
131 |    efforts will be made to keep the image in Bioconductor if the
132 |    maintainer is actively attempting to fix it.
133 |
134 | If an image is broken for an extended period of time the
135 | maintainer will be given a final 6 week notice. If the image is
136 | not fixed by the end of the 6 weeks, an 'End-of-Life' process will
137 | be started.
138 |
139 | 1. Inactive maintainer
140 |
141 | The maintainer listed in the Dockerfile under `LABEL
142 | maintainer="maintainer@email.com"` must be responsive to questions
143 | on the support site, image related emails from users, build
144 | failures on Docker Hub and queries from Bioconductor team members. Broken
145 | images should be fixed in a timely manner.
146 |
147 | If you are unable to fix your image or no longer wish to maintain your image,
148 | please contact the [Bioc-devel mailing list][].
149 |
150 | ### End of Life process
151 |
152 | **Step 1**: Deprecation
153 |
154 | The `:devel` image will be marked with a *Deprecated* warning. The README file
155 | of the Github repository will be updated, along with the build badge.
156 |
157 | If at any time prior to the next release, the required criteria are met
158 | (e.g., the image returns to active maintenance, perhaps after
159 | ‘adoption’ by a third party) the warning is removed.
160 |
161 | **Step 2**: Defunct
162 |
163 | Any `:devel` image marked *Deprecated* will, at the start of a release cycle, be marked as *Defunct*.
164 | At the start of the following release cycle, the image will be removed from the Bioconductor organization on Docker Hub.
165 |
166 | For more details take a look at the Package deprecation page http://bioconductor.org/developers/package-end-of-life/.
167 |
168 | ### Reversing End of Life
169 |
170 | A deprecated image can be un-deprecated if it is fixed before the next
171 | Bioconductor release. To have an image un-deprecated, please contact
172 | [Bioc-devel mailing list][].
173 |
174 | [ Back to top ]
175 |
176 | [Bioc-devel mailing list]: https://stat.ethz.ch/mailman/listinfo/bioc-devel
177 |
--------------------------------------------------------------------------------
/bioc_scripts/install_bioc_sysdeps.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | BIOC_VERSION=${1:-"3.21"}
5 |
6 | # This is to avoid the error
7 | # 'debconf: unable to initialize frontend: Dialog'
8 | export DEBIAN_FRONTEND=noninteractive
9 |
10 | ## Update apt-get
11 | apt-get update
12 |
13 | apt-get install -y --no-install-recommends apt-utils
14 |
15 | ## Basic Deps
16 | apt-get install -y --no-install-recommends \
17 | gdb \
18 | libxml2-dev \
19 | python3-pip \
20 | libz-dev \
21 | liblzma-dev \
22 | libbz2-dev \
23 | libpng-dev \
24 | libgit2-dev
25 |
26 | ## sys deps from bioc_full
27 | apt-get install -y --no-install-recommends \
28 | pkg-config \
29 | fortran77-compiler \
30 | byacc \
31 | automake \
32 | curl \
33 | cmake
34 |
35 | ## This section installs libraries
36 | apt-get install -y --no-install-recommends \
37 | libpcre2-dev \
38 | libnetcdf-dev \
39 | libhdf5-serial-dev \
40 | libfftw3-dev \
41 | libopenbabel-dev \
42 | libopenmpi-dev \
43 | libxt-dev \
44 | libudunits2-dev \
45 | libgeos-dev \
46 | libproj-dev \
47 | libcairo2-dev \
48 | libtiff5-dev \
49 | libreadline-dev \
50 | libgsl0-dev \
51 | libgslcblas0 \
52 | libgtk2.0-dev \
53 | libgl1-mesa-dev \
54 | libglu1-mesa-dev \
55 | libgmp3-dev \
56 | libhdf5-dev \
57 | libncurses-dev \
58 | libxpm-dev \
59 | liblapack-dev \
60 | libv8-dev \
61 | libgtkmm-2.4-dev \
62 | libmpfr-dev \
63 | libmodule-build-perl \
64 | libapparmor-dev \
65 | libprotoc-dev \
66 | librdf0-dev \
67 | libmagick++-dev \
68 | libsasl2-dev \
69 | libpoppler-cpp-dev \
70 | libprotobuf-dev \
71 | libpq-dev \
72 | libarchive-dev \
73 | coinor-libcgl-dev \
74 | coinor-libsymphony-dev \
75 | coinor-libsymphony-doc \
76 | libpoppler-glib-dev
77 |
78 | ## software - perl extensions and modules
79 | apt-get install -y --no-install-recommends \
80 | libperl-dev \
81 | libarchive-extract-perl \
82 | libfile-copy-recursive-perl \
83 | libcgi-pm-perl \
84 | libdbi-perl \
85 | libdbd-mysql-perl \
86 | libxml-simple-perl
87 |
88 | ## new libs
89 | apt-get install -y --no-install-recommends \
90 | libglpk-dev \
91 | libeigen3-dev \
92 | liblz4-dev
93 |
94 | ## Databases and other software
95 | apt-get install -y --no-install-recommends \
96 | sqlite3 \
97 | openmpi-bin \
98 | mpi-default-bin \
99 | openmpi-common \
100 | openmpi-doc \
101 | tcl8.6-dev \
102 | tk-dev \
103 | default-jdk \
104 | imagemagick \
105 | tabix \
106 | ggobi \
107 | graphviz \
108 | protobuf-compiler \
109 | jags \
110 | libhiredis-dev
111 |
112 | ## Additional resources
113 | apt-get install -y --no-install-recommends \
114 | xfonts-100dpi \
115 | xfonts-75dpi \
116 | biber \
117 | libsbml5-dev \
118 | libzmq3-dev \
119 | python3-dev \
120 | python3-venv
121 |
122 | ## More additional resources
123 | ## libavfilter-dev -
124 | ## mono-runtime -
125 | ## libfuse-dev -
126 | ## ocl-icd-opencl-dev - - but machine needs a GPU, otherwise it's useless
127 | apt-get -y --no-install-recommends install \
128 | libmariadb-dev-compat \
129 | libjpeg-dev \
130 | libjpeg-turbo8-dev \
131 | libjpeg8-dev \
132 | libavfilter-dev \
133 | libfuse-dev \
134 | mono-runtime \
135 | ocl-icd-opencl-dev
136 |
137 | ## Python installations
138 | # pip3 install scikit-learn pandas pyyaml --break-system-packages || pip3 install scikit-learn pandas pyyaml
139 | # Replacing old pip installation above with apt install for system-wide installations of packages given the
140 | # new restrictions breaking system-wide pip install
141 | apt-get -y --no-install-recommends install \
142 | python3-pandas \
143 | python3-yaml \
144 | python3-sklearn
145 |
146 | ## libgdal is needed for sf
147 | apt-get install -y --no-install-recommends \
148 | libgdal-dev \
149 | default-libmysqlclient-dev \
150 | libmysqlclient-dev
151 |
152 | Rscript -e 'install.packages("BiocManager", repos="https://cran.rstudio.com")'
153 | Rscript -e "BiocManager::install(version='$BIOC_VERSION', update=TRUE, ask=FALSE)"
154 | Rscript -e "BiocManager::install(c('devtools'))"
155 |
156 | ## Install preprocessCore manually to disable threading https://github.com/Bioconductor/bioconductor_docker/issues/22
157 | Rscript -e 'BiocManager::install("preprocessCore", configure.args = c(preprocessCore = "--disable-threading"), update=TRUE, force=TRUE, ask=FALSE, type="source")'
158 |
159 | ## clean up
160 | apt-get clean
161 | apt-get autoremove -y
162 | apt-get autoclean -y
163 | rm -rf /var/lib/apt/lists/*
164 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3.9"
2 | services:
3 | bioc:
4 | container_name: bioc-3.16
5 | image: "bioconductor/bioconductor_docker:devel"
6 | ports:
7 | - "8787:8787"
8 | environment:
9 | - PASSWORD=bioc
10 | volumes:
11 | - ${HOME}/R/bioconductor_docker/3.16:/usr/local/lib/R/host-site-library
12 |
--------------------------------------------------------------------------------
/docs/release_process.md:
--------------------------------------------------------------------------------
1 | # Release process of _bioconductor_docker_ images
2 |
3 | Notes to capture the release process of the
4 | _bioconductor/bioconductor_docker_ image for RELEASE_3_15.
5 |
6 | ## General
7 |
8 | The image is based on `rocker/rstudio:4.1.0`, which is based on
9 | `ubuntu 20.04`.
10 |
11 | The rockerdev project is located here,
12 | https://hub.docker.com/r/rockerdev/rstudio/tags
13 |
14 | Although the latest Dockerfiles are available, the images under
15 | rocker's dockerhub repo are missing. To look at the Dockerfile(s),
16 | follow the link
17 | https://github.com/rocker-org/rocker-versioned2/tree/master/dockerfiles
18 |
19 | ## Steps to update *devel*
20 |
21 | 1. Before making any changes to the `master` branch, create a
22 | RELEASE_3_15 branch with
23 |
24 | git branch RELEASE_3_15
25 |
26 | 1. *Update version number* of `BIOCONDUCTOR_DOCKER_VERSION` to the latest
27 |    `X.Y.Z`, where `X.Y` represents the Bioconductor version of devel.
28 |
29 | - For Bioconductor 3.16, the `BIOCONDUCTOR_DOCKER_VERSION` will
30 | be `3.16.0`.
31 |
32 | 1. Change the `install.R` file to reflect the latest version of
33 | Bioconductor in `BiocManager::install(version='devel')`.
34 |
35 | 1. Try to rebuild the image with,
36 |
37 |         docker build -t bioconductor/bioconductor_docker:devel .
38 |
39 | There were a few issues with the system libraries,
40 |
41 | - The following libraries do not INSTALL in the main `apt-get
42 | install` block, but install in the subsequent block. This might
43 | be because of missing dependencies. But this is something I have
44 | to look further into.
45 | - libmariadb-dev-compat
46 | - libjpeg62-dev
47 |
48 | 1. Make sure the `docker-compose.yml` file has the correct values as well in both places.
49 |
50 | 1. **Validity check**: The final step is to install all the packages
51 |    and to triage which packages DO NOT install on the new devel
52 |    image.
53 |
54 | ## Steps to update *RELEASE_3_15*
55 |
56 | 1. Checkout the RELEASE_3_15 branch,
57 |
58 | `git checkout RELEASE_3_15`
59 |
60 | 1. The `BIOCONDUCTOR_DOCKER_VERSION` number of the branch just gets
61 |    incremented in the **Z** by 1, since it is the same Bioc version as
62 |    the previous devel (for example, `3.15.0` would become `3.15.1`).
63 |
64 | 1. Make sure the correct `rocker/rstudio:<version>` tag is being used. If
65 |    there are doubts about this, check the
66 | http://bioconductor.org/checkResults/devel/bioc-LATEST/ (devel) and
67 | http://bioconductor.org/checkResults/release/bioc-LATEST/ (release)
68 | versions of R on the build machines. They should match.
69 |
70 | 1. Try to rebuild the image with
71 |
72 | docker build -t bioconductor/bioconductor_docker:RELEASE_3_15 .
73 |
74 | There were a few issues with the system libraries, (same as the
75 | above with devel
76 |
77 | - The following libraries do not INSTALL in the main `apt-get
78 | install` block, but install in the subsequent block. This might
79 | be because of missing dependencies. But this is something I have
80 | to look further into.
81 | - libmariadb-dev-compat
82 | - libjpeg62-dev
83 |
84 | 1. There are no changes in the `install.R` file, except to install
85 |    BiocManager 3.13.
86 |
87 | 1. Remove the following lines in the Dockerfile, i.e., no devel build
88 |    variables in the RELEASE branch:
89 |
90 | # DEVEL: Add sys env variables to DEVEL image
91 | RUN curl -O http://bioconductor.org/checkResults/devel/bioc-LATEST/Renviron.bioc \
92 | && cat Renviron.bioc | grep -o '^[^#]*' | sed 's/export //g' >>/etc/environment \
93 | && cat Renviron.bioc >> /usr/local/lib/R/etc/Renviron.site \
94 | && rm -rf Renviron.bioc
95 |
96 | 1. **Validity check**: Install packages on the new image and triage
97 |    which packages DO NOT install. Set `options(Ncpus=)` for faster installation.
99 |
100 | ## Create Singularity images as the final step
101 |
102 | 1. In the newly created RELEASE_X_Y branch, rename the file
103 | `Singularity` to `Singularity.RELEASE_X_Y`.
104 |
105 | 1. Inside the file, change the line `From:
106 |    bioconductor/bioconductor_docker:devel` to `From:
107 |    bioconductor/bioconductor_docker:RELEASE_X_Y`. Below is an example for
108 |    RELEASE_3_11.
109 |
110 | Bootstrap: docker
111 | From: bioconductor/bioconductor_docker:RELEASE_3_11
112 |
113 | ## Code to test installation of all packages
114 |
115 | The following code should be run to install all packages (running on a
116 | machine with 16 cores)
117 |
118 | ```
119 | options(Ncpus=14)
120 |
121 | installed <- rownames(installed.packages())
122 | biocsoft <- available.packages(repos = BiocManager::repositories()[["BioCsoft"]])
123 |
124 | ## Packages which failed to install on docker image
125 | to_install <- rownames(biocsoft)[!rownames(biocsoft) %in% installed]
126 |
127 | BiocManager::install(to_install)
128 | ```
129 |
130 | # Addendum - Github Actions
131 |
132 | 1. Github actions should be removed from the newly created
133 |    `RELEASE_X_Y` branch. Dockerhub does the builds for the RELEASE_X_Y
134 |    branches as they are "stable". A push with an updated
135 |    `BIOCONDUCTOR_DOCKER_VERSION` number will trigger a build of the
136 |    RELEASE_X_Y branch.
137 |
138 | 2. Under the current structure, the way we build the
139 |    `bioconductor/bioconductor_docker:devel` image is via Github
140 |    actions. The action pulls the `rocker/r-ver:<version>` image and the
141 |    `rocker/rstudio:<version>` image, builds both of those images on an instance,
142 |    then builds the `bioconductor/bioconductor_docker:devel` image on
143 |    top of the latest version of those built images.
144 |
145 | - The `devel` image is updated weekly at 3pm on Friday.
146 |
147 | - To replace with the `rockerdev` image stack, we need to be able
148 | to get the github repos and Dockerfiles of the latest
149 |      `rockerdev/r-ver` and `rockerdev/rstudio` images built on
150 | Ubuntu 18.04.
151 |
152 | 3. **TODO**: This github action needs to be edited to reflect the
153 | latest changes through rocker.
154 |
155 | 4. **Validity check**: To check the validity of the weekly build, it'll be
156 |    useful to temporarily set the CRON job on the scheduled github
157 |    action to run every 3 hours for debugging, and to change it back to
158 |    the weekly cycle once it is done.
159 |
160 | ## Failing packages
161 |
162 |
--------------------------------------------------------------------------------
/extensions/anvil/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG BASE_IMAGE=us.gcr.io/broad-dsp-gcr-public/terra-jupyter-bioconductor
2 | ARG TAG=2.2.7
3 |
4 | FROM ${BASE_IMAGE}:${TAG}
5 |
6 | USER root
7 |
8 | ENV R_VERSION="4.5.0"
9 | ENV R_HOME="/usr/local/lib/R"
10 | ENV TZ="Etc/UTC"
11 | ENV CRAN="https://p3m.dev/cran/__linux__/jammy/latest"
12 | ENV LANG=en_US.UTF-8
13 | ENV PATH=/usr/local/bin:$PATH
14 | ENV S6_VERSION="v2.1.0.2"
15 | ENV RSTUDIO_VERSION="2025.05.0+496"
16 | ENV DEFAULT_USER="rstudio"
17 |
18 | ENV RSTUDIO_PORT="8001"
19 | ENV RSTUDIO_HOME="/etc/rstudio"
20 | ENV RSTUDIO_USERSETTING="/home/rstudio/.config/rstudio/rstudio-prefs.json"
21 |
22 | COPY rserver.conf $RSTUDIO_HOME/
23 | COPY set_up_package_dir.sh ${RSTUDIO_HOME}/scripts/
24 | COPY rstudio-prefs.json $RSTUDIO_USERSETTING
25 |
26 | RUN apt-get update && apt-get install -y --no-install-recommends gnupg lsb-release \
27 | && mkdir -p /rocker_scripts $R_HOME/etc \
28 | && echo "deb http://packages.cloud.google.com/apt cloud-sdk main" > /etc/apt/sources.list.d/google-cloud-sdk.list \
29 | && curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \
30 | && apt-get update \
31 | && apt-get install -yq --no-install-recommends google-cloud-sdk \
32 | && wget -q https://github.com/rocker-org/rocker-versioned2/archive/e3a7d1f.tar.gz -O /tmp/rocker-scripts.tar.gz \
33 | && tar -xzf /tmp/rocker-scripts.tar.gz -C /tmp \
34 | && cp -r /tmp/rocker-versioned2-*/scripts/* /rocker_scripts/ \
35 | && chmod +x /rocker_scripts/*.sh \
36 | && rm -rf /tmp/rocker-scripts.tar.gz /tmp/rocker-versioned2-* \
37 | && if grep -q '1000' /etc/passwd; then usermod -l $DEFAULT_USER $(id -un 1000); fi \
38 | && /rocker_scripts/setup_R.sh \
39 | && /rocker_scripts/install_rstudio.sh \
40 | && /rocker_scripts/install_pandoc.sh \
41 | && /rocker_scripts/install_quarto.sh \
42 | && ln -s $(grep "^${DEFAULT_USER}:" /etc/passwd | cut -d: -f6) /home/${DEFAULT_USER} \
43 | && pip3 -V \
44 | && usermod -g users rstudio \
45 | && pip3 install --no-cache-dir -U crcmod terra-notebook-utils torch \
46 | && curl -L https://github.com/Bioconductor/bioconductor_docker/raw/refs/heads/devel/bioc_scripts/install_bioc_sysdeps.sh | sh \
47 | && R -e "BiocManager::install(c('AnVIL', 'AnVILBase', 'AnVILGCP', 'DataBiosphere/Ronaldo', 'shiny', 'bigrquery', 'googleCloudStorageR', 'tidyverse', 'Seurat', 'markdown', 'SingleCellExperiment', 'GenomicFeatures', 'GenomicAlignments', 'ShortRead', 'DESeq2', 'AnnotationHub', 'ExperimentHub', 'ensembldb', 'scRNAseq', 'scran', 'Rtsne'))" \
48 | && find ${RSTUDIO_HOME}/scripts -name '*.sh' -type f | xargs chmod +x \
49 | && echo "PIP_USER=true" >> /usr/local/lib/R/etc/Renviron.site \
50 | && apt-get clean && rm -rf /var/lib/apt/lists/* \
51 | && usermod -o -u 1000 rstudio \
52 | && find / \( -path /proc -o -path /sys -o -path /dev \) -prune -o -user 1001 -exec chown -h 1000 {} \;
53 |
54 | EXPOSE $RSTUDIO_PORT
55 |
56 | ENTRYPOINT ["/bin/sh"]
57 | CMD ["/init"]
58 |
--------------------------------------------------------------------------------
/extensions/anvil/bioc-extension.yaml:
--------------------------------------------------------------------------------
1 | container:
2 | outname: anvil-rstudio
3 | base:
4 | image: us.gcr.io/broad-dsp-gcr-public/terra-jupyter-bioconductor
5 | tag:
6 | - "2.2.7"
--------------------------------------------------------------------------------
/extensions/anvil/rserver.conf:
--------------------------------------------------------------------------------
1 | # Server Configuration File
2 | # See https://support.rstudio.com/hc/en-us/articles/200552316-Configuring-the-Server
3 |
4 | rsession-which-r=/usr/local/bin/R
5 | auth-none=1
6 | www-port=8001
7 | www-address=0.0.0.0
--------------------------------------------------------------------------------
/extensions/anvil/rstudio-prefs.json:
--------------------------------------------------------------------------------
1 | {
2 | "save_workspace": "always",
3 | "always_save_history": true,
4 | "reuse_sessions_for_project_links": true,
5 | "posix_terminal_shell": "bash"
6 | }
--------------------------------------------------------------------------------
/extensions/anvil/set_up_package_dir.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | # This script needs to be separated from the Dockerfile because we mount persistent disks in the
5 | # /home/rstudio directory and when we mount the PD, everything in that directory gets wiped.
6 | # Therefore, we will run this script from Leonardo after the PD is mounted
7 |
8 | RSTUDIO_USER_HOME=/home/rstudio
9 |
10 | # Because Rscript is run as root here, '/root' ends up prefixed to the R_LIBS_USER path.
11 | # This chunk of code checks whether the path contains that /root prefix and strips it
12 | # (replaces it with an empty string).
13 | R_PATH=`Rscript -e "cat(Sys.getenv('R_LIBS_USER'))"`
14 | CHECK_ROOT='/root'
15 | if [[ "$R_PATH" == *"$CHECK_ROOT"* ]]; then
16 | R_PATH="${R_PATH/\/root\//}"
17 | fi
18 |
19 | BIOCONDUCTOR_VERSION=`printenv BIOCONDUCTOR_DOCKER_VERSION | sed 's/\(^[0-9].[0-9][0-9]\).*/\1/g'`
20 | R_PACKAGE_DIR=${RSTUDIO_USER_HOME}/${R_PATH}-${BIOCONDUCTOR_VERSION}
21 |
22 | # The sudo command creates the directory as the rstudio user; it assumes this script runs as root.
23 | sudo -E -u rstudio mkdir -p ${R_PACKAGE_DIR}
24 |
25 | echo R_LIBS=${R_PACKAGE_DIR} >> /usr/local/lib/R/etc/Renviron.site
26 |
--------------------------------------------------------------------------------
/extensions/galaxy/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG BASE_IMAGE=bioconductor/bioconductor_docker
2 | ARG TAG=RELEASE_3_21
3 |
4 | FROM ${BASE_IMAGE}:${TAG}
5 | RUN apt update -y && apt install -y net-tools python3-bioblend nginx && \
6 | Rscript -e 'BiocManager::install(c("Biobase", "BiocGenerics", "Biostrings", "cummeRbund", "DESeq2", "dplyr", "edgeR", "GenomeInfoDb", "ggplot2", "ggvis", "Gviz", "hexylena/rGalaxyConnector", "IRanges", "limma", "RCurl", "reshape2", "Rgraphviz", "RODBC", "Rsamtools", "S4Vectors", "shiny", "SummarizedExperiment", "tidyr", "XML"))' && \
7 | pip3 install galaxy-ie-helpers --break-system-packages && \
8 | chown -R rstudio:rstudio /usr/local/lib/R/library && \
9 | chown -R rstudio:rstudio /usr/local/lib/R/doc && \
10 | echo "alias ll='ls -l'" >> ~/.bashrc && \
11 | echo "rstudio ALL=(ALL) NOPASSWD: /usr/bin/s6-svscanctl" >> /etc/sudoers && \
12 | apt-get autoremove -y && apt-get clean && \
13 | rm -rf /var/lib/apt/lists/* /tmp/*
14 |
15 | RUN mkdir -p /etc/services.d/nginx
16 | COPY service-nginx-start /etc/services.d/nginx/run
17 | COPY proxy.conf /etc/nginx/sites-enabled/default
18 | COPY shutdown.R /home/rstudio/shutdown.R
19 |
20 | ENV PIP_USER=0
21 |
22 | EXPOSE 8780
23 |
--------------------------------------------------------------------------------
/extensions/galaxy/bioc-extension.yaml:
--------------------------------------------------------------------------------
1 | container:
2 | outname: galaxy-rstudio
3 | base:
4 | image: ghcr.io/bioconductor/bioconductor
5 | tag:
6 | - "RELEASE_3_21"
7 | - "devel"
8 |
--------------------------------------------------------------------------------
/extensions/galaxy/proxy.conf:
--------------------------------------------------------------------------------
1 | map $http_upgrade $connection_upgrade {
2 | default upgrade;
3 | '' close;
4 | }
5 |
6 | server {
7 | listen 8780;
8 |
9 | location / {
10 | proxy_pass http://localhost:8787;
11 | proxy_redirect http://localhost:8787/ $scheme://$http_host/;
12 | proxy_http_version 1.1;
13 |
14 |     proxy_set_header Connection $connection_upgrade;
15 | proxy_set_header Upgrade $http_upgrade;
16 | proxy_set_header X-Forwarded-Protocol $scheme;
17 | proxy_set_header X-Scheme $scheme;
18 | proxy_set_header X-Real-IP $remote_addr;
19 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/extensions/galaxy/service-nginx-start:
--------------------------------------------------------------------------------
1 | #!/usr/bin/with-contenv bash
2 | env > /asdf
3 |
4 | # Pass some system environment variables to RStudio environment
5 | echo "Sys.setenv(DEBUG=\"$DEBUG\")
6 | Sys.setenv(GALAXY_WEB_PORT=\"$GALAXY_WEB_PORT\")
7 | Sys.setenv(CORS_ORIGIN=\"$CORS_ORIGIN\")
8 | Sys.setenv(DOCKER_PORT=\"$DOCKER_PORT\")
9 | Sys.setenv(API_KEY=\"$API_KEY\")
10 | Sys.setenv(HISTORY_ID=\"$HISTORY_ID\")
11 | Sys.setenv(REMOTE_HOST=\"$REMOTE_HOST\")
12 | Sys.setenv(GALAXY_URL=\"$GALAXY_URL\")
13 | " >> /usr/local/lib/R/etc/Rprofile.site
14 |
15 | # Run nginx in foreground mode.
16 | nginx -g 'daemon off;'
17 |
--------------------------------------------------------------------------------
/extensions/galaxy/shutdown.R:
--------------------------------------------------------------------------------
1 | # Shutdown script for the Galaxy interactive tool (IT).
2 | # To shut down the IT, run this script (or the command below).
3 | system("sudo s6-svscanctl -t /var/run/s6/services/", wait = FALSE)
4 |
--------------------------------------------------------------------------------