├── .github └── workflows │ ├── publish_image.yml │ ├── update-base-image.yml │ └── update-cookiecutter.yml ├── LICENSE ├── README.md ├── base-images.json ├── docker ├── Dockerfile ├── _configure-docker-group.sh ├── _dev-init.sh ├── pre-commit-hook.sh └── vendored │ ├── LICENSE │ ├── README.md │ └── features │ ├── docker-in-docker │ └── install.sh │ └── src │ └── common-utils │ ├── main-patched.sh │ ├── main.sh │ └── no-lsb-release.patch ├── readme-images ├── devcontainer-extension-notifications.png ├── loaded-devcontainer.png └── reopen-in-devcontainer.png └── scripts ├── update_base_image.py └── update_base_image_reqs.txt /.github/workflows/publish_image.yml: -------------------------------------------------------------------------------- 1 | name: Build and publish Docker image 2 | 3 | on: 4 | # push: 5 | # branches: 6 | # - main 7 | workflow_dispatch: 8 | 9 | env: 10 | REGISTRY: ghcr.io 11 | IMAGE_NAME: ${{ github.repository }} # user/reponame 12 | 13 | 14 | jobs: 15 | publish: 16 | runs-on: ubuntu-latest 17 | 18 | strategy: 19 | matrix: 20 | base-image-name: 21 | - jammy 22 | - jammy-cuda 23 | - focal-cuda 24 | # Set permissions for GitHub token 25 | # 26 | permissions: 27 | contents: read 28 | packages: write 29 | 30 | steps: 31 | - name: Checkout source 32 | uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # v3.0.2 33 | 34 | - name: Set up QEMU 35 | uses: docker/setup-qemu-action@8b122486cedac8393e77aa9734c3528886e4a1a8 # v2.0.0 36 | 37 | - name: Setup Docker buildx 38 | uses: docker/setup-buildx-action@dc7b9719a96d48369863986a06765841d7ea23f6 # v2.0.0 39 | 40 | - name: Extract base image tag from base-images.json 41 | id: extract_base_image 42 | run: | 43 | BASE_IMAGE=$(jq -r ".\"${{ matrix.base-image-name }}\"" docker/base-images.json) 44 | echo "BASE_IMAGE=$BASE_IMAGE" >> $GITHUB_ENV 45 | 46 | - name: Print the base image 47 | run: | 48 | echo "Using base image: ${{ env.BASE_IMAGE }}" 49 | 50 | - name: Extract tag prefix 51 | id: extract_tag 52 | # Extract everything between `:` and `@` in the `base-image` 53 | run: | 54 | TAG_PREFIX=$(echo "${{ env.BASE_IMAGE }}" | sed -E 's/[^:]+:([^@]+)@.*/\1/') 55 | echo "TAG_PREFIX=$TAG_PREFIX" >> $GITHUB_ENV 56 | 57 | - name: Prepare metadata 58 | id: meta 59 | uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a # v4.0.1 60 | with: 61 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 62 | tags: | 63 | type=raw,value=${{ env.TAG_PREFIX }} 64 | type=raw,value=latest 65 | 66 | - name: Log into registry ${{ env.REGISTRY }} 67 | uses: docker/login-action@49ed152c8eca782a232dede0303416e8f356c37b # v2.0.0 68 | with: 69 | registry: ${{ env.REGISTRY }} 70 | username: ${{ github.actor }} 71 | password: ${{ secrets.GITHUB_TOKEN }} 72 | 73 | - name: Build and push Docker image 74 | id: docker_build 75 | uses: docker/build-push-action@e551b19e49efd4e98792db7592c17c09b89db8d8 # v3.0.0 76 | with: 77 | context: ./docker 78 | platforms: linux/amd64,linux/arm64 79 | push: true 80 | tags: ${{ steps.meta.outputs.tags }} 81 | labels: ${{ steps.meta.outputs.labels }} 82 | build-args: BASE_IMAGE=${{ env.BASE_IMAGE }} 83 | cache-from: type=registry,ref=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest 84 | cache-to: type=inline 85 | -------------------------------------------------------------------------------- /.github/workflows/update-base-image.yml: -------------------------------------------------------------------------------- 1 | name: Autoupdate micromamba-docker base image 2 | 3 | on: 4 | workflow_dispatch: 5 | # Run 
every day at 12:18 UTC (https://crontab.guru/#18_12_*_*_*)
 6 |   schedule:
 7 |     - cron: '18 12 * * *'
 8 | 
 9 | jobs:
10 |   generate_pr_if_new_micromamba_docker:
11 |     runs-on: ubuntu-22.04
12 |     steps:
13 |       - name: Checkout source
14 |         uses: actions/checkout@5c3ccc22eb2c950a0fa5bc7c47190d8e3f7e681a
15 |       - name: Set up Python
16 |         uses: actions/setup-python@47c4a7af1d72897a511c975c95a5335bb6329dec
17 |         with:
18 |           python-version: "3.11"
19 |       - name: Update Dockerfile base image to latest version
20 |         id: update_dockerfile
21 |         run: |
22 |           python -m pip install --quiet --disable-pip-version-check -r scripts/update_base_image_reqs.txt
23 |           python scripts/update_base_image.py
24 |       - name: Create PR
25 |         uses: peter-evans/create-pull-request@v6.0.5
26 |         with:
27 |           branch: "update-base-image"
28 |           commit-message: "Update micromamba base image to ${{ env.NEW_DOCKER_TAG }}"
29 |           title: "Update micromamba base image to ${{ env.NEW_DOCKER_TAG }}"
30 |           body: "Generated by [update-base-image.yml](https://github.com/mamba-org/micromamba-devcontainer/actions/runs/${{ github.run_id }}/workflow)"
31 |           delete-branch: true
32 |           reviewers: maresb
--------------------------------------------------------------------------------
/.github/workflows/update-cookiecutter.yml:
--------------------------------------------------------------------------------
 1 | name: Update the cookiecutter template
 2 | 
 3 | on:
 4 |   workflow_dispatch:
 5 |   workflow_run:
 6 |     workflows:
 7 |       - Build and publish Docker image
 8 |     types:
 9 |       - completed
10 | 
11 | jobs:
12 |   updaterepos:
13 |     if: (github.event_name == 'workflow_dispatch') || (github.event.workflow_run.conclusion == 'success')
14 |     runs-on: ubuntu-22.04
15 |     steps:
16 |       - uses: actions/github-script@v6
17 |         with:
18 |           github-token: ${{ secrets.RUN_ACTIONS_MARESB_PA_TOKEN }}
19 |           script: |
20 |             await github.rest.actions.createWorkflowDispatch({
21 |               owner: 'maresb',
22 |               repo: 'cookiecutter-micromamba-devcontainer',
23 |               workflow_id: 'update-base-image.yml',
24 |               ref: 'main'
25 |             })
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
 1 | MIT License
 2 | 
 3 | Copyright (c) 2022 Ben Mares
 4 | 
 5 | Permission is hereby granted, free of charge, to any person obtaining a copy
 6 | of this software and associated documentation files (the "Software"), to deal
 7 | in the Software without restriction, including without limitation the rights
 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
  1 | # micromamba-devcontainer
  2 | 
  3 | A general-purpose Micromamba-enabled VS Code development container image.
  4 | 
  5 | Save the time and effort of configuring development tools for each project × collaborator × device.
  6 | 
  7 | Featuring:
  8 | 
  9 | * Reproducible and isolated development environments "as code". 📄
 10 | * Runs [remotely](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-ssh) (e.g. from the cloud) just as well as locally. ☁️
 11 | * Several common tools (including Docker) are pre-installed with convenient defaults to minimize necessary configuration. 🐳
 12 | 
 13 | ## Alternative
 14 | 
 15 | This project is meant to provide a comprehensive Micromamba base image for development.
 16 | 
 17 | If instead you'd prefer a minimalistic approach for adding Micromamba to an existing dev container, see the [Micromamba dev container feature](https://github.com/mamba-org/devcontainer-features/tree/main/src/micromamba).
 18 | 
 19 | ## Links
 20 | 
 21 | * [This micromamba-devcontainer on GitHub](https://github.com/mamba-org/micromamba-devcontainer)
 22 | * [Docker images on GitHub Container Registry](https://github.com/mamba-org/micromamba-devcontainer/pkgs/container/micromamba-devcontainer)
 23 | * [Example development container](https://github.com/maresb/micromamba-devcontainer-example) for a Python project
 24 | * [Micromamba Docker container](https://github.com/mamba-org/micromamba-docker) (parent image)
 25 | * VS Code [Developing inside a Container](https://code.visualstudio.com/docs/remote/containers) documentation
 26 | * VS Code [devcontainer.json reference](https://code.visualstudio.com/docs/remote/devcontainerjson-reference) documentation
 27 | 
 28 | ## Getting started / Usage
 29 | 
 30 | ### Quickstart
 31 | 
 32 | Refer to this [example](https://github.com/maresb/micromamba-devcontainer-example) and the corresponding [cookiecutter template](https://gitlab.com/bmares/cookiecutter-micromamba-devcontainer) for how this image can be used.
 33 | 
 34 | In short, if the [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) extension is installed in VS Code, you can clone the example repository and open it with VS Code.
 35 | 
 36 | You will then be prompted to reopen it in a container:
 37 | 
 38 | ![reopen the folder in a development container](readme-images/reopen-in-devcontainer.png)
 39 | 
 40 | Upon reopening, VS Code will build the development Dockerfile if necessary, launch a container, install the VS Code backend inside, and configure everything according to the specification defined in `.devcontainer/devcontainer.json`.
 41 | 
 42 | The project folder will then open, mounted inside the container, and all installed extensions (e.g. Docker, Git History, and Jupyter) will be available:
 43 | 
 44 | ![loaded devcontainer](readme-images/loaded-devcontainer.png)
 45 | 
 46 | ### Details
 47 | 
 48 | The Dockerfile hosted in this repository is intended to be used as [the base image for the `dev.Dockerfile`](https://github.com/maresb/micromamba-devcontainer-example/blob/main/.devcontainer/dev.Dockerfile#L1) which is specified in [`devcontainer.json`](https://github.com/maresb/micromamba-devcontainer-example/blob/main/.devcontainer/devcontainer.json#L15).
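As a rough sketch of that pattern (the `environment.yml` filename is an assumption, and the `latest` tag is used only for brevity — in practice you would pin a specific tag or digest from the registry listed above), such a `dev.Dockerfile` might look like:

```dockerfile
# Hypothetical example: build the project's dev environment on top of this image.
FROM ghcr.io/mamba-org/micromamba-devcontainer:latest

# Install the project's dependencies into the base environment,
# assuming an environment.yml at the repository root.
COPY --chown=$MAMBA_USER:$MAMBA_USER environment.yml /tmp/environment.yml
RUN micromamba install --yes --name base --file /tmp/environment.yml \
    && micromamba clean --all --yes
```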
 49 | 
 50 | To adapt this to your own project, your `dev.Dockerfile` should install development dependencies and initialize your project for development (for example by running `pip install --editable .` for a Python project).
 51 | 
 52 | ## Introduction
 53 | 
 54 | This repository hosts the base image for a VS Code development container which comes with the following features to ease development:
 55 | 
 56 | * Micromamba via [`mambaorg/micromamba`](https://github.com/mamba-org/micromamba-docker).
 57 | * Ubuntu 22.04 (jammy).
 58 | * Sudo rights for the default user.
 59 | * Docker, Docker Compose and BuildKit installed for [use from within the container](#running-docker-from-docker).
 60 | * Basic command-line utilities from Ubuntu (e.g. `nano`, `git`, `less`, `man`, `htop`, `zip`, `jq`, `ping`).
 61 | * Configuration of Git, with some [helpful defaults](#git-pre-configured-for-ease-of-use).
 62 | * Can be used [remotely over SSH](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-ssh).
 63 | 
 64 | ## Configuration
 65 | 
 66 | ### Running "Docker from Docker"
 67 | 
 68 | If you want Docker to be accessible from within the development container, simply bind-mount `/var/run/docker.sock`.
 69 | 
 70 | In this case, any other containers started from within the development container will run *alongside* the development container.
 71 | 
 72 | It is recommended to install [Microsoft's VS Code Docker extension](https://marketplace.visualstudio.com/items?itemName=ms-azuretools.vscode-docker) within the development container.
 73 | 
 74 | ### Git pre-configured for ease-of-use
 75 | 
 76 | Note that VS Code will automatically try to copy the user's Git configuration to the development container. This includes `user.name` and `user.email`, as well as GPG keys. To configure authenticated access to Git repositories via the SSH protocol, see [SSH agent](#ssh-agent).
 77 | 
 78 | The following have been set as system defaults within the container (`git config --system`):
 79 | 
 80 | * The default editor is `nano`.
 81 | * Pulling is configured with [fast-forward only](https://blog.sffc.xyz/post/185195398930), to avoid unexpected merge-conflict situations.
 82 | * The blame-ignore file is automatically configured to `.git-blame-ignore-revs` when it exists.
 83 | * The default branch name is `main`.
 84 | 
 85 | In addition,
 86 | 
 87 | * Git is ready for use with [pre-commit](https://pre-commit.com): newly created or cloned repositories will use pre-commit when both pre-commit is installed and a `.pre-commit-config.yaml` exists.
 88 | * Git LFS comes pre-installed.
 89 | 
 90 | ### SSH agent
 91 | 
 92 | VS Code can automatically forward your local SSH keys (e.g. for use with Git) to the development container (even when that development container is remote). Detailed instructions are [here](https://code.visualstudio.com/docs/remote/troubleshooting#_setting-up-the-ssh-agent).
 93 | 
 94 | The main steps are:
 95 | 
 96 | 1. Make sure the SSH agent is running locally by opening a local terminal and listing your keys with `ssh-add -l`. (In case the agent is not running, follow the instructions in the above link.)
 97 | 2. In case no keys are listed (`"The agent has no identities"`), add them by running `ssh-add`. (To instead add an individual key, run `ssh-add <path-to-key>`.) Run `ssh-add -l` again to verify that the key was added.
 98 | 3. Check if your keys are being forwarded to the container by opening an integrated terminal in the development container (`Ctrl`+`Shift`+``` ` ```) and running `ssh-add -l`. The results should agree with the local terminal.
 99 | 
100 | ## Troubleshooting
101 | 
102 | ### Known issues
103 | 
104 | #### Extensions don't initialize at first
105 | 
106 | When extensions are installed for the first time after the container is started or rebuilt, there may be a series of warning notifications due to the extensions not initializing properly. They usually go away after reloading the window, either by clicking on one of the buttons, or by running the command `Ctrl+Shift+P` → `Developer: Reload Window`.
107 | 
108 | ![extension warnings](readme-images/devcontainer-extension-notifications.png)
109 | 
110 | #### Permission denied when multiple users run development containers on the same computer
111 | 
112 | This is a [known issue with VS Code](https://github.com/microsoft/vscode-remote-release/issues/2347). The precise error is:
113 | 
114 | ```text
115 | EACCES: permission denied, mkdir '/tmp/vsch
116 | ```
117 | 
118 | One solution is to log into the host machine and run
119 | 
120 | ```bash
121 | sudo chmod -R a+rwX /tmp/vsch
122 | ```
123 | 
124 | (This could reduce security and is recommended only when all the users are trusted.)
125 | 
126 | #### Permission denied while trying to connect to the Docker daemon socket
127 | 
128 | Check that your user within the devcontainer is a member of the `docker` group:
129 | 
130 | ```bash
131 | (base) mambauser@devcontainer:/workspaces/micromamba-devcontainer-example$ groups
132 | mambauser sudo docker
133 | ```
134 | 
135 | It seems to happen occasionally that the user does not get added as a member of the Docker group, and I don't understand why. (Perhaps a race condition?) Try quitting all VS Code windows and rebuilding the container. Please create an issue if you have any insight.
136 | 
137 | ### Unknown issues
138 | 
139 | If you encounter some other problem, please file [an issue](https://github.com/mamba-org/micromamba-devcontainer/issues) with this GitHub repository.
140 | 
--------------------------------------------------------------------------------
/base-images.json:
--------------------------------------------------------------------------------
 1 | {
 2 |   "jammy": "mambaorg/micromamba:git-a241157-jammy@sha256:375f53418dab857c40c7a4cf569ea11ea416a23e27fd214f2c6bd2d2072999ca",
 3 |   "jammy-cuda": "mambaorg/micromamba:git-a241157-jammy-cuda-11.8.0@sha256:9dca3ad41afe82bdc4cfefa4ec68935bc3841a2d83807e8da8a1ddc4ed0c25da",
 4 |   "focal-cuda": "mambaorg/micromamba:git-13eebff-focal-cuda-11.2.2@sha256:024aff5da4b8d53df824fabb073249e3daf2018d4bc3cde87f0d16bfe4d217ea"
 5 | }
 6 | 
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
 1 | # To set up our environment, we start from Micromamba's base image. The latest tags
 2 | # can be found here: <https://hub.docker.com/r/mambaorg/micromamba/tags>.
 3 | # For reproducibility, we should pin to a particular Git tag (not a micromamba version).
 4 | 
 5 | # For more info about micromamba, see:
 6 | # <https://github.com/mamba-org/micromamba-docker>.
 7 | 
 8 | ARG BASE_IMAGE=mambaorg/micromamba:git-a241157-jammy@sha256:375f53418dab857c40c7a4cf569ea11ea416a23e27fd214f2c6bd2d2072999ca
 9 | 
10 | # The folder to use as a workspace. The project should be mounted here.
11 | ARG DEV_WORK_DIR=/workspaces
12 | 
13 | FROM ${BASE_IMAGE}
14 | 
15 | # Grab gosu for switching users.
16 | COPY --from=tianon/gosu /usr/local/bin/gosu /usr/local/bin/gosu 17 | 18 | USER root 19 | 20 | ENV USERNAME=mambauser 21 | COPY vendored/features/src/common-utils/main-patched.sh /tmp/ 22 | RUN bash /tmp/main-patched.sh true mambauser && rm /tmp/main-patched.sh 23 | 24 | # Install some useful OS packages 25 | RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y --no-install-recommends --reinstall \ 26 | # more helpful utils like sponge 27 | moreutils \ 28 | # 29 | # tab autocompletion for bash 30 | bash-completion \ 31 | # 32 | # monitor output of repeated command 33 | watch \ 34 | # 35 | # version control 36 | patch \ 37 | # 38 | # Git Large File Storage 39 | git-lfs \ 40 | # 41 | # determines file types 42 | file \ 43 | # 44 | # compression 45 | p7zip-full \ 46 | # 47 | # ping and ip utilities 48 | iputils-ping \ 49 | # 50 | # nslookup and dig (for looking up hostnames) 51 | dnsutils \ 52 | # 53 | # socket cat for bidirectional byte streams 54 | socat \ 55 | # 56 | # TCP terminal 57 | telnet \ 58 | # 59 | # Automatically resolve .rej files from failed patches 60 | wiggle \ 61 | && rm -rf /var/lib/apt/lists/* 62 | 63 | COPY vendored/features/docker-in-docker/install.sh /tmp/install.sh 64 | RUN DOCKERDASHCOMPOSEVERSION=v2 bash /tmp/install.sh && rm /tmp/install.sh 65 | 66 | # Make sure everyone can access the working directory. 67 | ARG DEV_WORK_DIR 68 | RUN : \ 69 | && mkdir --parents --mode=777 "${DEV_WORK_DIR}" \ 70 | && chown "$MAMBA_USER:$MAMBA_USER" "${DEV_WORK_DIR}" 71 | 72 | # Set the working directory. 73 | ENV DEV_WORK_DIR="${DEV_WORK_DIR}" 74 | WORKDIR "${DEV_WORK_DIR}" 75 | 76 | # Sane defaults for Git 77 | RUN : \ 78 | # Switch default editor from vim to nano 79 | && git config --system core.editor nano \ 80 | # Prevent unintentional merges 81 | # 82 | && git config --system pull.ff only \ 83 | # Use default branch name "main" instead of "master" 84 | && git config --system init.defaultBranch main \ 85 | # Initialize Git LFS 86 | && git lfs install --system --skip-repo \ 87 | ; 88 | # Install Git pre-commit hook 89 | COPY pre-commit-hook.sh /usr/share/git-core/templates/hooks/pre-commit 90 | # Override any existing templateDir defined in ~/.gitconfig 91 | # 92 | ENV GIT_TEMPLATE_DIR=/usr/share/git-core/templates 93 | 94 | USER $MAMBA_USER 95 | 96 | # Create pre-commit cache directory 97 | RUN mkdir -p /home/$MAMBA_USER/.cache/pre-commit \ 98 | && chown -R $MAMBA_USER:$MAMBA_USER \ 99 | /home/$MAMBA_USER/.cache/pre-commit \ 100 | # Additionally, make sure these directories are writable by everyone 101 | && chmod a+rwx \ 102 | /home/$MAMBA_USER/.cache \ 103 | /home/$MAMBA_USER/.cache/pre-commit \ 104 | ; 105 | 106 | # Set CMD script to run on container startup. 107 | COPY _dev-init.sh /usr/local/bin/_dev-init.sh 108 | COPY _configure-docker-group.sh /usr/local/bin/_configure-docker-group.sh 109 | CMD [ \ 110 | "bash", \ 111 | "-c", \ 112 | "_dev-init.sh; echo 'Sleeping forever.'; while sleep 1000; do :; done" \ 113 | ] 114 | -------------------------------------------------------------------------------- /docker/_configure-docker-group.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Configuring Docker group..." 4 | 5 | # Configure Docker permissions. 6 | if [[ -S /var/run/docker.sock ]] ; then 7 | # Get the GID of the "docker" group. 8 | docker_gid=`stat --format=%g /var/run/docker.sock` 9 | if [ -z "$docker_gid" ] ; then 10 | echo "No mounted Docker socket found." 
11 | else 12 | if getent group "${docker_gid}" ; then 13 | # The group for the Docker socket's gid already exists. 14 | echo "Adding user to '$(getent group "${docker_gid}" | cut -d: -f1)' group for docker access." 15 | sudo usermod -aG "${docker_gid}" "$(id -u -n)" 16 | else 17 | # The group for the Docker socket's gid doesn't exist. 18 | if getent group docker ; then 19 | # The "docker" group exists, but doesn't match the gid of the Docker socket. 20 | docker_group_name="docker-conflicting-groupname" 21 | else 22 | docker_group_name="docker" 23 | fi 24 | echo "Setting the GID of the '${docker_group_name}' group to ${docker_gid}." 25 | sudo groupadd --force --gid "${docker_gid}" "${docker_group_name}" 26 | sudo usermod -aG "${docker_group_name}" "$(id -u -n)" 27 | fi 28 | fi 29 | fi 30 | -------------------------------------------------------------------------------- /docker/_dev-init.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Don't use strict mode so that we run through to the end. (commented out) 4 | # set -euo pipefail 5 | 6 | set -x 7 | 8 | source _configure-docker-group.sh 9 | 10 | # Fix ownership of cache directories 11 | sudo chown "$(id -u):$(id -g)" \ 12 | ~/.cache \ 13 | ~/.cache/pre-commit \ 14 | ; 15 | 16 | # Set default blame ignore filename. 17 | # This should only be done when it exists, due to 18 | if [ -f .git-blame-ignore-revs ]; then 19 | git config --system blame.ignoreRevsFile .git-blame-ignore-revs 20 | fi 21 | 22 | # Make sure pre-commit is installed if .pre-commit-config exists 23 | # (This is to take care of repositories which have already been cloned. 24 | # Repositories cloned from within this devcontainer will acquire the 25 | # pre-commit hook from /usr/share/git-core/templates/hooks/pre-commit.) 26 | if [ -f .pre-commit-config.yaml ]; then 27 | if command -v pre-commit > /dev/null; then 28 | pre-commit install 29 | else 30 | echo '`pre-commit` is missing. Please install it in this dev container.' 1>&2 31 | fi 32 | fi 33 | -------------------------------------------------------------------------------- /docker/pre-commit-hook.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Run pre-commit if available, raise error if it's missing and necessary. Original: 4 | # 5 | 6 | ARGS=(hook-impl --config=.pre-commit-config.yaml --hook-type=pre-commit --skip-on-missing-config) 7 | 8 | HERE="$(cd "$(dirname "$0")" && pwd)" 9 | ARGS+=(--hook-dir "$HERE" -- "$@") 10 | 11 | if command -v pre-commit > /dev/null; then 12 | exec pre-commit "${ARGS[@]}" 13 | else 14 | # We are in the repository root. 15 | if [[ -f .pre-commit-config.yaml ]]; then 16 | echo '`pre-commit` is missing. Please install it in this dev container.' 1>&2 17 | exit 1 18 | fi 19 | # neither `pre-commit` nor `.pre-commit-config.yaml` found, so exit silently. 20 | fi 21 | -------------------------------------------------------------------------------- /docker/vendored/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. All rights reserved. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /docker/vendored/README.md: -------------------------------------------------------------------------------- 1 | # Dev containers vendored components 2 | 3 | [License](LICENSE) 4 | 5 | * [features/src/common-utils/main.sh](features/src/common-utils/main.sh) ([source](https://github.com/devcontainers/features/blob/3ea4d6bbd7864bcf7b5a91fdeeb66e4f5a6f46c0/src/common-utils/main.sh)) 6 | * [features/docker-in-docker/install.sh](features/docker-in-docker/install.sh) ([source](https://github.com/devcontainers/features/blob/a4b31f3/src/docker-in-docker/install.sh)) 7 | -------------------------------------------------------------------------------- /docker/vendored/features/docker-in-docker/install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | #------------------------------------------------------------------------------------------------------------- 3 | # Copyright (c) Microsoft Corporation. All rights reserved. 4 | # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. 5 | #------------------------------------------------------------------------------------------------------------- 6 | # 7 | # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md 8 | # Maintainer: The Dev Container spec maintainers 9 | 10 | 11 | DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version 12 | USE_MOBY="${MOBY:-"true"}" 13 | DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v1"}" # v1 or v2 or none 14 | AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}" 15 | DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL}" 16 | USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}" 17 | INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}" 18 | MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc" 19 | DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="buster bullseye bionic focal jammy" 20 | DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="buster bullseye bionic focal hirsute impish jammy" 21 | 22 | # Default: Exit on any failure. 23 | set -e 24 | 25 | # Clean up 26 | rm -rf /var/lib/apt/lists/* 27 | 28 | # Setup STDERR. 29 | err() { 30 | echo "(!) 
$*" >&2 31 | } 32 | 33 | if [ "$(id -u)" -ne 0 ]; then 34 | err 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' 35 | exit 1 36 | fi 37 | 38 | ################### 39 | # Helper Functions 40 | # See: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/shared/utils.sh 41 | ################### 42 | 43 | # Determine the appropriate non-root user 44 | if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then 45 | USERNAME="" 46 | POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)") 47 | for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do 48 | if id -u ${CURRENT_USER} > /dev/null 2>&1; then 49 | USERNAME=${CURRENT_USER} 50 | break 51 | fi 52 | done 53 | if [ "${USERNAME}" = "" ]; then 54 | USERNAME=root 55 | fi 56 | elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then 57 | USERNAME=root 58 | fi 59 | 60 | # Get central common setting 61 | get_common_setting() { 62 | if [ "${common_settings_file_loaded}" != "true" ]; then 63 | curl -sfL "https://aka.ms/vscode-dev-containers/script-library/settings.env" 2>/dev/null -o /tmp/vsdc-settings.env || echo "Could not download settings file. Skipping." 64 | common_settings_file_loaded=true 65 | fi 66 | if [ -f "/tmp/vsdc-settings.env" ]; then 67 | local multi_line="" 68 | if [ "$2" = "true" ]; then multi_line="-z"; fi 69 | local result="$(grep ${multi_line} -oP "$1=\"?\K[^\"]+" /tmp/vsdc-settings.env | tr -d '\0')" 70 | if [ ! -z "${result}" ]; then declare -g $1="${result}"; fi 71 | fi 72 | echo "$1=${!1}" 73 | } 74 | 75 | apt_get_update() 76 | { 77 | if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then 78 | echo "Running apt-get update..." 79 | apt-get update -y 80 | fi 81 | } 82 | 83 | # Checks if packages are installed and installs them if not 84 | check_packages() { 85 | if ! dpkg -s "$@" > /dev/null 2>&1; then 86 | apt_get_update 87 | apt-get -y install --no-install-recommends "$@" 88 | fi 89 | } 90 | 91 | # Figure out correct version of a three part version number is not passed 92 | find_version_from_git_tags() { 93 | local variable_name=$1 94 | local requested_version=${!variable_name} 95 | if [ "${requested_version}" = "none" ]; then return; fi 96 | local repository=$2 97 | local prefix=${3:-"tags/v"} 98 | local separator=${4:-"."} 99 | local last_part_optional=${5:-"false"} 100 | if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then 101 | local escaped_separator=${separator//./\\.} 102 | local last_part 103 | if [ "${last_part_optional}" = "true" ]; then 104 | last_part="(${escaped_separator}[0-9]+)?" 105 | else 106 | last_part="${escaped_separator}[0-9]+" 107 | fi 108 | local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$" 109 | local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)" 110 | if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then 111 | declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)" 112 | else 113 | set +e 114 | declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")" 115 | set -e 116 | fi 117 | fi 118 | if [ -z "${!variable_name}" ] || ! 
echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then 119 | err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2 120 | exit 1 121 | fi 122 | echo "${variable_name}=${!variable_name}" 123 | } 124 | 125 | ########################################### 126 | # Start docker-in-docker installation 127 | ########################################### 128 | 129 | # Ensure apt is in non-interactive to avoid prompts 130 | export DEBIAN_FRONTEND=noninteractive 131 | 132 | 133 | # Source /etc/os-release to get OS info 134 | . /etc/os-release 135 | # Fetch host/container arch. 136 | architecture="$(dpkg --print-architecture)" 137 | 138 | # Check if distro is supported 139 | if [ "${USE_MOBY}" = "true" ]; then 140 | # 'get_common_setting' allows attribute to be updated remotely 141 | get_common_setting DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES 142 | if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then 143 | err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution" 144 | err "Support distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" 145 | exit 1 146 | fi 147 | echo "Distro codename '${VERSION_CODENAME}' matched filter '${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}'" 148 | else 149 | get_common_setting DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES 150 | if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then 151 | err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution" 152 | err "Support distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" 153 | exit 1 154 | fi 155 | echo "Distro codename '${VERSION_CODENAME}' matched filter '${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}'" 156 | fi 157 | 158 | # Install dependencies 159 | check_packages apt-transport-https curl ca-certificates pigz iptables gnupg2 dirmngr wget 160 | if ! 
type git > /dev/null 2>&1; then 161 | check_packages git 162 | fi 163 | 164 | # Swap to legacy iptables for compatibility 165 | if type iptables-legacy > /dev/null 2>&1; then 166 | update-alternatives --set iptables /usr/sbin/iptables-legacy 167 | update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy 168 | fi 169 | 170 | 171 | 172 | # Set up the necessary apt repos (either Microsoft's or Docker's) 173 | if [ "${USE_MOBY}" = "true" ]; then 174 | 175 | # Name of open source engine/cli 176 | engine_package_name="moby-engine" 177 | cli_package_name="moby-cli" 178 | 179 | # Import key safely and import Microsoft apt repo 180 | get_common_setting MICROSOFT_GPG_KEYS_URI 181 | curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg 182 | echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list 183 | else 184 | # Name of licensed engine/cli 185 | engine_package_name="docker-ce" 186 | cli_package_name="docker-ce-cli" 187 | 188 | # Import key safely and import Docker apt repo 189 | curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg 190 | echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} ${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list 191 | fi 192 | 193 | # Refresh apt lists 194 | apt-get update 195 | 196 | # Soft version matching 197 | if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then 198 | # Empty, meaning grab whatever "latest" is in apt repo 199 | engine_version_suffix="" 200 | cli_version_suffix="" 201 | else 202 | # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...) 203 | docker_version_dot_escaped="${DOCKER_VERSION//./\\.}" 204 | docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}" 205 | # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/ 206 | docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)" 207 | set +e # Don't exit if finding version fails - will handle gracefully 208 | cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")" 209 | engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")" 210 | set -e 211 | if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then 212 | err "No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:" 213 | apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+' 214 | exit 1 215 | fi 216 | echo "engine_version_suffix ${engine_version_suffix}" 217 | echo "cli_version_suffix ${cli_version_suffix}" 218 | fi 219 | 220 | # Install Docker / Moby CLI if not already installed 221 | if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then 222 | echo "Docker / Moby CLI and Engine already installed." 
223 | else 224 | if [ "${USE_MOBY}" = "true" ]; then 225 | # Install engine 226 | set +e # Handle error gracefully 227 | apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx moby-engine${engine_version_suffix} 228 | if [ $? -ne 0 ]; then 229 | err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-20.04')." 230 | exit 1 231 | fi 232 | set -e 233 | 234 | # Install compose 235 | apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping." 236 | else 237 | apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix} 238 | # Install compose 239 | apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping." 240 | fi 241 | fi 242 | 243 | echo "Finished installing docker / moby!" 244 | 245 | # If 'docker-compose' command is to be included 246 | if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then 247 | # Install Docker Compose if not already installed and is on a supported architecture 248 | if type docker-compose > /dev/null 2>&1; then 249 | echo "Docker Compose v1 already installed." 250 | else 251 | target_compose_arch="${architecture}" 252 | if [ "${target_compose_arch}" = "amd64" ]; then 253 | target_compose_arch="x86_64" 254 | fi 255 | if [ "${target_compose_arch}" != "x86_64" ]; then 256 | # Use pip to get a version that runs on this architecture 257 | check_packages python3-minimal python3-pip libffi-dev python3-venv 258 | export PIPX_HOME=/usr/local/pipx 259 | mkdir -p ${PIPX_HOME} 260 | export PIPX_BIN_DIR=/usr/local/bin 261 | export PYTHONUSERBASE=/tmp/pip-tmp 262 | export PIP_CACHE_DIR=/tmp/pip-tmp/cache 263 | pipx_bin=pipx 264 | if ! type pipx > /dev/null 2>&1; then 265 | pip3 install --disable-pip-version-check --no-cache-dir --user pipx 266 | pipx_bin=/tmp/pip-tmp/bin/pipx 267 | fi 268 | 269 | set +e 270 | ${pipx_bin} install --pip-args '--no-cache-dir --force-reinstall' docker-compose 271 | exit_code=$? 272 | set -e 273 | 274 | if [ ${exit_code} -ne 0 ]; then 275 | # Temporary: https://github.com/devcontainers/features/issues/616 276 | # See https://github.com/yaml/pyyaml/issues/601 277 | echo "(*) Failed to install docker-compose via pipx. Trying via pip3..." 278 | 279 | export PYTHONUSERBASE=/usr/local 280 | pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation 281 | fi 282 | 283 | rm -rf /tmp/pip-tmp 284 | else 285 | compose_v1_version="1" 286 | find_version_from_git_tags compose_v1_version "https://github.com/docker/compose" "tags/" 287 | echo "(*) Installing docker-compose ${compose_v1_version}..." 288 | curl -fsSL "https://github.com/docker/compose/releases/download/${compose_v1_version}/docker-compose-Linux-x86_64" -o /usr/local/bin/docker-compose 289 | chmod +x /usr/local/bin/docker-compose 290 | fi 291 | fi 292 | 293 | # Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation 294 | current_v1_compose_path="$(which docker-compose)" 295 | target_v1_compose_path="$(dirname "${current_v1_compose_path}")/docker-compose-v1" 296 | if ! 
type compose-switch > /dev/null 2>&1; then 297 | echo "(*) Installing compose-switch..." 298 | compose_switch_version="latest" 299 | find_version_from_git_tags compose_switch_version "https://github.com/docker/compose-switch" 300 | curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${architecture}" -o /usr/local/bin/compose-switch 301 | chmod +x /usr/local/bin/compose-switch 302 | # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11 303 | 304 | # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2) 305 | mv "${current_v1_compose_path}" "${target_v1_compose_path}" 306 | update-alternatives --install /usr/local/bin/docker-compose docker-compose /usr/local/bin/compose-switch 99 307 | update-alternatives --install /usr/local/bin/docker-compose docker-compose "${target_v1_compose_path}" 1 308 | fi 309 | if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then 310 | update-alternatives --set docker-compose "${target_v1_compose_path}" 311 | else 312 | update-alternatives --set docker-compose /usr/local/bin/compose-switch 313 | fi 314 | fi 315 | 316 | # If init file already exists, exit 317 | if [ -f "/usr/local/share/docker-init.sh" ]; then 318 | echo "/usr/local/share/docker-init.sh already exists, so exiting." 319 | # Clean up 320 | rm -rf /var/lib/apt/lists/* 321 | exit 0 322 | fi 323 | echo "docker-init doesn't exist, adding..." 324 | 325 | if ! cat /etc/group | grep -e "^docker:" > /dev/null 2>&1; then 326 | groupadd -r docker 327 | fi 328 | 329 | usermod -aG docker ${USERNAME} 330 | 331 | if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then 332 | buildx_version="latest" 333 | find_version_from_git_tags buildx_version "https://github.com/docker/buildx" "refs/tags/v" 334 | 335 | echo "(*) Installing buildx ${buildx_version}..." 336 | buildx_file_name="buildx-v${buildx_version}.linux-${architecture}" 337 | cd /tmp && wget "https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}" 338 | 339 | mkdir -p ${_REMOTE_USER_HOME}/.docker/cli-plugins 340 | mv ${buildx_file_name} ${_REMOTE_USER_HOME}/.docker/cli-plugins/docker-buildx 341 | chmod +x ${_REMOTE_USER_HOME}/.docker/cli-plugins/docker-buildx 342 | 343 | chown -R "${USERNAME}:docker" "${_REMOTE_USER_HOME}/.docker" 344 | chmod -R g+r+w "${_REMOTE_USER_HOME}/.docker" 345 | find "${_REMOTE_USER_HOME}/.docker" -type d -print0 | xargs -n 1 -0 chmod g+s 346 | fi 347 | 348 | tee /usr/local/share/docker-init.sh > /dev/null \ 349 | << EOF 350 | #!/bin/sh 351 | #------------------------------------------------------------------------------------------------------------- 352 | # Copyright (c) Microsoft Corporation. All rights reserved. 353 | # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. 
354 | #------------------------------------------------------------------------------------------------------------- 355 | 356 | set -e 357 | 358 | AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} 359 | DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} 360 | EOF 361 | 362 | tee -a /usr/local/share/docker-init.sh > /dev/null \ 363 | << 'EOF' 364 | dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} $(cat << 'INNEREOF' 365 | # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly 366 | # ie: docker kill 367 | find /run /var/run -iname 'docker*.pid' -delete || : 368 | find /run /var/run -iname 'container*.pid' -delete || : 369 | 370 | ## Dind wrapper script from docker team, adapted to a function 371 | # Maintained: https://github.com/moby/moby/blob/master/hack/dind 372 | 373 | export container=docker 374 | 375 | if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then 376 | mount -t securityfs none /sys/kernel/security || { 377 | echo >&2 'Could not mount /sys/kernel/security.' 378 | echo >&2 'AppArmor detection and --privileged mode might break.' 379 | } 380 | fi 381 | 382 | # Mount /tmp (conditionally) 383 | if ! mountpoint -q /tmp; then 384 | mount -t tmpfs none /tmp 385 | fi 386 | 387 | # cgroup v2: enable nesting 388 | if [ -f /sys/fs/cgroup/cgroup.controllers ]; then 389 | # move the processes from the root group to the /init group, 390 | # otherwise writing subtree_control fails with EBUSY. 391 | # An error during moving non-existent process (i.e., "cat") is ignored. 392 | mkdir -p /sys/fs/cgroup/init 393 | xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || : 394 | # enable controllers 395 | sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \ 396 | > /sys/fs/cgroup/cgroup.subtree_control 397 | fi 398 | ## Dind wrapper over. 399 | 400 | # Handle DNS 401 | set +e 402 | cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' 403 | if [ $? -eq 0 ] && [ "${AZURE_DNS_AUTO_DETECTION}" = "true" ] 404 | then 405 | echo "Setting dockerd Azure DNS." 406 | CUSTOMDNS="--dns 168.63.129.16" 407 | else 408 | echo "Not setting dockerd DNS manually." 409 | CUSTOMDNS="" 410 | fi 411 | 412 | set -e 413 | 414 | if [ -z "$DOCKER_DEFAULT_ADDRESS_POOL" ] 415 | then 416 | DEFAULT_ADDRESS_POOL="" 417 | else 418 | DEFAULT_ADDRESS_POOL="--default-address-pool $DOCKER_DEFAULT_ADDRESS_POOL" 419 | fi 420 | 421 | # Start docker/moby engine 422 | ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL > /tmp/dockerd.log 2>&1 ) & 423 | INNEREOF 424 | )" 425 | 426 | # Start using sudo if not invoked as root 427 | if [ "$(id -u)" -ne 0 ]; then 428 | sudo /bin/sh -c "${dockerd_start}" 429 | else 430 | eval "${dockerd_start}" 431 | fi 432 | 433 | # Execute whatever commands were passed in (if any). This allows us 434 | # to set this script to ENTRYPOINT while still executing the default CMD. 435 | exec "$@" 436 | EOF 437 | 438 | chmod +x /usr/local/share/docker-init.sh 439 | chown ${USERNAME}:root /usr/local/share/docker-init.sh 440 | 441 | # Clean up 442 | rm -rf /var/lib/apt/lists/* 443 | 444 | echo 'docker-in-docker-debian script has completed!' 
445 | -------------------------------------------------------------------------------- /docker/vendored/features/src/common-utils/main-patched.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #------------------------------------------------------------------------------------------------------------------------- 3 | # Copyright (c) Microsoft Corporation. All rights reserved. 4 | # Licensed under the MIT License. See https://github.com/devcontainers/features/blob/main/LICENSE for license information. 5 | #------------------------------------------------------------------------------------------------------------------------- 6 | # 7 | # Docs: https://github.com/devcontainers/features/tree/main/src/common-utils 8 | # Maintainer: The Dev Container spec maintainers 9 | 10 | set -e 11 | 12 | INSTALL_ZSH="${INSTALLZSH:-"true"}" 13 | CONFIGURE_ZSH_AS_DEFAULT_SHELL="${CONFIGUREZSHASDEFAULTSHELL:-"false"}" 14 | INSTALL_OH_MY_ZSH="${INSTALLOHMYZSH:-"true"}" 15 | INSTALL_OH_MY_ZSH_CONFIG="${INSTALLOHMYZSHCONFIG:-"true"}" 16 | UPGRADE_PACKAGES="${UPGRADEPACKAGES:-"true"}" 17 | USERNAME="${USERNAME:-"automatic"}" 18 | USER_UID="${USERUID:-"automatic"}" 19 | USER_GID="${USERGID:-"automatic"}" 20 | ADD_NON_FREE_PACKAGES="${NONFREEPACKAGES:-"false"}" 21 | 22 | MARKER_FILE="/usr/local/etc/vscode-dev-containers/common" 23 | 24 | FEATURE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 25 | 26 | # Debian / Ubuntu packages 27 | install_debian_packages() { 28 | # Ensure apt is in non-interactive to avoid prompts 29 | export DEBIAN_FRONTEND=noninteractive 30 | 31 | local package_list="" 32 | if [ "${PACKAGES_ALREADY_INSTALLED}" != "true" ]; then 33 | package_list="${package_list} \ 34 | apt-utils \ 35 | openssh-client \ 36 | gnupg2 \ 37 | dirmngr \ 38 | iproute2 \ 39 | procps \ 40 | lsof \ 41 | htop \ 42 | net-tools \ 43 | psmisc \ 44 | curl \ 45 | tree \ 46 | wget \ 47 | rsync \ 48 | ca-certificates \ 49 | unzip \ 50 | bzip2 \ 51 | zip \ 52 | nano \ 53 | vim-tiny \ 54 | less \ 55 | jq \ 56 | apt-transport-https \ 57 | dialog \ 58 | libc6 \ 59 | libgcc1 \ 60 | libkrb5-3 \ 61 | libgssapi-krb5-2 \ 62 | libicu[0-9][0-9] \ 63 | liblttng-ust[0-9] \ 64 | libstdc++6 \ 65 | zlib1g \ 66 | locales \ 67 | sudo \ 68 | ncdu \ 69 | man-db \ 70 | strace \ 71 | manpages \ 72 | manpages-dev \ 73 | init-system-helpers" 74 | 75 | # Include libssl1.1 if available 76 | if [[ ! -z $(apt-cache --names-only search ^libssl1.1$) ]]; then 77 | package_list="${package_list} libssl1.1" 78 | fi 79 | 80 | # Include libssl3 if available 81 | if [[ ! -z $(apt-cache --names-only search ^libssl3$) ]]; then 82 | package_list="${package_list} libssl3" 83 | fi 84 | 85 | # Include appropriate version of libssl1.0.x if available 86 | local libssl_package=$(dpkg-query -f '${db:Status-Abbrev}\t${binary:Package}\n' -W 'libssl1\.0\.?' 2>&1 || echo '') 87 | if [ "$(echo "$libssl_package" | grep -o 'libssl1\.0\.[0-9]:' | uniq | sort | wc -l)" -eq 0 ]; then 88 | if [[ ! -z $(apt-cache --names-only search ^libssl1.0.2$) ]]; then 89 | # Debian 9 90 | package_list="${package_list} libssl1.0.2" 91 | elif [[ ! -z $(apt-cache --names-only search ^libssl1.0.0$) ]]; then 92 | # Ubuntu 18.04 93 | package_list="${package_list} libssl1.0.0" 94 | fi 95 | fi 96 | 97 | # Include git if not already installed (may be more recent than distro version) 98 | if ! 
type git > /dev/null 2>&1; then 99 | package_list="${package_list} git" 100 | fi 101 | fi 102 | 103 | # Needed for adding manpages-posix and manpages-posix-dev which are non-free packages in Debian 104 | if [ "${ADD_NON_FREE_PACKAGES}" = "true" ]; then 105 | # Bring in variables from /etc/os-release like VERSION_CODENAME 106 | sed -i -E "s/deb http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME} main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME} main contrib non-free/" /etc/apt/sources.list 107 | sed -i -E "s/deb-src http:\/\/(deb|httredir)\.debian\.org\/debian ${VERSION_CODENAME} main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME} main contrib non-free/" /etc/apt/sources.list 108 | sed -i -E "s/deb http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME}-updates main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME}-updates main contrib non-free/" /etc/apt/sources.list 109 | sed -i -E "s/deb-src http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME}-updates main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME}-updates main contrib non-free/" /etc/apt/sources.list 110 | sed -i "s/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main contrib non-free/" /etc/apt/sources.list 111 | sed -i "s/deb-src http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main contrib non-free/" /etc/apt/sources.list 112 | sed -i "s/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main contrib non-free/" /etc/apt/sources.list 113 | sed -i "s/deb-src http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main contrib non-free/" /etc/apt/sources.list 114 | # Handle bullseye location for security https://www.debian.org/releases/bullseye/amd64/release-notes/ch-information.en.html 115 | sed -i "s/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main contrib non-free/" /etc/apt/sources.list 116 | sed -i "s/deb-src http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main contrib non-free/" /etc/apt/sources.list 117 | echo "Running apt-get update..." 118 | package_list="${package_list} manpages-posix manpages-posix-dev" 119 | fi 120 | 121 | # Install the list of packages 122 | echo "Packages to verify are installed: ${package_list}" 123 | rm -rf /var/lib/apt/lists/* 124 | apt-get update -y 125 | apt-get -y install --no-install-recommends ${package_list} 2> >( grep -v 'debconf: delaying package configuration, since apt-utils is not installed' >&2 ) 126 | 127 | # Install zsh (and recommended packages) if needed 128 | if [ "${INSTALL_ZSH}" = "true" ] && ! 
type zsh > /dev/null 2>&1; then 129 | apt-get install -y zsh 130 | fi 131 | 132 | # Get to latest versions of all packages 133 | if [ "${UPGRADE_PACKAGES}" = "true" ]; then 134 | apt-get -y upgrade --no-install-recommends 135 | apt-get autoremove -y 136 | fi 137 | 138 | # Ensure at least the en_US.UTF-8 UTF-8 locale is available = common need for both applications and things like the agnoster ZSH theme. 139 | if [ "${LOCALE_ALREADY_SET}" != "true" ] && ! grep -o -E '^\s*en_US.UTF-8\s+UTF-8' /etc/locale.gen > /dev/null; then 140 | echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen 141 | locale-gen 142 | LOCALE_ALREADY_SET="true" 143 | fi 144 | 145 | PACKAGES_ALREADY_INSTALLED="true" 146 | 147 | # Clean up 148 | apt-get -y clean 149 | rm -rf /var/lib/apt/lists/* 150 | } 151 | 152 | # RedHat / RockyLinux / CentOS / Fedora packages 153 | install_redhat_packages() { 154 | local package_list="" 155 | local remove_epel="false" 156 | local install_cmd=dnf 157 | if ! type dnf > /dev/null 2>&1; then 158 | install_cmd=yum 159 | fi 160 | 161 | if [ "${PACKAGES_ALREADY_INSTALLED}" != "true" ]; then 162 | package_list="${package_list} \ 163 | gawk \ 164 | openssh-clients \ 165 | gnupg2 \ 166 | iproute \ 167 | procps \ 168 | lsof \ 169 | net-tools \ 170 | psmisc \ 171 | wget \ 172 | ca-certificates \ 173 | rsync \ 174 | unzip \ 175 | zip \ 176 | nano \ 177 | vim-minimal \ 178 | less \ 179 | jq \ 180 | openssl-libs \ 181 | krb5-libs \ 182 | libicu \ 183 | zlib \ 184 | sudo \ 185 | sed \ 186 | grep \ 187 | which \ 188 | man-db \ 189 | strace" 190 | 191 | # rockylinux:9 installs 'curl-minimal' which clashes with 'curl' 192 | # Install 'curl' for every OS except this rockylinux:9 193 | if [[ "${ID}" = "rocky" ]] && [[ "${VERSION}" != *"9."* ]]; then 194 | package_list="${package_list} curl" 195 | fi 196 | 197 | # Install OpenSSL 1.0 compat if needed 198 | if ${install_cmd} -q list compat-openssl10 >/dev/null 2>&1; then 199 | package_list="${package_list} compat-openssl10" 200 | fi 201 | 202 | # Install lsb_release if available 203 | if ${install_cmd} -q list redhat-lsb-core >/dev/null 2>&1; then 204 | package_list="${package_list} redhat-lsb-core" 205 | fi 206 | 207 | # Install git if not already installed (may be more recent than distro version) 208 | if ! type git > /dev/null 2>&1; then 209 | package_list="${package_list} git" 210 | fi 211 | 212 | # Install EPEL repository if needed (required to install 'jq' for CentOS) 213 | if ! ${install_cmd} -q list jq >/dev/null 2>&1; then 214 | ${install_cmd} -y install epel-release 215 | remove_epel="true" 216 | fi 217 | fi 218 | 219 | # Install zsh if needed 220 | if [ "${INSTALL_ZSH}" = "true" ] && ! 
type zsh > /dev/null 2>&1; then 221 | package_list="${package_list} zsh" 222 | fi 223 | 224 | if [ -n "${package_list}" ]; then 225 | ${install_cmd} -y install ${package_list} 226 | fi 227 | 228 | # Get to latest versions of all packages 229 | if [ "${UPGRADE_PACKAGES}" = "true" ]; then 230 | ${install_cmd} upgrade -y 231 | fi 232 | 233 | if [[ "${remove_epel}" = "true" ]]; then 234 | ${install_cmd} -y remove epel-release 235 | fi 236 | 237 | PACKAGES_ALREADY_INSTALLED="true" 238 | } 239 | 240 | # Alpine Linux packages 241 | install_alpine_packages() { 242 | apk update 243 | 244 | if [ "${PACKAGES_ALREADY_INSTALLED}" != "true" ]; then 245 | apk add --no-cache \ 246 | openssh-client \ 247 | gnupg \ 248 | procps \ 249 | lsof \ 250 | htop \ 251 | net-tools \ 252 | psmisc \ 253 | curl \ 254 | wget \ 255 | rsync \ 256 | ca-certificates \ 257 | unzip \ 258 | zip \ 259 | nano \ 260 | vim \ 261 | less \ 262 | jq \ 263 | libgcc \ 264 | libstdc++ \ 265 | krb5-libs \ 266 | libintl \ 267 | libssl1.1 \ 268 | lttng-ust \ 269 | tzdata \ 270 | userspace-rcu \ 271 | zlib \ 272 | sudo \ 273 | coreutils \ 274 | sed \ 275 | grep \ 276 | which \ 277 | ncdu \ 278 | shadow \ 279 | strace 280 | 281 | # Install man pages - package name varies between 3.12 and earlier versions 282 | if apk info man > /dev/null 2>&1; then 283 | apk add --no-cache man man-pages 284 | else 285 | apk add --no-cache mandoc man-pages 286 | fi 287 | 288 | # Install git if not already installed (may be more recent than distro version) 289 | if ! type git > /dev/null 2>&1; then 290 | apk add --no-cache git 291 | fi 292 | fi 293 | 294 | # Install zsh if needed 295 | if [ "${INSTALL_ZSH}" = "true" ] && ! type zsh > /dev/null 2>&1; then 296 | apk add --no-cache zsh 297 | fi 298 | 299 | PACKAGES_ALREADY_INSTALLED="true" 300 | } 301 | 302 | # ****************** 303 | # ** Main section ** 304 | # ****************** 305 | 306 | if [ "$(id -u)" -ne 0 ]; then 307 | echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' 308 | exit 1 309 | fi 310 | 311 | # Load markers to see which steps have already run 312 | if [ -f "${MARKER_FILE}" ]; then 313 | echo "Marker file found:" 314 | cat "${MARKER_FILE}" 315 | source "${MARKER_FILE}" 316 | fi 317 | 318 | # Ensure that login shells get the correct path if the user updated the PATH using ENV. 319 | rm -f /etc/profile.d/00-restore-env.sh 320 | echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh 321 | chmod +x /etc/profile.d/00-restore-env.sh 322 | 323 | # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME 324 | . /etc/os-release 325 | # Get an adjusted ID independent of distro variants 326 | if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then 327 | ADJUSTED_ID="debian" 328 | elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then 329 | ADJUSTED_ID="rhel" 330 | elif [ "${ID}" = "alpine" ]; then 331 | ADJUSTED_ID="alpine" 332 | else 333 | echo "Linux distro ${ID} not supported." 
334 | exit 1 335 | fi 336 | 337 | # Install packages for appropriate OS 338 | case "${ADJUSTED_ID}" in 339 | "debian") 340 | install_debian_packages 341 | ;; 342 | "rhel") 343 | install_redhat_packages 344 | ;; 345 | "alpine") 346 | install_alpine_packages 347 | ;; 348 | esac 349 | 350 | # If in automatic mode, determine if a user already exists, if not use vscode 351 | if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then 352 | if [ "${_REMOTE_USER}" != "root" ]; then 353 | USERNAME="${_REMOTE_USER}" 354 | else 355 | USERNAME="" 356 | POSSIBLE_USERS=("devcontainer" "vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)") 357 | for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do 358 | if id -u ${CURRENT_USER} > /dev/null 2>&1; then 359 | USERNAME=${CURRENT_USER} 360 | break 361 | fi 362 | done 363 | if [ "${USERNAME}" = "" ]; then 364 | USERNAME=vscode 365 | fi 366 | fi 367 | elif [ "${USERNAME}" = "none" ]; then 368 | USERNAME=root 369 | USER_UID=0 370 | USER_GID=0 371 | fi 372 | # Create or update a non-root user to match UID/GID. 373 | group_name="${USERNAME}" 374 | if id -u ${USERNAME} > /dev/null 2>&1; then 375 | # User exists, update if needed 376 | if [ "${USER_GID}" != "automatic" ] && [ "$USER_GID" != "$(id -g $USERNAME)" ]; then 377 | group_name="$(id -gn $USERNAME)" 378 | groupmod --gid $USER_GID ${group_name} 379 | usermod --gid $USER_GID $USERNAME 380 | fi 381 | if [ "${USER_UID}" != "automatic" ] && [ "$USER_UID" != "$(id -u $USERNAME)" ]; then 382 | usermod --uid $USER_UID $USERNAME 383 | fi 384 | else 385 | # Create user 386 | if [ "${USER_GID}" = "automatic" ]; then 387 | groupadd $USERNAME 388 | else 389 | groupadd --gid $USER_GID $USERNAME 390 | fi 391 | if [ "${USER_UID}" = "automatic" ]; then 392 | useradd -s /bin/bash --gid $USERNAME -m $USERNAME 393 | else 394 | useradd -s /bin/bash --uid $USER_UID --gid $USERNAME -m $USERNAME 395 | fi 396 | fi 397 | 398 | # Add add sudo support for non-root user 399 | if [ "${USERNAME}" != "root" ] && [ "${EXISTING_NON_ROOT_USER}" != "${USERNAME}" ]; then 400 | echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME 401 | chmod 0440 /etc/sudoers.d/$USERNAME 402 | EXISTING_NON_ROOT_USER="${USERNAME}" 403 | fi 404 | 405 | # ********************************* 406 | # ** Shell customization section ** 407 | # ********************************* 408 | 409 | if [ "${USERNAME}" = "root" ]; then 410 | user_home="/root" 411 | # Check if user already has a home directory other than /home/${USERNAME} 412 | elif [ "/home/${USERNAME}" != $( getent passwd $USERNAME | cut -d: -f6 ) ]; then 413 | user_home=$( getent passwd $USERNAME | cut -d: -f6 ) 414 | else 415 | user_home="/home/${USERNAME}" 416 | if [ ! -d "${user_home}" ]; then 417 | mkdir -p "${user_home}" 418 | chown ${USERNAME}:${group_name} "${user_home}" 419 | fi 420 | fi 421 | 422 | # Restore user .bashrc / .profile / .zshrc defaults from skeleton file if it doesn't exist or is empty 423 | possible_rc_files=( ".bashrc" ".profile" ) 424 | [ "$INSTALL_OH_MY_ZSH_CONFIG" == "true" ] && possible_rc_files+=('.zshrc') 425 | [ "$INSTALL_ZSH" == "true" ] && possible_rc_files+=('.zprofile') 426 | for rc_file in "${possible_rc_files[@]}"; do 427 | if [ -f "/etc/skel/${rc_file}" ]; then 428 | if [ ! -e "${user_home}/${rc_file}" ] || [ ! 
-s "${user_home}/${rc_file}" ]; then 429 | cp "/etc/skel/${rc_file}" "${user_home}/${rc_file}" 430 | chown ${USERNAME}:${group_name} "${user_home}/${rc_file}" 431 | fi 432 | fi 433 | done 434 | 435 | # Add RC snippet and custom bash prompt 436 | if [ "${RC_SNIPPET_ALREADY_ADDED}" != "true" ]; then 437 | case "${ADJUSTED_ID}" in 438 | "debian") 439 | global_rc_path="/etc/bash.bashrc" 440 | ;; 441 | "rhel") 442 | global_rc_path="/etc/bashrc" 443 | ;; 444 | "alpine") 445 | global_rc_path="/etc/bash/bashrc" 446 | # /etc/bash/bashrc does not exist in alpine 3.14 & 3.15 447 | mkdir -p /etc/bash 448 | ;; 449 | esac 450 | cat "${FEATURE_DIR}/scripts/rc_snippet.sh" >> ${global_rc_path} 451 | cat "${FEATURE_DIR}/scripts/bash_theme_snippet.sh" >> "${user_home}/.bashrc" 452 | if [ "${USERNAME}" != "root" ]; then 453 | cat "${FEATURE_DIR}/scripts/bash_theme_snippet.sh" >> "/root/.bashrc" 454 | chown ${USERNAME}:${group_name} "${user_home}/.bashrc" 455 | fi 456 | RC_SNIPPET_ALREADY_ADDED="true" 457 | fi 458 | 459 | # Optionally configure zsh and Oh My Zsh! 460 | if [ "${INSTALL_ZSH}" = "true" ]; then 461 | if [ ! -f "${user_home}/.zprofile" ]; then 462 | touch "${user_home}/.zprofile" 463 | echo 'source $HOME/.profile' >> "${user_home}/.zprofile" # TODO: Reconsider adding '.profile' to '.zprofile' 464 | chown ${USERNAME}:${group_name} "${user_home}/.zprofile" 465 | fi 466 | 467 | if [ "${ZSH_ALREADY_INSTALLED}" != "true" ]; then 468 | if [ "${ADJUSTED_ID}" = "rhel" ]; then 469 | global_rc_path="/etc/zshrc" 470 | else 471 | global_rc_path="/etc/zsh/zshrc" 472 | fi 473 | cat "${FEATURE_DIR}/scripts/rc_snippet.sh" >> ${global_rc_path} 474 | ZSH_ALREADY_INSTALLED="true" 475 | fi 476 | 477 | if [ "${CONFIGURE_ZSH_AS_DEFAULT_SHELL}" == "true" ]; then 478 | # Fixing chsh always asking for a password on alpine linux 479 | # ref: https://askubuntu.com/questions/812420/chsh-always-asking-a-password-and-get-pam-authentication-failure. 480 | if [ ! -f "/etc/pam.d/chsh" ] || ! grep -Eq '^auth(.*)pam_rootok\.so$' /etc/pam.d/chsh; then 481 | echo "auth sufficient pam_rootok.so" >> /etc/pam.d/chsh 482 | elif [[ -n "$(awk '/^auth(.*)pam_rootok\.so$/ && !/^auth[[:blank:]]+sufficient[[:blank:]]+pam_rootok\.so$/' /etc/pam.d/chsh)" ]]; then 483 | awk '/^auth(.*)pam_rootok\.so$/ { $2 = "sufficient" } { print }' /etc/pam.d/chsh > /tmp/chsh.tmp && mv /tmp/chsh.tmp /etc/pam.d/chsh 484 | fi 485 | 486 | chsh --shell /bin/zsh ${USERNAME} 487 | fi 488 | 489 | # Adapted, simplified inline Oh My Zsh! install steps that adds, defaults to a codespaces theme. 490 | # See https://github.com/ohmyzsh/ohmyzsh/blob/master/tools/install.sh for official script. 491 | if [ "${INSTALL_OH_MY_ZSH}" = "true" ]; then 492 | user_rc_file="${user_home}/.zshrc" 493 | oh_my_install_dir="${user_home}/.oh-my-zsh" 494 | template_path="${oh_my_install_dir}/templates/zshrc.zsh-template" 495 | if [ ! 
-d "${oh_my_install_dir}" ]; then 496 | umask g-w,o-w 497 | mkdir -p ${oh_my_install_dir} 498 | git clone --depth=1 \ 499 | -c core.eol=lf \ 500 | -c core.autocrlf=false \ 501 | -c fsck.zeroPaddedFilemode=ignore \ 502 | -c fetch.fsck.zeroPaddedFilemode=ignore \ 503 | -c receive.fsck.zeroPaddedFilemode=ignore \ 504 | "https://github.com/ohmyzsh/ohmyzsh" "${oh_my_install_dir}" 2>&1 505 | 506 | # Shrink git while still enabling updates 507 | cd "${oh_my_install_dir}" 508 | git repack -a -d -f --depth=1 --window=1 509 | fi 510 | 511 | # Add Dev Containers theme 512 | mkdir -p ${oh_my_install_dir}/custom/themes 513 | cp -f "${FEATURE_DIR}/scripts/devcontainers.zsh-theme" "${oh_my_install_dir}/custom/themes/devcontainers.zsh-theme" 514 | ln -sf "${oh_my_install_dir}/custom/themes/devcontainers.zsh-theme" "${oh_my_install_dir}/custom/themes/codespaces.zsh-theme" 515 | 516 | # Add devcontainer .zshrc template 517 | if [ "$INSTALL_OH_MY_ZSH_CONFIG" = "true" ]; then 518 | echo -e "$(cat "${template_path}")\nDISABLE_AUTO_UPDATE=true\nDISABLE_UPDATE_PROMPT=true" > ${user_rc_file} 519 | sed -i -e 's/ZSH_THEME=.*/ZSH_THEME="devcontainers"/g' ${user_rc_file} 520 | fi 521 | 522 | # Copy to non-root user if one is specified 523 | if [ "${USERNAME}" != "root" ]; then 524 | copy_to_user_files=("${oh_my_install_dir}") 525 | [ -f "$user_rc_file" ] && copy_to_user_files+=("$user_rc_file") 526 | cp -rf "${copy_to_user_files[@]}" /root 527 | chown -R ${USERNAME}:${group_name} "${copy_to_user_files[@]}" 528 | fi 529 | fi 530 | fi 531 | 532 | # ********************************* 533 | # ** Ensure config directory ** 534 | # ********************************* 535 | user_config_dir="${user_home}/.config" 536 | if [ ! -d "${user_config_dir}" ]; then 537 | mkdir -p "${user_config_dir}" 538 | chown ${USERNAME}:${group_name} "${user_config_dir}" 539 | fi 540 | 541 | # **************************** 542 | # ** Utilities and commands ** 543 | # **************************** 544 | 545 | # code shim, it fallbacks to code-insiders if code is not available 546 | cp -f "${FEATURE_DIR}/bin/code" /usr/local/bin/ 547 | chmod +rx /usr/local/bin/code 548 | 549 | # systemctl shim for Debian/Ubuntu - tells people to use 'service' if systemd is not running 550 | if [ "${ADJUSTED_ID}" = "debian" ]; then 551 | cp -f "${FEATURE_DIR}/bin/systemctl" /usr/local/bin/systemctl 552 | chmod +rx /usr/local/bin/systemctl 553 | fi 554 | 555 | # Persist image metadata info, script if meta.env found in same directory 556 | if [ -f "/usr/local/etc/vscode-dev-containers/meta.env" ] || [ -f "/usr/local/etc/dev-containers/meta.env" ]; then 557 | cp -f "${FEATURE_DIR}/bin/devcontainer-info" /usr/local/bin/devcontainer-info 558 | chmod +rx /usr/local/bin/devcontainer-info 559 | fi 560 | 561 | # Write marker file 562 | if [ ! -d "/usr/local/etc/vscode-dev-containers" ]; then 563 | mkdir -p "$(dirname "${MARKER_FILE}")" 564 | fi 565 | echo -e "\ 566 | PACKAGES_ALREADY_INSTALLED=${PACKAGES_ALREADY_INSTALLED}\n\ 567 | LOCALE_ALREADY_SET=${LOCALE_ALREADY_SET}\n\ 568 | EXISTING_NON_ROOT_USER=${EXISTING_NON_ROOT_USER}\n\ 569 | RC_SNIPPET_ALREADY_ADDED=${RC_SNIPPET_ALREADY_ADDED}\n\ 570 | ZSH_ALREADY_INSTALLED=${ZSH_ALREADY_INSTALLED}" > "${MARKER_FILE}" 571 | 572 | echo "Done!" 
573 | -------------------------------------------------------------------------------- /docker/vendored/features/src/common-utils/main.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #------------------------------------------------------------------------------------------------------------------------- 3 | # Copyright (c) Microsoft Corporation. All rights reserved. 4 | # Licensed under the MIT License. See https://github.com/devcontainers/features/blob/main/LICENSE for license information. 5 | #------------------------------------------------------------------------------------------------------------------------- 6 | # 7 | # Docs: https://github.com/devcontainers/features/tree/main/src/common-utils 8 | # Maintainer: The Dev Container spec maintainers 9 | 10 | set -e 11 | 12 | INSTALL_ZSH="${INSTALLZSH:-"true"}" 13 | CONFIGURE_ZSH_AS_DEFAULT_SHELL="${CONFIGUREZSHASDEFAULTSHELL:-"false"}" 14 | INSTALL_OH_MY_ZSH="${INSTALLOHMYZSH:-"true"}" 15 | INSTALL_OH_MY_ZSH_CONFIG="${INSTALLOHMYZSHCONFIG:-"true"}" 16 | UPGRADE_PACKAGES="${UPGRADEPACKAGES:-"true"}" 17 | USERNAME="${USERNAME:-"automatic"}" 18 | USER_UID="${USERUID:-"automatic"}" 19 | USER_GID="${USERGID:-"automatic"}" 20 | ADD_NON_FREE_PACKAGES="${NONFREEPACKAGES:-"false"}" 21 | 22 | MARKER_FILE="/usr/local/etc/vscode-dev-containers/common" 23 | 24 | FEATURE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 25 | 26 | # Debian / Ubuntu packages 27 | install_debian_packages() { 28 | # Ensure apt is in non-interactive to avoid prompts 29 | export DEBIAN_FRONTEND=noninteractive 30 | 31 | local package_list="" 32 | if [ "${PACKAGES_ALREADY_INSTALLED}" != "true" ]; then 33 | package_list="${package_list} \ 34 | apt-utils \ 35 | openssh-client \ 36 | gnupg2 \ 37 | dirmngr \ 38 | iproute2 \ 39 | procps \ 40 | lsof \ 41 | htop \ 42 | net-tools \ 43 | psmisc \ 44 | curl \ 45 | tree \ 46 | wget \ 47 | rsync \ 48 | ca-certificates \ 49 | unzip \ 50 | bzip2 \ 51 | zip \ 52 | nano \ 53 | vim-tiny \ 54 | less \ 55 | jq \ 56 | lsb-release \ 57 | apt-transport-https \ 58 | dialog \ 59 | libc6 \ 60 | libgcc1 \ 61 | libkrb5-3 \ 62 | libgssapi-krb5-2 \ 63 | libicu[0-9][0-9] \ 64 | liblttng-ust[0-9] \ 65 | libstdc++6 \ 66 | zlib1g \ 67 | locales \ 68 | sudo \ 69 | ncdu \ 70 | man-db \ 71 | strace \ 72 | manpages \ 73 | manpages-dev \ 74 | init-system-helpers" 75 | 76 | # Include libssl1.1 if available 77 | if [[ ! -z $(apt-cache --names-only search ^libssl1.1$) ]]; then 78 | package_list="${package_list} libssl1.1" 79 | fi 80 | 81 | # Include libssl3 if available 82 | if [[ ! -z $(apt-cache --names-only search ^libssl3$) ]]; then 83 | package_list="${package_list} libssl3" 84 | fi 85 | 86 | # Include appropriate version of libssl1.0.x if available 87 | local libssl_package=$(dpkg-query -f '${db:Status-Abbrev}\t${binary:Package}\n' -W 'libssl1\.0\.?' 2>&1 || echo '') 88 | if [ "$(echo "$libssl_package" | grep -o 'libssl1\.0\.[0-9]:' | uniq | sort | wc -l)" -eq 0 ]; then 89 | if [[ ! -z $(apt-cache --names-only search ^libssl1.0.2$) ]]; then 90 | # Debian 9 91 | package_list="${package_list} libssl1.0.2" 92 | elif [[ ! -z $(apt-cache --names-only search ^libssl1.0.0$) ]]; then 93 | # Ubuntu 18.04 94 | package_list="${package_list} libssl1.0.0" 95 | fi 96 | fi 97 | 98 | # Include git if not already installed (may be more recent than distro version) 99 | if ! 
type git > /dev/null 2>&1; then 100 | package_list="${package_list} git" 101 | fi 102 | fi 103 | 104 | # Needed for adding manpages-posix and manpages-posix-dev which are non-free packages in Debian 105 | if [ "${ADD_NON_FREE_PACKAGES}" = "true" ]; then 106 | # Bring in variables from /etc/os-release like VERSION_CODENAME 107 | sed -i -E "s/deb http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME} main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME} main contrib non-free/" /etc/apt/sources.list 108 | sed -i -E "s/deb-src http:\/\/(deb|httredir)\.debian\.org\/debian ${VERSION_CODENAME} main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME} main contrib non-free/" /etc/apt/sources.list 109 | sed -i -E "s/deb http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME}-updates main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME}-updates main contrib non-free/" /etc/apt/sources.list 110 | sed -i -E "s/deb-src http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME}-updates main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME}-updates main contrib non-free/" /etc/apt/sources.list 111 | sed -i "s/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main contrib non-free/" /etc/apt/sources.list 112 | sed -i "s/deb-src http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main contrib non-free/" /etc/apt/sources.list 113 | sed -i "s/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main contrib non-free/" /etc/apt/sources.list 114 | sed -i "s/deb-src http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main contrib non-free/" /etc/apt/sources.list 115 | # Handle bullseye location for security https://www.debian.org/releases/bullseye/amd64/release-notes/ch-information.en.html 116 | sed -i "s/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main contrib non-free/" /etc/apt/sources.list 117 | sed -i "s/deb-src http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main contrib non-free/" /etc/apt/sources.list 118 | echo "Running apt-get update..." 119 | package_list="${package_list} manpages-posix manpages-posix-dev" 120 | fi 121 | 122 | # Install the list of packages 123 | echo "Packages to verify are installed: ${package_list}" 124 | rm -rf /var/lib/apt/lists/* 125 | apt-get update -y 126 | apt-get -y install --no-install-recommends ${package_list} 2> >( grep -v 'debconf: delaying package configuration, since apt-utils is not installed' >&2 ) 127 | 128 | # Install zsh (and recommended packages) if needed 129 | if [ "${INSTALL_ZSH}" = "true" ] && ! 
type zsh > /dev/null 2>&1; then 130 | apt-get install -y zsh 131 | fi 132 | 133 | # Get to latest versions of all packages 134 | if [ "${UPGRADE_PACKAGES}" = "true" ]; then 135 | apt-get -y upgrade --no-install-recommends 136 | apt-get autoremove -y 137 | fi 138 | 139 | # Ensure at least the en_US.UTF-8 UTF-8 locale is available = common need for both applications and things like the agnoster ZSH theme. 140 | if [ "${LOCALE_ALREADY_SET}" != "true" ] && ! grep -o -E '^\s*en_US.UTF-8\s+UTF-8' /etc/locale.gen > /dev/null; then 141 | echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen 142 | locale-gen 143 | LOCALE_ALREADY_SET="true" 144 | fi 145 | 146 | PACKAGES_ALREADY_INSTALLED="true" 147 | 148 | # Clean up 149 | apt-get -y clean 150 | rm -rf /var/lib/apt/lists/* 151 | } 152 | 153 | # RedHat / RockyLinux / CentOS / Fedora packages 154 | install_redhat_packages() { 155 | local package_list="" 156 | local remove_epel="false" 157 | local install_cmd=dnf 158 | if ! type dnf > /dev/null 2>&1; then 159 | install_cmd=yum 160 | fi 161 | 162 | if [ "${PACKAGES_ALREADY_INSTALLED}" != "true" ]; then 163 | package_list="${package_list} \ 164 | gawk \ 165 | openssh-clients \ 166 | gnupg2 \ 167 | iproute \ 168 | procps \ 169 | lsof \ 170 | net-tools \ 171 | psmisc \ 172 | wget \ 173 | ca-certificates \ 174 | rsync \ 175 | unzip \ 176 | zip \ 177 | nano \ 178 | vim-minimal \ 179 | less \ 180 | jq \ 181 | openssl-libs \ 182 | krb5-libs \ 183 | libicu \ 184 | zlib \ 185 | sudo \ 186 | sed \ 187 | grep \ 188 | which \ 189 | man-db \ 190 | strace" 191 | 192 | # rockylinux:9 installs 'curl-minimal' which clashes with 'curl' 193 | # Install 'curl' for every OS except this rockylinux:9 194 | if [[ "${ID}" = "rocky" ]] && [[ "${VERSION}" != *"9."* ]]; then 195 | package_list="${package_list} curl" 196 | fi 197 | 198 | # Install OpenSSL 1.0 compat if needed 199 | if ${install_cmd} -q list compat-openssl10 >/dev/null 2>&1; then 200 | package_list="${package_list} compat-openssl10" 201 | fi 202 | 203 | # Install lsb_release if available 204 | if ${install_cmd} -q list redhat-lsb-core >/dev/null 2>&1; then 205 | package_list="${package_list} redhat-lsb-core" 206 | fi 207 | 208 | # Install git if not already installed (may be more recent than distro version) 209 | if ! type git > /dev/null 2>&1; then 210 | package_list="${package_list} git" 211 | fi 212 | 213 | # Install EPEL repository if needed (required to install 'jq' for CentOS) 214 | if ! ${install_cmd} -q list jq >/dev/null 2>&1; then 215 | ${install_cmd} -y install epel-release 216 | remove_epel="true" 217 | fi 218 | fi 219 | 220 | # Install zsh if needed 221 | if [ "${INSTALL_ZSH}" = "true" ] && ! 
type zsh > /dev/null 2>&1; then 222 | package_list="${package_list} zsh" 223 | fi 224 | 225 | if [ -n "${package_list}" ]; then 226 | ${install_cmd} -y install ${package_list} 227 | fi 228 | 229 | # Get to latest versions of all packages 230 | if [ "${UPGRADE_PACKAGES}" = "true" ]; then 231 | ${install_cmd} upgrade -y 232 | fi 233 | 234 | if [[ "${remove_epel}" = "true" ]]; then 235 | ${install_cmd} -y remove epel-release 236 | fi 237 | 238 | PACKAGES_ALREADY_INSTALLED="true" 239 | } 240 | 241 | # Alpine Linux packages 242 | install_alpine_packages() { 243 | apk update 244 | 245 | if [ "${PACKAGES_ALREADY_INSTALLED}" != "true" ]; then 246 | apk add --no-cache \ 247 | openssh-client \ 248 | gnupg \ 249 | procps \ 250 | lsof \ 251 | htop \ 252 | net-tools \ 253 | psmisc \ 254 | curl \ 255 | wget \ 256 | rsync \ 257 | ca-certificates \ 258 | unzip \ 259 | zip \ 260 | nano \ 261 | vim \ 262 | less \ 263 | jq \ 264 | libgcc \ 265 | libstdc++ \ 266 | krb5-libs \ 267 | libintl \ 268 | libssl1.1 \ 269 | lttng-ust \ 270 | tzdata \ 271 | userspace-rcu \ 272 | zlib \ 273 | sudo \ 274 | coreutils \ 275 | sed \ 276 | grep \ 277 | which \ 278 | ncdu \ 279 | shadow \ 280 | strace 281 | 282 | # Install man pages - package name varies between 3.12 and earlier versions 283 | if apk info man > /dev/null 2>&1; then 284 | apk add --no-cache man man-pages 285 | else 286 | apk add --no-cache mandoc man-pages 287 | fi 288 | 289 | # Install git if not already installed (may be more recent than distro version) 290 | if ! type git > /dev/null 2>&1; then 291 | apk add --no-cache git 292 | fi 293 | fi 294 | 295 | # Install zsh if needed 296 | if [ "${INSTALL_ZSH}" = "true" ] && ! type zsh > /dev/null 2>&1; then 297 | apk add --no-cache zsh 298 | fi 299 | 300 | PACKAGES_ALREADY_INSTALLED="true" 301 | } 302 | 303 | # ****************** 304 | # ** Main section ** 305 | # ****************** 306 | 307 | if [ "$(id -u)" -ne 0 ]; then 308 | echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' 309 | exit 1 310 | fi 311 | 312 | # Load markers to see which steps have already run 313 | if [ -f "${MARKER_FILE}" ]; then 314 | echo "Marker file found:" 315 | cat "${MARKER_FILE}" 316 | source "${MARKER_FILE}" 317 | fi 318 | 319 | # Ensure that login shells get the correct path if the user updated the PATH using ENV. 320 | rm -f /etc/profile.d/00-restore-env.sh 321 | echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh 322 | chmod +x /etc/profile.d/00-restore-env.sh 323 | 324 | # Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME 325 | . /etc/os-release 326 | # Get an adjusted ID independent of distro variants 327 | if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then 328 | ADJUSTED_ID="debian" 329 | elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then 330 | ADJUSTED_ID="rhel" 331 | elif [ "${ID}" = "alpine" ]; then 332 | ADJUSTED_ID="alpine" 333 | else 334 | echo "Linux distro ${ID} not supported." 
335 | exit 1 336 | fi 337 | 338 | # Install packages for appropriate OS 339 | case "${ADJUSTED_ID}" in 340 | "debian") 341 | install_debian_packages 342 | ;; 343 | "rhel") 344 | install_redhat_packages 345 | ;; 346 | "alpine") 347 | install_alpine_packages 348 | ;; 349 | esac 350 | 351 | # If in automatic mode, determine if a user already exists, if not use vscode 352 | if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then 353 | if [ "${_REMOTE_USER}" != "root" ]; then 354 | USERNAME="${_REMOTE_USER}" 355 | else 356 | USERNAME="" 357 | POSSIBLE_USERS=("devcontainer" "vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)") 358 | for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do 359 | if id -u ${CURRENT_USER} > /dev/null 2>&1; then 360 | USERNAME=${CURRENT_USER} 361 | break 362 | fi 363 | done 364 | if [ "${USERNAME}" = "" ]; then 365 | USERNAME=vscode 366 | fi 367 | fi 368 | elif [ "${USERNAME}" = "none" ]; then 369 | USERNAME=root 370 | USER_UID=0 371 | USER_GID=0 372 | fi 373 | # Create or update a non-root user to match UID/GID. 374 | group_name="${USERNAME}" 375 | if id -u ${USERNAME} > /dev/null 2>&1; then 376 | # User exists, update if needed 377 | if [ "${USER_GID}" != "automatic" ] && [ "$USER_GID" != "$(id -g $USERNAME)" ]; then 378 | group_name="$(id -gn $USERNAME)" 379 | groupmod --gid $USER_GID ${group_name} 380 | usermod --gid $USER_GID $USERNAME 381 | fi 382 | if [ "${USER_UID}" != "automatic" ] && [ "$USER_UID" != "$(id -u $USERNAME)" ]; then 383 | usermod --uid $USER_UID $USERNAME 384 | fi 385 | else 386 | # Create user 387 | if [ "${USER_GID}" = "automatic" ]; then 388 | groupadd $USERNAME 389 | else 390 | groupadd --gid $USER_GID $USERNAME 391 | fi 392 | if [ "${USER_UID}" = "automatic" ]; then 393 | useradd -s /bin/bash --gid $USERNAME -m $USERNAME 394 | else 395 | useradd -s /bin/bash --uid $USER_UID --gid $USERNAME -m $USERNAME 396 | fi 397 | fi 398 | 399 | # Add add sudo support for non-root user 400 | if [ "${USERNAME}" != "root" ] && [ "${EXISTING_NON_ROOT_USER}" != "${USERNAME}" ]; then 401 | echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME 402 | chmod 0440 /etc/sudoers.d/$USERNAME 403 | EXISTING_NON_ROOT_USER="${USERNAME}" 404 | fi 405 | 406 | # ********************************* 407 | # ** Shell customization section ** 408 | # ********************************* 409 | 410 | if [ "${USERNAME}" = "root" ]; then 411 | user_home="/root" 412 | # Check if user already has a home directory other than /home/${USERNAME} 413 | elif [ "/home/${USERNAME}" != $( getent passwd $USERNAME | cut -d: -f6 ) ]; then 414 | user_home=$( getent passwd $USERNAME | cut -d: -f6 ) 415 | else 416 | user_home="/home/${USERNAME}" 417 | if [ ! -d "${user_home}" ]; then 418 | mkdir -p "${user_home}" 419 | chown ${USERNAME}:${group_name} "${user_home}" 420 | fi 421 | fi 422 | 423 | # Restore user .bashrc / .profile / .zshrc defaults from skeleton file if it doesn't exist or is empty 424 | possible_rc_files=( ".bashrc" ".profile" ) 425 | [ "$INSTALL_OH_MY_ZSH_CONFIG" == "true" ] && possible_rc_files+=('.zshrc') 426 | [ "$INSTALL_ZSH" == "true" ] && possible_rc_files+=('.zprofile') 427 | for rc_file in "${possible_rc_files[@]}"; do 428 | if [ -f "/etc/skel/${rc_file}" ]; then 429 | if [ ! -e "${user_home}/${rc_file}" ] || [ ! 
-s "${user_home}/${rc_file}" ]; then 430 | cp "/etc/skel/${rc_file}" "${user_home}/${rc_file}" 431 | chown ${USERNAME}:${group_name} "${user_home}/${rc_file}" 432 | fi 433 | fi 434 | done 435 | 436 | # Add RC snippet and custom bash prompt 437 | if [ "${RC_SNIPPET_ALREADY_ADDED}" != "true" ]; then 438 | case "${ADJUSTED_ID}" in 439 | "debian") 440 | global_rc_path="/etc/bash.bashrc" 441 | ;; 442 | "rhel") 443 | global_rc_path="/etc/bashrc" 444 | ;; 445 | "alpine") 446 | global_rc_path="/etc/bash/bashrc" 447 | # /etc/bash/bashrc does not exist in alpine 3.14 & 3.15 448 | mkdir -p /etc/bash 449 | ;; 450 | esac 451 | cat "${FEATURE_DIR}/scripts/rc_snippet.sh" >> ${global_rc_path} 452 | cat "${FEATURE_DIR}/scripts/bash_theme_snippet.sh" >> "${user_home}/.bashrc" 453 | if [ "${USERNAME}" != "root" ]; then 454 | cat "${FEATURE_DIR}/scripts/bash_theme_snippet.sh" >> "/root/.bashrc" 455 | chown ${USERNAME}:${group_name} "${user_home}/.bashrc" 456 | fi 457 | RC_SNIPPET_ALREADY_ADDED="true" 458 | fi 459 | 460 | # Optionally configure zsh and Oh My Zsh! 461 | if [ "${INSTALL_ZSH}" = "true" ]; then 462 | if [ ! -f "${user_home}/.zprofile" ]; then 463 | touch "${user_home}/.zprofile" 464 | echo 'source $HOME/.profile' >> "${user_home}/.zprofile" # TODO: Reconsider adding '.profile' to '.zprofile' 465 | chown ${USERNAME}:${group_name} "${user_home}/.zprofile" 466 | fi 467 | 468 | if [ "${ZSH_ALREADY_INSTALLED}" != "true" ]; then 469 | if [ "${ADJUSTED_ID}" = "rhel" ]; then 470 | global_rc_path="/etc/zshrc" 471 | else 472 | global_rc_path="/etc/zsh/zshrc" 473 | fi 474 | cat "${FEATURE_DIR}/scripts/rc_snippet.sh" >> ${global_rc_path} 475 | ZSH_ALREADY_INSTALLED="true" 476 | fi 477 | 478 | if [ "${CONFIGURE_ZSH_AS_DEFAULT_SHELL}" == "true" ]; then 479 | # Fixing chsh always asking for a password on alpine linux 480 | # ref: https://askubuntu.com/questions/812420/chsh-always-asking-a-password-and-get-pam-authentication-failure. 481 | if [ ! -f "/etc/pam.d/chsh" ] || ! grep -Eq '^auth(.*)pam_rootok\.so$' /etc/pam.d/chsh; then 482 | echo "auth sufficient pam_rootok.so" >> /etc/pam.d/chsh 483 | elif [[ -n "$(awk '/^auth(.*)pam_rootok\.so$/ && !/^auth[[:blank:]]+sufficient[[:blank:]]+pam_rootok\.so$/' /etc/pam.d/chsh)" ]]; then 484 | awk '/^auth(.*)pam_rootok\.so$/ { $2 = "sufficient" } { print }' /etc/pam.d/chsh > /tmp/chsh.tmp && mv /tmp/chsh.tmp /etc/pam.d/chsh 485 | fi 486 | 487 | chsh --shell /bin/zsh ${USERNAME} 488 | fi 489 | 490 | # Adapted, simplified inline Oh My Zsh! install steps that adds, defaults to a codespaces theme. 491 | # See https://github.com/ohmyzsh/ohmyzsh/blob/master/tools/install.sh for official script. 492 | if [ "${INSTALL_OH_MY_ZSH}" = "true" ]; then 493 | user_rc_file="${user_home}/.zshrc" 494 | oh_my_install_dir="${user_home}/.oh-my-zsh" 495 | template_path="${oh_my_install_dir}/templates/zshrc.zsh-template" 496 | if [ ! 
-d "${oh_my_install_dir}" ]; then 497 | umask g-w,o-w 498 | mkdir -p ${oh_my_install_dir} 499 | git clone --depth=1 \ 500 | -c core.eol=lf \ 501 | -c core.autocrlf=false \ 502 | -c fsck.zeroPaddedFilemode=ignore \ 503 | -c fetch.fsck.zeroPaddedFilemode=ignore \ 504 | -c receive.fsck.zeroPaddedFilemode=ignore \ 505 | "https://github.com/ohmyzsh/ohmyzsh" "${oh_my_install_dir}" 2>&1 506 | 507 | # Shrink git while still enabling updates 508 | cd "${oh_my_install_dir}" 509 | git repack -a -d -f --depth=1 --window=1 510 | fi 511 | 512 | # Add Dev Containers theme 513 | mkdir -p ${oh_my_install_dir}/custom/themes 514 | cp -f "${FEATURE_DIR}/scripts/devcontainers.zsh-theme" "${oh_my_install_dir}/custom/themes/devcontainers.zsh-theme" 515 | ln -sf "${oh_my_install_dir}/custom/themes/devcontainers.zsh-theme" "${oh_my_install_dir}/custom/themes/codespaces.zsh-theme" 516 | 517 | # Add devcontainer .zshrc template 518 | if [ "$INSTALL_OH_MY_ZSH_CONFIG" = "true" ]; then 519 | echo -e "$(cat "${template_path}")\nDISABLE_AUTO_UPDATE=true\nDISABLE_UPDATE_PROMPT=true" > ${user_rc_file} 520 | sed -i -e 's/ZSH_THEME=.*/ZSH_THEME="devcontainers"/g' ${user_rc_file} 521 | fi 522 | 523 | # Copy to non-root user if one is specified 524 | if [ "${USERNAME}" != "root" ]; then 525 | copy_to_user_files=("${oh_my_install_dir}") 526 | [ -f "$user_rc_file" ] && copy_to_user_files+=("$user_rc_file") 527 | cp -rf "${copy_to_user_files[@]}" /root 528 | chown -R ${USERNAME}:${group_name} "${copy_to_user_files[@]}" 529 | fi 530 | fi 531 | fi 532 | 533 | # ********************************* 534 | # ** Ensure config directory ** 535 | # ********************************* 536 | user_config_dir="${user_home}/.config" 537 | if [ ! -d "${user_config_dir}" ]; then 538 | mkdir -p "${user_config_dir}" 539 | chown ${USERNAME}:${group_name} "${user_config_dir}" 540 | fi 541 | 542 | # **************************** 543 | # ** Utilities and commands ** 544 | # **************************** 545 | 546 | # code shim, it fallbacks to code-insiders if code is not available 547 | cp -f "${FEATURE_DIR}/bin/code" /usr/local/bin/ 548 | chmod +rx /usr/local/bin/code 549 | 550 | # systemctl shim for Debian/Ubuntu - tells people to use 'service' if systemd is not running 551 | if [ "${ADJUSTED_ID}" = "debian" ]; then 552 | cp -f "${FEATURE_DIR}/bin/systemctl" /usr/local/bin/systemctl 553 | chmod +rx /usr/local/bin/systemctl 554 | fi 555 | 556 | # Persist image metadata info, script if meta.env found in same directory 557 | if [ -f "/usr/local/etc/vscode-dev-containers/meta.env" ] || [ -f "/usr/local/etc/dev-containers/meta.env" ]; then 558 | cp -f "${FEATURE_DIR}/bin/devcontainer-info" /usr/local/bin/devcontainer-info 559 | chmod +rx /usr/local/bin/devcontainer-info 560 | fi 561 | 562 | # Write marker file 563 | if [ ! -d "/usr/local/etc/vscode-dev-containers" ]; then 564 | mkdir -p "$(dirname "${MARKER_FILE}")" 565 | fi 566 | echo -e "\ 567 | PACKAGES_ALREADY_INSTALLED=${PACKAGES_ALREADY_INSTALLED}\n\ 568 | LOCALE_ALREADY_SET=${LOCALE_ALREADY_SET}\n\ 569 | EXISTING_NON_ROOT_USER=${EXISTING_NON_ROOT_USER}\n\ 570 | RC_SNIPPET_ALREADY_ADDED=${RC_SNIPPET_ALREADY_ADDED}\n\ 571 | ZSH_ALREADY_INSTALLED=${ZSH_ALREADY_INSTALLED}" > "${MARKER_FILE}" 572 | 573 | echo "Done!" 
574 | -------------------------------------------------------------------------------- /docker/vendored/features/src/common-utils/no-lsb-release.patch: -------------------------------------------------------------------------------- 1 | diff --git a/docker/vendored/features/src/common-utils/main.sh b/docker/vendored/features/src/common-utils/main.sh 2 | index 26f0a75..33c302f 100755 3 | --- a/docker/vendored/features/src/common-utils/main.sh 4 | +++ b/docker/vendored/features/src/common-utils/main.sh 5 | @@ -53,7 +53,6 @@ install_debian_packages() { 6 | vim-tiny \ 7 | less \ 8 | jq \ 9 | - lsb-release \ 10 | apt-transport-https \ 11 | dialog \ 12 | libc6 \ 13 | -------------------------------------------------------------------------------- /readme-images/devcontainer-extension-notifications.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mamba-org/micromamba-devcontainer/48a849f2f3a2cb75a1a878e308613cac932c138a/readme-images/devcontainer-extension-notifications.png -------------------------------------------------------------------------------- /readme-images/loaded-devcontainer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mamba-org/micromamba-devcontainer/48a849f2f3a2cb75a1a878e308613cac932c138a/readme-images/loaded-devcontainer.png -------------------------------------------------------------------------------- /readme-images/reopen-in-devcontainer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mamba-org/micromamba-devcontainer/48a849f2f3a2cb75a1a878e308613cac932c138a/readme-images/reopen-in-devcontainer.png -------------------------------------------------------------------------------- /scripts/update_base_image.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import json 4 | import os 5 | from pathlib import Path 6 | from typing import NamedTuple 7 | import functools 8 | 9 | from requests import get 10 | 11 | DOCKERFILE_PATH = Path() / "docker" / "Dockerfile" 12 | BASE_IMAGES_JSON = Path() / "base-images.json" 13 | BASE_IMAGE_PREFIX = "ARG BASE_IMAGE=mambaorg/micromamba" 14 | REGISTRY_QUERY_URL = ( 15 | "https://hub.docker.com/v2/repositories/mambaorg/micromamba/tags?page_size=200" 16 | ) 17 | GITHUB_ENV = Path(os.environ["GITHUB_ENV"]) if "GITHUB_ENV" in os.environ else None 18 | 19 | 20 | class DockerImageTag(NamedTuple): 21 | """Represents a Docker image tag in the following format: 22 | 23 | DockerImageTag( 24 | repository='mambaorg/micromamba', 25 | git_tag='c160e88', 26 | distro='jammy', 27 | digest='sha256:e3a59f560211ded26e65afafafd20eafc31bad2745db9a2932e39574847a7159' 28 | ) 29 | """ 30 | 31 | repository: str 32 | git_tag: str 33 | distro: str 34 | digest: str 35 | 36 | def to_str(self) -> str: 37 | """mambaorg/micromamba:git-515d637-jammy@sha256:f53e550...""" 38 | return f"{self.repository}:git-{self.git_tag}-{self.distro}@{self.digest}" 39 | 40 | @staticmethod 41 | def parse(s: str) -> DockerImageTag: 42 | repository, tag_digest = s.split(":", 1) 43 | tag, digest = tag_digest.split("@", 1) 44 | 45 | if tag.startswith("git-"): 46 | parts = tag.split("-", 2) 47 | git_tag = parts[1] 48 | distro = parts[2] if len(parts) > 2 else "" 49 | else: 50 | git_tag = "" 51 | distro = tag 52 | 53 | return DockerImageTag(repository, git_tag, distro, digest) 54 | 55 | 56 | def 
get_existing_base_images() -> dict[str, DockerImageTag]: 57 | if not BASE_IMAGES_JSON.exists(): 58 | raise ValueError("Base images file not found") 59 | raw_data = json.loads(BASE_IMAGES_JSON.read_text()) 60 | return {key: DockerImageTag.parse(value) for key, value in raw_data.items()} 61 | 62 | 63 | def update_base_images_json(new_base_images: dict[str, str]) -> None: 64 | if not BASE_IMAGES_JSON.exists(): 65 | raise ValueError("Base images file not found") 66 | BASE_IMAGES_JSON.write_text(json.dumps(new_base_images, indent=4) + "\n") 67 | 68 | 69 | def parse_dockerfile() -> tuple[DockerImageTag, int, list[str]]: 70 | if not DOCKERFILE_PATH.exists(): 71 | raise ValueError("Dockerfile not found") 72 | 73 | lines = DOCKERFILE_PATH.read_text().splitlines() 74 | for line_number, line in enumerate(lines): 75 | if line.startswith(BASE_IMAGE_PREFIX): 76 | tag_string = line.split("=", 1)[1] 77 | return DockerImageTag.parse(tag_string), line_number, lines 78 | 79 | raise ValueError("Base image line not found in Dockerfile") 80 | 81 | 82 | @functools.cache 83 | def get_registry_results() -> list[dict]: 84 | """Image metadata from the registry.""" 85 | response = get(REGISTRY_QUERY_URL) 86 | response.raise_for_status() 87 | return response.json()["results"] 88 | 89 | 90 | def fetch_new_image_info( 91 | image_tag: DockerImageTag, 92 | starts_with="git-", 93 | ) -> DockerImageTag: 94 | """Return the first registry result whose tag starts with `starts_with` and ends with the distro name.""" 95 | results = get_registry_results() 96 | 97 | for result in results: 98 | tag = result["name"] 99 | if tag.startswith(starts_with) and tag.endswith(image_tag.distro): 100 | new_git_tag = tag.split("-", maxsplit=2)[1] 101 | new_digest = result["digest"] 102 | return DockerImageTag( 103 | repository=image_tag.repository, 104 | git_tag=new_git_tag, 105 | distro=image_tag.distro, 106 | digest=new_digest, 107 | ) 108 | 109 | # Return the original DockerImageTag if no update is found 110 | return image_tag 111 | 112 | 113 | def update_dockerfile( 114 | lines: list[str], line_number: int, image_tag: DockerImageTag 115 | ) -> str: 116 | new_docker_tag = f"git-{image_tag.git_tag}-{image_tag.distro}" 117 | replacement_line = f"{BASE_IMAGE_PREFIX}:{new_docker_tag}@{image_tag.digest}" 118 | lines[line_number] = replacement_line 119 | 120 | DOCKERFILE_PATH.write_text("\n".join(lines) + "\n") 121 | return new_docker_tag 122 | 123 | 124 | def main(): 125 | print("Updating Dockerfile...") 126 | current_image_tag, line_number, lines = parse_dockerfile() 127 | print(f"Base image tag: {current_image_tag.git_tag}") 128 | print(f"Base image distro: {current_image_tag.distro}") 129 | print(f"Base image digest: {current_image_tag.digest}") 130 | 131 | updated_image_tag = fetch_new_image_info(current_image_tag) 132 | 133 | if updated_image_tag == current_image_tag: 134 | print("No update needed for Dockerfile.") 135 | else: 136 | new_docker_tag = update_dockerfile(lines, line_number, updated_image_tag) 137 | if GITHUB_ENV: 138 | with open(GITHUB_ENV, "a") as f: 139 | f.write(f"NEW_DOCKER_TAG={new_docker_tag}\n") 140 | print(f"✅ Update successful: {new_docker_tag}") 141 | 142 | print("Updating base-images.json...") 143 | existing_base_images = get_existing_base_images() 144 | updated_base_images = { 145 | name: fetch_new_image_info(image).to_str() 146 | for name, image in existing_base_images.items() 147 | } 148 | update_base_images_json(updated_base_images) 149 | print("✅ base-images.json updated successfully.") 150 | 151 | 152 | if __name__ == "__main__": 153 | main()
154 | -------------------------------------------------------------------------------- /scripts/update_base_image_reqs.txt: -------------------------------------------------------------------------------- 1 | requests==2.28.2 2 | --------------------------------------------------------------------------------
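A minimal usage sketch of the DockerImageTag helper from scripts/update_base_image.py. The pinned reference below reuses the illustrative git tag and digest from the class docstring (not a real, current image), and the import assumes the scripts/ directory is on the Python path:

    from update_base_image import DockerImageTag  # assumes scripts/ is importable

    # Illustrative pinned reference; values taken from the DockerImageTag docstring.
    ref = (
        "mambaorg/micromamba:git-c160e88-jammy"
        "@sha256:e3a59f560211ded26e65afafafd20eafc31bad2745db9a2932e39574847a7159"
    )
    tag = DockerImageTag.parse(ref)
    assert tag.git_tag == "c160e88" and tag.distro == "jammy"
    assert tag.to_str() == ref  # parse() and to_str() round-trip a pinned reference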