├── .dockerignore ├── .editorconfig ├── .gitattributes ├── .github ├── CONTRIBUTING.md ├── FUNDING.yml ├── ISSUE_TEMPLATE │ ├── config.yml │ ├── issue.bug.yml │ └── issue.feature.yml ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── call_issue_pr_tracker.yml │ ├── call_issues_cron.yml │ ├── external_trigger.yml │ ├── external_trigger_scheduler.yml │ ├── greetings.yml │ ├── package_trigger_scheduler.yml │ └── permissions.yml ├── .gitignore ├── Dockerfile ├── Dockerfile.aarch64 ├── Dockerfile.riscv64 ├── Jenkinsfile ├── LICENSE ├── README.md ├── jenkins-vars.yml ├── package_versions.txt ├── readme-vars.yml └── root └── etc └── s6-overlay └── s6-rc.d ├── ci-service-check ├── dependencies.d │ └── legacy-services ├── type └── up ├── init-adduser ├── branding ├── dependencies.d │ └── init-migrations ├── run ├── type └── up ├── init-config-end ├── dependencies.d │ ├── init-config │ └── init-crontab-config ├── type └── up ├── init-config ├── dependencies.d │ └── init-os-end ├── type └── up ├── init-crontab-config ├── dependencies.d │ └── init-config ├── run ├── type └── up ├── init-custom-files ├── dependencies.d │ └── init-mods-end ├── run ├── type └── up ├── init-device-perms ├── dependencies.d │ └── init-adduser ├── run ├── type └── up ├── init-envfile ├── run ├── type └── up ├── init-migrations ├── run ├── type └── up ├── init-mods-end ├── dependencies.d │ └── init-mods-package-install ├── type └── up ├── init-mods-package-install ├── dependencies.d │ └── init-mods ├── type └── up ├── init-mods ├── dependencies.d │ └── init-config-end ├── type └── up ├── init-os-end ├── dependencies.d │ ├── init-adduser │ ├── init-device-perms │ └── init-envfile ├── type └── up ├── init-services ├── dependencies.d │ └── init-custom-files ├── type └── up ├── svc-cron ├── dependencies.d │ └── init-services ├── run └── type ├── user └── contents.d │ ├── init-adduser │ ├── init-config │ ├── init-config-end │ ├── init-crontab-config │ ├── init-custom-files │ ├── init-device-perms │ ├── init-envfile │ ├── init-migrations │ ├── init-mods │ ├── init-mods-end │ ├── init-mods-package-install │ ├── init-os-end │ ├── init-services │ └── svc-cron └── user2 └── contents.d └── ci-service-check /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | .gitignore 3 | .github 4 | .gitattributes 5 | READMETEMPLATE.md 6 | README.md 7 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # This file is globally distributed to all container image projects from 2 | # https://github.com/linuxserver/docker-jenkins-builder/blob/master/.editorconfig 3 | 4 | # top-most EditorConfig file 5 | root = true 6 | 7 | # Unix-style newlines with a newline ending every file 8 | [*] 9 | end_of_line = lf 10 | insert_final_newline = true 11 | # trim_trailing_whitespace may cause unintended issues and should not be globally set true 12 | trim_trailing_whitespace = false 13 | 14 | [{Dockerfile*,**.yml}] 15 | indent_style = space 16 | indent_size = 2 17 | 18 | [{**.sh,root/etc/s6-overlay/s6-rc.d/**,root/etc/cont-init.d/**,root/etc/services.d/**}] 19 | indent_style = space 20 | indent_size = 4 21 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | 4 | # Custom for Visual 
Studio 5 | *.cs diff=csharp 6 | 7 | # Standard to msysgit 8 | *.doc diff=astextplain 9 | *.DOC diff=astextplain 10 | *.docx diff=astextplain 11 | *.DOCX diff=astextplain 12 | *.dot diff=astextplain 13 | *.DOT diff=astextplain 14 | *.pdf diff=astextplain 15 | *.PDF diff=astextplain 16 | *.rtf diff=astextplain 17 | *.RTF diff=astextplain 18 | -------------------------------------------------------------------------------- /.github/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to baseimage-alpine 2 | 3 | ## Gotchas 4 | 5 | * While contributing, make sure all of your changes are in place before creating a Pull Request, as our pipeline builds each commit after the PR is opened. 6 | * Read and fill in the Pull Request template 7 | * If this is a fix for a typo (in code, documentation, or the README) please file an issue and let us sort it out. We do not need a PR 8 | * If the PR is addressing an existing issue, include `closes #<issue number>` in the body of the PR commit message 9 | * If you want to discuss changes, you can also bring it up in [#dev-talk](https://discordapp.com/channels/354974912613449730/757585807061155840) in our [Discord server](https://linuxserver.io/discord) 10 | 11 | ## Common files 12 | 13 | | File | Use case | 14 | | :----: | --- | 15 | | `Dockerfile` | Dockerfile used to build amd64 images | 16 | | `Dockerfile.aarch64` | Dockerfile used to build 64bit ARM architectures | 17 | | `Dockerfile.armhf` | Dockerfile used to build 32bit ARM architectures | 18 | | `Jenkinsfile` | This file is a product of our builder and should not be edited directly. This is used to build the image | 19 | | `jenkins-vars.yml` | This file is used to generate the `Jenkinsfile` mentioned above; it only affects the build process | 20 | | `package_versions.txt` | This file is generated as a part of the build process and should not be edited directly. It lists all the installed packages and their versions | 21 | | `README.md` | This file is a product of our builder and should not be edited directly. It displays the readme for the repository and image registries | 22 | | `readme-vars.yml` | This file is used to generate the `README.md` | 23 | 24 | ## Readme 25 | 26 | If you would like to change our readme, please __**do not**__ edit it directly, as it is auto-generated on each commit. 27 | Instead edit the [readme-vars.yml](https://github.com/linuxserver/docker-baseimage-alpine/edit/master/readme-vars.yml). 28 | 29 | These variables are used in a template for our [Jenkins Builder](https://github.com/linuxserver/docker-jenkins-builder) as part of an ansible play. 30 | Most of these variables are also carried over to [docs.linuxserver.io](https://docs.linuxserver.io). 31 | 32 | ### Fixing typos or clarifying the text in the readme 33 | 34 | There are variables for multiple parts of the readme; the most common ones are: 36 | | Variable | Description | 37 | | :----: | --- | 38 | | `project_blurb` | This is the short excerpt shown above the project logo. | 39 | | `app_setup_block` | This is the text that shows up under "Application Setup" if enabled | 40 | 41 | ### Parameters 42 | 43 | The compose and run examples are also generated from these variables. 44 | 45 | We have a [reference file](https://github.com/linuxserver/docker-jenkins-builder/blob/master/vars/_container-vars-blank) in our Jenkins Builder. 46 | 47 | These are prefixed with `param_` for required parameters, or `opt_param_` for optional parameters, except for `cap_add`. 
48 | Remember to enable a parameter if it is currently disabled. This differs between parameters and can be seen in the reference file. 49 | 50 | Devices, environment variables, ports and volumes expect their variables in a certain way. 51 | 52 | ### Devices 53 | 54 | ```yml 55 | param_devices: 56 | - { device_path: "/dev/dri", device_host_path: "/dev/dri", desc: "For hardware transcoding" } 57 | opt_param_devices: 58 | - { device_path: "/dev/dri", device_host_path: "/dev/dri", desc: "For hardware transcoding" } 59 | ``` 60 | 61 | ### Environment variables 62 | 63 | ```yml 64 | param_env_vars: 65 | - { env_var: "TZ", env_value: "Europe/London", desc: "Specify a timezone to use EG Europe/London." } 66 | opt_param_env_vars: 67 | - { env_var: "VERSION", env_value: "latest", desc: "Supported values are LATEST, PLEXPASS or a specific version number." } 68 | ``` 69 | 70 | ### Ports 71 | 72 | ```yml 73 | param_ports: 74 | - { external_port: "80", internal_port: "80", port_desc: "Application WebUI" } 75 | opt_param_ports: 76 | - { external_port: "80", internal_port: "80", port_desc: "Application WebUI" } 77 | ``` 78 | 79 | ### Volumes 80 | 81 | ```yml 82 | param_volumes: 83 | - { vol_path: "/config", vol_host_path: "", desc: "Configuration files." } 84 | opt_param_volumes: 85 | - { vol_path: "/config", vol_host_path: "", desc: "Configuration files." } 86 | ``` 87 | 88 | ### Testing template changes 89 | 90 | After you make any changes to the templates, you can use our [Jenkins Builder](https://github.com/linuxserver/docker-jenkins-builder) to have the files updated from the modified templates. Please use the command found under `Running Locally` [on this page](https://github.com/linuxserver/docker-jenkins-builder/blob/master/README.md) to generate them prior to submitting a PR. 91 | 92 | ## Dockerfiles 93 | 94 | We use multiple Dockerfiles in our repos because some CPU architectures need different packages to work. 95 | If you are proposing additional packages to be added, ensure that you add the packages to all the Dockerfiles in alphabetical order. 96 | 97 | ### Testing your changes 98 | 99 | ```bash 100 | git clone https://github.com/linuxserver/docker-baseimage-alpine.git 101 | cd docker-baseimage-alpine 102 | docker build \ 103 | --no-cache \ 104 | --pull \ 105 | -t linuxserver/baseimage-alpine:latest . 106 | ``` 107 | 108 | The ARM variants can be built on x86_64 hardware and vice versa using `lscr.io/linuxserver/qemu-static`. 109 | 110 | ```bash 111 | docker run --rm --privileged lscr.io/linuxserver/qemu-static --reset 112 | ``` 113 | 114 | Once registered, you can select the Dockerfile to build with `-f Dockerfile.aarch64`, as in the example below. 
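A minimal sketch of such a cross-build, combining the commands above (the `arm64v8-local` tag is only an illustrative local name, not one of our published tags):

```bash
# register the qemu binfmt handlers so non-native builds can run on this host
docker run --rm --privileged lscr.io/linuxserver/qemu-static --reset

# build the aarch64 variant from the repo root
docker build \
  --no-cache \
  --pull \
  -f Dockerfile.aarch64 \
  -t linuxserver/baseimage-alpine:arm64v8-local .
```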
115 | 116 | ## Update the changelog 117 | 118 | If you are modifying the Dockerfiles or any of the startup scripts in [root](https://github.com/linuxserver/docker-baseimage-alpine/tree/master/root), add an entry to the changelog 119 | 120 | ```yml 121 | changelogs: 122 | - { date: "DD.MM.YY:", desc: "Added some love to templates" } 123 | ``` 124 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: linuxserver 2 | open_collective: linuxserver 3 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: Discord chat support 4 | url: https://linuxserver.io/discord 5 | about: Realtime support / chat with the community and the team. 6 | 7 | - name: Discourse discussion forum 8 | url: https://discourse.linuxserver.io 9 | about: Post on our community forum. 10 | 11 | - name: Documentation 12 | url: https://docs.linuxserver.io 13 | about: Documentation - information about all of our containers. 14 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/issue.bug.yml: -------------------------------------------------------------------------------- 1 | # Based on the issue template 2 | name: Bug report 3 | description: Create a report to help us improve 4 | title: "[BUG] " 5 | labels: [Bug] 6 | body: 7 | - type: checkboxes 8 | attributes: 9 | label: Is there an existing issue for this? 10 | description: Please search to see if an issue already exists for the bug you encountered. 11 | options: 12 | - label: I have searched the existing issues 13 | required: true 14 | - type: textarea 15 | attributes: 16 | label: Current Behavior 17 | description: Tell us what happens instead of the expected behavior. 18 | validations: 19 | required: true 20 | - type: textarea 21 | attributes: 22 | label: Expected Behavior 23 | description: Tell us what should happen. 24 | validations: 25 | required: false 26 | - type: textarea 27 | attributes: 28 | label: Steps To Reproduce 29 | description: Steps to reproduce the behavior. 30 | placeholder: | 31 | 1. In this environment... 32 | 2. With this config... 33 | 3. Run '...' 34 | 4. See error... 
35 | validations: 36 | required: true 37 | - type: textarea 38 | attributes: 39 | label: Environment 40 | description: | 41 | examples: 42 | - **OS**: Ubuntu 20.04 43 | - **How docker service was installed**: distro's packagemanager 44 | value: | 45 | - OS: 46 | - How docker service was installed: 47 | render: markdown 48 | validations: 49 | required: false 50 | - type: textarea 51 | attributes: 52 | label: Docker creation 53 | description: | 54 | Command used to create docker container 55 | Provide your docker create/run command or compose yaml snippet, or a screenshot of settings if using a gui to create the container 56 | render: bash 57 | validations: 58 | required: true 59 | - type: textarea 60 | attributes: 61 | description: | 62 | Provide a full docker log, output of "docker logs baseimage-alpine" 63 | label: Container logs 64 | placeholder: | 65 | Output of `docker logs baseimage-alpine` 66 | render: bash 67 | validations: 68 | required: true 69 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/issue.feature.yml: -------------------------------------------------------------------------------- 1 | # Based on the issue template 2 | name: Feature request 3 | description: Suggest an idea for this project 4 | title: "[FEAT] <title>" 5 | labels: [enhancement] 6 | body: 7 | - type: checkboxes 8 | attributes: 9 | label: Is this a new feature request? 10 | description: Please search to see if a feature request already exists. 11 | options: 12 | - label: I have searched the existing issues 13 | required: true 14 | - type: textarea 15 | attributes: 16 | label: Wanted change 17 | description: Tell us what you want to happen. 18 | validations: 19 | required: true 20 | - type: textarea 21 | attributes: 22 | label: Reason for change 23 | description: Justify your request, why do you want it, what is the benefit. 24 | validations: 25 | required: true 26 | - type: textarea 27 | attributes: 28 | label: Proposed code change 29 | description: Do you have a potential code change in mind? 30 | validations: 31 | required: false 32 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | <!--- Provide a general summary of your changes in the Title above --> 2 | 3 | [linuxserverurl]: https://linuxserver.io 4 | [![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)][linuxserverurl] 5 | 6 | 7 | <!--- Before submitting a pull request please check the following --> 8 | 9 | <!--- If this is a fix for a typo (in code, documentation, or the README) please file an issue and let us sort it out. 
We do not need a PR --> 10 | <!--- Ask yourself if this modification is something the whole userbase will benefit from, if this is a specific change for corner case functionality or plugins please look at making a Docker Mod or local script https://blog.linuxserver.io/2019/09/14/customizing-our-containers/ --> 11 | <!--- That if the PR is addressing an existing issue include, closes #<issue number> , in the body of the PR commit message --> 12 | <!--- You have included links to any files / patches etc your PR may be using in the body of the PR commit message --> 13 | <!--- We maintain a changelog of major revisions to the container at the end of readme-vars.yml in the root of this repository, please add your changes there if appropriate --> 14 | 15 | 16 | <!--- Coding guidelines: --> 17 | <!--- 1. Installed packages in the Dockerfiles should be in alphabetical order --> 18 | <!--- 2. Changes to Dockerfile should be replicated in Dockerfile.armhf and Dockerfile.aarch64 if applicable --> 19 | <!--- 3. Indentation style (tabs vs 4 spaces vs 1 space) should match the rest of the document --> 20 | <!--- 4. Readme is auto generated from readme-vars.yml, make your changes there --> 21 | 22 | ------------------------------ 23 | 24 | - [ ] I have read the [contributing](https://github.com/linuxserver/docker-baseimage-alpine/blob/master/.github/CONTRIBUTING.md) guideline and understand that I have made the correct modifications 25 | 26 | ------------------------------ 27 | 28 | <!--- We welcome all PR’s though this doesn’t guarantee it will be accepted. --> 29 | 30 | ## Description: 31 | <!--- Describe your changes in detail --> 32 | 33 | ## Benefits of this PR and context: 34 | <!--- Please explain why we should accept this PR. If this fixes an outstanding bug, please reference the issue # --> 35 | 36 | ## How Has This Been Tested? 37 | <!--- Please describe in detail how you tested your changes. --> 38 | <!--- Include details of your testing environment, and the tests you ran to --> 39 | <!--- see how your change affects other areas of the code, etc. 
--> 40 | 41 | 42 | ## Source / References: 43 | <!--- Please include any forum posts/github links relevant to the PR --> 44 | -------------------------------------------------------------------------------- /.github/workflows/call_issue_pr_tracker.yml: -------------------------------------------------------------------------------- 1 | name: Issue & PR Tracker 2 | 3 | on: 4 | issues: 5 | types: [opened,reopened,labeled,unlabeled,closed] 6 | pull_request_target: 7 | types: [opened,reopened,review_requested,review_request_removed,labeled,unlabeled,closed] 8 | pull_request_review: 9 | types: [submitted,edited,dismissed] 10 | 11 | jobs: 12 | manage-project: 13 | permissions: 14 | issues: write 15 | uses: linuxserver/github-workflows/.github/workflows/issue-pr-tracker.yml@v1 16 | secrets: inherit 17 | -------------------------------------------------------------------------------- /.github/workflows/call_issues_cron.yml: -------------------------------------------------------------------------------- 1 | name: Mark stale issues and pull requests 2 | on: 3 | schedule: 4 | - cron: '9 11 * * *' 5 | workflow_dispatch: 6 | 7 | jobs: 8 | stale: 9 | permissions: 10 | issues: write 11 | pull-requests: write 12 | uses: linuxserver/github-workflows/.github/workflows/issues-cron.yml@v1 13 | secrets: inherit 14 | -------------------------------------------------------------------------------- /.github/workflows/external_trigger.yml: -------------------------------------------------------------------------------- 1 | name: External Trigger Main 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | jobs: 7 | external-trigger-master: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v4.1.1 11 | 12 | - name: External Trigger 13 | if: github.ref == 'refs/heads/master' 14 | env: 15 | SKIP_EXTERNAL_TRIGGER: ${{ vars.SKIP_EXTERNAL_TRIGGER }} 16 | run: | 17 | printf "# External trigger for docker-baseimage-alpine\n\n" >> $GITHUB_STEP_SUMMARY 18 | echo "Type is \`os\`" >> $GITHUB_STEP_SUMMARY 19 | echo "No external release, exiting" >> $GITHUB_STEP_SUMMARY 20 | exit 0 21 | if grep -q "^baseimage-alpine_master_${EXT_RELEASE}" <<< "${SKIP_EXTERNAL_TRIGGER}"; then 22 | echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY 23 | echo "> Github organizational variable \`SKIP_EXTERNAL_TRIGGER\` matches current external release; skipping trigger." 
>> $GITHUB_STEP_SUMMARY 24 | exit 0 25 | fi 26 | -------------------------------------------------------------------------------- /.github/workflows/external_trigger_scheduler.yml: -------------------------------------------------------------------------------- 1 | name: External Trigger Scheduler 2 | 3 | on: 4 | schedule: 5 | - cron: '20 * * * *' 6 | workflow_dispatch: 7 | 8 | jobs: 9 | external-trigger-scheduler: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4.1.1 13 | with: 14 | fetch-depth: '0' 15 | 16 | - name: External Trigger Scheduler 17 | run: | 18 | printf "# External trigger scheduler for docker-baseimage-alpine\n\n" >> $GITHUB_STEP_SUMMARY 19 | printf "Found the branches:\n\n%s\n" "$(git for-each-ref --format='- %(refname:lstrip=3)' refs/remotes)" >> $GITHUB_STEP_SUMMARY 20 | for br in $(git for-each-ref --format='%(refname:lstrip=3)' refs/remotes) 21 | do 22 | if [[ "${br}" == "HEAD" ]]; then 23 | printf "\nSkipping %s.\n" ${br} >> $GITHUB_STEP_SUMMARY 24 | continue 25 | fi 26 | printf "\n## Evaluating \`%s\`\n\n" ${br} >> $GITHUB_STEP_SUMMARY 27 | ls_jenkins_vars=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/${br}/jenkins-vars.yml) 28 | ls_branch=$(echo "${ls_jenkins_vars}" | yq -r '.ls_branch') 29 | ls_trigger=$(echo "${ls_jenkins_vars}" | yq -r '.external_type') 30 | if [[ "${br}" == "${ls_branch}" ]] && [[ "${ls_trigger}" != "os" ]]; then 31 | echo "Branch appears to be live and trigger is not os; checking workflow." >> $GITHUB_STEP_SUMMARY 32 | if curl -sfX GET https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/${br}/.github/workflows/external_trigger.yml > /dev/null 2>&1; then 33 | echo "Triggering external trigger workflow for branch." >> $GITHUB_STEP_SUMMARY 34 | curl -iX POST \ 35 | -H "Authorization: token ${{ secrets.CR_PAT }}" \ 36 | -H "Accept: application/vnd.github.v3+json" \ 37 | -d "{\"ref\":\"refs/heads/${br}\"}" \ 38 | https://api.github.com/repos/linuxserver/docker-baseimage-alpine/actions/workflows/external_trigger.yml/dispatches 39 | else 40 | echo "Skipping branch due to no external trigger workflow present." >> $GITHUB_STEP_SUMMARY 41 | fi 42 | else 43 | echo "Skipping branch due to being detected as dev branch or having no external version." >> $GITHUB_STEP_SUMMARY 44 | fi 45 | done 46 | -------------------------------------------------------------------------------- /.github/workflows/greetings.yml: -------------------------------------------------------------------------------- 1 | name: Greetings 2 | 3 | on: [pull_request_target, issues] 4 | 5 | jobs: 6 | greeting: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/first-interaction@v1 10 | with: 11 | issue-message: 'Thanks for opening your first issue here! Be sure to follow the relevant issue templates, or risk having this issue marked as invalid.' 12 | pr-message: 'Thanks for opening this pull request! Be sure to follow the [pull request template](https://github.com/linuxserver/docker-baseimage-alpine/blob/master/.github/PULL_REQUEST_TEMPLATE.md)!' 
13 | repo-token: ${{ secrets.GITHUB_TOKEN }} 14 | -------------------------------------------------------------------------------- /.github/workflows/package_trigger_scheduler.yml: -------------------------------------------------------------------------------- 1 | name: Package Trigger Scheduler 2 | 3 | on: 4 | schedule: 5 | - cron: '15 13 * * 6' 6 | workflow_dispatch: 7 | 8 | jobs: 9 | package-trigger-scheduler: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4.1.1 13 | with: 14 | fetch-depth: '0' 15 | 16 | - name: Package Trigger Scheduler 17 | env: 18 | SKIP_PACKAGE_TRIGGER: ${{ vars.SKIP_PACKAGE_TRIGGER }} 19 | run: | 20 | printf "# Package trigger scheduler for docker-baseimage-alpine\n\n" >> $GITHUB_STEP_SUMMARY 21 | printf "Found the branches:\n\n%s\n" "$(git for-each-ref --format='- %(refname:lstrip=3)' refs/remotes)" >> $GITHUB_STEP_SUMMARY 22 | for br in $(git for-each-ref --format='%(refname:lstrip=3)' refs/remotes) 23 | do 24 | if [[ "${br}" == "HEAD" ]]; then 25 | printf "\nSkipping %s.\n" ${br} >> $GITHUB_STEP_SUMMARY 26 | continue 27 | fi 28 | printf "\n## Evaluating \`%s\`\n\n" ${br} >> $GITHUB_STEP_SUMMARY 29 | JENKINS_VARS=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/${br}/jenkins-vars.yml) 30 | if ! curl -sfX GET https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/${br}/Jenkinsfile >/dev/null 2>&1; then 31 | echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY 32 | echo "> No Jenkinsfile found. Branch is either deprecated or is an early dev branch." >> $GITHUB_STEP_SUMMARY 33 | skipped_branches="${skipped_branches}${br} " 34 | elif [[ "${br}" == $(yq -r '.ls_branch' <<< "${JENKINS_VARS}") ]]; then 35 | echo "Branch appears to be live; checking workflow." >> $GITHUB_STEP_SUMMARY 36 | README_VARS=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/${br}/readme-vars.yml) 37 | if [[ $(yq -r '.project_deprecation_status' <<< "${README_VARS}") == "true" ]]; then 38 | echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY 39 | echo "> Branch appears to be deprecated; skipping trigger." >> $GITHUB_STEP_SUMMARY 40 | skipped_branches="${skipped_branches}${br} " 41 | elif [[ $(yq -r '.skip_package_check' <<< "${JENKINS_VARS}") == "true" ]]; then 42 | echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY 43 | echo "> Skipping branch ${br} due to \`skip_package_check\` being set in \`jenkins-vars.yml\`." >> $GITHUB_STEP_SUMMARY 44 | skipped_branches="${skipped_branches}${br} " 45 | elif grep -q "^baseimage-alpine_${br}" <<< "${SKIP_PACKAGE_TRIGGER}"; then 46 | echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY 47 | echo "> Github organizational variable \`SKIP_PACKAGE_TRIGGER\` contains \`baseimage-alpine_${br}\`; skipping trigger." 
>> $GITHUB_STEP_SUMMARY 48 | skipped_branches="${skipped_branches}${br} " 49 | elif [ $(curl -s https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-baseimage-alpine/job/${br}/lastBuild/api/json | jq -r '.building' 2>/dev/null) == "true" ]; then 50 | echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY 51 | echo "> There already seems to be an active build on Jenkins; skipping package trigger for ${br}" >> $GITHUB_STEP_SUMMARY 52 | skipped_branches="${skipped_branches}${br} " 53 | else 54 | echo "> [!NOTE]" >> $GITHUB_STEP_SUMMARY 55 | echo "> Triggering package trigger for branch ${br}" >> $GITHUB_STEP_SUMMARY 56 | printf "> To disable, add \`baseimage-alpine_%s\` into the Github organizational variable \`SKIP_PACKAGE_TRIGGER\`.\n\n" "${br}" >> $GITHUB_STEP_SUMMARY 57 | triggered_branches="${triggered_branches}${br} " 58 | response=$(curl -iX POST \ 59 | https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-baseimage-alpine/job/${br}/buildWithParameters?PACKAGE_CHECK=true \ 60 | --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} | grep -i location | sed "s|^[L|l]ocation: \(.*\)|\1|") 61 | if [[ -z "${response}" ]]; then 62 | echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY 63 | echo "> Jenkins build could not be triggered. Skipping branch." 64 | continue 65 | fi 66 | echo "Jenkins [job queue url](${response%$'\r'})" >> $GITHUB_STEP_SUMMARY 67 | echo "Sleeping 10 seconds until job starts" >> $GITHUB_STEP_SUMMARY 68 | sleep 10 69 | buildurl=$(curl -s "${response%$'\r'}api/json" | jq -r '.executable.url') 70 | buildurl="${buildurl%$'\r'}" 71 | echo "Jenkins job [build url](${buildurl})" >> $GITHUB_STEP_SUMMARY 72 | echo "Attempting to change the Jenkins job description" >> $GITHUB_STEP_SUMMARY 73 | if ! curl -ifX POST \ 74 | "${buildurl}submitDescription" \ 75 | --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} \ 76 | --data-urlencode "description=GHA package trigger https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" \ 77 | --data-urlencode "Submit=Submit"; then 78 | echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY 79 | echo "> Unable to change the Jenkins job description." 80 | fi 81 | sleep 20 82 | fi 83 | else 84 | echo "Skipping branch ${br} due to being detected as dev branch." 
>> $GITHUB_STEP_SUMMARY 85 | fi 86 | done 87 | if [[ -n "${triggered_branches}" ]] || [[ -n "${skipped_branches}" ]]; then 88 | if [[ -n "${triggered_branches}" ]]; then 89 | NOTIFY_BRANCHES="**Triggered:** ${triggered_branches} \n" 90 | NOTIFY_BUILD_URL="**Build URL:** https://ci.linuxserver.io/blue/organizations/jenkins/Docker-Pipeline-Builders%2Fdocker-baseimage-alpine/activity/ \n" 91 | echo "**** Package check build(s) triggered for branch(es): ${triggered_branches} ****" 92 | fi 93 | if [[ -n "${skipped_branches}" ]]; then 94 | NOTIFY_BRANCHES="${NOTIFY_BRANCHES}**Skipped:** ${skipped_branches} \n" 95 | fi 96 | echo "**** Notifying Discord ****" 97 | curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://cdn.discordapp.com/avatars/354986384542662657/df91181b3f1cf0ef1592fbe18e0962d7.png","embeds": [{"color": 9802903, 98 | "description": "**Package Check Build(s) for baseimage-alpine** \n'"${NOTIFY_BRANCHES}"''"${NOTIFY_BUILD_URL}"'"}], 99 | "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }} 100 | fi 101 | -------------------------------------------------------------------------------- /.github/workflows/permissions.yml: -------------------------------------------------------------------------------- 1 | name: Permission check 2 | on: 3 | pull_request_target: 4 | paths: 5 | - '**/run' 6 | - '**/finish' 7 | - '**/check' 8 | - 'root/migrations/*' 9 | 10 | jobs: 11 | permission_check: 12 | uses: linuxserver/github-workflows/.github/workflows/init-svc-executable-permissions.yml@v1 13 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Windows image file caches 2 | Thumbs.db 3 | ehthumbs.db 4 | 5 | # Folder config file 6 | Desktop.ini 7 | 8 | # Recycle Bin used on file shares 9 | $RECYCLE.BIN/ 10 | 11 | # Windows Installer files 12 | *.cab 13 | *.msi 14 | *.msm 15 | *.msp 16 | 17 | # Windows shortcuts 18 | *.lnk 19 | 20 | # ========================= 21 | # Operating System Files 22 | # ========================= 23 | 24 | # OSX 25 | # ========================= 26 | 27 | .DS_Store 28 | .AppleDouble 29 | .LSOverride 30 | 31 | # Thumbnails 32 | ._* 33 | 34 | # Files that might appear on external disk 35 | .Spotlight-V100 36 | .Trashes 37 | 38 | # Directories potentially created on remote AFP share 39 | .AppleDB 40 | .AppleDesktop 41 | Network Trash Folder 42 | Temporary Items 43 | .apdisk 44 | .jenkins-external 45 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM alpine:3.21 AS rootfs-stage 4 | 5 | ARG S6_OVERLAY_VERSION="3.2.1.0" 6 | ARG ROOTFS=/root-out 7 | ARG REL=v3.22 8 | ARG ARCH=x86_64 9 | ARG MIRROR=http://dl-cdn.alpinelinux.org/alpine 10 | ARG PACKAGES=alpine-baselayout,\ 11 | alpine-keys,\ 12 | apk-tools,\ 13 | busybox,\ 14 | libc-utils 15 | 16 | # install packages 17 | RUN \ 18 | apk add --no-cache \ 19 | bash \ 20 | xz 21 | 22 | # build rootfs 23 | RUN \ 24 | mkdir -p "${ROOTFS}/etc/apk" && \ 25 | { \ 26 | echo "${MIRROR}/${REL}/main"; \ 27 | echo "${MIRROR}/${REL}/community"; \ 28 | } > "${ROOTFS}/etc/apk/repositories" && \ 29 | apk --root "${ROOTFS}" --no-cache --keys-dir /etc/apk/keys add --arch ${ARCH} --initdb ${PACKAGES//,/ } && \ 30 | sed -i -e 's/^root::/root:!:/' /root-out/etc/shadow 31 | 32 | # add s6 overlay 33 | ADD 
https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz /tmp 34 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-noarch.tar.xz 35 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${ARCH}.tar.xz /tmp 36 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-${ARCH}.tar.xz 37 | 38 | # add s6 optional symlinks 39 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-noarch.tar.xz /tmp 40 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-noarch.tar.xz && unlink /root-out/usr/bin/with-contenv 41 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-arch.tar.xz /tmp 42 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-arch.tar.xz 43 | 44 | # Runtime stage 45 | FROM scratch 46 | COPY --from=rootfs-stage /root-out/ / 47 | ARG BUILD_DATE 48 | ARG VERSION 49 | ARG MODS_VERSION="v3" 50 | ARG PKG_INST_VERSION="v1" 51 | ARG LSIOWN_VERSION="v1" 52 | ARG WITHCONTENV_VERSION="v1" 53 | LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}" 54 | LABEL maintainer="TheLamer" 55 | 56 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/docker-mods.${MODS_VERSION}" "/docker-mods" 57 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/package-install.${PKG_INST_VERSION}" "/etc/s6-overlay/s6-rc.d/init-mods-package-install/run" 58 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/lsiown.${LSIOWN_VERSION}" "/usr/bin/lsiown" 59 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/with-contenv.${WITHCONTENV_VERSION}" "/usr/bin/with-contenv" 60 | 61 | # environment variables 62 | ENV PS1="$(whoami)@$(hostname):$(pwd)\\$ " \ 63 | HOME="/root" \ 64 | TERM="xterm" \ 65 | S6_CMD_WAIT_FOR_SERVICES_MAXTIME="0" \ 66 | S6_VERBOSITY=1 \ 67 | S6_STAGE2_HOOK=/docker-mods \ 68 | VIRTUAL_ENV=/lsiopy \ 69 | PATH="/lsiopy/bin:$PATH" 70 | 71 | RUN \ 72 | echo "**** install runtime packages ****" && \ 73 | apk add --no-cache \ 74 | alpine-release \ 75 | bash \ 76 | ca-certificates \ 77 | catatonit \ 78 | coreutils \ 79 | curl \ 80 | findutils \ 81 | jq \ 82 | netcat-openbsd \ 83 | procps-ng \ 84 | shadow \ 85 | tzdata && \ 86 | echo "**** create abc user and make our folders ****" && \ 87 | groupmod -g 1000 users && \ 88 | useradd -u 911 -U -d /config -s /bin/false abc && \ 89 | usermod -G users abc && \ 90 | mkdir -p \ 91 | /app \ 92 | /config \ 93 | /defaults \ 94 | /lsiopy && \ 95 | echo "**** cleanup ****" && \ 96 | rm -rf \ 97 | /tmp/* 98 | 99 | # add local files 100 | COPY root/ / 101 | 102 | ENTRYPOINT ["/init"] 103 | -------------------------------------------------------------------------------- /Dockerfile.aarch64: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM alpine:3.21 AS rootfs-stage 4 | 5 | ARG S6_OVERLAY_VERSION="3.2.1.0" 6 | ARG ROOTFS=/root-out 7 | ARG REL=v3.22 8 | ARG ARCH=aarch64 9 | ARG MIRROR=http://dl-cdn.alpinelinux.org/alpine 10 | ARG PACKAGES=alpine-baselayout,\ 11 | alpine-keys,\ 12 | apk-tools,\ 13 | busybox,\ 14 | libc-utils 15 | 16 | # install packages 17 | RUN \ 18 | apk add --no-cache \ 19 | bash \ 20 | xz 21 | 22 | # build rootfs 23 | RUN \ 24 | mkdir -p "${ROOTFS}/etc/apk" && \ 25 | { \ 26 | echo "${MIRROR}/${REL}/main"; \ 27 | 
echo "${MIRROR}/${REL}/community"; \ 28 | } > "${ROOTFS}/etc/apk/repositories" && \ 29 | apk --root "${ROOTFS}" --no-cache --keys-dir /etc/apk/keys add --arch ${ARCH} --initdb ${PACKAGES//,/ } && \ 30 | sed -i -e 's/^root::/root:!:/' /root-out/etc/shadow 31 | 32 | # add s6 overlay 33 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz /tmp 34 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-noarch.tar.xz 35 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${ARCH}.tar.xz /tmp 36 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-${ARCH}.tar.xz 37 | 38 | # add s6 optional symlinks 39 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-noarch.tar.xz /tmp 40 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-noarch.tar.xz && unlink /root-out/usr/bin/with-contenv 41 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-arch.tar.xz /tmp 42 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-arch.tar.xz 43 | 44 | # Runtime stage 45 | FROM scratch 46 | COPY --from=rootfs-stage /root-out/ / 47 | ARG BUILD_DATE 48 | ARG VERSION 49 | ARG MODS_VERSION="v3" 50 | ARG PKG_INST_VERSION="v1" 51 | ARG LSIOWN_VERSION="v1" 52 | ARG WITHCONTENV_VERSION="v1" 53 | LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}" 54 | LABEL maintainer="TheLamer" 55 | 56 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/docker-mods.${MODS_VERSION}" "/docker-mods" 57 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/package-install.${PKG_INST_VERSION}" "/etc/s6-overlay/s6-rc.d/init-mods-package-install/run" 58 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/lsiown.${LSIOWN_VERSION}" "/usr/bin/lsiown" 59 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/with-contenv.${WITHCONTENV_VERSION}" "/usr/bin/with-contenv" 60 | 61 | # environment variables 62 | ENV PS1="$(whoami)@$(hostname):$(pwd)\\$ " \ 63 | HOME="/root" \ 64 | TERM="xterm" \ 65 | S6_CMD_WAIT_FOR_SERVICES_MAXTIME="0" \ 66 | S6_VERBOSITY=1 \ 67 | S6_STAGE2_HOOK=/docker-mods \ 68 | VIRTUAL_ENV=/lsiopy \ 69 | PATH="/lsiopy/bin:$PATH" 70 | 71 | RUN \ 72 | echo "**** install runtime packages ****" && \ 73 | apk add --no-cache \ 74 | alpine-release \ 75 | bash \ 76 | ca-certificates \ 77 | catatonit \ 78 | coreutils \ 79 | curl \ 80 | findutils \ 81 | jq \ 82 | netcat-openbsd \ 83 | procps-ng \ 84 | shadow \ 85 | tzdata && \ 86 | echo "**** create abc user and make our folders ****" && \ 87 | groupmod -g 1000 users && \ 88 | useradd -u 911 -U -d /config -s /bin/false abc && \ 89 | usermod -G users abc && \ 90 | mkdir -p \ 91 | /app \ 92 | /config \ 93 | /defaults \ 94 | /lsiopy && \ 95 | echo "**** cleanup ****" && \ 96 | rm -rf \ 97 | /tmp/* 98 | 99 | # add local files 100 | COPY root/ / 101 | 102 | ENTRYPOINT ["/init"] 103 | -------------------------------------------------------------------------------- /Dockerfile.riscv64: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM alpine:3.21 AS rootfs-stage 4 | 5 | ARG S6_OVERLAY_VERSION="3.2.1.0" 6 | ARG ROOTFS=/root-out 7 | ARG REL=v3.22 8 | ARG ARCH=riscv64 9 | ARG MIRROR=http://dl-cdn.alpinelinux.org/alpine 10 | ARG 
PACKAGES=alpine-baselayout,\ 11 | alpine-keys,\ 12 | apk-tools,\ 13 | busybox,\ 14 | libc-utils 15 | 16 | # install packages 17 | RUN \ 18 | apk add --no-cache \ 19 | bash \ 20 | xz 21 | 22 | # build rootfs 23 | RUN \ 24 | mkdir -p "${ROOTFS}/etc/apk" && \ 25 | { \ 26 | echo "${MIRROR}/${REL}/main"; \ 27 | echo "${MIRROR}/${REL}/community"; \ 28 | } > "${ROOTFS}/etc/apk/repositories" && \ 29 | apk --root "${ROOTFS}" --no-cache --keys-dir /etc/apk/keys add --arch ${ARCH} --initdb ${PACKAGES//,/ } && \ 30 | sed -i -e 's/^root::/root:!:/' /root-out/etc/shadow 31 | 32 | # add s6 overlay 33 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz /tmp 34 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-noarch.tar.xz 35 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${ARCH}.tar.xz /tmp 36 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-${ARCH}.tar.xz 37 | 38 | # add s6 optional symlinks 39 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-noarch.tar.xz /tmp 40 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-noarch.tar.xz && unlink /root-out/usr/bin/with-contenv 41 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-arch.tar.xz /tmp 42 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-arch.tar.xz 43 | 44 | # Runtime stage 45 | FROM scratch 46 | COPY --from=rootfs-stage /root-out/ / 47 | ARG BUILD_DATE 48 | ARG VERSION 49 | ARG MODS_VERSION="v3" 50 | ARG PKG_INST_VERSION="v1" 51 | ARG LSIOWN_VERSION="v1" 52 | ARG WITHCONTENV_VERSION="v1" 53 | LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}" 54 | LABEL maintainer="TheLamer" 55 | 56 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/docker-mods.${MODS_VERSION}" "/docker-mods" 57 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/package-install.${PKG_INST_VERSION}" "/etc/s6-overlay/s6-rc.d/init-mods-package-install/run" 58 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/lsiown.${LSIOWN_VERSION}" "/usr/bin/lsiown" 59 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/with-contenv.${WITHCONTENV_VERSION}" "/usr/bin/with-contenv" 60 | 61 | # environment variables 62 | ENV PS1="$(whoami)@$(hostname):$(pwd)\\$ " \ 63 | HOME="/root" \ 64 | TERM="xterm" \ 65 | S6_CMD_WAIT_FOR_SERVICES_MAXTIME="0" \ 66 | S6_VERBOSITY=1 \ 67 | S6_STAGE2_HOOK=/docker-mods \ 68 | VIRTUAL_ENV=/lsiopy \ 69 | PATH="/lsiopy/bin:$PATH" 70 | 71 | RUN \ 72 | echo "**** install runtime packages ****" && \ 73 | apk add --no-cache \ 74 | alpine-release \ 75 | bash \ 76 | ca-certificates \ 77 | catatonit \ 78 | coreutils \ 79 | curl \ 80 | findutils \ 81 | jq \ 82 | netcat-openbsd \ 83 | procps-ng \ 84 | shadow \ 85 | tzdata && \ 86 | echo "**** create abc user and make our folders ****" && \ 87 | groupmod -g 1000 users && \ 88 | useradd -u 911 -U -d /config -s /bin/false abc && \ 89 | usermod -G users abc && \ 90 | mkdir -p \ 91 | /app \ 92 | /config \ 93 | /defaults \ 94 | /lsiopy && \ 95 | echo "**** cleanup ****" && \ 96 | rm -rf \ 97 | /tmp/* 98 | 99 | # add local files 100 | COPY root/ / 101 | 102 | ENTRYPOINT ["/init"] 103 | -------------------------------------------------------------------------------- /Jenkinsfile: 
-------------------------------------------------------------------------------- 1 | pipeline { 2 | agent { 3 | label 'X86-64-MULTI' 4 | } 5 | options { 6 | buildDiscarder(logRotator(numToKeepStr: '10', daysToKeepStr: '60')) 7 | parallelsAlwaysFailFast() 8 | } 9 | // Input to determine if this is a package check 10 | parameters { 11 | string(defaultValue: 'false', description: 'package check run', name: 'PACKAGE_CHECK') 12 | } 13 | // Configuration for the variables used for this specific repo 14 | environment { 15 | BUILDS_DISCORD=credentials('build_webhook_url') 16 | GITHUB_TOKEN=credentials('498b4638-2d02-4ce5-832d-8a57d01d97ab') 17 | GITLAB_TOKEN=credentials('b6f0f1dd-6952-4cf6-95d1-9c06380283f0') 18 | GITLAB_NAMESPACE=credentials('gitlab-namespace-id') 19 | DOCKERHUB_TOKEN=credentials('docker-hub-ci-pat') 20 | QUAYIO_API_TOKEN=credentials('quayio-repo-api-token') 21 | GIT_SIGNING_KEY=credentials('484fbca6-9a4f-455e-b9e3-97ac98785f5f') 22 | BUILD_VERSION_ARG = 'OS' 23 | LS_USER = 'linuxserver' 24 | LS_REPO = 'docker-baseimage-alpine' 25 | CONTAINER_NAME = 'baseimage-alpine' 26 | DOCKERHUB_IMAGE = 'lsiobase/alpine' 27 | DEV_DOCKERHUB_IMAGE = 'lsiodev/alpine' 28 | PR_DOCKERHUB_IMAGE = 'lspipepr/alpine' 29 | DIST_IMAGE = 'alpine' 30 | MULTIARCH='true' 31 | CI='true' 32 | CI_WEB='false' 33 | CI_PORT='80' 34 | CI_SSL='true' 35 | CI_DELAY='30' 36 | CI_DOCKERENV='LSIO_FIRST_PARTY=true' 37 | CI_AUTH='' 38 | CI_WEBPATH='' 39 | } 40 | stages { 41 | stage("Set git config"){ 42 | steps{ 43 | sh '''#!/bin/bash 44 | cat ${GIT_SIGNING_KEY} > /config/.ssh/id_sign 45 | chmod 600 /config/.ssh/id_sign 46 | ssh-keygen -y -f /config/.ssh/id_sign > /config/.ssh/id_sign.pub 47 | echo "Using $(ssh-keygen -lf /config/.ssh/id_sign) to sign commits" 48 | git config --global gpg.format ssh 49 | git config --global user.signingkey /config/.ssh/id_sign 50 | git config --global commit.gpgsign true 51 | ''' 52 | } 53 | } 54 | // Setup all the basic environment variables needed for the build 55 | stage("Set ENV Variables base"){ 56 | steps{ 57 | echo "Running on node: ${NODE_NAME}" 58 | sh '''#! 
/bin/bash 59 | echo "Pruning builder" 60 | docker builder prune -f --builder container || : 61 | containers=$(docker ps -q) 62 | if [[ -n "${containers}" ]]; then 63 | BUILDX_CONTAINER_ID=$(docker ps -qf 'name=buildx_buildkit') 64 | for container in ${containers}; do 65 | if [[ "${container}" == "${BUILDX_CONTAINER_ID}" ]]; then 66 | echo "skipping buildx container in docker stop" 67 | else 68 | echo "Stopping container ${container}" 69 | docker stop ${container} 70 | fi 71 | done 72 | fi 73 | docker system prune -f --volumes || : 74 | docker image prune -af || : 75 | ''' 76 | script{ 77 | env.EXIT_STATUS = '' 78 | env.LS_RELEASE = sh( 79 | script: '''docker run --rm quay.io/skopeo/stable:v1 inspect docker://ghcr.io/${LS_USER}/${CONTAINER_NAME}:3.22 2>/dev/null | jq -r '.Labels.build_version' | awk '{print $3}' | grep '\\-ls' || : ''', 80 | returnStdout: true).trim() 81 | env.LS_RELEASE_NOTES = sh( 82 | script: '''cat readme-vars.yml | awk -F \\" '/date: "[0-9][0-9].[0-9][0-9].[0-9][0-9]:/ {print $4;exit;}' | sed -E ':a;N;$!ba;s/\\r{0,1}\\n/\\\\n/g' ''', 83 | returnStdout: true).trim() 84 | env.GITHUB_DATE = sh( 85 | script: '''date '+%Y-%m-%dT%H:%M:%S%:z' ''', 86 | returnStdout: true).trim() 87 | env.COMMIT_SHA = sh( 88 | script: '''git rev-parse HEAD''', 89 | returnStdout: true).trim() 90 | env.GH_DEFAULT_BRANCH = sh( 91 | script: '''git remote show origin | grep "HEAD branch:" | sed 's|.*HEAD branch: ||' ''', 92 | returnStdout: true).trim() 93 | env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/commit/' + env.GIT_COMMIT 94 | env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DOCKERHUB_IMAGE + '/tags/' 95 | env.PULL_REQUEST = env.CHANGE_ID 96 | env.TEMPLATED_FILES = 'Jenkinsfile README.md LICENSE .editorconfig ./.github/CONTRIBUTING.md ./.github/FUNDING.yml ./.github/ISSUE_TEMPLATE/config.yml ./.github/ISSUE_TEMPLATE/issue.bug.yml ./.github/ISSUE_TEMPLATE/issue.feature.yml ./.github/PULL_REQUEST_TEMPLATE.md ./.github/workflows/external_trigger_scheduler.yml ./.github/workflows/greetings.yml ./.github/workflows/package_trigger_scheduler.yml ./.github/workflows/call_issue_pr_tracker.yml ./.github/workflows/call_issues_cron.yml ./.github/workflows/permissions.yml ./.github/workflows/external_trigger.yml' 97 | } 98 | sh '''#! /bin/bash 99 | echo "The default github branch detected as ${GH_DEFAULT_BRANCH}" ''' 100 | script{ 101 | env.LS_RELEASE_NUMBER = sh( 102 | script: '''echo ${LS_RELEASE} |sed 's/^.*-ls//g' ''', 103 | returnStdout: true).trim() 104 | } 105 | script{ 106 | env.LS_TAG_NUMBER = sh( 107 | script: '''#! 
/bin/bash 108 | tagsha=$(git rev-list -n 1 3.22-${LS_RELEASE} 2>/dev/null) 109 | if [ "${tagsha}" == "${COMMIT_SHA}" ]; then 110 | echo ${LS_RELEASE_NUMBER} 111 | elif [ -z "${GIT_COMMIT}" ]; then 112 | echo ${LS_RELEASE_NUMBER} 113 | else 114 | echo $((${LS_RELEASE_NUMBER} + 1)) 115 | fi''', 116 | returnStdout: true).trim() 117 | } 118 | } 119 | } 120 | /* ####################### 121 | Package Version Tagging 122 | ####################### */ 123 | // Grab the current package versions in Git to determine package tag 124 | stage("Set Package tag"){ 125 | steps{ 126 | script{ 127 | env.PACKAGE_TAG = sh( 128 | script: '''#!/bin/bash 129 | if [ -e package_versions.txt ] ; then 130 | cat package_versions.txt | md5sum | cut -c1-8 131 | else 132 | echo none 133 | fi''', 134 | returnStdout: true).trim() 135 | } 136 | } 137 | } 138 | /* ######################## 139 | External Release Tagging 140 | ######################## */ 141 | // If this is an os release set release type to none to indicate no external release 142 | stage("Set ENV os"){ 143 | steps{ 144 | script{ 145 | env.EXT_RELEASE = env.PACKAGE_TAG 146 | env.RELEASE_LINK = 'none' 147 | } 148 | } 149 | } 150 | // Sanitize the release tag and strip illegal docker or github characters 151 | stage("Sanitize tag"){ 152 | steps{ 153 | script{ 154 | env.EXT_RELEASE_CLEAN = sh( 155 | script: '''echo ${EXT_RELEASE} | sed 's/[~,%@+;:/ ]//g' ''', 156 | returnStdout: true).trim() 157 | 158 | def semver = env.EXT_RELEASE_CLEAN =~ /(\d+)\.(\d+)\.(\d+)/ 159 | if (semver.find()) { 160 | env.SEMVER = "${semver[0][1]}.${semver[0][2]}.${semver[0][3]}" 161 | } else { 162 | semver = env.EXT_RELEASE_CLEAN =~ /(\d+)\.(\d+)(?:\.(\d+))?(.*)/ 163 | if (semver.find()) { 164 | if (semver[0][3]) { 165 | env.SEMVER = "${semver[0][1]}.${semver[0][2]}.${semver[0][3]}" 166 | } else if (!semver[0][3] && !semver[0][4]) { 167 | env.SEMVER = "${semver[0][1]}.${semver[0][2]}.${(new Date()).format('YYYYMMdd')}" 168 | } 169 | } 170 | } 171 | 172 | if (env.SEMVER != null) { 173 | if (BRANCH_NAME != "${env.GH_DEFAULT_BRANCH}") { 174 | env.SEMVER = "${env.SEMVER}-${BRANCH_NAME}" 175 | } 176 | println("SEMVER: ${env.SEMVER}") 177 | } else { 178 | println("No SEMVER detected") 179 | } 180 | 181 | } 182 | } 183 | } 184 | // If this is a master build use live docker endpoints 185 | stage("Set ENV live build"){ 186 | when { 187 | branch "master" 188 | environment name: 'CHANGE_ID', value: '' 189 | } 190 | steps { 191 | script{ 192 | env.IMAGE = env.DOCKERHUB_IMAGE 193 | env.GITHUBIMAGE = 'ghcr.io/' + env.LS_USER + '/' + env.CONTAINER_NAME 194 | env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/' + env.CONTAINER_NAME 195 | env.QUAYIMAGE = 'quay.io/linuxserver.io/' + env.CONTAINER_NAME 196 | if (env.MULTIARCH == 'true') { 197 | env.CI_TAGS = 'amd64-3.22-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER + '|riscv64-3.22-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER + '|arm64v8-3.22-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER 198 | } else { 199 | env.CI_TAGS = '3.22-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER 200 | } 201 | env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER 202 | env.META_TAG = '3.22-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER 203 | env.EXT_RELEASE_TAG = '3.22-version-' + env.EXT_RELEASE_CLEAN 204 | env.BUILDCACHE = 
'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache' 205 | } 206 | } 207 | } 208 | // If this is a dev build use dev docker endpoints 209 | stage("Set ENV dev build"){ 210 | when { 211 | not {branch "master"} 212 | environment name: 'CHANGE_ID', value: '' 213 | } 214 | steps { 215 | script{ 216 | env.IMAGE = env.DEV_DOCKERHUB_IMAGE 217 | env.GITHUBIMAGE = 'ghcr.io/' + env.LS_USER + '/lsiodev-' + env.CONTAINER_NAME 218 | env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/lsiodev-' + env.CONTAINER_NAME 219 | env.QUAYIMAGE = 'quay.io/linuxserver.io/lsiodev-' + env.CONTAINER_NAME 220 | if (env.MULTIARCH == 'true') { 221 | env.CI_TAGS = 'amd64-3.22-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '|riscv64-3.22-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '|arm64v8-3.22-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA 222 | } else { 223 | env.CI_TAGS = '3.22-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA 224 | } 225 | env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA 226 | env.META_TAG = '3.22-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA 227 | env.EXT_RELEASE_TAG = '3.22-version-' + env.EXT_RELEASE_CLEAN 228 | env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DEV_DOCKERHUB_IMAGE + '/tags/' 229 | env.BUILDCACHE = 'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache' 230 | } 231 | } 232 | } 233 | // If this is a pull request build use dev docker endpoints 234 | stage("Set ENV PR build"){ 235 | when { 236 | not {environment name: 'CHANGE_ID', value: ''} 237 | } 238 | steps { 239 | script{ 240 | env.IMAGE = env.PR_DOCKERHUB_IMAGE 241 | env.GITHUBIMAGE = 'ghcr.io/' + env.LS_USER + '/lspipepr-' + env.CONTAINER_NAME 242 | env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/lspipepr-' + env.CONTAINER_NAME 243 | env.QUAYIMAGE = 'quay.io/linuxserver.io/lspipepr-' + env.CONTAINER_NAME 244 | if (env.MULTIARCH == 'true') { 245 | env.CI_TAGS = 'amd64-3.22-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST + '|riscv64-3.22-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST + '|arm64v8-3.22-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST 246 | } else { 247 | env.CI_TAGS = '3.22-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST 248 | } 249 | env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST 250 | env.META_TAG = '3.22-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST 251 | env.EXT_RELEASE_TAG = '3.22-version-' + env.EXT_RELEASE_CLEAN 252 | env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/pull/' + env.PULL_REQUEST 253 | env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.PR_DOCKERHUB_IMAGE + '/tags/' 254 | env.BUILDCACHE = 
'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache' 255 | } 256 | } 257 | } 258 | // Run ShellCheck 259 | stage('ShellCheck') { 260 | when { 261 | environment name: 'CI', value: 'true' 262 | } 263 | steps { 264 | withCredentials([ 265 | string(credentialsId: 'ci-tests-s3-key-id', variable: 'S3_KEY'), 266 | string(credentialsId: 'ci-tests-s3-secret-access-key', variable: 'S3_SECRET') 267 | ]) { 268 | script{ 269 | env.SHELLCHECK_URL = 'https://ci-tests.linuxserver.io/' + env.IMAGE + '/' + env.META_TAG + '/shellcheck-result.xml' 270 | } 271 | sh '''curl -sL https://raw.githubusercontent.com/linuxserver/docker-jenkins-builder/master/checkrun.sh | /bin/bash''' 272 | sh '''#! /bin/bash 273 | docker run --rm \ 274 | -v ${WORKSPACE}:/mnt \ 275 | -e AWS_ACCESS_KEY_ID=\"${S3_KEY}\" \ 276 | -e AWS_SECRET_ACCESS_KEY=\"${S3_SECRET}\" \ 277 | ghcr.io/linuxserver/baseimage-alpine:3.20 s6-envdir -fn -- /var/run/s6/container_environment /bin/bash -c "\ 278 | apk add --no-cache python3 && \ 279 | python3 -m venv /lsiopy && \ 280 | pip install --no-cache-dir -U pip && \ 281 | pip install --no-cache-dir s3cmd && \ 282 | s3cmd put --no-preserve --acl-public -m text/xml /mnt/shellcheck-result.xml s3://ci-tests.linuxserver.io/${IMAGE}/${META_TAG}/shellcheck-result.xml" || :''' 283 | } 284 | } 285 | } 286 | // Use helper containers to render templated files 287 | stage('Update-Templates') { 288 | when { 289 | branch "master" 290 | environment name: 'CHANGE_ID', value: '' 291 | expression { 292 | env.CONTAINER_NAME != null 293 | } 294 | } 295 | steps { 296 | sh '''#! /bin/bash 297 | set -e 298 | TEMPDIR=$(mktemp -d) 299 | docker pull ghcr.io/linuxserver/jenkins-builder:latest 300 | # Cloned repo paths for templating: 301 | # ${TEMPDIR}/docker-${CONTAINER_NAME}: Cloned branch master of ${LS_USER}/${LS_REPO} for running the jenkins builder on 302 | # ${TEMPDIR}/repo/${LS_REPO}: Cloned branch master of ${LS_USER}/${LS_REPO} for commiting various templated file changes and pushing back to Github 303 | # ${TEMPDIR}/docs/docker-documentation: Cloned docs repo for pushing docs updates to Github 304 | # ${TEMPDIR}/unraid/docker-templates: Cloned docker-templates repo to check for logos 305 | # ${TEMPDIR}/unraid/templates: Cloned templates repo for commiting unraid template changes and pushing back to Github 306 | git clone --branch master --depth 1 https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/docker-${CONTAINER_NAME} 307 | docker run --rm -v ${TEMPDIR}/docker-${CONTAINER_NAME}:/tmp -e LOCAL=true -e PUID=$(id -u) -e PGID=$(id -g) ghcr.io/linuxserver/jenkins-builder:latest 308 | echo "Starting Stage 1 - Jenkinsfile update" 309 | if [[ "$(md5sum Jenkinsfile | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/Jenkinsfile | awk '{ print $1 }')" ]]; then 310 | mkdir -p ${TEMPDIR}/repo 311 | git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO} 312 | cd ${TEMPDIR}/repo/${LS_REPO} 313 | git checkout -f master 314 | cp ${TEMPDIR}/docker-${CONTAINER_NAME}/Jenkinsfile ${TEMPDIR}/repo/${LS_REPO}/ 315 | git add Jenkinsfile 316 | git commit -m 'Bot Updating Templated Files' 317 | git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master 318 | git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master 319 | echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} 320 | echo "Updating 
Jenkinsfile and exiting build, new one will trigger based on commit" 321 | rm -Rf ${TEMPDIR} 322 | exit 0 323 | else 324 | echo "Jenkinsfile is up to date." 325 | fi 326 | echo "Starting Stage 2 - Delete old templates" 327 | OLD_TEMPLATES=".github/ISSUE_TEMPLATE.md .github/ISSUE_TEMPLATE/issue.bug.md .github/ISSUE_TEMPLATE/issue.feature.md .github/workflows/call_invalid_helper.yml .github/workflows/stale.yml .github/workflows/package_trigger.yml" 328 | for i in ${OLD_TEMPLATES}; do 329 | if [[ -f "${i}" ]]; then 330 | TEMPLATES_TO_DELETE="${i} ${TEMPLATES_TO_DELETE}" 331 | fi 332 | done 333 | if [[ -n "${TEMPLATES_TO_DELETE}" ]]; then 334 | mkdir -p ${TEMPDIR}/repo 335 | git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO} 336 | cd ${TEMPDIR}/repo/${LS_REPO} 337 | git checkout -f master 338 | for i in ${TEMPLATES_TO_DELETE}; do 339 | git rm "${i}" 340 | done 341 | git commit -m 'Bot Updating Templated Files' 342 | git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master 343 | git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master 344 | echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} 345 | echo "Deleting old/deprecated templates and exiting build, new one will trigger based on commit" 346 | rm -Rf ${TEMPDIR} 347 | exit 0 348 | else 349 | echo "No templates to delete" 350 | fi 351 | echo "Starting Stage 3 - Update templates" 352 | CURRENTHASH=$(grep -hs ^ ${TEMPLATED_FILES} | md5sum | cut -c1-8) 353 | cd ${TEMPDIR}/docker-${CONTAINER_NAME} 354 | NEWHASH=$(grep -hs ^ ${TEMPLATED_FILES} | md5sum | cut -c1-8) 355 | if [[ "${CURRENTHASH}" != "${NEWHASH}" ]] || ! grep -q '.jenkins-external' "${WORKSPACE}/.gitignore" 2>/dev/null; then 356 | mkdir -p ${TEMPDIR}/repo 357 | git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO} 358 | cd ${TEMPDIR}/repo/${LS_REPO} 359 | git checkout -f master 360 | cd ${TEMPDIR}/docker-${CONTAINER_NAME} 361 | mkdir -p ${TEMPDIR}/repo/${LS_REPO}/.github/workflows 362 | mkdir -p ${TEMPDIR}/repo/${LS_REPO}/.github/ISSUE_TEMPLATE 363 | cp --parents ${TEMPLATED_FILES} ${TEMPDIR}/repo/${LS_REPO}/ || : 364 | cp --parents readme-vars.yml ${TEMPDIR}/repo/${LS_REPO}/ || : 365 | cd ${TEMPDIR}/repo/${LS_REPO}/ 366 | if ! grep -q '.jenkins-external' .gitignore 2>/dev/null; then 367 | echo ".jenkins-external" >> .gitignore 368 | git add .gitignore 369 | fi 370 | git add readme-vars.yml ${TEMPLATED_FILES} 371 | git commit -m 'Bot Updating Templated Files' 372 | git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master 373 | git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master 374 | echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} 375 | echo "Updating templates and exiting build, new one will trigger based on commit" 376 | rm -Rf ${TEMPDIR} 377 | exit 0 378 | else 379 | echo "false" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} 380 | echo "No templates to update" 381 | fi 382 | echo "Starting Stage 4 - External repo updates: Docs, Unraid Template and Readme Sync to Docker Hub" 383 | mkdir -p ${TEMPDIR}/docs 384 | git clone --depth=1 https://github.com/linuxserver/docker-documentation.git ${TEMPDIR}/docs/docker-documentation 385 | if [[ "${BRANCH_NAME}" == "${GH_DEFAULT_BRANCH}" ]] && [[ (! 
-f ${TEMPDIR}/docs/docker-documentation/docs/images/docker-${CONTAINER_NAME}.md) || ("$(md5sum ${TEMPDIR}/docs/docker-documentation/docs/images/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')") ]]; then 386 | cp ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/docker-${CONTAINER_NAME}.md ${TEMPDIR}/docs/docker-documentation/docs/images/ 387 | cd ${TEMPDIR}/docs/docker-documentation 388 | GH_DOCS_DEFAULT_BRANCH=$(git remote show origin | grep "HEAD branch:" | sed 's|.*HEAD branch: ||') 389 | git add docs/images/docker-${CONTAINER_NAME}.md 390 | echo "Updating docs repo" 391 | git commit -m 'Bot Updating Documentation' 392 | git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git ${GH_DOCS_DEFAULT_BRANCH} --rebase 393 | git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git ${GH_DOCS_DEFAULT_BRANCH} || \ 394 | (MAXWAIT="10" && echo "Push to docs failed, trying again in ${MAXWAIT} seconds" && \ 395 | sleep $((RANDOM % MAXWAIT)) && \ 396 | git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git ${GH_DOCS_DEFAULT_BRANCH} --rebase && \ 397 | git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git ${GH_DOCS_DEFAULT_BRANCH}) 398 | else 399 | echo "Docs update not needed, skipping" 400 | fi 401 | if [[ "${BRANCH_NAME}" == "${GH_DEFAULT_BRANCH}" ]]; then 402 | if [[ $(cat ${TEMPDIR}/docker-${CONTAINER_NAME}/README.md | wc -m) -gt 25000 ]]; then 403 | echo "Readme is longer than 25,000 characters. Syncing the lite version to Docker Hub" 404 | DH_README_SYNC_PATH="${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/README.lite" 405 | else 406 | echo "Syncing readme to Docker Hub" 407 | DH_README_SYNC_PATH="${TEMPDIR}/docker-${CONTAINER_NAME}/README.md" 408 | fi 409 | if curl -s https://hub.docker.com/v2/namespaces/${DOCKERHUB_IMAGE%%/*}/repositories/${DOCKERHUB_IMAGE##*/}/tags | jq -r '.message' | grep -q 404; then 410 | echo "Docker Hub endpoint doesn't exist. Creating endpoint first." 411 | DH_TOKEN=$(curl -d '{"username":"linuxserverci", "password":"'${DOCKERHUB_TOKEN}'"}' -H "Content-Type: application/json" -X POST https://hub.docker.com/v2/users/login | jq -r '.token') 412 | curl -s \ 413 | -H "Authorization: JWT ${DH_TOKEN}" \ 414 | -H "Content-Type: application/json" \ 415 | -X POST \ 416 | -d '{"name":"'${DOCKERHUB_IMAGE##*/}'", "namespace":"'${DOCKERHUB_IMAGE%%/*}'"}' \ 417 | https://hub.docker.com/v2/repositories/ || : 418 | fi 419 | DH_TOKEN=$(curl -d '{"username":"linuxserverci", "password":"'${DOCKERHUB_TOKEN}'"}' -H "Content-Type: application/json" -X POST https://hub.docker.com/v2/users/login | jq -r '.token') 420 | curl -s \ 421 | -H "Authorization: JWT ${DH_TOKEN}" \ 422 | -H "Content-Type: application/json" \ 423 | -X PATCH \ 424 | -d "{\\"full_description\\":$(jq -Rsa . ${DH_README_SYNC_PATH})}" \ 425 | https://hub.docker.com/v2/repositories/${DOCKERHUB_IMAGE} || : 426 | else 427 | echo "Not the default Github branch. Skipping readme sync to Docker Hub." 
428 | fi 429 | rm -Rf ${TEMPDIR}''' 430 | script{ 431 | env.FILES_UPDATED = sh( 432 | script: '''cat /tmp/${COMMIT_SHA}-${BUILD_NUMBER}''', 433 | returnStdout: true).trim() 434 | } 435 | } 436 | } 437 | // Exit the build if the Templated files were just updated 438 | stage('Template-exit') { 439 | when { 440 | branch "master" 441 | environment name: 'CHANGE_ID', value: '' 442 | environment name: 'FILES_UPDATED', value: 'true' 443 | expression { 444 | env.CONTAINER_NAME != null 445 | } 446 | } 447 | steps { 448 | script{ 449 | env.EXIT_STATUS = 'ABORTED' 450 | } 451 | } 452 | } 453 | // If this is a master build check the S6 service file perms 454 | stage("Check S6 Service file Permissions"){ 455 | when { 456 | branch "master" 457 | environment name: 'CHANGE_ID', value: '' 458 | environment name: 'EXIT_STATUS', value: '' 459 | } 460 | steps { 461 | script{ 462 | sh '''#! /bin/bash 463 | WRONG_PERM=$(find ./ -path "./.git" -prune -o \\( -name "run" -o -name "finish" -o -name "check" \\) -not -perm -u=x,g=x,o=x -print) 464 | if [[ -n "${WRONG_PERM}" ]]; then 465 | echo "The following S6 service files are missing the executable bit; canceling the faulty build: ${WRONG_PERM}" 466 | exit 1 467 | else 468 | echo "S6 service file perms look good." 469 | fi ''' 470 | } 471 | } 472 | } 473 | /* ####################### 474 | GitLab Mirroring and Quay.io Repo Visibility 475 | ####################### */ 476 | // Ping into Gitlab to mirror this repo and have a registry endpoint & mark this repo on Quay.io as public 477 | stage("GitLab Mirror and Quay.io Visibility"){ 478 | when { 479 | environment name: 'EXIT_STATUS', value: '' 480 | } 481 | steps{ 482 | sh '''curl -H "Content-Type: application/json" -H "Private-Token: ${GITLAB_TOKEN}" -X POST https://gitlab.com/api/v4/projects \ 483 | -d '{"namespace_id":'${GITLAB_NAMESPACE}',\ 484 | "name":"'${LS_REPO}'", 485 | "mirror":true,\ 486 | "import_url":"https://github.com/linuxserver/'${LS_REPO}'.git",\ 487 | "issues_access_level":"disabled",\ 488 | "merge_requests_access_level":"disabled",\ 489 | "repository_access_level":"enabled",\ 490 | "visibility":"public"}' ''' 491 | sh '''curl -H "Private-Token: ${GITLAB_TOKEN}" -X PUT "https://gitlab.com/api/v4/projects/Linuxserver.io%2F${LS_REPO}" \ 492 | -d "mirror=true&import_url=https://github.com/linuxserver/${LS_REPO}.git" ''' 493 | sh '''curl -H "Content-Type: application/json" -H "Authorization: Bearer ${QUAYIO_API_TOKEN}" -X POST "https://quay.io/api/v1/repository${QUAYIMAGE/quay.io/}/changevisibility" \ 494 | -d '{"visibility":"public"}' ||: ''' 495 | } 496 | } 497 | /* ############### 498 | Build Container 499 | ############### */ 500 | // Build Docker container for push to LS Repo 501 | stage('Build-Single') { 502 | when { 503 | expression { 504 | env.MULTIARCH == 'false' || params.PACKAGE_CHECK == 'true' 505 | } 506 | environment name: 'EXIT_STATUS', value: '' 507 | } 508 | steps { 509 | echo "Running on node: ${NODE_NAME}" 510 | sh "docker buildx build \ 511 | --label \"org.opencontainers.image.created=${GITHUB_DATE}\" \ 512 | --label \"org.opencontainers.image.authors=linuxserver.io\" \ 513 | --label \"org.opencontainers.image.url=https://github.com/linuxserver/docker-baseimage-alpine/packages\" \ 514 | --label \"org.opencontainers.image.documentation=https://docs.linuxserver.io/images/docker-baseimage-alpine\" \ 515 | --label \"org.opencontainers.image.source=https://github.com/linuxserver/docker-baseimage-alpine\" \ 516 | --label 
\"org.opencontainers.image.version=${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}\" \ 517 | --label \"org.opencontainers.image.revision=${COMMIT_SHA}\" \ 518 | --label \"org.opencontainers.image.vendor=linuxserver.io\" \ 519 | --label \"org.opencontainers.image.licenses=GPL-3.0-only\" \ 520 | --label \"org.opencontainers.image.ref.name=${COMMIT_SHA}\" \ 521 | --label \"org.opencontainers.image.title=Baseimage-alpine\" \ 522 | --label \"org.opencontainers.image.description=baseimage-alpine image by linuxserver.io\" \ 523 | --no-cache --pull -t ${IMAGE}:${META_TAG} --platform=linux/amd64 \ 524 | --provenance=true --sbom=true --builder=container --load \ 525 | --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." 526 | sh '''#! /bin/bash 527 | set -e 528 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 529 | for i in "${CACHE[@]}"; do 530 | docker tag ${IMAGE}:${META_TAG} ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} 531 | done 532 | ''' 533 | withCredentials([ 534 | [ 535 | $class: 'UsernamePasswordMultiBinding', 536 | credentialsId: 'Quay.io-Robot', 537 | usernameVariable: 'QUAYUSER', 538 | passwordVariable: 'QUAYPASS' 539 | ] 540 | ]) { 541 | retry_backoff(5,5) { 542 | sh '''#! /bin/bash 543 | set -e 544 | echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin 545 | echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin 546 | echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin 547 | echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin 548 | if [[ "${PACKAGE_CHECK}" != "true" ]]; then 549 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 550 | for i in "${CACHE[@]}"; do 551 | docker push ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} & 552 | done 553 | for p in $(jobs -p); do 554 | wait "$p" || { echo "job $p failed" >&2; exit 1; } 555 | done 556 | fi 557 | ''' 558 | } 559 | } 560 | } 561 | } 562 | // Build MultiArch Docker containers for push to LS Repo 563 | stage('Build-Multi') { 564 | when { 565 | allOf { 566 | environment name: 'MULTIARCH', value: 'true' 567 | expression { params.PACKAGE_CHECK == 'false' } 568 | } 569 | environment name: 'EXIT_STATUS', value: '' 570 | } 571 | parallel { 572 | stage('Build X86') { 573 | steps { 574 | echo "Running on node: ${NODE_NAME}" 575 | sh "docker buildx build \ 576 | --label \"org.opencontainers.image.created=${GITHUB_DATE}\" \ 577 | --label \"org.opencontainers.image.authors=linuxserver.io\" \ 578 | --label \"org.opencontainers.image.url=https://github.com/linuxserver/docker-baseimage-alpine/packages\" \ 579 | --label \"org.opencontainers.image.documentation=https://docs.linuxserver.io/images/docker-baseimage-alpine\" \ 580 | --label \"org.opencontainers.image.source=https://github.com/linuxserver/docker-baseimage-alpine\" \ 581 | --label \"org.opencontainers.image.version=${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}\" \ 582 | --label \"org.opencontainers.image.revision=${COMMIT_SHA}\" \ 583 | --label \"org.opencontainers.image.vendor=linuxserver.io\" \ 584 | --label \"org.opencontainers.image.licenses=GPL-3.0-only\" \ 585 | --label \"org.opencontainers.image.ref.name=${COMMIT_SHA}\" \ 586 | --label \"org.opencontainers.image.title=Baseimage-alpine\" \ 587 | --label \"org.opencontainers.image.description=baseimage-alpine image by linuxserver.io\" \ 588 | --no-cache --pull -t ${IMAGE}:amd64-${META_TAG} --platform=linux/amd64 \ 589 | --provenance=true --sbom=true --builder=container --load \ 590 | --build-arg 
${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." 591 | sh '''#! /bin/bash 592 | set -e 593 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 594 | for i in "${CACHE[@]}"; do 595 | docker tag ${IMAGE}:amd64-${META_TAG} ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} 596 | done 597 | ''' 598 | withCredentials([ 599 | [ 600 | $class: 'UsernamePasswordMultiBinding', 601 | credentialsId: 'Quay.io-Robot', 602 | usernameVariable: 'QUAYUSER', 603 | passwordVariable: 'QUAYPASS' 604 | ] 605 | ]) { 606 | retry_backoff(5,5) { 607 | sh '''#! /bin/bash 608 | set -e 609 | echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin 610 | echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin 611 | echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin 612 | echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin 613 | if [[ "${PACKAGE_CHECK}" != "true" ]]; then 614 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 615 | for i in "${CACHE[@]}"; do 616 | docker push ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} & 617 | done 618 | for p in $(jobs -p); do 619 | wait "$p" || { echo "job $p failed" >&2; exit 1; } 620 | done 621 | fi 622 | ''' 623 | } 624 | } 625 | } 626 | } 627 | stage('Build ARM64') { 628 | agent { 629 | label 'ARM64' 630 | } 631 | steps { 632 | echo "Running on node: ${NODE_NAME}" 633 | sh "docker buildx build \ 634 | --label \"org.opencontainers.image.created=${GITHUB_DATE}\" \ 635 | --label \"org.opencontainers.image.authors=linuxserver.io\" \ 636 | --label \"org.opencontainers.image.url=https://github.com/linuxserver/docker-baseimage-alpine/packages\" \ 637 | --label \"org.opencontainers.image.documentation=https://docs.linuxserver.io/images/docker-baseimage-alpine\" \ 638 | --label \"org.opencontainers.image.source=https://github.com/linuxserver/docker-baseimage-alpine\" \ 639 | --label \"org.opencontainers.image.version=${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}\" \ 640 | --label \"org.opencontainers.image.revision=${COMMIT_SHA}\" \ 641 | --label \"org.opencontainers.image.vendor=linuxserver.io\" \ 642 | --label \"org.opencontainers.image.licenses=GPL-3.0-only\" \ 643 | --label \"org.opencontainers.image.ref.name=${COMMIT_SHA}\" \ 644 | --label \"org.opencontainers.image.title=Baseimage-alpine\" \ 645 | --label \"org.opencontainers.image.description=baseimage-alpine image by linuxserver.io\" \ 646 | --no-cache --pull -f Dockerfile.aarch64 -t ${IMAGE}:arm64v8-${META_TAG} --platform=linux/arm64 \ 647 | --provenance=true --sbom=true --builder=container --load \ 648 | --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." 649 | sh '''#! /bin/bash 650 | set -e 651 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 652 | for i in "${CACHE[@]}"; do 653 | docker tag ${IMAGE}:arm64v8-${META_TAG} ${i}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} 654 | done 655 | ''' 656 | withCredentials([ 657 | [ 658 | $class: 'UsernamePasswordMultiBinding', 659 | credentialsId: 'Quay.io-Robot', 660 | usernameVariable: 'QUAYUSER', 661 | passwordVariable: 'QUAYPASS' 662 | ] 663 | ]) { 664 | retry_backoff(5,5) { 665 | sh '''#! 
/bin/bash 666 | set -e 667 | echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin 668 | echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin 669 | echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin 670 | echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin 671 | if [[ "${PACKAGE_CHECK}" != "true" ]]; then 672 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 673 | for i in "${CACHE[@]}"; do 674 | docker push ${i}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} & 675 | done 676 | for p in $(jobs -p); do 677 | wait "$p" || { echo "job $p failed" >&2; exit 1; } 678 | done 679 | fi 680 | ''' 681 | } 682 | } 683 | sh '''#! /bin/bash 684 | containers=$(docker ps -aq) 685 | if [[ -n "${containers}" ]]; then 686 | docker stop ${containers} 687 | fi 688 | docker system prune -f --volumes || : 689 | docker image prune -af || : 690 | ''' 691 | } 692 | } 693 | stage('Build RISCV64') { 694 | agent { 695 | label 'RISCV64' 696 | } 697 | steps { 698 | echo "Running on node: ${NODE_NAME}" 699 | sh "docker buildx build \ 700 | --label \"org.opencontainers.image.created=${GITHUB_DATE}\" \ 701 | --label \"org.opencontainers.image.authors=linuxserver.io\" \ 702 | --label \"org.opencontainers.image.url=https://github.com/linuxserver/docker-baseimage-alpine/packages\" \ 703 | --label \"org.opencontainers.image.documentation=https://docs.linuxserver.io/images/docker-baseimage-alpine\" \ 704 | --label \"org.opencontainers.image.source=https://github.com/linuxserver/docker-baseimage-alpine\" \ 705 | --label \"org.opencontainers.image.version=${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}\" \ 706 | --label \"org.opencontainers.image.revision=${COMMIT_SHA}\" \ 707 | --label \"org.opencontainers.image.vendor=linuxserver.io\" \ 708 | --label \"org.opencontainers.image.licenses=GPL-3.0-only\" \ 709 | --label \"org.opencontainers.image.ref.name=${COMMIT_SHA}\" \ 710 | --label \"org.opencontainers.image.title=Baseimage-alpine\" \ 711 | --label \"org.opencontainers.image.description=baseimage-alpine image by linuxserver.io\" \ 712 | --no-cache --pull -f Dockerfile.riscv64 -t ${IMAGE}:riscv64-${META_TAG} --platform=linux/riscv64 \ 713 | --provenance=true --sbom=true --builder=container --load \ 714 | --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." 715 | sh '''#! /bin/bash 716 | set -e 717 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 718 | for i in "${CACHE[@]}"; do 719 | docker tag ${IMAGE}:riscv64-${META_TAG} ${i}:riscv64-${COMMIT_SHA}-${BUILD_NUMBER} 720 | done 721 | ''' 722 | withCredentials([ 723 | [ 724 | $class: 'UsernamePasswordMultiBinding', 725 | credentialsId: 'Quay.io-Robot', 726 | usernameVariable: 'QUAYUSER', 727 | passwordVariable: 'QUAYPASS' 728 | ] 729 | ]) { 730 | retry_backoff(5,5) { 731 | sh '''#! 
/bin/bash 732 | set -e 733 | echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin 734 | echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin 735 | echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin 736 | echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin 737 | if [[ "${PACKAGE_CHECK}" != "true" ]]; then 738 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 739 | for i in "${CACHE[@]}"; do 740 | docker push ${i}:riscv64-${COMMIT_SHA}-${BUILD_NUMBER} & 741 | done 742 | for p in $(jobs -p); do 743 | wait "$p" || { echo "job $p failed" >&2; exit 1; } 744 | done 745 | fi 746 | ''' 747 | } 748 | } 749 | sh '''#! /bin/bash 750 | containers=$(docker ps -aq) 751 | if [[ -n "${containers}" ]]; then 752 | docker stop ${containers} 753 | fi 754 | docker system prune -f --volumes || : 755 | docker image prune -af || : 756 | ''' 757 | } 758 | } 759 | } 760 | } 761 | // Take the image we just built and dump package versions for comparison 762 | stage('Update-packages') { 763 | when { 764 | branch "master" 765 | environment name: 'CHANGE_ID', value: '' 766 | environment name: 'EXIT_STATUS', value: '' 767 | } 768 | steps { 769 | sh '''#! /bin/bash 770 | set -e 771 | TEMPDIR=$(mktemp -d) 772 | if [ "${MULTIARCH}" == "true" ] && [ "${PACKAGE_CHECK}" != "true" ]; then 773 | LOCAL_CONTAINER=${IMAGE}:amd64-${META_TAG} 774 | else 775 | LOCAL_CONTAINER=${IMAGE}:${META_TAG} 776 | fi 777 | touch ${TEMPDIR}/package_versions.txt 778 | docker run --rm \ 779 | -v /var/run/docker.sock:/var/run/docker.sock:ro \ 780 | -v ${TEMPDIR}:/tmp \ 781 | ghcr.io/anchore/syft:latest \ 782 | ${LOCAL_CONTAINER} -o table=/tmp/package_versions.txt 783 | NEW_PACKAGE_TAG=$(md5sum ${TEMPDIR}/package_versions.txt | cut -c1-8 ) 784 | echo "Package tag sha from current packages in built container is ${NEW_PACKAGE_TAG}, comparing to old ${PACKAGE_TAG} from github" 785 | if [ "${NEW_PACKAGE_TAG}" != "${PACKAGE_TAG}" ]; then 786 | git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/${LS_REPO} 787 | git --git-dir ${TEMPDIR}/${LS_REPO}/.git checkout -f master 788 | cp ${TEMPDIR}/package_versions.txt ${TEMPDIR}/${LS_REPO}/ 789 | cd ${TEMPDIR}/${LS_REPO}/ 790 | wait 791 | git add package_versions.txt 792 | git commit -m 'Bot Updating Package Versions' 793 | git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master 794 | git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master 795 | echo "true" > /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER} 796 | echo "Package tag updated, stopping build process" 797 | else 798 | echo "false" > /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER} 799 | echo "Package tag is the same as the previous one, continuing with build process" 800 | fi 801 | rm -Rf ${TEMPDIR}''' 802 | script{ 803 | env.PACKAGE_UPDATED = sh( 804 | script: '''cat /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER}''', 805 | returnStdout: true).trim() 806 | } 807 | } 808 | } 809 | // Exit the build if the package file was just updated 810 | stage('PACKAGE-exit') { 811 | when { 812 | branch "master" 813 | environment name: 'CHANGE_ID', value: '' 814 | environment name: 'PACKAGE_UPDATED', value: 'true' 815 | environment name: 'EXIT_STATUS', value: '' 816 | } 817 | steps { 818 | script{ 819 | env.EXIT_STATUS = 'ABORTED' 820 | } 821 | } 822 | } 823 | // Exit the build if this is just a package check and there are no changes to push 824 | stage('PACKAGECHECK-exit') { 825 | when { 826 | branch "master" 827 | 
environment name: 'CHANGE_ID', value: '' 828 | environment name: 'PACKAGE_UPDATED', value: 'false' 829 | environment name: 'EXIT_STATUS', value: '' 830 | expression { 831 | params.PACKAGE_CHECK == 'true' 832 | } 833 | } 834 | steps { 835 | script{ 836 | env.EXIT_STATUS = 'ABORTED' 837 | } 838 | } 839 | } 840 | /* ####### 841 | Testing 842 | ####### */ 843 | // Run Container tests 844 | stage('Test') { 845 | when { 846 | environment name: 'CI', value: 'true' 847 | environment name: 'EXIT_STATUS', value: '' 848 | } 849 | steps { 850 | withCredentials([ 851 | string(credentialsId: 'ci-tests-s3-key-id', variable: 'S3_KEY'), 852 | string(credentialsId: 'ci-tests-s3-secret-access-key ', variable: 'S3_SECRET') 853 | ]) { 854 | script{ 855 | env.CI_URL = 'https://ci-tests.linuxserver.io/' + env.IMAGE + '/' + env.META_TAG + '/index.html' 856 | env.CI_JSON_URL = 'https://ci-tests.linuxserver.io/' + env.IMAGE + '/' + env.META_TAG + '/report.json' 857 | } 858 | sh '''#! /bin/bash 859 | set -e 860 | if grep -q 'docker-baseimage' <<< "${LS_REPO}"; then 861 | echo "Detected baseimage, setting LSIO_FIRST_PARTY=true" 862 | if [ -n "${CI_DOCKERENV}" ]; then 863 | CI_DOCKERENV="LSIO_FIRST_PARTY=true|${CI_DOCKERENV}" 864 | else 865 | CI_DOCKERENV="LSIO_FIRST_PARTY=true" 866 | fi 867 | fi 868 | docker pull ghcr.io/linuxserver/ci:latest 869 | if [ "${MULTIARCH}" == "true" ]; then 870 | docker pull ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} --platform=arm64 871 | docker tag ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG} 872 | docker pull ghcr.io/linuxserver/lsiodev-buildcache:riscv64-${COMMIT_SHA}-${BUILD_NUMBER} --platform=riscv64 873 | docker tag ghcr.io/linuxserver/lsiodev-buildcache:riscv64-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:riscv64-${META_TAG} 874 | fi 875 | docker run --rm \ 876 | --shm-size=1gb \ 877 | -v /var/run/docker.sock:/var/run/docker.sock \ 878 | -e IMAGE=\"${IMAGE}\" \ 879 | -e DOCKER_LOGS_TIMEOUT=\"${CI_DELAY}\" \ 880 | -e TAGS=\"${CI_TAGS}\" \ 881 | -e META_TAG=\"${META_TAG}\" \ 882 | -e RELEASE_TAG=\"3.22\" \ 883 | -e PORT=\"${CI_PORT}\" \ 884 | -e SSL=\"${CI_SSL}\" \ 885 | -e BASE=\"${DIST_IMAGE}\" \ 886 | -e SECRET_KEY=\"${S3_SECRET}\" \ 887 | -e ACCESS_KEY=\"${S3_KEY}\" \ 888 | -e DOCKER_ENV=\"${CI_DOCKERENV}\" \ 889 | -e WEB_SCREENSHOT=\"${CI_WEB}\" \ 890 | -e WEB_AUTH=\"${CI_AUTH}\" \ 891 | -e WEB_PATH=\"${CI_WEBPATH}\" \ 892 | -e NODE_NAME=\"${NODE_NAME}\" \ 893 | -t ghcr.io/linuxserver/ci:latest \ 894 | python3 test_build.py''' 895 | } 896 | } 897 | } 898 | /* ################## 899 | Release Logic 900 | ################## */ 901 | // If this is an amd64 only image only push a single image 902 | stage('Docker-Push-Single') { 903 | when { 904 | environment name: 'MULTIARCH', value: 'false' 905 | environment name: 'EXIT_STATUS', value: '' 906 | } 907 | steps { 908 | retry_backoff(5,5) { 909 | sh '''#! /bin/bash 910 | set -e 911 | for PUSHIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do 912 | [[ ${PUSHIMAGE%%/*} =~ \\. 
]] && PUSHIMAGEPLUS="${PUSHIMAGE}" || PUSHIMAGEPLUS="docker.io/${PUSHIMAGE}" 913 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 914 | for i in "${CACHE[@]}"; do 915 | if [[ "${PUSHIMAGEPLUS}" == "$(cut -d "/" -f1 <<< ${i})"* ]]; then 916 | CACHEIMAGE=${i} 917 | fi 918 | done 919 | docker buildx imagetools create --prefer-index=false -t ${PUSHIMAGE}:${META_TAG} -t ${PUSHIMAGE}:3.22 -t ${PUSHIMAGE}:${EXT_RELEASE_TAG} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} 920 | if [ -n "${SEMVER}" ]; then 921 | docker buildx imagetools create --prefer-index=false -t ${PUSHIMAGE}:${SEMVER} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} 922 | fi 923 | done 924 | ''' 925 | } 926 | } 927 | } 928 | // If this is a multi arch release push all images and define the manifest 929 | stage('Docker-Push-Multi') { 930 | when { 931 | environment name: 'MULTIARCH', value: 'true' 932 | environment name: 'EXIT_STATUS', value: '' 933 | } 934 | steps { 935 | retry_backoff(5,5) { 936 | sh '''#! /bin/bash 937 | set -e 938 | for MANIFESTIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do 939 | [[ ${MANIFESTIMAGE%%/*} =~ \\. ]] && MANIFESTIMAGEPLUS="${MANIFESTIMAGE}" || MANIFESTIMAGEPLUS="docker.io/${MANIFESTIMAGE}" 940 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 941 | for i in "${CACHE[@]}"; do 942 | if [[ "${MANIFESTIMAGEPLUS}" == "$(cut -d "/" -f1 <<< ${i})"* ]]; then 943 | CACHEIMAGE=${i} 944 | fi 945 | done 946 | docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:amd64-${META_TAG} -t ${MANIFESTIMAGE}:amd64-3.22 -t ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} 947 | docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:arm64v8-${META_TAG} -t ${MANIFESTIMAGE}:arm64v8-3.22 -t ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG} ${CACHEIMAGE}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} 948 | docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:riscv64-${META_TAG} -t ${MANIFESTIMAGE}:riscv64-3.22 -t ${MANIFESTIMAGE}:riscv64-${EXT_RELEASE_TAG} ${CACHEIMAGE}:riscv64-${COMMIT_SHA}-${BUILD_NUMBER} 949 | if [ -n "${SEMVER}" ]; then 950 | docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:amd64-${SEMVER} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} 951 | docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:arm64v8-${SEMVER} ${CACHEIMAGE}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} 952 | docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:riscv64-${SEMVER} ${CACHEIMAGE}:riscv64-${COMMIT_SHA}-${BUILD_NUMBER} 953 | fi 954 | done 955 | for MANIFESTIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do 956 | docker buildx imagetools create -t ${MANIFESTIMAGE}:3.22 ${MANIFESTIMAGE}:amd64-3.22 ${MANIFESTIMAGE}:riscv64-3.22 ${MANIFESTIMAGE}:arm64v8-3.22 957 | docker buildx imagetools create -t ${MANIFESTIMAGE}:${META_TAG} ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:riscv64-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${META_TAG} 958 | 959 | docker buildx imagetools create -t ${MANIFESTIMAGE}:${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:riscv64-${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG} 960 | if [ -n "${SEMVER}" ]; then 961 | docker buildx imagetools create -t ${MANIFESTIMAGE}:${SEMVER} ${MANIFESTIMAGE}:amd64-${SEMVER} ${MANIFESTIMAGE}:riscv64-${SEMVER} ${MANIFESTIMAGE}:arm64v8-${SEMVER} 962 | fi 963 | done 964 | ''' 965 | } 966 | } 967 | } 968 | // If this is a public release tag it in the LS 
Github 969 | stage('Github-Tag-Push-Release') { 970 | when { 971 | branch "master" 972 | expression { 973 | env.LS_RELEASE != env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER 974 | } 975 | environment name: 'CHANGE_ID', value: '' 976 | environment name: 'EXIT_STATUS', value: '' 977 | } 978 | steps { 979 | echo "Pushing New tag for current commit ${META_TAG}" 980 | sh '''curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/git/tags \ 981 | -d '{"tag":"'${META_TAG}'",\ 982 | "object": "'${COMMIT_SHA}'",\ 983 | "message": "Tagging Release '${EXT_RELEASE_CLEAN}'-ls'${LS_TAG_NUMBER}' to master",\ 984 | "type": "commit",\ 985 | "tagger": {"name": "LinuxServer-CI","email": "ci@linuxserver.io","date": "'${GITHUB_DATE}'"}}' ''' 986 | echo "Pushing New release for Tag" 987 | sh '''#! /bin/bash 988 | echo "Updating base packages to ${PACKAGE_TAG}" > releasebody.json 989 | echo '{"tag_name":"'${META_TAG}'",\ 990 | "target_commitish": "master",\ 991 | "name": "'${META_TAG}'",\ 992 | "body": "**CI Report:**\\n\\n'${CI_URL:-N/A}'\\n\\n**LinuxServer Changes:**\\n\\n'${LS_RELEASE_NOTES}'\\n\\n**Remote Changes:**\\n\\n' > start 993 | printf '","draft": false,"prerelease": false}' >> releasebody.json 994 | paste -d'\\0' start releasebody.json > releasebody.json.done 995 | curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/releases -d @releasebody.json.done''' 996 | } 997 | } 998 | // Add protection to the release branch 999 | stage('Github-Release-Branch-Protection') { 1000 | when { 1001 | branch "master" 1002 | environment name: 'CHANGE_ID', value: '' 1003 | environment name: 'EXIT_STATUS', value: '' 1004 | } 1005 | steps { 1006 | echo "Setting up protection for release branch master" 1007 | sh '''#! /bin/bash 1008 | curl -H "Authorization: token ${GITHUB_TOKEN}" -X PUT https://api.github.com/repos/${LS_USER}/${LS_REPO}/branches/master/protection \ 1009 | -d $(jq -c . << EOF 1010 | { 1011 | "required_status_checks": null, 1012 | "enforce_admins": false, 1013 | "required_pull_request_reviews": { 1014 | "dismiss_stale_reviews": false, 1015 | "require_code_owner_reviews": false, 1016 | "require_last_push_approval": false, 1017 | "required_approving_review_count": 1 1018 | }, 1019 | "restrictions": null, 1020 | "required_linear_history": false, 1021 | "allow_force_pushes": false, 1022 | "allow_deletions": false, 1023 | "block_creations": false, 1024 | "required_conversation_resolution": true, 1025 | "lock_branch": false, 1026 | "allow_fork_syncing": false, 1027 | "required_signatures": false 1028 | } 1029 | EOF 1030 | ) ''' 1031 | } 1032 | } 1033 | // If this is a Pull request send the CI link as a comment on it 1034 | stage('Pull Request Comment') { 1035 | when { 1036 | not {environment name: 'CHANGE_ID', value: ''} 1037 | environment name: 'EXIT_STATUS', value: '' 1038 | } 1039 | steps { 1040 | sh '''#! /bin/bash 1041 | # Function to retrieve JSON data from URL 1042 | get_json() { 1043 | local url="$1" 1044 | local response=$(curl -s "$url") 1045 | if [ $? -ne 0 ]; then 1046 | echo "Failed to retrieve JSON data from $url" 1047 | return 1 1048 | fi 1049 | local json=$(echo "$response" | jq .) 1050 | if [ $? 
-ne 0 ]; then 1051 | echo "Failed to parse JSON data from $url" 1052 | return 1 1053 | fi 1054 | echo "$json" 1055 | } 1056 | 1057 | build_table() { 1058 | local data="$1" 1059 | 1060 | # Get the keys in the JSON data 1061 | local keys=$(echo "$data" | jq -r 'to_entries | map(.key) | .[]') 1062 | 1063 | # Check if keys are empty 1064 | if [ -z "$keys" ]; then 1065 | echo "JSON report data does not contain any keys or the report does not exist." 1066 | return 1 1067 | fi 1068 | 1069 | # Build table header 1070 | local header="| Tag | Passed |\\n| --- | --- |\\n" 1071 | 1072 | # Loop through the JSON data to build the table rows 1073 | local rows="" 1074 | for build in $keys; do 1075 | local status=$(echo "$data" | jq -r ".[\\"$build\\"].test_success") 1076 | if [ "$status" = "true" ]; then 1077 | status="✅" 1078 | else 1079 | status="❌" 1080 | fi 1081 | local row="| "$build" | "$status" |\\n" 1082 | rows="${rows}${row}" 1083 | done 1084 | 1085 | local table="${header}${rows}" 1086 | local escaped_table=$(echo "$table" | sed 's/\"/\\\\"/g') 1087 | echo "$escaped_table" 1088 | } 1089 | 1090 | if [[ "${CI}" = "true" ]]; then 1091 | # Retrieve JSON data from URL 1092 | data=$(get_json "$CI_JSON_URL") 1093 | # Create table from JSON data 1094 | table=$(build_table "$data") 1095 | echo -e "$table" 1096 | 1097 | curl -X POST -H "Authorization: token $GITHUB_TOKEN" \ 1098 | -H "Accept: application/vnd.github.v3+json" \ 1099 | "https://api.github.com/repos/$LS_USER/$LS_REPO/issues/$PULL_REQUEST/comments" \ 1100 | -d "{\\"body\\": \\"I am a bot, here are the test results for this PR: \\n${CI_URL}\\n${SHELLCHECK_URL}\\n${table}\\"}" 1101 | else 1102 | curl -X POST -H "Authorization: token $GITHUB_TOKEN" \ 1103 | -H "Accept: application/vnd.github.v3+json" \ 1104 | "https://api.github.com/repos/$LS_USER/$LS_REPO/issues/$PULL_REQUEST/comments" \ 1105 | -d "{\\"body\\": \\"I am a bot, here is the pushed image/manifest for this PR: \\n\\n\\`${GITHUBIMAGE}:${META_TAG}\\`\\"}" 1106 | fi 1107 | ''' 1108 | 1109 | } 1110 | } 1111 | } 1112 | /* ###################### 1113 | Send status to Discord 1114 | ###################### */ 1115 | post { 1116 | always { 1117 | sh '''#!/bin/bash 1118 | rm -rf /config/.ssh/id_sign 1119 | rm -rf /config/.ssh/id_sign.pub 1120 | git config --global --unset gpg.format 1121 | git config --global --unset user.signingkey 1122 | git config --global --unset commit.gpgsign 1123 | ''' 1124 | script{ 1125 | env.JOB_DATE = sh( 1126 | script: '''date '+%Y-%m-%dT%H:%M:%S%:z' ''', 1127 | returnStdout: true).trim() 1128 | if (env.EXIT_STATUS == "ABORTED"){ 1129 | sh 'echo "build aborted"' 1130 | }else{ 1131 | if (currentBuild.currentResult == "SUCCESS"){ 1132 | if (env.GITHUBIMAGE =~ /lspipepr/){ 1133 | env.JOB_WEBHOOK_STATUS='Success' 1134 | env.JOB_WEBHOOK_COLOUR=3957028 1135 | env.JOB_WEBHOOK_FOOTER='PR Build' 1136 | }else if (env.GITHUBIMAGE =~ /lsiodev/){ 1137 | env.JOB_WEBHOOK_STATUS='Success' 1138 | env.JOB_WEBHOOK_COLOUR=3957028 1139 | env.JOB_WEBHOOK_FOOTER='Dev Build' 1140 | }else{ 1141 | env.JOB_WEBHOOK_STATUS='Success' 1142 | env.JOB_WEBHOOK_COLOUR=1681177 1143 | env.JOB_WEBHOOK_FOOTER='Live Build' 1144 | } 1145 | }else{ 1146 | if (env.GITHUBIMAGE =~ /lspipepr/){ 1147 | env.JOB_WEBHOOK_STATUS='Failure' 1148 | env.JOB_WEBHOOK_COLOUR=12669523 1149 | env.JOB_WEBHOOK_FOOTER='PR Build' 1150 | }else if (env.GITHUBIMAGE =~ /lsiodev/){ 1151 | env.JOB_WEBHOOK_STATUS='Failure' 1152 | env.JOB_WEBHOOK_COLOUR=12669523 1153 | env.JOB_WEBHOOK_FOOTER='Dev Build' 1154 | }else{ 1155 | 
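/* Fallback for failed builds whose image name matches neither lspipepr (PR build) nor lsiodev (dev build): the failure is reported as a live build, and 16711680 (0xFF0000) is a red Discord embed colour. */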
env.JOB_WEBHOOK_STATUS='Failure' 1156 | env.JOB_WEBHOOK_COLOUR=16711680 1157 | env.JOB_WEBHOOK_FOOTER='Live Build' 1158 | } 1159 | } 1160 | sh ''' curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/jenkins-avatar.png","embeds": [{"'color'": '${JOB_WEBHOOK_COLOUR}',\ 1161 | "footer": {"text" : "'"${JOB_WEBHOOK_FOOTER}"'"},\ 1162 | "timestamp": "'${JOB_DATE}'",\ 1163 | "description": "**Build:** '${BUILD_NUMBER}'\\n**CI Results:** '${CI_URL}'\\n**ShellCheck Results:** '${SHELLCHECK_URL}'\\n**Status:** '${JOB_WEBHOOK_STATUS}'\\n**Job:** '${RUN_DISPLAY_URL}'\\n**Change:** '${CODE_URL}'\\n**External Release:**: '${RELEASE_LINK}'\\n**DockerHub:** '${DOCKERHUB_LINK}'\\n"}],\ 1164 | "username": "Jenkins"}' ${BUILDS_DISCORD} ''' 1165 | } 1166 | } 1167 | } 1168 | cleanup { 1169 | sh '''#! /bin/bash 1170 | echo "Pruning builder!!" 1171 | docker builder prune -f --builder container || : 1172 | containers=$(docker ps -q) 1173 | if [[ -n "${containers}" ]]; then 1174 | BUILDX_CONTAINER_ID=$(docker ps -qf 'name=buildx_buildkit') 1175 | for container in ${containers}; do 1176 | if [[ "${container}" == "${BUILDX_CONTAINER_ID}" ]]; then 1177 | echo "skipping buildx container in docker stop" 1178 | else 1179 | echo "Stopping container ${container}" 1180 | docker stop ${container} 1181 | fi 1182 | done 1183 | fi 1184 | docker system prune -f --volumes || : 1185 | docker image prune -af || : 1186 | ''' 1187 | cleanWs() 1188 | } 1189 | } 1190 | } 1191 | 1192 | def retry_backoff(int max_attempts, int power_base, Closure c) { 1193 | int n = 0 1194 | while (n < max_attempts) { 1195 | try { 1196 | c() 1197 | return 1198 | } catch (err) { 1199 | if ((n + 1) >= max_attempts) { 1200 | throw err 1201 | } 1202 | sleep(power_base ** n) 1203 | n++ 1204 | } 1205 | } 1206 | return 1207 | } 1208 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 
28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 
91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 
150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 
216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. 
You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. 
Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. 
If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. 
If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 
512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. 
If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 
633 | 634 | <one line to give the program's name and a brief idea of what it does.> 635 | Copyright (C) <year> <name of author> 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see <https://www.gnu.org/licenses/>. 649 | 650 | Also add information on how to contact you by electronic and paper mail. 651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | <program> Copyright (C) <year> <name of author> 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | <https://www.gnu.org/licenses/>. 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | <https://www.gnu.org/licenses/why-not-lgpl.html>. 675 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | <!-- DO NOT EDIT THIS FILE MANUALLY --> 2 | <!-- Please read https://github.com/linuxserver/docker-baseimage-alpine/blob/master/.github/CONTRIBUTING.md --> 3 | [![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)](https://linuxserver.io) 4 | 5 | ## Contact information:- 6 | 7 | | Type | Address/Details | 8 | | :---: | --- | 9 | | Discord | [Discord](https://linuxserver.io/discord) | 10 | | IRC | `#linuxserver.io` on irc.libera.chat | 11 | | Forum | [Discourse](https://discourse.linuxserver.io/) | 12 | 13 | A custom base image built with [Alpine Linux](https://alpinelinux.org) and [s6-overlay](https://github.com/just-containers/s6-overlay). 14 | 15 | - Support for using our base images in your own projects is provided on a Reasonable Endeavours basis, please see our [Support Policy](https://www.linuxserver.io/supportpolicy) for details. 16 | - There is no `latest` tag for any of our base images, by design. 
We often make breaking changes between versions, and we don't publish release notes like we do for the downstream images. 17 | - If you're intending to distribute an image using one of our bases, please read our [docs on container branding](https://docs.linuxserver.io/general/container-branding/) first. 18 | - Alpine releases are supported for 2 years, after which we will stop building new base images for that version. 19 | 20 | The following line is only in this repo for loop testing: 21 | 22 | - { date: "01.01.50:", desc: "I am the release message for this internal repo." } 23 | -------------------------------------------------------------------------------- /jenkins-vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # jenkins variables 4 | project_name: docker-baseimage-alpine 5 | external_type: os 6 | release_type: stable 7 | release_tag: "3.22" 8 | ls_branch: master 9 | build_riscv64: true 10 | repo_vars: 11 | - BUILD_VERSION_ARG = 'OS' 12 | - LS_USER = 'linuxserver' 13 | - LS_REPO = 'docker-baseimage-alpine' 14 | - CONTAINER_NAME = 'baseimage-alpine' 15 | - DOCKERHUB_IMAGE = 'lsiobase/alpine' 16 | - DEV_DOCKERHUB_IMAGE = 'lsiodev/alpine' 17 | - PR_DOCKERHUB_IMAGE = 'lspipepr/alpine' 18 | - DIST_IMAGE = 'alpine' 19 | - MULTIARCH='true' 20 | - CI='true' 21 | - CI_WEB='false' 22 | - CI_PORT='80' 23 | - CI_SSL='true' 24 | - CI_DELAY='30' 25 | - CI_DOCKERENV='LSIO_FIRST_PARTY=true' 26 | - CI_AUTH='' 27 | - CI_WEBPATH='' 28 | -------------------------------------------------------------------------------- /package_versions.txt: -------------------------------------------------------------------------------- 1 | NAME VERSION TYPE 2 | acl-libs 2.3.2-r1 apk 3 | alpine-baselayout 3.7.0-r0 apk 4 | alpine-baselayout-data 3.7.0-r0 apk 5 | alpine-keys 2.5-r0 apk 6 | alpine-release 3.22.0-r0 apk 7 | apk-tools 2.14.9-r2 apk 8 | bash 5.2.37-r0 apk 9 | brotli-libs 1.1.0-r2 apk 10 | busybox 1.37.0-r18 apk 11 | busybox-binsh 1.37.0-r18 apk 12 | c-ares 1.34.5-r0 apk 13 | ca-certificates 20241121-r2 apk 14 | ca-certificates-bundle 20241121-r2 apk 15 | catatonit 0.2.1-r0 apk 16 | coreutils 9.7-r1 apk 17 | coreutils-env 9.7-r1 apk 18 | coreutils-fmt 9.7-r1 apk 19 | coreutils-sha512sum 9.7-r1 apk 20 | curl 8.14.0-r2 apk 21 | findutils 4.10.0-r0 apk 22 | jq 1.7.1-r0 apk 23 | libapk2 2.14.9-r2 apk 24 | libattr 2.5.2-r2 apk 25 | libbsd 0.12.2-r0 apk 26 | libcrypto3 3.5.0-r0 apk 27 | libcurl 8.14.0-r2 apk 28 | libidn2 2.3.7-r0 apk 29 | libintl 0.24.1-r0 apk 30 | libmd 1.1.0-r0 apk 31 | libncursesw 6.5_p20250503-r0 apk 32 | libproc2 4.0.4-r3 apk 33 | libpsl 0.21.5-r3 apk 34 | libssl3 3.5.0-r0 apk 35 | libunistring 1.3-r0 apk 36 | linux-pam 1.7.0-r4 apk 37 | musl 1.2.5-r10 apk 38 | musl-utils 1.2.5-r10 apk 39 | ncurses-terminfo-base 6.5_p20250503-r0 apk 40 | netcat-openbsd 1.229.1-r0 apk 41 | nghttp2-libs 1.65.0-r0 apk 42 | oniguruma 6.9.10-r0 apk 43 | procps-ng 4.0.4-r3 apk 44 | readline 8.2.13-r1 apk 45 | scanelf 1.3.8-r1 apk 46 | shadow 4.17.3-r0 apk 47 | skalibs-libs 2.14.4.0-r0 apk 48 | ssl_client 1.37.0-r18 apk 49 | tzdata 2025b-r0 apk 50 | utmps-libs 0.1.3.1-r0 apk 51 | zlib 1.3.1-r2 apk 52 | zstd-libs 1.5.7-r0 apk 53 | -------------------------------------------------------------------------------- /readme-vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # project information 4 | project_name: baseimage-alpine 5 | full_custom_readme: | 6 | {% raw -%} 7 | 
[![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)](https://linuxserver.io) 8 | 9 | ## Contact information:- 10 | 11 | | Type | Address/Details | 12 | | :---: | --- | 13 | | Discord | [Discord](https://linuxserver.io/discord) | 14 | | IRC | `#linuxserver.io` on irc.libera.chat | 15 | | Forum | [Discourse](https://discourse.linuxserver.io/) | 16 | 17 | A custom base image built with [Alpine Linux](https://alpinelinux.org) and [s6-overlay](https://github.com/just-containers/s6-overlay). 18 | 19 | - Support for using our base images in your own projects is provided on a Reasonable Endeavours basis, please see our [Support Policy](https://www.linuxserver.io/supportpolicy) for details. 20 | - There is no `latest` tag for any of our base images, by design. We often make breaking changes between versions, and we don't publish release notes like we do for the downstream images. 21 | - If you're intending to distribute an image using one of our bases, please read our [docs on container branding](https://docs.linuxserver.io/general/container-branding/) first. 22 | - Alpine releases are supported for 2 years, after which we will stop building new base images for that version. 23 | 24 | The following line is only in this repo for loop testing: 25 | 26 | - { date: "01.01.50:", desc: "I am the release message for this internal repo." } 27 | {%- endraw %} 28 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/ci-service-check/dependencies.d/legacy-services: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/ci-service-check/dependencies.d/legacy-services -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/ci-service-check/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/ci-service-check/up: -------------------------------------------------------------------------------- 1 | echo "[ls.io-init] done." 
-------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-adduser/branding: -------------------------------------------------------------------------------- 1 | ─────────────────────────────────────── 2 | _____ __ __ _____ _____ _____ _____ 3 | | | | | __|_ _| | | 4 | | --| | |__ | | | | | | | | | 5 | |_____|_____|_____| |_| |_____|_|_|_| 6 | _____ __ __ _ __ ____ 7 | | __ | | | | | | \ 8 | | __ -| | | | |__| | | 9 | |_____|_____|_|_____|____/ 10 | 11 | Based on images from linuxserver.io 12 | ─────────────────────────────────────── 13 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-adduser/dependencies.d/init-migrations: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/init-adduser/dependencies.d/init-migrations -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-adduser/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # shellcheck shell=bash 3 | 4 | PUID=${PUID:-911} 5 | PGID=${PGID:-911} 6 | 7 | if [[ -z ${LSIO_READ_ONLY_FS} ]] && [[ -z ${LSIO_NON_ROOT_USER} ]]; then 8 | USERHOME=$(grep abc /etc/passwd | cut -d ":" -f6) 9 | usermod -d "/root" abc 10 | 11 | groupmod -o -g "${PGID}" abc 12 | usermod -o -u "${PUID}" abc 13 | 14 | usermod -d "${USERHOME}" abc 15 | fi 16 | 17 | if { [[ -z ${LSIO_READ_ONLY_FS} ]] && [[ -z ${LSIO_NON_ROOT_USER} ]]; } || [[ ! ${LSIO_FIRST_PARTY} = "true" ]]; then 18 | cat /etc/s6-overlay/s6-rc.d/init-adduser/branding 19 | else 20 | cat /run/branding 21 | fi 22 | 23 | if [[ -f /donate.txt ]]; then 24 | echo ' 25 | To support the app dev(s) visit:' 26 | cat /donate.txt 27 | fi 28 | echo ' 29 | To support LSIO projects visit: 30 | https://www.linuxserver.io/donate/ 31 | 32 | ─────────────────────────────────────── 33 | GID/UID 34 | ───────────────────────────────────────' 35 | if [[ -z ${LSIO_NON_ROOT_USER} ]]; then 36 | echo " 37 | User UID: $(id -u abc) 38 | User GID: $(id -g abc) 39 | ───────────────────────────────────────" 40 | else 41 | echo " 42 | User UID: $(stat /run -c %u) 43 | User GID: $(stat /run -c %g) 44 | ───────────────────────────────────────" 45 | fi 46 | if [[ -f /build_version ]]; then 47 | cat /build_version 48 | echo ' 49 | ─────────────────────────────────────── 50 | ' 51 | fi 52 | 53 | if [[ -z ${LSIO_READ_ONLY_FS} ]] && [[ -z ${LSIO_NON_ROOT_USER} ]]; then 54 | lsiown abc:abc /app 55 | lsiown abc:abc /config 56 | lsiown abc:abc /defaults 57 | fi 58 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-adduser/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-adduser/up: -------------------------------------------------------------------------------- 1 | /etc/s6-overlay/s6-rc.d/init-adduser/run 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-config: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-config -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-crontab-config: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-crontab-config -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-config-end/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-config-end/up: -------------------------------------------------------------------------------- 1 | # This file doesn't do anything, it's just the end of the downstream image init process 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-config/dependencies.d/init-os-end: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/init-config/dependencies.d/init-os-end -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-config/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-config/up: -------------------------------------------------------------------------------- 1 | # This file doesn't do anything, it's just the start of the downstream image init process 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-crontab-config/dependencies.d/init-config: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/init-crontab-config/dependencies.d/init-config -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-crontab-config/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # shellcheck shell=bash 3 | 4 | for cron_user in abc root; do 5 | if [[ -z ${LSIO_READ_ONLY_FS} ]] && [[ -z ${LSIO_NON_ROOT_USER} ]]; then 6 | if [[ -f "/etc/crontabs/${cron_user}" ]]; then 7 | lsiown "${cron_user}":"${cron_user}" "/etc/crontabs/${cron_user}" 8 | crontab -u "${cron_user}" "/etc/crontabs/${cron_user}" 9 | fi 10 | fi 11 | 12 | if [[ -f "/defaults/crontabs/${cron_user}" ]]; then 13 | # make folders 14 | mkdir -p \ 15 | /config/crontabs 16 | 17 | # if crontabs do not exist in config 18 | if [[ ! 
-f "/config/crontabs/${cron_user}" ]]; then 19 | # copy crontab from system 20 | if crontab -l -u "${cron_user}" >/dev/null 2>&1; then 21 | crontab -l -u "${cron_user}" >"/config/crontabs/${cron_user}" 22 | fi 23 | 24 | # if crontabs still do not exist in config (were not copied from system) 25 | # copy crontab from image defaults (using -n, do not overwrite an existing file) 26 | cp -n "/defaults/crontabs/${cron_user}" /config/crontabs/ 27 | fi 28 | 29 | # set permissions and import user crontabs 30 | lsiown "${cron_user}":"${cron_user}" "/config/crontabs/${cron_user}" 31 | crontab -u "${cron_user}" "/config/crontabs/${cron_user}" 32 | fi 33 | done 34 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-crontab-config/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-crontab-config/up: -------------------------------------------------------------------------------- 1 | /etc/s6-overlay/s6-rc.d/init-crontab-config/run 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-custom-files/dependencies.d/init-mods-end: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/init-custom-files/dependencies.d/init-mods-end -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-custom-files/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # shellcheck shell=bash 3 | 4 | # Directories 5 | SCRIPTS_DIR="/custom-cont-init.d" 6 | 7 | # Make sure custom init directory exists and has files in it 8 | if [[ -e "${SCRIPTS_DIR}" ]] && [[ -n "$(/bin/ls -A ${SCRIPTS_DIR} 2>/dev/null)" ]]; then 9 | echo "[custom-init] Files found, executing" 10 | for SCRIPT in "${SCRIPTS_DIR}"/*; do 11 | NAME="$(basename "${SCRIPT}")" 12 | if [[ -f "${SCRIPT}" ]]; then 13 | echo "[custom-init] ${NAME}: executing..." 14 | /bin/bash "${SCRIPT}" 15 | echo "[custom-init] ${NAME}: exited $?" 16 | elif [[ ! -f "${SCRIPT}" ]]; then 17 | echo "[custom-init] ${NAME}: is not a file" 18 | fi 19 | done 20 | else 21 | echo "[custom-init] No custom files found, skipping..." 
22 | fi 23 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-custom-files/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-custom-files/up: -------------------------------------------------------------------------------- 1 | /etc/s6-overlay/s6-rc.d/init-custom-files/run 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-device-perms/dependencies.d/init-adduser: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/init-device-perms/dependencies.d/init-adduser -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-device-perms/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # shellcheck shell=bash 3 | 4 | if [[ -z ${LSIO_NON_ROOT_USER} ]] && [[ -n ${ATTACHED_DEVICES_PERMS} ]]; then 5 | FILES=$(find ${ATTACHED_DEVICES_PERMS} -print 2>/dev/null) 6 | 7 | for i in ${FILES}; do 8 | FILE_GID=$(stat -c '%g' "${i}") 9 | FILE_UID=$(stat -c '%u' "${i}") 10 | # check if user matches device 11 | if id -u abc | grep -qw "${FILE_UID}"; then 12 | echo "**** permissions for ${i} are good ****" 13 | else 14 | # check if group matches and that device has group rw 15 | if id -G abc | grep -qw "${FILE_GID}" && [[ $(stat -c '%A' "${i}" | cut -b 5,6) == "rw" ]]; then 16 | echo "**** permissions for ${i} are good ****" 17 | # check if device needs to be added to group 18 | elif ! id -G abc | grep -qw "${FILE_GID}"; then 19 | # check if group needs to be created 20 | GROUP_NAME=$(getent group "${FILE_GID}" | awk -F: '{print $1}') 21 | if [[ -z "${GROUP_NAME}" ]]; then 22 | GROUP_NAME="group$(head /dev/urandom | tr -dc 'a-z0-9' | head -c4)" 23 | groupadd "${GROUP_NAME}" 24 | groupmod -g "${FILE_GID}" "${GROUP_NAME}" 25 | echo "**** creating group ${GROUP_NAME} with id ${FILE_GID} ****" 26 | fi 27 | echo "**** adding ${i} to group ${GROUP_NAME} with id ${FILE_GID} ****" 28 | usermod -a -G "${GROUP_NAME}" abc 29 | fi 30 | # check if device has group rw 31 | if [[ $(stat -c '%A' "${i}" | cut -b 5,6) != "rw" ]]; then 32 | echo -e "**** The device ${i} does not have group read/write permissions, attempting to fix inside the container. 
****" 33 | chmod g+rw "${i}" 34 | fi 35 | fi 36 | done 37 | fi 38 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-device-perms/type: -------------------------------------------------------------------------------- 1 | oneshot -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-device-perms/up: -------------------------------------------------------------------------------- 1 | /etc/s6-overlay/s6-rc.d/init-device-perms/run -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-envfile/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # shellcheck shell=bash 3 | 4 | if find /run/s6/container_environment/FILE__* -maxdepth 1 > /dev/null 2>&1; then 5 | for FILENAME in /run/s6/container_environment/FILE__*; do 6 | SECRETFILE=$(cat "${FILENAME}") 7 | if [[ -f ${SECRETFILE} ]]; then 8 | FILESTRIP=${FILENAME//FILE__/} 9 | if [[ $(tail -n1 "${SECRETFILE}" | wc -l) != 0 ]]; then 10 | echo "[env-init] Your secret: ${FILENAME##*/}" 11 | echo " contains a trailing newline and may not work as expected" 12 | fi 13 | cat "${SECRETFILE}" >"${FILESTRIP}" 14 | echo "[env-init] ${FILESTRIP##*/} set from ${FILENAME##*/}" 15 | else 16 | echo "[env-init] cannot find secret in ${FILENAME##*/}" 17 | fi 18 | done 19 | fi 20 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-envfile/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-envfile/up: -------------------------------------------------------------------------------- 1 | /etc/s6-overlay/s6-rc.d/init-envfile/run 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-migrations/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # shellcheck shell=bash 3 | 4 | MIGRATIONS_DIR="/migrations" 5 | MIGRATIONS_HISTORY="/config/.migrations" 6 | 7 | echo "[migrations] started" 8 | 9 | if [[ ! -d ${MIGRATIONS_DIR} ]]; then 10 | echo "[migrations] no migrations found" 11 | exit 12 | fi 13 | 14 | for MIGRATION in $(find ${MIGRATIONS_DIR}/* | sort -n); do 15 | NAME="$(basename "${MIGRATION}")" 16 | if [[ -f ${MIGRATIONS_HISTORY} ]] && grep -Fxq "${NAME}" ${MIGRATIONS_HISTORY}; then 17 | echo "[migrations] ${NAME}: skipped" 18 | continue 19 | fi 20 | echo "[migrations] ${NAME}: executing..." 21 | # Execute migration script in a subshell to prevent it from modifying the current environment 22 | ("${MIGRATION}") 23 | EXIT_CODE=$? 
24 | if [[ ${EXIT_CODE} -ne 0 ]]; then 25 | echo "[migrations] ${NAME}: failed with exit code ${EXIT_CODE}, contact support" 26 | exit "${EXIT_CODE}" 27 | fi 28 | echo "${NAME}" >>${MIGRATIONS_HISTORY} 29 | echo "[migrations] ${NAME}: succeeded" 30 | done 31 | 32 | echo "[migrations] done" 33 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-migrations/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-migrations/up: -------------------------------------------------------------------------------- 1 | /etc/s6-overlay/s6-rc.d/init-migrations/run 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods-end/dependencies.d/init-mods-package-install: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/init-mods-end/dependencies.d/init-mods-package-install -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods-end/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods-end/up: -------------------------------------------------------------------------------- 1 | # This file doesn't do anything, it's just the end of the mod init process 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods-package-install/dependencies.d/init-mods: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/init-mods-package-install/dependencies.d/init-mods -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods-package-install/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods-package-install/up: -------------------------------------------------------------------------------- 1 | /etc/s6-overlay/s6-rc.d/init-mods-package-install/run 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods/dependencies.d/init-config-end: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/init-mods/dependencies.d/init-config-end -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods/up: -------------------------------------------------------------------------------- 1 | # This 
file doesn't do anything, it's just the start of the mod init process 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-adduser: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-adduser -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-device-perms: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-device-perms -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-envfile: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-envfile -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-os-end/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-os-end/up: -------------------------------------------------------------------------------- 1 | # This file doesn't do anything, it's just the end of the mod init process 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-services/dependencies.d/init-custom-files: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/init-services/dependencies.d/init-custom-files -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-services/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-services/up: -------------------------------------------------------------------------------- 1 | # This file doesn't do anything, it just signals that services can start 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/svc-cron/dependencies.d/init-services: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/svc-cron/dependencies.d/init-services -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/svc-cron/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # shellcheck shell=bash 3 | 4 | if builtin command -v crontab >/dev/null 2>&1 && [[ -n "$(crontab -l -u abc 2>/dev/null || true)" 
|| -n "$(crontab -l -u root 2>/dev/null || true)" ]]; then 5 | if builtin command -v busybox >/dev/null 2>&1 && [[ $(busybox || true) =~ [[:space:]](crond)([,]|$) ]]; then 6 | exec busybox crond -f -S -l 5 7 | elif [[ -f /usr/bin/apt ]] && [[ -f /usr/sbin/cron ]]; then 8 | exec /usr/sbin/cron -f -L 5 9 | else 10 | echo "**** cron not found ****" 11 | sleep infinity 12 | fi 13 | else 14 | sleep infinity 15 | fi 16 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/svc-cron/type: -------------------------------------------------------------------------------- 1 | longrun 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-adduser: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-adduser -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-config: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-config -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-config-end: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-config-end -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-crontab-config: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-crontab-config -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-custom-files: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-custom-files -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-device-perms: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-device-perms -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-envfile: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-envfile -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-migrations: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-migrations -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods-end: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods-end -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods-package-install: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods-package-install -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-os-end: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-os-end -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-services: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-services -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/svc-cron: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/user/contents.d/svc-cron -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user2/contents.d/ci-service-check: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-alpine/23bf232ff11a9cfc5b7a8a3069c47c0f26fd5b6e/root/etc/s6-overlay/s6-rc.d/user2/contents.d/ci-service-check --------------------------------------------------------------------------------
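
Illustrative note (not a file in this repository): the README above says these base images are meant to be consumed by downstream projects, and there is no `latest` tag. The sketch below shows, under stated assumptions, how a downstream image might extend this base and register its own s6 service using the same run/type/up layout and user/contents.d registration seen throughout root/etc/s6-overlay/s6-rc.d. The image tag, the service name svc-myapp, and the served path are hypothetical examples, and the busybox httpd applet is assumed to be available in the base's busybox build; this is a sketch, not a definitive or endorsed setup.

    # Hypothetical downstream Dockerfile (sketch only; the exact tag is an assumption)
    FROM lsiobase/alpine:3.22

    # Ship an s6 service definition following the layout used in this repo:
    #   root/etc/s6-overlay/s6-rc.d/svc-myapp/run                         - the service script
    #   root/etc/s6-overlay/s6-rc.d/svc-myapp/type                        - contains "longrun"
    #   root/etc/s6-overlay/s6-rc.d/svc-myapp/dependencies.d/init-services - empty file, start after init
    #   root/etc/s6-overlay/s6-rc.d/user/contents.d/svc-myapp             - empty file, enables the service
    COPY root/ /

and a minimal svc-myapp/run, mirroring the style of the svc-cron script above:

    #!/usr/bin/with-contenv bash
    # shellcheck shell=bash

    # Serve /config/www in the foreground so s6 can supervise the process
    # (assumes the base image's busybox build includes the httpd applet).
    exec busybox httpd -f -p 80 -h /config/www

As in the scripts above, the service runs in the foreground via exec so s6-overlay supervises it directly, and its data lives under /config, which init-adduser chowns to abc when the container is not running read-only or as a non-root user.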