├── .dockerignore ├── .editorconfig ├── .gitattributes ├── .github ├── CONTRIBUTING.md ├── FUNDING.yml ├── ISSUE_TEMPLATE │ ├── config.yml │ ├── issue.bug.yml │ └── issue.feature.yml ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── call_issue_pr_tracker.yml │ ├── call_issues_cron.yml │ ├── external_trigger.yml │ ├── external_trigger_scheduler.yml │ ├── greetings.yml │ ├── package_trigger_scheduler.yml │ └── permissions.yml ├── .gitignore ├── Dockerfile ├── Dockerfile.aarch64 ├── Jenkinsfile ├── LICENSE ├── README.md ├── jenkins-vars.yml ├── package_versions.txt ├── readme-vars.yml ├── root └── etc │ └── s6-overlay │ └── s6-rc.d │ ├── ci-service-check │ ├── dependencies.d │ │ └── legacy-services │ ├── type │ └── up │ ├── init-adduser │ ├── branding │ ├── dependencies.d │ │ └── init-migrations │ ├── run │ ├── type │ └── up │ ├── init-config-end │ ├── dependencies.d │ │ ├── init-config │ │ └── init-crontab-config │ ├── type │ └── up │ ├── init-config │ ├── dependencies.d │ │ └── init-os-end │ ├── type │ └── up │ ├── init-crontab-config │ ├── dependencies.d │ │ └── init-config │ ├── run │ ├── type │ └── up │ ├── init-custom-files │ ├── dependencies.d │ │ └── init-mods-end │ ├── run │ ├── type │ └── up │ ├── init-device-perms │ ├── dependencies.d │ │ └── init-adduser │ ├── run │ ├── type │ └── up │ ├── init-envfile │ ├── run │ ├── type │ └── up │ ├── init-migrations │ ├── run │ ├── type │ └── up │ ├── init-mods-end │ ├── dependencies.d │ │ └── init-mods-package-install │ ├── type │ └── up │ ├── init-mods-package-install │ ├── dependencies.d │ │ └── init-mods │ ├── type │ └── up │ ├── init-mods │ ├── dependencies.d │ │ └── init-config-end │ ├── type │ └── up │ ├── init-os-end │ ├── dependencies.d │ │ ├── init-adduser │ │ ├── init-device-perms │ │ └── init-envfile │ ├── type │ └── up │ ├── init-services │ ├── dependencies.d │ │ └── init-custom-files │ ├── type │ └── up │ ├── svc-cron │ ├── dependencies.d │ │ └── init-services │ ├── run │ └── type │ ├── user │ └── 
contents.d │ │ ├── init-adduser │ │ ├── init-config │ │ ├── init-config-end │ │ ├── init-crontab-config │ │ ├── init-custom-files │ │ ├── init-device-perms │ │ ├── init-envfile │ │ ├── init-migrations │ │ ├── init-mods │ │ ├── init-mods-end │ │ ├── init-mods-package-install │ │ ├── init-os-end │ │ ├── init-services │ │ └── svc-cron │ └── user2 │ └── contents.d │ └── ci-service-check ├── sources.list └── sources.list.arm /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | .gitignore 3 | .github 4 | .gitattributes 5 | READMETEMPLATE.md 6 | README.md 7 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # This file is globally distributed to all container image projects from 2 | # https://github.com/linuxserver/docker-jenkins-builder/blob/master/.editorconfig 3 | 4 | # top-most EditorConfig file 5 | root = true 6 | 7 | # Unix-style newlines with a newline ending every file 8 | [*] 9 | end_of_line = lf 10 | insert_final_newline = true 11 | # trim_trailing_whitespace may cause unintended issues and should not be globally set true 12 | trim_trailing_whitespace = false 13 | 14 | [{Dockerfile*,**.yml}] 15 | indent_style = space 16 | indent_size = 2 17 | 18 | [{**.sh,root/etc/s6-overlay/s6-rc.d/**,root/etc/cont-init.d/**,root/etc/services.d/**}] 19 | indent_style = space 20 | indent_size = 4 21 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | 4 | # Custom for Visual Studio 5 | *.cs diff=csharp 6 | 7 | # Standard to msysgit 8 | *.doc diff=astextplain 9 | *.DOC diff=astextplain 10 | *.docx diff=astextplain 11 | *.DOCX diff=astextplain 12 | *.dot diff=astextplain 13 | *.DOT 
diff=astextplain 14 | *.pdf diff=astextplain 15 | *.PDF diff=astextplain 16 | *.rtf diff=astextplain 17 | *.RTF diff=astextplain 18 | -------------------------------------------------------------------------------- /.github/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to baseimage-ubuntu 2 | 3 | ## Gotchas 4 | 5 | * While contributing make sure to make all your changes before creating a Pull Request, as our pipeline builds each commit after the PR is open. 6 | * Read, and fill the Pull Request template 7 | * If this is a fix for a typo (in code, documentation, or the README) please file an issue and let us sort it out. We do not need a PR 8 | * If the PR is addressing an existing issue include, closes #\, in the body of the PR commit message 9 | * If you want to discuss changes, you can also bring it up in [#dev-talk](https://discordapp.com/channels/354974912613449730/757585807061155840) in our [Discord server](https://linuxserver.io/discord) 10 | 11 | ## Common files 12 | 13 | | File | Use case | 14 | | :----: | --- | 15 | | `Dockerfile` | Dockerfile used to build amd64 images | 16 | | `Dockerfile.aarch64` | Dockerfile used to build 64bit ARM architectures | 17 | | `Dockerfile.armhf` | Dockerfile used to build 32bit ARM architectures | 18 | | `Jenkinsfile` | This file is a product of our builder and should not be edited directly. This is used to build the image | 19 | | `jenkins-vars.yml` | This file is used to generate the `Jenkinsfile` mentioned above, it only affects the build-process | 20 | | `package_versions.txt` | This file is generated as a part of the build-process and should not be edited directly. It lists all the installed packages and their versions | 21 | | `README.md` | This file is a product of our builder and should not be edited directly. 
This displays the readme for the repository and image registries | 22 | | `readme-vars.yml` | This file is used to generate the `README.md` | 23 | 24 | ## Readme 25 | 26 | If you would like to change our readme, please __**do not**__ directly edit the readme, as it is auto-generated on each commit. 27 | Instead edit the [readme-vars.yml](https://github.com/linuxserver/docker-baseimage-ubuntu/edit/noble/readme-vars.yml). 28 | 29 | These variables are used in a template for our [Jenkins Builder](https://github.com/linuxserver/docker-jenkins-builder) as part of an ansible play. 30 | Most of these variables are also carried over to [docs.linuxserver.io](https://docs.linuxserver.io) 31 | 32 | ### Fixing typos or clarify the text in the readme 33 | 34 | There are variables for multiple parts of the readme, the most common ones are: 35 | 36 | | Variable | Description | 37 | | :----: | --- | 38 | | `project_blurb` | This is the short excerpt shown above the project logo. | 39 | | `app_setup_block` | This is the text that shows up under "Application Setup" if enabled | 40 | 41 | ### Parameters 42 | 43 | The compose and run examples are also generated from these variables. 44 | 45 | We have a [reference file](https://github.com/linuxserver/docker-jenkins-builder/blob/master/vars/_container-vars-blank) in our Jenkins Builder. 46 | 47 | These are prefixed with `param_` for required parameters, or `opt_param` for optional parameters, except for `cap_add`. 48 | Remember to enable param, if currently disabled. This differs between parameters, and can be seen in the reference file. 49 | 50 | Devices, environment variables, ports and volumes expects its variables in a certain way. 
51 | 52 | ### Devices 53 | 54 | ```yml 55 | param_devices: 56 | - { device_path: "/dev/dri", device_host_path: "/dev/dri", desc: "For hardware transcoding" } 57 | opt_param_devices: 58 | - { device_path: "/dev/dri", device_host_path: "/dev/dri", desc: "For hardware transcoding" } 59 | ``` 60 | 61 | ### Environment variables 62 | 63 | ```yml 64 | param_env_vars: 65 | - { env_var: "TZ", env_value: "Europe/London", desc: "Specify a timezone to use EG Europe/London." } 66 | opt_param_env_vars: 67 | - { env_var: "VERSION", env_value: "latest", desc: "Supported values are LATEST, PLEXPASS or a specific version number." } 68 | ``` 69 | 70 | ### Ports 71 | 72 | ```yml 73 | param_ports: 74 | - { external_port: "80", internal_port: "80", port_desc: "Application WebUI" } 75 | opt_param_ports: 76 | - { external_port: "80", internal_port: "80", port_desc: "Application WebUI" } 77 | ``` 78 | 79 | ### Volumes 80 | 81 | ```yml 82 | param_volumes: 83 | - { vol_path: "/config", vol_host_path: "", desc: "Configuration files." } 84 | opt_param_volumes: 85 | - { vol_path: "/config", vol_host_path: "", desc: "Configuration files." } 86 | ``` 87 | 88 | ### Testing template changes 89 | 90 | After you make any changes to the templates, you can use our [Jenkins Builder](https://github.com/linuxserver/docker-jenkins-builder) to have the files updated from the modified templates. Please use the command found under `Running Locally` [on this page](https://github.com/linuxserver/docker-jenkins-builder/blob/master/README.md) to generate them prior to submitting a PR. 91 | 92 | ## Dockerfiles 93 | 94 | We use multiple Dockerfiles in our repos, this is because sometimes some CPU architectures needs different packages to work. 95 | If you are proposing additional packages to be added, ensure that you added the packages to all the Dockerfiles in alphabetical order. 
96 | 97 | ### Testing your changes 98 | 99 | ```bash 100 | git clone https://github.com/linuxserver/docker-baseimage-ubuntu.git 101 | cd docker-baseimage-ubuntu 102 | docker build \ 103 | --no-cache \ 104 | --pull \ 105 | -t linuxserver/baseimage-ubuntu:latest . 106 | ``` 107 | 108 | The ARM variants can be built on x86_64 hardware and vice versa using `lscr.io/linuxserver/qemu-static` 109 | 110 | ```bash 111 | docker run --rm --privileged lscr.io/linuxserver/qemu-static --reset 112 | ``` 113 | 114 | Once registered you can define the dockerfile to use with `-f Dockerfile.aarch64`. 115 | 116 | ## Update the changelog 117 | 118 | If you are modifying the Dockerfiles or any of the startup scripts in [root](https://github.com/linuxserver/docker-baseimage-ubuntu/tree/noble/root), add an entry to the changelog 119 | 120 | ```yml 121 | changelogs: 122 | - { date: "DD.MM.YY:", desc: "Added some love to templates" } 123 | ``` 124 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: linuxserver 2 | open_collective: linuxserver 3 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: Discord chat support 4 | url: https://linuxserver.io/discord 5 | about: Realtime support / chat with the community and the team. 6 | 7 | - name: Discourse discussion forum 8 | url: https://discourse.linuxserver.io 9 | about: Post on our community forum. 10 | 11 | - name: Documentation 12 | url: https://docs.linuxserver.io 13 | about: Documentation - information about all of our containers. 
14 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/issue.bug.yml: -------------------------------------------------------------------------------- 1 | # Based on the issue template 2 | name: Bug report 3 | description: Create a report to help us improve 4 | title: "[BUG] " 5 | labels: [Bug] 6 | body: 7 | - type: checkboxes 8 | attributes: 9 | label: Is there an existing issue for this? 10 | description: Please search to see if an issue already exists for the bug you encountered. 11 | options: 12 | - label: I have searched the existing issues 13 | required: true 14 | - type: textarea 15 | attributes: 16 | label: Current Behavior 17 | description: Tell us what happens instead of the expected behavior. 18 | validations: 19 | required: true 20 | - type: textarea 21 | attributes: 22 | label: Expected Behavior 23 | description: Tell us what should happen. 24 | validations: 25 | required: false 26 | - type: textarea 27 | attributes: 28 | label: Steps To Reproduce 29 | description: Steps to reproduce the behavior. 30 | placeholder: | 31 | 1. In this environment... 32 | 2. With this config... 33 | 3. Run '...' 34 | 4. See error... 
35 | validations: 36 | required: true 37 | - type: textarea 38 | attributes: 39 | label: Environment 40 | description: | 41 | examples: 42 | - **OS**: Ubuntu 20.04 43 | - **How docker service was installed**: distro's packagemanager 44 | value: | 45 | - OS: 46 | - How docker service was installed: 47 | render: markdown 48 | validations: 49 | required: false 50 | - type: textarea 51 | attributes: 52 | label: Docker creation 53 | description: | 54 | Command used to create docker container 55 | Provide your docker create/run command or compose yaml snippet, or a screenshot of settings if using a gui to create the container 56 | render: bash 57 | validations: 58 | required: true 59 | - type: textarea 60 | attributes: 61 | description: | 62 | Provide a full docker log, output of "docker logs baseimage-ubuntu" 63 | label: Container logs 64 | placeholder: | 65 | Output of `docker logs baseimage-ubuntu` 66 | render: bash 67 | validations: 68 | required: true 69 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/issue.feature.yml: -------------------------------------------------------------------------------- 1 | # Based on the issue template 2 | name: Feature request 3 | description: Suggest an idea for this project 4 | title: "[FEAT] <title>" 5 | labels: [enhancement] 6 | body: 7 | - type: checkboxes 8 | attributes: 9 | label: Is this a new feature request? 10 | description: Please search to see if a feature request already exists. 11 | options: 12 | - label: I have searched the existing issues 13 | required: true 14 | - type: textarea 15 | attributes: 16 | label: Wanted change 17 | description: Tell us what you want to happen. 18 | validations: 19 | required: true 20 | - type: textarea 21 | attributes: 22 | label: Reason for change 23 | description: Justify your request, why do you want it, what is the benefit. 
24 | validations: 25 | required: true 26 | - type: textarea 27 | attributes: 28 | label: Proposed code change 29 | description: Do you have a potential code change in mind? 30 | validations: 31 | required: false 32 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | <!--- Provide a general summary of your changes in the Title above --> 2 | 3 | [linuxserverurl]: https://linuxserver.io 4 | [![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)][linuxserverurl] 5 | 6 | 7 | <!--- Before submitting a pull request please check the following --> 8 | 9 | <!--- If this is a fix for a typo (in code, documentation, or the README) please file an issue and let us sort it out. We do not need a PR --> 10 | <!--- Ask yourself if this modification is something the whole userbase will benefit from, if this is a specific change for corner case functionality or plugins please look at making a Docker Mod or local script https://blog.linuxserver.io/2019/09/14/customizing-our-containers/ --> 11 | <!--- That if the PR is addressing an existing issue include, closes #<issue number> , in the body of the PR commit message --> 12 | <!--- You have included links to any files / patches etc your PR may be using in the body of the PR commit message --> 13 | <!--- We maintain a changelog of major revisions to the container at the end of readme-vars.yml in the root of this repository, please add your changes there if appropriate --> 14 | 15 | 16 | <!--- Coding guidelines: --> 17 | <!--- 1. Installed packages in the Dockerfiles should be in alphabetical order --> 18 | <!--- 2. Changes to Dockerfile should be replicated in Dockerfile.armhf and Dockerfile.aarch64 if applicable --> 19 | <!--- 3. 
Indentation style (tabs vs 4 spaces vs 1 space) should match the rest of the document --> 20 | <!--- 4. Readme is auto generated from readme-vars.yml, make your changes there --> 21 | 22 | ------------------------------ 23 | 24 | - [ ] I have read the [contributing](https://github.com/linuxserver/docker-baseimage-ubuntu/blob/noble/.github/CONTRIBUTING.md) guideline and understand that I have made the correct modifications 25 | 26 | ------------------------------ 27 | 28 | <!--- We welcome all PR’s though this doesn’t guarantee it will be accepted. --> 29 | 30 | ## Description: 31 | <!--- Describe your changes in detail --> 32 | 33 | ## Benefits of this PR and context: 34 | <!--- Please explain why we should accept this PR. If this fixes an outstanding bug, please reference the issue # --> 35 | 36 | ## How Has This Been Tested? 37 | <!--- Please describe in detail how you tested your changes. --> 38 | <!--- Include details of your testing environment, and the tests you ran to --> 39 | <!--- see how your change affects other areas of the code, etc. 
--> 40 | 41 | 42 | ## Source / References: 43 | <!--- Please include any forum posts/github links relevant to the PR --> 44 | -------------------------------------------------------------------------------- /.github/workflows/call_issue_pr_tracker.yml: -------------------------------------------------------------------------------- 1 | name: Issue & PR Tracker 2 | 3 | on: 4 | issues: 5 | types: [opened,reopened,labeled,unlabeled,closed] 6 | pull_request_target: 7 | types: [opened,reopened,review_requested,review_request_removed,labeled,unlabeled,closed] 8 | pull_request_review: 9 | types: [submitted,edited,dismissed] 10 | 11 | jobs: 12 | manage-project: 13 | permissions: 14 | issues: write 15 | uses: linuxserver/github-workflows/.github/workflows/issue-pr-tracker.yml@v1 16 | secrets: inherit 17 | -------------------------------------------------------------------------------- /.github/workflows/call_issues_cron.yml: -------------------------------------------------------------------------------- 1 | name: Mark stale issues and pull requests 2 | on: 3 | schedule: 4 | - cron: '13 7 * * *' 5 | workflow_dispatch: 6 | 7 | jobs: 8 | stale: 9 | permissions: 10 | issues: write 11 | pull-requests: write 12 | uses: linuxserver/github-workflows/.github/workflows/issues-cron.yml@v1 13 | secrets: inherit 14 | -------------------------------------------------------------------------------- /.github/workflows/external_trigger.yml: -------------------------------------------------------------------------------- 1 | name: External Trigger Main 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | jobs: 7 | external-trigger-noble: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v4.1.1 11 | 12 | - name: External Trigger 13 | if: github.ref == 'refs/heads/noble' 14 | env: 15 | SKIP_EXTERNAL_TRIGGER: ${{ vars.SKIP_EXTERNAL_TRIGGER }} 16 | run: | 17 | printf "# External trigger for docker-baseimage-ubuntu\n\n" >> $GITHUB_STEP_SUMMARY 18 | echo "Type is \`os\`" >> 
$GITHUB_STEP_SUMMARY 19 | echo "No external release, exiting" >> $GITHUB_STEP_SUMMARY 20 | exit 0 21 | if grep -q "^baseimage-ubuntu_noble_${EXT_RELEASE}" <<< "${SKIP_EXTERNAL_TRIGGER}"; then 22 | echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY 23 | echo "> Github organizational variable \`SKIP_EXTERNAL_TRIGGER\` matches current external release; skipping trigger." >> $GITHUB_STEP_SUMMARY 24 | exit 0 25 | fi 26 | -------------------------------------------------------------------------------- /.github/workflows/external_trigger_scheduler.yml: -------------------------------------------------------------------------------- 1 | name: External Trigger Scheduler 2 | 3 | on: 4 | schedule: 5 | - cron: '18 * * * *' 6 | workflow_dispatch: 7 | 8 | jobs: 9 | external-trigger-scheduler: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4.1.1 13 | with: 14 | fetch-depth: '0' 15 | 16 | - name: External Trigger Scheduler 17 | run: | 18 | printf "# External trigger scheduler for docker-baseimage-ubuntu\n\n" >> $GITHUB_STEP_SUMMARY 19 | printf "Found the branches:\n\n%s\n" "$(git for-each-ref --format='- %(refname:lstrip=3)' refs/remotes)" >> $GITHUB_STEP_SUMMARY 20 | for br in $(git for-each-ref --format='%(refname:lstrip=3)' refs/remotes) 21 | do 22 | if [[ "${br}" == "HEAD" ]]; then 23 | printf "\nSkipping %s.\n" ${br} >> $GITHUB_STEP_SUMMARY 24 | continue 25 | fi 26 | printf "\n## Evaluating \`%s\`\n\n" ${br} >> $GITHUB_STEP_SUMMARY 27 | ls_jenkins_vars=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/${br}/jenkins-vars.yml) 28 | ls_branch=$(echo "${ls_jenkins_vars}" | yq -r '.ls_branch') 29 | ls_trigger=$(echo "${ls_jenkins_vars}" | yq -r '.external_type') 30 | if [[ "${br}" == "${ls_branch}" ]] && [[ "${ls_trigger}" != "os" ]]; then 31 | echo "Branch appears to be live and trigger is not os; checking workflow." 
>> $GITHUB_STEP_SUMMARY 32 | if curl -sfX GET https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/${br}/.github/workflows/external_trigger.yml > /dev/null 2>&1; then 33 | echo "Triggering external trigger workflow for branch." >> $GITHUB_STEP_SUMMARY 34 | curl -iX POST \ 35 | -H "Authorization: token ${{ secrets.CR_PAT }}" \ 36 | -H "Accept: application/vnd.github.v3+json" \ 37 | -d "{\"ref\":\"refs/heads/${br}\"}" \ 38 | https://api.github.com/repos/linuxserver/docker-baseimage-ubuntu/actions/workflows/external_trigger.yml/dispatches 39 | else 40 | echo "Skipping branch due to no external trigger workflow present." >> $GITHUB_STEP_SUMMARY 41 | fi 42 | else 43 | echo "Skipping branch due to being detected as dev branch or having no external version." >> $GITHUB_STEP_SUMMARY 44 | fi 45 | done 46 | -------------------------------------------------------------------------------- /.github/workflows/greetings.yml: -------------------------------------------------------------------------------- 1 | name: Greetings 2 | 3 | on: [pull_request_target, issues] 4 | 5 | jobs: 6 | greeting: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/first-interaction@v1 10 | with: 11 | issue-message: 'Thanks for opening your first issue here! Be sure to follow the relevant issue templates, or risk having this issue marked as invalid.' 12 | pr-message: 'Thanks for opening this pull request! Be sure to follow the [pull request template](https://github.com/linuxserver/docker-baseimage-ubuntu/blob/noble/.github/PULL_REQUEST_TEMPLATE.md)!' 
13 | repo-token: ${{ secrets.GITHUB_TOKEN }} 14 | -------------------------------------------------------------------------------- /.github/workflows/package_trigger_scheduler.yml: -------------------------------------------------------------------------------- 1 | name: Package Trigger Scheduler 2 | 3 | on: 4 | schedule: 5 | - cron: '19 23 * * 2' 6 | workflow_dispatch: 7 | 8 | jobs: 9 | package-trigger-scheduler: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v4.1.1 13 | with: 14 | fetch-depth: '0' 15 | 16 | - name: Package Trigger Scheduler 17 | env: 18 | SKIP_PACKAGE_TRIGGER: ${{ vars.SKIP_PACKAGE_TRIGGER }} 19 | run: | 20 | printf "# Package trigger scheduler for docker-baseimage-ubuntu\n\n" >> $GITHUB_STEP_SUMMARY 21 | printf "Found the branches:\n\n%s\n" "$(git for-each-ref --format='- %(refname:lstrip=3)' refs/remotes)" >> $GITHUB_STEP_SUMMARY 22 | for br in $(git for-each-ref --format='%(refname:lstrip=3)' refs/remotes) 23 | do 24 | if [[ "${br}" == "HEAD" ]]; then 25 | printf "\nSkipping %s.\n" ${br} >> $GITHUB_STEP_SUMMARY 26 | continue 27 | fi 28 | printf "\n## Evaluating \`%s\`\n\n" ${br} >> $GITHUB_STEP_SUMMARY 29 | JENKINS_VARS=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/${br}/jenkins-vars.yml) 30 | if ! curl -sfX GET https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/${br}/Jenkinsfile >/dev/null 2>&1; then 31 | echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY 32 | echo "> No Jenkinsfile found. Branch is either deprecated or is an early dev branch." >> $GITHUB_STEP_SUMMARY 33 | skipped_branches="${skipped_branches}${br} " 34 | elif [[ "${br}" == $(yq -r '.ls_branch' <<< "${JENKINS_VARS}") ]]; then 35 | echo "Branch appears to be live; checking workflow." 
>> $GITHUB_STEP_SUMMARY 36 | README_VARS=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/${br}/readme-vars.yml) 37 | if [[ $(yq -r '.project_deprecation_status' <<< "${README_VARS}") == "true" ]]; then 38 | echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY 39 | echo "> Branch appears to be deprecated; skipping trigger." >> $GITHUB_STEP_SUMMARY 40 | skipped_branches="${skipped_branches}${br} " 41 | elif [[ $(yq -r '.skip_package_check' <<< "${JENKINS_VARS}") == "true" ]]; then 42 | echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY 43 | echo "> Skipping branch ${br} due to \`skip_package_check\` being set in \`jenkins-vars.yml\`." >> $GITHUB_STEP_SUMMARY 44 | skipped_branches="${skipped_branches}${br} " 45 | elif grep -q "^baseimage-ubuntu_${br}" <<< "${SKIP_PACKAGE_TRIGGER}"; then 46 | echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY 47 | echo "> Github organizational variable \`SKIP_PACKAGE_TRIGGER\` contains \`baseimage-ubuntu_${br}\`; skipping trigger." >> $GITHUB_STEP_SUMMARY 48 | skipped_branches="${skipped_branches}${br} " 49 | elif [ $(curl -s https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-baseimage-ubuntu/job/${br}/lastBuild/api/json | jq -r '.building' 2>/dev/null) == "true" ]; then 50 | echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY 51 | echo "> There already seems to be an active build on Jenkins; skipping package trigger for ${br}" >> $GITHUB_STEP_SUMMARY 52 | skipped_branches="${skipped_branches}${br} " 53 | else 54 | echo "> [!NOTE]" >> $GITHUB_STEP_SUMMARY 55 | echo "> Triggering package trigger for branch ${br}" >> $GITHUB_STEP_SUMMARY 56 | printf "> To disable, add \`baseimage-ubuntu_%s\` into the Github organizational variable \`SKIP_PACKAGE_TRIGGER\`.\n\n" "${br}" >> $GITHUB_STEP_SUMMARY 57 | triggered_branches="${triggered_branches}${br} " 58 | response=$(curl -iX POST \ 59 | https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-baseimage-ubuntu/job/${br}/buildWithParameters?PACKAGE_CHECK=true \ 60 
| --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} | grep -i location | sed "s|^[L|l]ocation: \(.*\)|\1|") 61 | if [[ -z "${response}" ]]; then 62 | echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY 63 | echo "> Jenkins build could not be triggered. Skipping branch." 64 | continue 65 | fi 66 | echo "Jenkins [job queue url](${response%$'\r'})" >> $GITHUB_STEP_SUMMARY 67 | echo "Sleeping 10 seconds until job starts" >> $GITHUB_STEP_SUMMARY 68 | sleep 10 69 | buildurl=$(curl -s "${response%$'\r'}api/json" | jq -r '.executable.url') 70 | buildurl="${buildurl%$'\r'}" 71 | echo "Jenkins job [build url](${buildurl})" >> $GITHUB_STEP_SUMMARY 72 | echo "Attempting to change the Jenkins job description" >> $GITHUB_STEP_SUMMARY 73 | if ! curl -ifX POST \ 74 | "${buildurl}submitDescription" \ 75 | --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} \ 76 | --data-urlencode "description=GHA package trigger https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" \ 77 | --data-urlencode "Submit=Submit"; then 78 | echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY 79 | echo "> Unable to change the Jenkins job description." 80 | fi 81 | sleep 20 82 | fi 83 | else 84 | echo "Skipping branch ${br} due to being detected as dev branch." 
>> $GITHUB_STEP_SUMMARY 85 | fi 86 | done 87 | if [[ -n "${triggered_branches}" ]] || [[ -n "${skipped_branches}" ]]; then 88 | if [[ -n "${triggered_branches}" ]]; then 89 | NOTIFY_BRANCHES="**Triggered:** ${triggered_branches} \n" 90 | NOTIFY_BUILD_URL="**Build URL:** https://ci.linuxserver.io/blue/organizations/jenkins/Docker-Pipeline-Builders%2Fdocker-baseimage-ubuntu/activity/ \n" 91 | echo "**** Package check build(s) triggered for branch(es): ${triggered_branches} ****" 92 | fi 93 | if [[ -n "${skipped_branches}" ]]; then 94 | NOTIFY_BRANCHES="${NOTIFY_BRANCHES}**Skipped:** ${skipped_branches} \n" 95 | fi 96 | echo "**** Notifying Discord ****" 97 | curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://cdn.discordapp.com/avatars/354986384542662657/df91181b3f1cf0ef1592fbe18e0962d7.png","embeds": [{"color": 9802903, 98 | "description": "**Package Check Build(s) for baseimage-ubuntu** \n'"${NOTIFY_BRANCHES}"''"${NOTIFY_BUILD_URL}"'"}], 99 | "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }} 100 | fi 101 | -------------------------------------------------------------------------------- /.github/workflows/permissions.yml: -------------------------------------------------------------------------------- 1 | name: Permission check 2 | on: 3 | pull_request_target: 4 | paths: 5 | - '**/run' 6 | - '**/finish' 7 | - '**/check' 8 | - 'root/migrations/*' 9 | 10 | jobs: 11 | permission_check: 12 | uses: linuxserver/github-workflows/.github/workflows/init-svc-executable-permissions.yml@v1 13 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Windows image file caches 2 | Thumbs.db 3 | ehthumbs.db 4 | 5 | # Folder config file 6 | Desktop.ini 7 | 8 | # Recycle Bin used on file shares 9 | $RECYCLE.BIN/ 10 | 11 | # Windows Installer files 12 | *.cab 13 | *.msi 14 | *.msm 15 | *.msp 16 | 17 | # Windows shortcuts 
18 | *.lnk 19 | 20 | # ========================= 21 | # Operating System Files 22 | # ========================= 23 | 24 | # OSX 25 | # ========================= 26 | 27 | .DS_Store 28 | .AppleDouble 29 | .LSOverride 30 | 31 | # Thumbnails 32 | ._* 33 | 34 | # Files that might appear on external disk 35 | .Spotlight-V100 36 | .Trashes 37 | 38 | # Directories potentially created on remote AFP share 39 | .AppleDB 40 | .AppleDesktop 41 | Network Trash Folder 42 | Temporary Items 43 | .apdisk 44 | .jenkins-external 45 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM alpine:3 AS rootfs-stage 4 | 5 | # environment 6 | ENV REL=noble 7 | ENV ARCH=amd64 8 | ENV TAG=oci-noble-24.04 9 | 10 | # install packages 11 | RUN \ 12 | apk add --no-cache \ 13 | bash \ 14 | curl \ 15 | git \ 16 | jq \ 17 | tzdata \ 18 | xz 19 | 20 | # grab base tarball 21 | RUN \ 22 | git clone --depth=1 https://git.launchpad.net/cloud-images/+oci/ubuntu-base -b ${TAG} /build && \ 23 | cd /build/oci && \ 24 | DIGEST=$(jq -r '.manifests[0].digest[7:]' < index.json) && \ 25 | cd /build/oci/blobs/sha256 && \ 26 | if jq -e '.layers // empty' < "${DIGEST}" >/dev/null 2>&1; then \ 27 | TARBALL=$(jq -r '.layers[0].digest[7:]' < ${DIGEST}); \ 28 | else \ 29 | MULTIDIGEST=$(jq -r ".manifests[] | select(.platform.architecture == \"${ARCH}\") | .digest[7:]" < ${DIGEST}) && \ 30 | TARBALL=$(jq -r '.layers[0].digest[7:]' < ${MULTIDIGEST}); \ 31 | fi && \ 32 | mkdir /root-out && \ 33 | tar xf \ 34 | ${TARBALL} -C \ 35 | /root-out && \ 36 | rm -rf \ 37 | /root-out/var/log/* \ 38 | /root-out/home/ubuntu \ 39 | /root-out/root/{.ssh,.bashrc,.profile} \ 40 | /build 41 | 42 | # set version for s6 overlay 43 | ARG S6_OVERLAY_VERSION="3.2.0.2" 44 | ARG S6_OVERLAY_ARCH="x86_64" 45 | 46 | # add s6 overlay 47 | ADD 
https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz /tmp 48 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-noarch.tar.xz 49 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${S6_OVERLAY_ARCH}.tar.xz /tmp 50 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-${S6_OVERLAY_ARCH}.tar.xz 51 | 52 | # add s6 optional symlinks 53 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-noarch.tar.xz /tmp 54 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-noarch.tar.xz && unlink /root-out/usr/bin/with-contenv 55 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-arch.tar.xz /tmp 56 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-arch.tar.xz 57 | 58 | # Runtime stage 59 | FROM scratch 60 | COPY --from=rootfs-stage /root-out/ / 61 | ARG BUILD_DATE 62 | ARG VERSION 63 | ARG MODS_VERSION="v3" 64 | ARG PKG_INST_VERSION="v1" 65 | ARG LSIOWN_VERSION="v1" 66 | ARG WITHCONTENV_VERSION="v1" 67 | LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}" 68 | LABEL maintainer="TheLamer" 69 | 70 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/docker-mods.${MODS_VERSION}" "/docker-mods" 71 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/package-install.${PKG_INST_VERSION}" "/etc/s6-overlay/s6-rc.d/init-mods-package-install/run" 72 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/lsiown.${LSIOWN_VERSION}" "/usr/bin/lsiown" 73 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/with-contenv.${WITHCONTENV_VERSION}" "/usr/bin/with-contenv" 74 | 75 | # set environment variables 76 | ARG DEBIAN_FRONTEND="noninteractive" 77 | ENV HOME="/root" \ 78 | LANGUAGE="en_US.UTF-8" \ 79 | 
LANG="en_US.UTF-8" \ 80 | TERM="xterm" \ 81 | S6_CMD_WAIT_FOR_SERVICES_MAXTIME="0" \ 82 | S6_VERBOSITY=1 \ 83 | S6_STAGE2_HOOK=/docker-mods \ 84 | VIRTUAL_ENV=/lsiopy \ 85 | PATH="/lsiopy/bin:$PATH" 86 | 87 | # copy sources 88 | COPY sources.list /etc/apt/ 89 | 90 | RUN \ 91 | echo "**** Ripped from Ubuntu Docker Logic ****" && \ 92 | rm -f /etc/apt/sources.list.d/ubuntu.sources && \ 93 | set -xe && \ 94 | echo '#!/bin/sh' \ 95 | > /usr/sbin/policy-rc.d && \ 96 | echo 'exit 101' \ 97 | >> /usr/sbin/policy-rc.d && \ 98 | chmod +x \ 99 | /usr/sbin/policy-rc.d && \ 100 | dpkg-divert --local --rename --add /sbin/initctl && \ 101 | cp -a \ 102 | /usr/sbin/policy-rc.d \ 103 | /sbin/initctl && \ 104 | sed -i \ 105 | 's/^exit.*/exit 0/' \ 106 | /sbin/initctl && \ 107 | echo 'force-unsafe-io' \ 108 | > /etc/dpkg/dpkg.cfg.d/docker-apt-speedup && \ 109 | echo 'DPkg::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' \ 110 | > /etc/apt/apt.conf.d/docker-clean && \ 111 | echo 'APT::Update::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' \ 112 | >> /etc/apt/apt.conf.d/docker-clean && \ 113 | echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' \ 114 | >> /etc/apt/apt.conf.d/docker-clean && \ 115 | echo 'Acquire::Languages "none";' \ 116 | > /etc/apt/apt.conf.d/docker-no-languages && \ 117 | echo 'Acquire::GzipIndexes "true"; Acquire::CompressionTypes::Order:: "gz";' \ 118 | > /etc/apt/apt.conf.d/docker-gzip-indexes && \ 119 | echo 'Apt::AutoRemove::SuggestsImportant "false";' \ 120 | > /etc/apt/apt.conf.d/docker-autoremove-suggests && \ 121 | mkdir -p /run/systemd && \ 122 | echo 'docker' \ 123 | > /run/systemd/container && \ 124 | echo "**** install apt-utils and locales ****" && \ 125 | apt-get update && \ 126 | apt-get upgrade -y && \ 127 | apt-get install -y \ 128 | apt-utils \ 129 | locales && \ 130 | echo "**** install 
packages ****" && \ 131 | apt-get install -y \ 132 | catatonit \ 133 | cron \ 134 | curl \ 135 | gnupg \ 136 | jq \ 137 | netcat-openbsd \ 138 | systemd-standalone-sysusers \ 139 | tzdata && \ 140 | echo "**** generate locale ****" && \ 141 | locale-gen en_US.UTF-8 && \ 142 | echo "**** create abc user and make our folders ****" && \ 143 | useradd -u 911 -U -d /config -s /bin/false abc && \ 144 | usermod -G users abc && \ 145 | mkdir -p \ 146 | /app \ 147 | /config \ 148 | /defaults \ 149 | /lsiopy && \ 150 | echo "**** cleanup ****" && \ 151 | userdel ubuntu && \ 152 | apt-get autoremove && \ 153 | apt-get clean && \ 154 | rm -rf \ 155 | /tmp/* \ 156 | /var/lib/apt/lists/* \ 157 | /var/tmp/* \ 158 | /var/log/* 159 | 160 | # add local files 161 | COPY root/ / 162 | 163 | ENTRYPOINT ["/init"] 164 | -------------------------------------------------------------------------------- /Dockerfile.aarch64: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | FROM alpine:3 AS rootfs-stage 4 | 5 | # environment 6 | ENV REL=noble 7 | ENV ARCH=arm64 8 | ENV TAG=oci-noble-24.04 9 | 10 | # install packages 11 | RUN \ 12 | apk add --no-cache \ 13 | bash \ 14 | curl \ 15 | git \ 16 | jq \ 17 | tzdata \ 18 | xz 19 | 20 | # grab base tarball 21 | RUN \ 22 | git clone --depth=1 https://git.launchpad.net/cloud-images/+oci/ubuntu-base -b ${TAG} /build && \ 23 | cd /build/oci && \ 24 | DIGEST=$(jq -r '.manifests[0].digest[7:]' < index.json) && \ 25 | cd /build/oci/blobs/sha256 && \ 26 | if jq -e '.layers // empty' < "${DIGEST}" >/dev/null 2>&1; then \ 27 | TARBALL=$(jq -r '.layers[0].digest[7:]' < ${DIGEST}); \ 28 | else \ 29 | MULTIDIGEST=$(jq -r ".manifests[] | select(.platform.architecture == \"${ARCH}\") | .digest[7:]" < ${DIGEST}) && \ 30 | TARBALL=$(jq -r '.layers[0].digest[7:]' < ${MULTIDIGEST}); \ 31 | fi && \ 32 | mkdir /root-out && \ 33 | tar xf \ 34 | ${TARBALL} -C \ 35 | /root-out && \ 36 | rm -rf \ 37 | 
/root-out/var/log/* \ 38 | /root-out/home/ubuntu \ 39 | /root-out/root/{.ssh,.bashrc,.profile} \ 40 | /build 41 | 42 | # set version for s6 overlay 43 | ARG S6_OVERLAY_VERSION="3.2.0.2" 44 | ARG S6_OVERLAY_ARCH="aarch64" 45 | 46 | # add s6 overlay 47 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-noarch.tar.xz /tmp 48 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-noarch.tar.xz 49 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-${S6_OVERLAY_ARCH}.tar.xz /tmp 50 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-${S6_OVERLAY_ARCH}.tar.xz 51 | 52 | # add s6 optional symlinks 53 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-noarch.tar.xz /tmp 54 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-noarch.tar.xz && unlink /root-out/usr/bin/with-contenv 55 | ADD https://github.com/just-containers/s6-overlay/releases/download/v${S6_OVERLAY_VERSION}/s6-overlay-symlinks-arch.tar.xz /tmp 56 | RUN tar -C /root-out -Jxpf /tmp/s6-overlay-symlinks-arch.tar.xz 57 | 58 | # Runtime stage 59 | FROM scratch 60 | COPY --from=rootfs-stage /root-out/ / 61 | ARG BUILD_DATE 62 | ARG VERSION 63 | ARG MODS_VERSION="v3" 64 | ARG PKG_INST_VERSION="v1" 65 | ARG LSIOWN_VERSION="v1" 66 | ARG WITHCONTENV_VERSION="v1" 67 | LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}" 68 | LABEL maintainer="TheLamer" 69 | 70 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/docker-mods.${MODS_VERSION}" "/docker-mods" 71 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/package-install.${PKG_INST_VERSION}" "/etc/s6-overlay/s6-rc.d/init-mods-package-install/run" 72 | ADD --chmod=755 "https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/lsiown.${LSIOWN_VERSION}" "/usr/bin/lsiown" 73 | ADD --chmod=755 
"https://raw.githubusercontent.com/linuxserver/docker-mods/mod-scripts/with-contenv.${WITHCONTENV_VERSION}" "/usr/bin/with-contenv" 74 | 75 | # set environment variables 76 | ARG DEBIAN_FRONTEND="noninteractive" 77 | ENV HOME="/root" \ 78 | LANGUAGE="en_US.UTF-8" \ 79 | LANG="en_US.UTF-8" \ 80 | TERM="xterm" \ 81 | S6_CMD_WAIT_FOR_SERVICES_MAXTIME="0" \ 82 | S6_VERBOSITY=1 \ 83 | S6_STAGE2_HOOK=/docker-mods \ 84 | VIRTUAL_ENV=/lsiopy \ 85 | PATH="/lsiopy/bin:$PATH" 86 | 87 | # copy sources 88 | COPY sources.list.arm /etc/apt/sources.list 89 | 90 | RUN \ 91 | echo "**** Ripped from Ubuntu Docker Logic ****" && \ 92 | rm -f /etc/apt/sources.list.d/ubuntu.sources && \ 93 | set -xe && \ 94 | echo '#!/bin/sh' \ 95 | > /usr/sbin/policy-rc.d && \ 96 | echo 'exit 101' \ 97 | >> /usr/sbin/policy-rc.d && \ 98 | chmod +x \ 99 | /usr/sbin/policy-rc.d && \ 100 | dpkg-divert --local --rename --add /sbin/initctl && \ 101 | cp -a \ 102 | /usr/sbin/policy-rc.d \ 103 | /sbin/initctl && \ 104 | sed -i \ 105 | 's/^exit.*/exit 0/' \ 106 | /sbin/initctl && \ 107 | echo 'force-unsafe-io' \ 108 | > /etc/dpkg/dpkg.cfg.d/docker-apt-speedup && \ 109 | echo 'DPkg::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' \ 110 | > /etc/apt/apt.conf.d/docker-clean && \ 111 | echo 'APT::Update::Post-Invoke { "rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true"; };' \ 112 | >> /etc/apt/apt.conf.d/docker-clean && \ 113 | echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' \ 114 | >> /etc/apt/apt.conf.d/docker-clean && \ 115 | echo 'Acquire::Languages "none";' \ 116 | > /etc/apt/apt.conf.d/docker-no-languages && \ 117 | echo 'Acquire::GzipIndexes "true"; Acquire::CompressionTypes::Order:: "gz";' \ 118 | > /etc/apt/apt.conf.d/docker-gzip-indexes && \ 119 | echo 'Apt::AutoRemove::SuggestsImportant "false";' \ 120 | > /etc/apt/apt.conf.d/docker-autoremove-suggests && \ 121 | 
mkdir -p /run/systemd && \ 122 | echo 'docker' \ 123 | > /run/systemd/container && \ 124 | echo "**** install apt-utils and locales ****" && \ 125 | apt-get update && \ 126 | apt-get upgrade -y && \ 127 | apt-get install -y \ 128 | apt-utils \ 129 | locales && \ 130 | echo "**** install packages ****" && \ 131 | apt-get install -y \ 132 | catatonit \ 133 | cron \ 134 | curl \ 135 | gnupg \ 136 | jq \ 137 | netcat-openbsd \ 138 | systemd-standalone-sysusers \ 139 | tzdata && \ 140 | echo "**** generate locale ****" && \ 141 | locale-gen en_US.UTF-8 && \ 142 | echo "**** create abc user and make our folders ****" && \ 143 | useradd -u 911 -U -d /config -s /bin/false abc && \ 144 | usermod -G users abc && \ 145 | mkdir -p \ 146 | /app \ 147 | /config \ 148 | /defaults \ 149 | /lsiopy && \ 150 | echo "**** cleanup ****" && \ 151 | userdel ubuntu && \ 152 | apt-get autoremove && \ 153 | apt-get clean && \ 154 | rm -rf \ 155 | /tmp/* \ 156 | /var/lib/apt/lists/* \ 157 | /var/tmp/* \ 158 | /var/log/* 159 | 160 | # add local files 161 | COPY root/ / 162 | 163 | ENTRYPOINT ["/init"] 164 | -------------------------------------------------------------------------------- /Jenkinsfile: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent { 3 | label 'X86-64-MULTI' 4 | } 5 | options { 6 | buildDiscarder(logRotator(numToKeepStr: '10', daysToKeepStr: '60')) 7 | parallelsAlwaysFailFast() 8 | } 9 | // Input to determine if this is a package check 10 | parameters { 11 | string(defaultValue: 'false', description: 'package check run', name: 'PACKAGE_CHECK') 12 | } 13 | // Configuration for the variables used for this specific repo 14 | environment { 15 | BUILDS_DISCORD=credentials('build_webhook_url') 16 | GITHUB_TOKEN=credentials('498b4638-2d02-4ce5-832d-8a57d01d97ab') 17 | GITLAB_TOKEN=credentials('b6f0f1dd-6952-4cf6-95d1-9c06380283f0') 18 | GITLAB_NAMESPACE=credentials('gitlab-namespace-id') 19 | 
DOCKERHUB_TOKEN=credentials('docker-hub-ci-pat') 20 | QUAYIO_API_TOKEN=credentials('quayio-repo-api-token') 21 | GIT_SIGNING_KEY=credentials('484fbca6-9a4f-455e-b9e3-97ac98785f5f') 22 | BUILD_VERSION_ARG = 'OS' 23 | LS_USER = 'linuxserver' 24 | LS_REPO = 'docker-baseimage-ubuntu' 25 | CONTAINER_NAME = 'baseimage-ubuntu' 26 | DOCKERHUB_IMAGE = 'lsiobase/ubuntu' 27 | DEV_DOCKERHUB_IMAGE = 'lsiodev/ubuntu' 28 | PR_DOCKERHUB_IMAGE = 'lspipepr/ubuntu' 29 | DIST_IMAGE = 'ubuntu' 30 | MULTIARCH='true' 31 | CI='true' 32 | CI_WEB='false' 33 | CI_PORT='80' 34 | CI_SSL='true' 35 | CI_DELAY='30' 36 | CI_DOCKERENV='LSIO_FIRST_PARTY=true' 37 | CI_AUTH='' 38 | CI_WEBPATH='' 39 | } 40 | stages { 41 | stage("Set git config"){ 42 | steps{ 43 | sh '''#!/bin/bash 44 | cat ${GIT_SIGNING_KEY} > /config/.ssh/id_sign 45 | chmod 600 /config/.ssh/id_sign 46 | ssh-keygen -y -f /config/.ssh/id_sign > /config/.ssh/id_sign.pub 47 | echo "Using $(ssh-keygen -lf /config/.ssh/id_sign) to sign commits" 48 | git config --global gpg.format ssh 49 | git config --global user.signingkey /config/.ssh/id_sign 50 | git config --global commit.gpgsign true 51 | ''' 52 | } 53 | } 54 | // Setup all the basic environment variables needed for the build 55 | stage("Set ENV Variables base"){ 56 | steps{ 57 | echo "Running on node: ${NODE_NAME}" 58 | sh '''#! 
/bin/bash 59 | echo "Pruning builder" 60 | docker builder prune -f --builder container || : 61 | containers=$(docker ps -q) 62 | if [[ -n "${containers}" ]]; then 63 | BUILDX_CONTAINER_ID=$(docker ps -qf 'name=buildx_buildkit') 64 | for container in ${containers}; do 65 | if [[ "${container}" == "${BUILDX_CONTAINER_ID}" ]]; then 66 | echo "skipping buildx container in docker stop" 67 | else 68 | echo "Stopping container ${container}" 69 | docker stop ${container} 70 | fi 71 | done 72 | fi 73 | docker system prune -f --volumes || : 74 | docker image prune -af || : 75 | ''' 76 | script{ 77 | env.EXIT_STATUS = '' 78 | env.LS_RELEASE = sh( 79 | script: '''docker run --rm quay.io/skopeo/stable:v1 inspect docker://ghcr.io/${LS_USER}/${CONTAINER_NAME}:noble 2>/dev/null | jq -r '.Labels.build_version' | awk '{print $3}' | grep '\\-ls' || : ''', 80 | returnStdout: true).trim() 81 | env.LS_RELEASE_NOTES = sh( 82 | script: '''cat readme-vars.yml | awk -F \\" '/date: "[0-9][0-9].[0-9][0-9].[0-9][0-9]:/ {print $4;exit;}' | sed -E ':a;N;$!ba;s/\\r{0,1}\\n/\\\\n/g' ''', 83 | returnStdout: true).trim() 84 | env.GITHUB_DATE = sh( 85 | script: '''date '+%Y-%m-%dT%H:%M:%S%:z' ''', 86 | returnStdout: true).trim() 87 | env.COMMIT_SHA = sh( 88 | script: '''git rev-parse HEAD''', 89 | returnStdout: true).trim() 90 | env.GH_DEFAULT_BRANCH = sh( 91 | script: '''git remote show origin | grep "HEAD branch:" | sed 's|.*HEAD branch: ||' ''', 92 | returnStdout: true).trim() 93 | env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/commit/' + env.GIT_COMMIT 94 | env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DOCKERHUB_IMAGE + '/tags/' 95 | env.PULL_REQUEST = env.CHANGE_ID 96 | env.TEMPLATED_FILES = 'Jenkinsfile README.md LICENSE .editorconfig ./.github/CONTRIBUTING.md ./.github/FUNDING.yml ./.github/ISSUE_TEMPLATE/config.yml ./.github/ISSUE_TEMPLATE/issue.bug.yml ./.github/ISSUE_TEMPLATE/issue.feature.yml ./.github/PULL_REQUEST_TEMPLATE.md 
./.github/workflows/external_trigger_scheduler.yml ./.github/workflows/greetings.yml ./.github/workflows/package_trigger_scheduler.yml ./.github/workflows/call_issue_pr_tracker.yml ./.github/workflows/call_issues_cron.yml ./.github/workflows/permissions.yml ./.github/workflows/external_trigger.yml' 97 | } 98 | sh '''#! /bin/bash 99 | echo "The default github branch detected as ${GH_DEFAULT_BRANCH}" ''' 100 | script{ 101 | env.LS_RELEASE_NUMBER = sh( 102 | script: '''echo ${LS_RELEASE} |sed 's/^.*-ls//g' ''', 103 | returnStdout: true).trim() 104 | } 105 | script{ 106 | env.LS_TAG_NUMBER = sh( 107 | script: '''#! /bin/bash 108 | tagsha=$(git rev-list -n 1 noble-${LS_RELEASE} 2>/dev/null) 109 | if [ "${tagsha}" == "${COMMIT_SHA}" ]; then 110 | echo ${LS_RELEASE_NUMBER} 111 | elif [ -z "${GIT_COMMIT}" ]; then 112 | echo ${LS_RELEASE_NUMBER} 113 | else 114 | echo $((${LS_RELEASE_NUMBER} + 1)) 115 | fi''', 116 | returnStdout: true).trim() 117 | } 118 | } 119 | } 120 | /* ####################### 121 | Package Version Tagging 122 | ####################### */ 123 | // Grab the current package versions in Git to determine package tag 124 | stage("Set Package tag"){ 125 | steps{ 126 | script{ 127 | env.PACKAGE_TAG = sh( 128 | script: '''#!/bin/bash 129 | if [ -e package_versions.txt ] ; then 130 | cat package_versions.txt | md5sum | cut -c1-8 131 | else 132 | echo none 133 | fi''', 134 | returnStdout: true).trim() 135 | } 136 | } 137 | } 138 | /* ######################## 139 | External Release Tagging 140 | ######################## */ 141 | // If this is an os release set release type to none to indicate no external release 142 | stage("Set ENV os"){ 143 | steps{ 144 | script{ 145 | env.EXT_RELEASE = env.PACKAGE_TAG 146 | env.RELEASE_LINK = 'none' 147 | } 148 | } 149 | } 150 | // Sanitize the release tag and strip illegal docker or github characters 151 | stage("Sanitize tag"){ 152 | steps{ 153 | script{ 154 | env.EXT_RELEASE_CLEAN = sh( 155 | script: '''echo ${EXT_RELEASE} | 
sed 's/[~,%@+;:/ ]//g' ''', 156 | returnStdout: true).trim() 157 | 158 | def semver = env.EXT_RELEASE_CLEAN =~ /(\d+)\.(\d+)\.(\d+)/ 159 | if (semver.find()) { 160 | env.SEMVER = "${semver[0][1]}.${semver[0][2]}.${semver[0][3]}" 161 | } else { 162 | semver = env.EXT_RELEASE_CLEAN =~ /(\d+)\.(\d+)(?:\.(\d+))?(.*)/ 163 | if (semver.find()) { 164 | if (semver[0][3]) { 165 | env.SEMVER = "${semver[0][1]}.${semver[0][2]}.${semver[0][3]}" 166 | } else if (!semver[0][3] && !semver[0][4]) { 167 | env.SEMVER = "${semver[0][1]}.${semver[0][2]}.${(new Date()).format('YYYYMMdd')}" 168 | } 169 | } 170 | } 171 | 172 | if (env.SEMVER != null) { 173 | if (BRANCH_NAME != "${env.GH_DEFAULT_BRANCH}") { 174 | env.SEMVER = "${env.SEMVER}-${BRANCH_NAME}" 175 | } 176 | println("SEMVER: ${env.SEMVER}") 177 | } else { 178 | println("No SEMVER detected") 179 | } 180 | 181 | } 182 | } 183 | } 184 | // If this is a noble build use live docker endpoints 185 | stage("Set ENV live build"){ 186 | when { 187 | branch "noble" 188 | environment name: 'CHANGE_ID', value: '' 189 | } 190 | steps { 191 | script{ 192 | env.IMAGE = env.DOCKERHUB_IMAGE 193 | env.GITHUBIMAGE = 'ghcr.io/' + env.LS_USER + '/' + env.CONTAINER_NAME 194 | env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/' + env.CONTAINER_NAME 195 | env.QUAYIMAGE = 'quay.io/linuxserver.io/' + env.CONTAINER_NAME 196 | if (env.MULTIARCH == 'true') { 197 | env.CI_TAGS = 'amd64-noble-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER + '|arm64v8-noble-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER 198 | } else { 199 | env.CI_TAGS = 'noble-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER 200 | } 201 | env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER 202 | env.META_TAG = 'noble-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER 203 | env.EXT_RELEASE_TAG = 'noble-version-' + env.EXT_RELEASE_CLEAN 204 | env.BUILDCACHE = 
'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache' 205 | } 206 | } 207 | } 208 | // If this is a dev build use dev docker endpoints 209 | stage("Set ENV dev build"){ 210 | when { 211 | not {branch "noble"} 212 | environment name: 'CHANGE_ID', value: '' 213 | } 214 | steps { 215 | script{ 216 | env.IMAGE = env.DEV_DOCKERHUB_IMAGE 217 | env.GITHUBIMAGE = 'ghcr.io/' + env.LS_USER + '/lsiodev-' + env.CONTAINER_NAME 218 | env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/lsiodev-' + env.CONTAINER_NAME 219 | env.QUAYIMAGE = 'quay.io/linuxserver.io/lsiodev-' + env.CONTAINER_NAME 220 | if (env.MULTIARCH == 'true') { 221 | env.CI_TAGS = 'amd64-noble-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '|arm64v8-noble-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA 222 | } else { 223 | env.CI_TAGS = 'noble-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA 224 | } 225 | env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA 226 | env.META_TAG = 'noble-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA 227 | env.EXT_RELEASE_TAG = 'noble-version-' + env.EXT_RELEASE_CLEAN 228 | env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DEV_DOCKERHUB_IMAGE + '/tags/' 229 | env.BUILDCACHE = 'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache' 230 | } 231 | } 232 | } 233 | // If this is a pull request build use dev docker endpoints 234 | stage("Set ENV PR build"){ 235 | when { 236 | not {environment name: 'CHANGE_ID', value: ''} 237 | } 238 | steps { 239 | script{ 240 | env.IMAGE = env.PR_DOCKERHUB_IMAGE 241 | env.GITHUBIMAGE = 
'ghcr.io/' + env.LS_USER + '/lspipepr-' + env.CONTAINER_NAME 242 | env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/lspipepr-' + env.CONTAINER_NAME 243 | env.QUAYIMAGE = 'quay.io/linuxserver.io/lspipepr-' + env.CONTAINER_NAME 244 | if (env.MULTIARCH == 'true') { 245 | env.CI_TAGS = 'amd64-noble-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST + '|arm64v8-noble-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST 246 | } else { 247 | env.CI_TAGS = 'noble-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST 248 | } 249 | env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST 250 | env.META_TAG = 'noble-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST 251 | env.EXT_RELEASE_TAG = 'noble-version-' + env.EXT_RELEASE_CLEAN 252 | env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/pull/' + env.PULL_REQUEST 253 | env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.PR_DOCKERHUB_IMAGE + '/tags/' 254 | env.BUILDCACHE = 'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache' 255 | } 256 | } 257 | } 258 | // Run ShellCheck 259 | stage('ShellCheck') { 260 | when { 261 | environment name: 'CI', value: 'true' 262 | } 263 | steps { 264 | withCredentials([ 265 | string(credentialsId: 'ci-tests-s3-key-id', variable: 'S3_KEY'), 266 | string(credentialsId: 'ci-tests-s3-secret-access-key', variable: 'S3_SECRET') 267 | ]) { 268 | script{ 269 | env.SHELLCHECK_URL = 'https://ci-tests.linuxserver.io/' + env.IMAGE + '/' + env.META_TAG + '/shellcheck-result.xml' 270 | } 271 | sh '''curl -sL 
https://raw.githubusercontent.com/linuxserver/docker-jenkins-builder/master/checkrun.sh | /bin/bash''' 272 | sh '''#! /bin/bash 273 | docker run --rm \ 274 | -v ${WORKSPACE}:/mnt \ 275 | -e AWS_ACCESS_KEY_ID=\"${S3_KEY}\" \ 276 | -e AWS_SECRET_ACCESS_KEY=\"${S3_SECRET}\" \ 277 | ghcr.io/linuxserver/baseimage-alpine:3.20 s6-envdir -fn -- /var/run/s6/container_environment /bin/bash -c "\ 278 | apk add --no-cache python3 && \ 279 | python3 -m venv /lsiopy && \ 280 | pip install --no-cache-dir -U pip && \ 281 | pip install --no-cache-dir s3cmd && \ 282 | s3cmd put --no-preserve --acl-public -m text/xml /mnt/shellcheck-result.xml s3://ci-tests.linuxserver.io/${IMAGE}/${META_TAG}/shellcheck-result.xml" || :''' 283 | } 284 | } 285 | } 286 | // Use helper containers to render templated files 287 | stage('Update-Templates') { 288 | when { 289 | branch "noble" 290 | environment name: 'CHANGE_ID', value: '' 291 | expression { 292 | env.CONTAINER_NAME != null 293 | } 294 | } 295 | steps { 296 | sh '''#! 
/bin/bash 297 | set -e 298 | TEMPDIR=$(mktemp -d) 299 | docker pull ghcr.io/linuxserver/jenkins-builder:latest 300 | # Cloned repo paths for templating: 301 | # ${TEMPDIR}/docker-${CONTAINER_NAME}: Cloned branch noble of ${LS_USER}/${LS_REPO} for running the jenkins builder on 302 | # ${TEMPDIR}/repo/${LS_REPO}: Cloned branch noble of ${LS_USER}/${LS_REPO} for commiting various templated file changes and pushing back to Github 303 | # ${TEMPDIR}/docs/docker-documentation: Cloned docs repo for pushing docs updates to Github 304 | # ${TEMPDIR}/unraid/docker-templates: Cloned docker-templates repo to check for logos 305 | # ${TEMPDIR}/unraid/templates: Cloned templates repo for commiting unraid template changes and pushing back to Github 306 | git clone --branch noble --depth 1 https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/docker-${CONTAINER_NAME} 307 | docker run --rm -v ${TEMPDIR}/docker-${CONTAINER_NAME}:/tmp -e LOCAL=true -e PUID=$(id -u) -e PGID=$(id -g) ghcr.io/linuxserver/jenkins-builder:latest 308 | echo "Starting Stage 1 - Jenkinsfile update" 309 | if [[ "$(md5sum Jenkinsfile | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/Jenkinsfile | awk '{ print $1 }')" ]]; then 310 | mkdir -p ${TEMPDIR}/repo 311 | git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO} 312 | cd ${TEMPDIR}/repo/${LS_REPO} 313 | git checkout -f noble 314 | cp ${TEMPDIR}/docker-${CONTAINER_NAME}/Jenkinsfile ${TEMPDIR}/repo/${LS_REPO}/ 315 | git add Jenkinsfile 316 | git commit -m 'Bot Updating Templated Files' 317 | git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git noble 318 | git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git noble 319 | echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} 320 | echo "Updating Jenkinsfile and exiting build, new one will trigger based on commit" 321 | rm -Rf ${TEMPDIR} 322 | exit 0 323 | else 324 | echo "Jenkinsfile is up to date." 
325 | fi 326 | echo "Starting Stage 2 - Delete old templates" 327 | OLD_TEMPLATES=".github/ISSUE_TEMPLATE.md .github/ISSUE_TEMPLATE/issue.bug.md .github/ISSUE_TEMPLATE/issue.feature.md .github/workflows/call_invalid_helper.yml .github/workflows/stale.yml .github/workflows/package_trigger.yml" 328 | for i in ${OLD_TEMPLATES}; do 329 | if [[ -f "${i}" ]]; then 330 | TEMPLATES_TO_DELETE="${i} ${TEMPLATES_TO_DELETE}" 331 | fi 332 | done 333 | if [[ -n "${TEMPLATES_TO_DELETE}" ]]; then 334 | mkdir -p ${TEMPDIR}/repo 335 | git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO} 336 | cd ${TEMPDIR}/repo/${LS_REPO} 337 | git checkout -f noble 338 | for i in ${TEMPLATES_TO_DELETE}; do 339 | git rm "${i}" 340 | done 341 | git commit -m 'Bot Updating Templated Files' 342 | git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git noble 343 | git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git noble 344 | echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} 345 | echo "Deleting old/deprecated templates and exiting build, new one will trigger based on commit" 346 | rm -Rf ${TEMPDIR} 347 | exit 0 348 | else 349 | echo "No templates to delete" 350 | fi 351 | echo "Starting Stage 3 - Update templates" 352 | CURRENTHASH=$(grep -hs ^ ${TEMPLATED_FILES} | md5sum | cut -c1-8) 353 | cd ${TEMPDIR}/docker-${CONTAINER_NAME} 354 | NEWHASH=$(grep -hs ^ ${TEMPLATED_FILES} | md5sum | cut -c1-8) 355 | if [[ "${CURRENTHASH}" != "${NEWHASH}" ]] || ! 
grep -q '.jenkins-external' "${WORKSPACE}/.gitignore" 2>/dev/null; then 356 | mkdir -p ${TEMPDIR}/repo 357 | git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO} 358 | cd ${TEMPDIR}/repo/${LS_REPO} 359 | git checkout -f noble 360 | cd ${TEMPDIR}/docker-${CONTAINER_NAME} 361 | mkdir -p ${TEMPDIR}/repo/${LS_REPO}/.github/workflows 362 | mkdir -p ${TEMPDIR}/repo/${LS_REPO}/.github/ISSUE_TEMPLATE 363 | cp --parents ${TEMPLATED_FILES} ${TEMPDIR}/repo/${LS_REPO}/ || : 364 | cp --parents readme-vars.yml ${TEMPDIR}/repo/${LS_REPO}/ || : 365 | cd ${TEMPDIR}/repo/${LS_REPO}/ 366 | if ! grep -q '.jenkins-external' .gitignore 2>/dev/null; then 367 | echo ".jenkins-external" >> .gitignore 368 | git add .gitignore 369 | fi 370 | git add readme-vars.yml ${TEMPLATED_FILES} 371 | git commit -m 'Bot Updating Templated Files' 372 | git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git noble 373 | git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git noble 374 | echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} 375 | echo "Updating templates and exiting build, new one will trigger based on commit" 376 | rm -Rf ${TEMPDIR} 377 | exit 0 378 | else 379 | echo "false" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} 380 | echo "No templates to update" 381 | fi 382 | echo "Starting Stage 4 - External repo updates: Docs, Unraid Template and Readme Sync to Docker Hub" 383 | mkdir -p ${TEMPDIR}/docs 384 | git clone --depth=1 https://github.com/linuxserver/docker-documentation.git ${TEMPDIR}/docs/docker-documentation 385 | if [[ "${BRANCH_NAME}" == "${GH_DEFAULT_BRANCH}" ]] && [[ (! 
-f ${TEMPDIR}/docs/docker-documentation/docs/images/docker-${CONTAINER_NAME}.md) || ("$(md5sum ${TEMPDIR}/docs/docker-documentation/docs/images/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')") ]]; then 386 | cp ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/docker-${CONTAINER_NAME}.md ${TEMPDIR}/docs/docker-documentation/docs/images/ 387 | cd ${TEMPDIR}/docs/docker-documentation 388 | GH_DOCS_DEFAULT_BRANCH=$(git remote show origin | grep "HEAD branch:" | sed 's|.*HEAD branch: ||') 389 | git add docs/images/docker-${CONTAINER_NAME}.md 390 | echo "Updating docs repo" 391 | git commit -m 'Bot Updating Documentation' 392 | git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git ${GH_DOCS_DEFAULT_BRANCH} --rebase 393 | git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git ${GH_DOCS_DEFAULT_BRANCH} || \ 394 | (MAXWAIT="10" && echo "Push to docs failed, trying again in ${MAXWAIT} seconds" && \ 395 | sleep $((RANDOM % MAXWAIT)) && \ 396 | git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git ${GH_DOCS_DEFAULT_BRANCH} --rebase && \ 397 | git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git ${GH_DOCS_DEFAULT_BRANCH}) 398 | else 399 | echo "Docs update not needed, skipping" 400 | fi 401 | if [[ "${BRANCH_NAME}" == "${GH_DEFAULT_BRANCH}" ]]; then 402 | if [[ $(cat ${TEMPDIR}/docker-${CONTAINER_NAME}/README.md | wc -m) -gt 25000 ]]; then 403 | echo "Readme is longer than 25,000 characters. 
Syncing the lite version to Docker Hub" 404 | DH_README_SYNC_PATH="${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/README.lite" 405 | else 406 | echo "Syncing readme to Docker Hub" 407 | DH_README_SYNC_PATH="${TEMPDIR}/docker-${CONTAINER_NAME}/README.md" 408 | fi 409 | if curl -s https://hub.docker.com/v2/namespaces/${DOCKERHUB_IMAGE%%/*}/repositories/${DOCKERHUB_IMAGE##*/}/tags | jq -r '.message' | grep -q 404; then 410 | echo "Docker Hub endpoint doesn't exist. Creating endpoint first." 411 | DH_TOKEN=$(curl -d '{"username":"linuxserverci", "password":"'${DOCKERHUB_TOKEN}'"}' -H "Content-Type: application/json" -X POST https://hub.docker.com/v2/users/login | jq -r '.token') 412 | curl -s \ 413 | -H "Authorization: JWT ${DH_TOKEN}" \ 414 | -H "Content-Type: application/json" \ 415 | -X POST \ 416 | -d '{"name":"'${DOCKERHUB_IMAGE##*/}'", "namespace":"'${DOCKERHUB_IMAGE%%/*}'"}' \ 417 | https://hub.docker.com/v2/repositories/ || : 418 | fi 419 | DH_TOKEN=$(curl -d '{"username":"linuxserverci", "password":"'${DOCKERHUB_TOKEN}'"}' -H "Content-Type: application/json" -X POST https://hub.docker.com/v2/users/login | jq -r '.token') 420 | curl -s \ 421 | -H "Authorization: JWT ${DH_TOKEN}" \ 422 | -H "Content-Type: application/json" \ 423 | -X PATCH \ 424 | -d "{\\"full_description\\":$(jq -Rsa . ${DH_README_SYNC_PATH})}" \ 425 | https://hub.docker.com/v2/repositories/${DOCKERHUB_IMAGE} || : 426 | else 427 | echo "Not the default Github branch. Skipping readme sync to Docker Hub." 
428 | fi 429 | rm -Rf ${TEMPDIR}''' 430 | script{ 431 | env.FILES_UPDATED = sh( 432 | script: '''cat /tmp/${COMMIT_SHA}-${BUILD_NUMBER}''', 433 | returnStdout: true).trim() 434 | } 435 | } 436 | } 437 | // Exit the build if the Templated files were just updated 438 | stage('Template-exit') { 439 | when { 440 | branch "noble" 441 | environment name: 'CHANGE_ID', value: '' 442 | environment name: 'FILES_UPDATED', value: 'true' 443 | expression { 444 | env.CONTAINER_NAME != null 445 | } 446 | } 447 | steps { 448 | script{ 449 | env.EXIT_STATUS = 'ABORTED' 450 | } 451 | } 452 | } 453 | // If this is a noble build check the S6 service file perms 454 | stage("Check S6 Service file Permissions"){ 455 | when { 456 | branch "noble" 457 | environment name: 'CHANGE_ID', value: '' 458 | environment name: 'EXIT_STATUS', value: '' 459 | } 460 | steps { 461 | script{ 462 | sh '''#! /bin/bash 463 | WRONG_PERM=$(find ./ -path "./.git" -prune -o \\( -name "run" -o -name "finish" -o -name "check" \\) -not -perm -u=x,g=x,o=x -print) 464 | if [[ -n "${WRONG_PERM}" ]]; then 465 | echo "The following S6 service files are missing the executable bit; canceling the faulty build: ${WRONG_PERM}" 466 | exit 1 467 | else 468 | echo "S6 service file perms look good." 
469 | fi ''' 470 | } 471 | } 472 | } 473 | /* ####################### 474 | GitLab Mirroring and Quay.io Repo Visibility 475 | ####################### */ 476 | // Ping into Gitlab to mirror this repo and have a registry endpoint & mark this repo on Quay.io as public 477 | stage("GitLab Mirror and Quay.io Visibility"){ 478 | when { 479 | environment name: 'EXIT_STATUS', value: '' 480 | } 481 | steps{ 482 | sh '''curl -H "Content-Type: application/json" -H "Private-Token: ${GITLAB_TOKEN}" -X POST https://gitlab.com/api/v4/projects \ 483 | -d '{"namespace_id":'${GITLAB_NAMESPACE}',\ 484 | "name":"'${LS_REPO}'", 485 | "mirror":true,\ 486 | "import_url":"https://github.com/linuxserver/'${LS_REPO}'.git",\ 487 | "issues_access_level":"disabled",\ 488 | "merge_requests_access_level":"disabled",\ 489 | "repository_access_level":"enabled",\ 490 | "visibility":"public"}' ''' 491 | sh '''curl -H "Private-Token: ${GITLAB_TOKEN}" -X PUT "https://gitlab.com/api/v4/projects/Linuxserver.io%2F${LS_REPO}" \ 492 | -d "mirror=true&import_url=https://github.com/linuxserver/${LS_REPO}.git" ''' 493 | sh '''curl -H "Content-Type: application/json" -H "Authorization: Bearer ${QUAYIO_API_TOKEN}" -X POST "https://quay.io/api/v1/repository${QUAYIMAGE/quay.io/}/changevisibility" \ 494 | -d '{"visibility":"public"}' ||: ''' 495 | } 496 | } 497 | /* ############### 498 | Build Container 499 | ############### */ 500 | // Build Docker container for push to LS Repo 501 | stage('Build-Single') { 502 | when { 503 | expression { 504 | env.MULTIARCH == 'false' || params.PACKAGE_CHECK == 'true' 505 | } 506 | environment name: 'EXIT_STATUS', value: '' 507 | } 508 | steps { 509 | echo "Running on node: ${NODE_NAME}" 510 | sh "docker buildx build \ 511 | --label \"org.opencontainers.image.created=${GITHUB_DATE}\" \ 512 | --label \"org.opencontainers.image.authors=linuxserver.io\" \ 513 | --label \"org.opencontainers.image.url=https://github.com/linuxserver/docker-baseimage-ubuntu/packages\" \ 514 | --label 
\"org.opencontainers.image.documentation=https://docs.linuxserver.io/images/docker-baseimage-ubuntu\" \ 515 | --label \"org.opencontainers.image.source=https://github.com/linuxserver/docker-baseimage-ubuntu\" \ 516 | --label \"org.opencontainers.image.version=${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}\" \ 517 | --label \"org.opencontainers.image.revision=${COMMIT_SHA}\" \ 518 | --label \"org.opencontainers.image.vendor=linuxserver.io\" \ 519 | --label \"org.opencontainers.image.licenses=GPL-3.0-only\" \ 520 | --label \"org.opencontainers.image.ref.name=${COMMIT_SHA}\" \ 521 | --label \"org.opencontainers.image.title=Baseimage-ubuntu\" \ 522 | --label \"org.opencontainers.image.description=baseimage-ubuntu image by linuxserver.io\" \ 523 | --no-cache --pull -t ${IMAGE}:${META_TAG} --platform=linux/amd64 \ 524 | --provenance=true --sbom=true --builder=container --load \ 525 | --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." 526 | sh '''#! /bin/bash 527 | set -e 528 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 529 | for i in "${CACHE[@]}"; do 530 | docker tag ${IMAGE}:${META_TAG} ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} 531 | done 532 | ''' 533 | withCredentials([ 534 | [ 535 | $class: 'UsernamePasswordMultiBinding', 536 | credentialsId: 'Quay.io-Robot', 537 | usernameVariable: 'QUAYUSER', 538 | passwordVariable: 'QUAYPASS' 539 | ] 540 | ]) { 541 | retry_backoff(5,5) { 542 | sh '''#! 
/bin/bash 543 | set -e 544 | echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin 545 | echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin 546 | echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin 547 | echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin 548 | if [[ "${PACKAGE_CHECK}" != "true" ]]; then 549 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 550 | for i in "${CACHE[@]}"; do 551 | docker push ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} & 552 | done 553 | for p in $(jobs -p); do 554 | wait "$p" || { echo "job $p failed" >&2; exit 1; } 555 | done 556 | fi 557 | ''' 558 | } 559 | } 560 | } 561 | } 562 | // Build MultiArch Docker containers for push to LS Repo 563 | stage('Build-Multi') { 564 | when { 565 | allOf { 566 | environment name: 'MULTIARCH', value: 'true' 567 | expression { params.PACKAGE_CHECK == 'false' } 568 | } 569 | environment name: 'EXIT_STATUS', value: '' 570 | } 571 | parallel { 572 | stage('Build X86') { 573 | steps { 574 | echo "Running on node: ${NODE_NAME}" 575 | sh "docker buildx build \ 576 | --label \"org.opencontainers.image.created=${GITHUB_DATE}\" \ 577 | --label \"org.opencontainers.image.authors=linuxserver.io\" \ 578 | --label \"org.opencontainers.image.url=https://github.com/linuxserver/docker-baseimage-ubuntu/packages\" \ 579 | --label \"org.opencontainers.image.documentation=https://docs.linuxserver.io/images/docker-baseimage-ubuntu\" \ 580 | --label \"org.opencontainers.image.source=https://github.com/linuxserver/docker-baseimage-ubuntu\" \ 581 | --label \"org.opencontainers.image.version=${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}\" \ 582 | --label \"org.opencontainers.image.revision=${COMMIT_SHA}\" \ 583 | --label \"org.opencontainers.image.vendor=linuxserver.io\" \ 584 | --label \"org.opencontainers.image.licenses=GPL-3.0-only\" \ 585 | --label \"org.opencontainers.image.ref.name=${COMMIT_SHA}\" \ 586 | --label 
\"org.opencontainers.image.title=Baseimage-ubuntu\" \ 587 | --label \"org.opencontainers.image.description=baseimage-ubuntu image by linuxserver.io\" \ 588 | --no-cache --pull -t ${IMAGE}:amd64-${META_TAG} --platform=linux/amd64 \ 589 | --provenance=true --sbom=true --builder=container --load \ 590 | --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." 591 | sh '''#! /bin/bash 592 | set -e 593 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 594 | for i in "${CACHE[@]}"; do 595 | docker tag ${IMAGE}:amd64-${META_TAG} ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} 596 | done 597 | ''' 598 | withCredentials([ 599 | [ 600 | $class: 'UsernamePasswordMultiBinding', 601 | credentialsId: 'Quay.io-Robot', 602 | usernameVariable: 'QUAYUSER', 603 | passwordVariable: 'QUAYPASS' 604 | ] 605 | ]) { 606 | retry_backoff(5,5) { 607 | sh '''#! /bin/bash 608 | set -e 609 | echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin 610 | echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin 611 | echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin 612 | echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin 613 | if [[ "${PACKAGE_CHECK}" != "true" ]]; then 614 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 615 | for i in "${CACHE[@]}"; do 616 | docker push ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} & 617 | done 618 | for p in $(jobs -p); do 619 | wait "$p" || { echo "job $p failed" >&2; exit 1; } 620 | done 621 | fi 622 | ''' 623 | } 624 | } 625 | } 626 | } 627 | stage('Build ARM64') { 628 | agent { 629 | label 'ARM64' 630 | } 631 | steps { 632 | echo "Running on node: ${NODE_NAME}" 633 | sh "docker buildx build \ 634 | --label \"org.opencontainers.image.created=${GITHUB_DATE}\" \ 635 | --label \"org.opencontainers.image.authors=linuxserver.io\" \ 636 | --label 
\"org.opencontainers.image.url=https://github.com/linuxserver/docker-baseimage-ubuntu/packages\" \ 637 | --label \"org.opencontainers.image.documentation=https://docs.linuxserver.io/images/docker-baseimage-ubuntu\" \ 638 | --label \"org.opencontainers.image.source=https://github.com/linuxserver/docker-baseimage-ubuntu\" \ 639 | --label \"org.opencontainers.image.version=${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}\" \ 640 | --label \"org.opencontainers.image.revision=${COMMIT_SHA}\" \ 641 | --label \"org.opencontainers.image.vendor=linuxserver.io\" \ 642 | --label \"org.opencontainers.image.licenses=GPL-3.0-only\" \ 643 | --label \"org.opencontainers.image.ref.name=${COMMIT_SHA}\" \ 644 | --label \"org.opencontainers.image.title=Baseimage-ubuntu\" \ 645 | --label \"org.opencontainers.image.description=baseimage-ubuntu image by linuxserver.io\" \ 646 | --no-cache --pull -f Dockerfile.aarch64 -t ${IMAGE}:arm64v8-${META_TAG} --platform=linux/arm64 \ 647 | --provenance=true --sbom=true --builder=container --load \ 648 | --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." 649 | sh '''#! /bin/bash 650 | set -e 651 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 652 | for i in "${CACHE[@]}"; do 653 | docker tag ${IMAGE}:arm64v8-${META_TAG} ${i}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} 654 | done 655 | ''' 656 | withCredentials([ 657 | [ 658 | $class: 'UsernamePasswordMultiBinding', 659 | credentialsId: 'Quay.io-Robot', 660 | usernameVariable: 'QUAYUSER', 661 | passwordVariable: 'QUAYPASS' 662 | ] 663 | ]) { 664 | retry_backoff(5,5) { 665 | sh '''#! 
/bin/bash 666 | set -e 667 | echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin 668 | echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin 669 | echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin 670 | echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin 671 | if [[ "${PACKAGE_CHECK}" != "true" ]]; then 672 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 673 | for i in "${CACHE[@]}"; do 674 | docker push ${i}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} & 675 | done 676 | for p in $(jobs -p); do 677 | wait "$p" || { echo "job $p failed" >&2; exit 1; } 678 | done 679 | fi 680 | ''' 681 | } 682 | } 683 | sh '''#! /bin/bash 684 | containers=$(docker ps -aq) 685 | if [[ -n "${containers}" ]]; then 686 | docker stop ${containers} 687 | fi 688 | docker system prune -f --volumes || : 689 | docker image prune -af || : 690 | ''' 691 | } 692 | } 693 | } 694 | } 695 | // Take the image we just built and dump package versions for comparison 696 | stage('Update-packages') { 697 | when { 698 | branch "noble" 699 | environment name: 'CHANGE_ID', value: '' 700 | environment name: 'EXIT_STATUS', value: '' 701 | } 702 | steps { 703 | sh '''#! 
/bin/bash 704 | set -e 705 | TEMPDIR=$(mktemp -d) 706 | if [ "${MULTIARCH}" == "true" ] && [ "${PACKAGE_CHECK}" != "true" ]; then 707 | LOCAL_CONTAINER=${IMAGE}:amd64-${META_TAG} 708 | else 709 | LOCAL_CONTAINER=${IMAGE}:${META_TAG} 710 | fi 711 | touch ${TEMPDIR}/package_versions.txt 712 | docker run --rm \ 713 | -v /var/run/docker.sock:/var/run/docker.sock:ro \ 714 | -v ${TEMPDIR}:/tmp \ 715 | ghcr.io/anchore/syft:latest \ 716 | ${LOCAL_CONTAINER} -o table=/tmp/package_versions.txt 717 | NEW_PACKAGE_TAG=$(md5sum ${TEMPDIR}/package_versions.txt | cut -c1-8 ) 718 | echo "Package tag sha from current packages in buit container is ${NEW_PACKAGE_TAG} comparing to old ${PACKAGE_TAG} from github" 719 | if [ "${NEW_PACKAGE_TAG}" != "${PACKAGE_TAG}" ]; then 720 | git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/${LS_REPO} 721 | git --git-dir ${TEMPDIR}/${LS_REPO}/.git checkout -f noble 722 | cp ${TEMPDIR}/package_versions.txt ${TEMPDIR}/${LS_REPO}/ 723 | cd ${TEMPDIR}/${LS_REPO}/ 724 | wait 725 | git add package_versions.txt 726 | git commit -m 'Bot Updating Package Versions' 727 | git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git noble 728 | git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git noble 729 | echo "true" > /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER} 730 | echo "Package tag updated, stopping build process" 731 | else 732 | echo "false" > /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER} 733 | echo "Package tag is same as previous continue with build process" 734 | fi 735 | rm -Rf ${TEMPDIR}''' 736 | script{ 737 | env.PACKAGE_UPDATED = sh( 738 | script: '''cat /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER}''', 739 | returnStdout: true).trim() 740 | } 741 | } 742 | } 743 | // Exit the build if the package file was just updated 744 | stage('PACKAGE-exit') { 745 | when { 746 | branch "noble" 747 | environment name: 'CHANGE_ID', value: '' 748 | environment name: 'PACKAGE_UPDATED', value: 
'true' 749 | environment name: 'EXIT_STATUS', value: '' 750 | } 751 | steps { 752 | script{ 753 | env.EXIT_STATUS = 'ABORTED' 754 | } 755 | } 756 | } 757 | // Exit the build if this is just a package check and there are no changes to push 758 | stage('PACKAGECHECK-exit') { 759 | when { 760 | branch "noble" 761 | environment name: 'CHANGE_ID', value: '' 762 | environment name: 'PACKAGE_UPDATED', value: 'false' 763 | environment name: 'EXIT_STATUS', value: '' 764 | expression { 765 | params.PACKAGE_CHECK == 'true' 766 | } 767 | } 768 | steps { 769 | script{ 770 | env.EXIT_STATUS = 'ABORTED' 771 | } 772 | } 773 | } 774 | /* ####### 775 | Testing 776 | ####### */ 777 | // Run Container tests 778 | stage('Test') { 779 | when { 780 | environment name: 'CI', value: 'true' 781 | environment name: 'EXIT_STATUS', value: '' 782 | } 783 | steps { 784 | withCredentials([ 785 | string(credentialsId: 'ci-tests-s3-key-id', variable: 'S3_KEY'), 786 | string(credentialsId: 'ci-tests-s3-secret-access-key ', variable: 'S3_SECRET') 787 | ]) { 788 | script{ 789 | env.CI_URL = 'https://ci-tests.linuxserver.io/' + env.IMAGE + '/' + env.META_TAG + '/index.html' 790 | env.CI_JSON_URL = 'https://ci-tests.linuxserver.io/' + env.IMAGE + '/' + env.META_TAG + '/report.json' 791 | } 792 | sh '''#! 
/bin/bash 793 | set -e 794 | if grep -q 'docker-baseimage' <<< "${LS_REPO}"; then 795 | echo "Detected baseimage, setting LSIO_FIRST_PARTY=true" 796 | if [ -n "${CI_DOCKERENV}" ]; then 797 | CI_DOCKERENV="LSIO_FIRST_PARTY=true|${CI_DOCKERENV}" 798 | else 799 | CI_DOCKERENV="LSIO_FIRST_PARTY=true" 800 | fi 801 | fi 802 | docker pull ghcr.io/linuxserver/ci:latest 803 | if [ "${MULTIARCH}" == "true" ]; then 804 | docker pull ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} --platform=arm64 805 | docker tag ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG} 806 | fi 807 | docker run --rm \ 808 | --shm-size=1gb \ 809 | -v /var/run/docker.sock:/var/run/docker.sock \ 810 | -e IMAGE=\"${IMAGE}\" \ 811 | -e DOCKER_LOGS_TIMEOUT=\"${CI_DELAY}\" \ 812 | -e TAGS=\"${CI_TAGS}\" \ 813 | -e META_TAG=\"${META_TAG}\" \ 814 | -e RELEASE_TAG=\"noble\" \ 815 | -e PORT=\"${CI_PORT}\" \ 816 | -e SSL=\"${CI_SSL}\" \ 817 | -e BASE=\"${DIST_IMAGE}\" \ 818 | -e SECRET_KEY=\"${S3_SECRET}\" \ 819 | -e ACCESS_KEY=\"${S3_KEY}\" \ 820 | -e DOCKER_ENV=\"${CI_DOCKERENV}\" \ 821 | -e WEB_SCREENSHOT=\"${CI_WEB}\" \ 822 | -e WEB_AUTH=\"${CI_AUTH}\" \ 823 | -e WEB_PATH=\"${CI_WEBPATH}\" \ 824 | -e NODE_NAME=\"${NODE_NAME}\" \ 825 | -t ghcr.io/linuxserver/ci:latest \ 826 | python3 test_build.py''' 827 | } 828 | } 829 | } 830 | /* ################## 831 | Release Logic 832 | ################## */ 833 | // If this is an amd64 only image only push a single image 834 | stage('Docker-Push-Single') { 835 | when { 836 | environment name: 'MULTIARCH', value: 'false' 837 | environment name: 'EXIT_STATUS', value: '' 838 | } 839 | steps { 840 | retry_backoff(5,5) { 841 | sh '''#! /bin/bash 842 | set -e 843 | for PUSHIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do 844 | [[ ${PUSHIMAGE%%/*} =~ \\. 
]] && PUSHIMAGEPLUS="${PUSHIMAGE}" || PUSHIMAGEPLUS="docker.io/${PUSHIMAGE}" 845 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 846 | for i in "${CACHE[@]}"; do 847 | if [[ "${PUSHIMAGEPLUS}" == "$(cut -d "/" -f1 <<< ${i})"* ]]; then 848 | CACHEIMAGE=${i} 849 | fi 850 | done 851 | docker buildx imagetools create --prefer-index=false -t ${PUSHIMAGE}:${META_TAG} -t ${PUSHIMAGE}:noble -t ${PUSHIMAGE}:${EXT_RELEASE_TAG} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} 852 | if [ -n "${SEMVER}" ]; then 853 | docker buildx imagetools create --prefer-index=false -t ${PUSHIMAGE}:${SEMVER} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} 854 | fi 855 | done 856 | ''' 857 | } 858 | } 859 | } 860 | // If this is a multi arch release push all images and define the manifest 861 | stage('Docker-Push-Multi') { 862 | when { 863 | environment name: 'MULTIARCH', value: 'true' 864 | environment name: 'EXIT_STATUS', value: '' 865 | } 866 | steps { 867 | retry_backoff(5,5) { 868 | sh '''#! /bin/bash 869 | set -e 870 | for MANIFESTIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do 871 | [[ ${MANIFESTIMAGE%%/*} =~ \\. 
]] && MANIFESTIMAGEPLUS="${MANIFESTIMAGE}" || MANIFESTIMAGEPLUS="docker.io/${MANIFESTIMAGE}" 872 | IFS=',' read -ra CACHE <<< "$BUILDCACHE" 873 | for i in "${CACHE[@]}"; do 874 | if [[ "${MANIFESTIMAGEPLUS}" == "$(cut -d "/" -f1 <<< ${i})"* ]]; then 875 | CACHEIMAGE=${i} 876 | fi 877 | done 878 | docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:amd64-${META_TAG} -t ${MANIFESTIMAGE}:amd64-noble -t ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} 879 | docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:arm64v8-${META_TAG} -t ${MANIFESTIMAGE}:arm64v8-noble -t ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG} ${CACHEIMAGE}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} 880 | if [ -n "${SEMVER}" ]; then 881 | docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:amd64-${SEMVER} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} 882 | docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:arm64v8-${SEMVER} ${CACHEIMAGE}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} 883 | fi 884 | done 885 | for MANIFESTIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do 886 | docker buildx imagetools create -t ${MANIFESTIMAGE}:noble ${MANIFESTIMAGE}:amd64-noble ${MANIFESTIMAGE}:arm64v8-noble 887 | docker buildx imagetools create -t ${MANIFESTIMAGE}:${META_TAG} ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${META_TAG} 888 | 889 | docker buildx imagetools create -t ${MANIFESTIMAGE}:${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG} 890 | if [ -n "${SEMVER}" ]; then 891 | docker buildx imagetools create -t ${MANIFESTIMAGE}:${SEMVER} ${MANIFESTIMAGE}:amd64-${SEMVER} ${MANIFESTIMAGE}:arm64v8-${SEMVER} 892 | fi 893 | done 894 | ''' 895 | } 896 | } 897 | } 898 | // If this is a public release tag it in the LS Github 899 | stage('Github-Tag-Push-Release') { 900 | when { 901 | branch "noble" 902 | expression 
{ 903 | env.LS_RELEASE != env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER 904 | } 905 | environment name: 'CHANGE_ID', value: '' 906 | environment name: 'EXIT_STATUS', value: '' 907 | } 908 | steps { 909 | echo "Pushing New tag for current commit ${META_TAG}" 910 | sh '''curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/git/tags \ 911 | -d '{"tag":"'${META_TAG}'",\ 912 | "object": "'${COMMIT_SHA}'",\ 913 | "message": "Tagging Release '${EXT_RELEASE_CLEAN}'-ls'${LS_TAG_NUMBER}' to noble",\ 914 | "type": "commit",\ 915 | "tagger": {"name": "LinuxServer-CI","email": "ci@linuxserver.io","date": "'${GITHUB_DATE}'"}}' ''' 916 | echo "Pushing New release for Tag" 917 | sh '''#! /bin/bash 918 | echo "Updating base packages to ${PACKAGE_TAG}" > releasebody.json 919 | echo '{"tag_name":"'${META_TAG}'",\ 920 | "target_commitish": "noble",\ 921 | "name": "'${META_TAG}'",\ 922 | "body": "**CI Report:**\\n\\n'${CI_URL:-N/A}'\\n\\n**LinuxServer Changes:**\\n\\n'${LS_RELEASE_NOTES}'\\n\\n**Remote Changes:**\\n\\n' > start 923 | printf '","draft": false,"prerelease": false}' >> releasebody.json 924 | paste -d'\\0' start releasebody.json > releasebody.json.done 925 | curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/releases -d @releasebody.json.done''' 926 | } 927 | } 928 | // Add protection to the release branch 929 | stage('Github-Release-Branch-Protection') { 930 | when { 931 | branch "noble" 932 | environment name: 'CHANGE_ID', value: '' 933 | environment name: 'EXIT_STATUS', value: '' 934 | } 935 | steps { 936 | echo "Setting up protection for release branch noble" 937 | sh '''#! /bin/bash 938 | curl -H "Authorization: token ${GITHUB_TOKEN}" -X PUT https://api.github.com/repos/${LS_USER}/${LS_REPO}/branches/noble/protection \ 939 | -d $(jq -c . 
<< EOF 940 | { 941 | "required_status_checks": null, 942 | "enforce_admins": false, 943 | "required_pull_request_reviews": { 944 | "dismiss_stale_reviews": false, 945 | "require_code_owner_reviews": false, 946 | "require_last_push_approval": false, 947 | "required_approving_review_count": 1 948 | }, 949 | "restrictions": null, 950 | "required_linear_history": false, 951 | "allow_force_pushes": false, 952 | "allow_deletions": false, 953 | "block_creations": false, 954 | "required_conversation_resolution": true, 955 | "lock_branch": false, 956 | "allow_fork_syncing": false, 957 | "required_signatures": false 958 | } 959 | EOF 960 | ) ''' 961 | } 962 | } 963 | // If this is a Pull request send the CI link as a comment on it 964 | stage('Pull Request Comment') { 965 | when { 966 | not {environment name: 'CHANGE_ID', value: ''} 967 | environment name: 'EXIT_STATUS', value: '' 968 | } 969 | steps { 970 | sh '''#! /bin/bash 971 | # Function to retrieve JSON data from URL 972 | get_json() { 973 | local url="$1" 974 | local response=$(curl -s "$url") 975 | if [ $? -ne 0 ]; then 976 | echo "Failed to retrieve JSON data from $url" 977 | return 1 978 | fi 979 | local json=$(echo "$response" | jq .) 980 | if [ $? -ne 0 ]; then 981 | echo "Failed to parse JSON data from $url" 982 | return 1 983 | fi 984 | echo "$json" 985 | } 986 | 987 | build_table() { 988 | local data="$1" 989 | 990 | # Get the keys in the JSON data 991 | local keys=$(echo "$data" | jq -r 'to_entries | map(.key) | .[]') 992 | 993 | # Check if keys are empty 994 | if [ -z "$keys" ]; then 995 | echo "JSON report data does not contain any keys or the report does not exist." 
996 | return 1 997 | fi 998 | 999 | # Build table header 1000 | local header="| Tag | Passed |\\n| --- | --- |\\n" 1001 | 1002 | # Loop through the JSON data to build the table rows 1003 | local rows="" 1004 | for build in $keys; do 1005 | local status=$(echo "$data" | jq -r ".[\\"$build\\"].test_success") 1006 | if [ "$status" = "true" ]; then 1007 | status="✅" 1008 | else 1009 | status="❌" 1010 | fi 1011 | local row="| "$build" | "$status" |\\n" 1012 | rows="${rows}${row}" 1013 | done 1014 | 1015 | local table="${header}${rows}" 1016 | local escaped_table=$(echo "$table" | sed 's/\"/\\\\"/g') 1017 | echo "$escaped_table" 1018 | } 1019 | 1020 | if [[ "${CI}" = "true" ]]; then 1021 | # Retrieve JSON data from URL 1022 | data=$(get_json "$CI_JSON_URL") 1023 | # Create table from JSON data 1024 | table=$(build_table "$data") 1025 | echo -e "$table" 1026 | 1027 | curl -X POST -H "Authorization: token $GITHUB_TOKEN" \ 1028 | -H "Accept: application/vnd.github.v3+json" \ 1029 | "https://api.github.com/repos/$LS_USER/$LS_REPO/issues/$PULL_REQUEST/comments" \ 1030 | -d "{\\"body\\": \\"I am a bot, here are the test results for this PR: \\n${CI_URL}\\n${SHELLCHECK_URL}\\n${table}\\"}" 1031 | else 1032 | curl -X POST -H "Authorization: token $GITHUB_TOKEN" \ 1033 | -H "Accept: application/vnd.github.v3+json" \ 1034 | "https://api.github.com/repos/$LS_USER/$LS_REPO/issues/$PULL_REQUEST/comments" \ 1035 | -d "{\\"body\\": \\"I am a bot, here is the pushed image/manifest for this PR: \\n\\n\\`${GITHUBIMAGE}:${META_TAG}\\`\\"}" 1036 | fi 1037 | ''' 1038 | 1039 | } 1040 | } 1041 | } 1042 | /* ###################### 1043 | Send status to Discord 1044 | ###################### */ 1045 | post { 1046 | always { 1047 | sh '''#!/bin/bash 1048 | rm -rf /config/.ssh/id_sign 1049 | rm -rf /config/.ssh/id_sign.pub 1050 | git config --global --unset gpg.format 1051 | git config --global --unset user.signingkey 1052 | git config --global --unset commit.gpgsign 1053 | ''' 1054 | script{ 1055 
| env.JOB_DATE = sh( 1056 | script: '''date '+%Y-%m-%dT%H:%M:%S%:z' ''', 1057 | returnStdout: true).trim() 1058 | if (env.EXIT_STATUS == "ABORTED"){ 1059 | sh 'echo "build aborted"' 1060 | }else{ 1061 | if (currentBuild.currentResult == "SUCCESS"){ 1062 | if (env.GITHUBIMAGE =~ /lspipepr/){ 1063 | env.JOB_WEBHOOK_STATUS='Success' 1064 | env.JOB_WEBHOOK_COLOUR=3957028 1065 | env.JOB_WEBHOOK_FOOTER='PR Build' 1066 | }else if (env.GITHUBIMAGE =~ /lsiodev/){ 1067 | env.JOB_WEBHOOK_STATUS='Success' 1068 | env.JOB_WEBHOOK_COLOUR=3957028 1069 | env.JOB_WEBHOOK_FOOTER='Dev Build' 1070 | }else{ 1071 | env.JOB_WEBHOOK_STATUS='Success' 1072 | env.JOB_WEBHOOK_COLOUR=1681177 1073 | env.JOB_WEBHOOK_FOOTER='Live Build' 1074 | } 1075 | }else{ 1076 | if (env.GITHUBIMAGE =~ /lspipepr/){ 1077 | env.JOB_WEBHOOK_STATUS='Failure' 1078 | env.JOB_WEBHOOK_COLOUR=12669523 1079 | env.JOB_WEBHOOK_FOOTER='PR Build' 1080 | }else if (env.GITHUBIMAGE =~ /lsiodev/){ 1081 | env.JOB_WEBHOOK_STATUS='Failure' 1082 | env.JOB_WEBHOOK_COLOUR=12669523 1083 | env.JOB_WEBHOOK_FOOTER='Dev Build' 1084 | }else{ 1085 | env.JOB_WEBHOOK_STATUS='Failure' 1086 | env.JOB_WEBHOOK_COLOUR=16711680 1087 | env.JOB_WEBHOOK_FOOTER='Live Build' 1088 | } 1089 | } 1090 | sh ''' curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/jenkins-avatar.png","embeds": [{"'color'": '${JOB_WEBHOOK_COLOUR}',\ 1091 | "footer": {"text" : "'"${JOB_WEBHOOK_FOOTER}"'"},\ 1092 | "timestamp": "'${JOB_DATE}'",\ 1093 | "description": "**Build:** '${BUILD_NUMBER}'\\n**CI Results:** '${CI_URL}'\\n**ShellCheck Results:** '${SHELLCHECK_URL}'\\n**Status:** '${JOB_WEBHOOK_STATUS}'\\n**Job:** '${RUN_DISPLAY_URL}'\\n**Change:** '${CODE_URL}'\\n**External Release:**: '${RELEASE_LINK}'\\n**DockerHub:** '${DOCKERHUB_LINK}'\\n"}],\ 1094 | "username": "Jenkins"}' ${BUILDS_DISCORD} ''' 1095 | } 1096 | } 1097 | } 1098 | cleanup { 1099 | sh '''#! 
// Run the supplied closure, retrying with exponential backoff on failure.
//
// Makes up to max_attempts calls to c. After any failed attempt except the
// last, it sleeps power_base ** attempt before retrying, so delays grow as
// 1, power_base, power_base^2, ... (the Jenkins `sleep` step measures this
// in seconds). The exception from the final failed attempt is rethrown to
// the caller. NOTE(review): if max_attempts < 1 the closure is never
// invoked and the call returns silently — callers in this file always pass
// a positive count.
def retry_backoff(int max_attempts, int power_base, Closure c) {
    for (int attempt = 0; attempt < max_attempts; attempt++) {
        try {
            c()
            return
        } catch (err) {
            boolean lastTry = (attempt + 1) >= max_attempts
            if (lastTry) {
                // Out of attempts: surface the most recent failure.
                throw err
            }
            // Exponential backoff before the next attempt.
            sleep(power_base ** attempt)
        }
    }
}
We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 
49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. 
The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 
122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 
155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 
186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. 
This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 
256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 
287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 
317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. 
If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 
386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. 
If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 
486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 
512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. 
If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 
578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 
613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | <one line to give the program's name and a brief idea of what it does.> 635 | Copyright (C) <year> <name of author> 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see <https://www.gnu.org/licenses/>. 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | <program> Copyright (C) <year> <name of author> 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | <https://www.gnu.org/licenses/>. 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | <https://www.gnu.org/licenses/why-not-lgpl.html>. 
675 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | <!-- DO NOT EDIT THIS FILE MANUALLY --> 2 | <!-- Please read https://github.com/linuxserver/docker-baseimage-ubuntu/blob/noble/.github/CONTRIBUTING.md --> 3 | [![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)](https://linuxserver.io) 4 | 5 | ## Contact information:- 6 | 7 | | Type | Address/Details | 8 | | :---: | --- | 9 | | Discord | [Discord](https://linuxserver.io/discord) | 10 | | IRC | `#linuxserver.io` on irc.libera.chat | 11 | | Forum | [Discourse](https://discourse.linuxserver.io/) | 12 | 13 | A custom base image built with [Ubuntu Linux](https://ubuntu.com) and [s6-overlay](https://github.com/just-containers/s6-overlay). 14 | 15 | - Support for using our base images in your own projects is provided on a Reasonable Endeavours basis, please see our [Support Policy](https://www.linuxserver.io/supportpolicy) for details. 16 | - There is no `latest` tag for any of our base images, by design. We often make breaking changes between versions, and we don't publish release notes like we do for the downstream images. 17 | - If you're intending to distribute an image using one of our bases, please read our [docs on container branding](https://docs.linuxserver.io/general/container-branding/) first. 18 | - Ubuntu releases are supported for 5 years, after which we will stop building new base images for that version. 19 | 20 | The following line is only in this repo for loop testing: 21 | 22 | - { date: "01.01.50:", desc: "I am the release message for this internal repo." 
} 23 | -------------------------------------------------------------------------------- /jenkins-vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # jenkins variables 4 | project_name: docker-baseimage-ubuntu 5 | external_type: os 6 | release_type: stable 7 | release_tag: noble 8 | ls_branch: noble 9 | repo_vars: 10 | - BUILD_VERSION_ARG = 'OS' 11 | - LS_USER = 'linuxserver' 12 | - LS_REPO = 'docker-baseimage-ubuntu' 13 | - CONTAINER_NAME = 'baseimage-ubuntu' 14 | - DOCKERHUB_IMAGE = 'lsiobase/ubuntu' 15 | - DEV_DOCKERHUB_IMAGE = 'lsiodev/ubuntu' 16 | - PR_DOCKERHUB_IMAGE = 'lspipepr/ubuntu' 17 | - DIST_IMAGE = 'ubuntu' 18 | - MULTIARCH='true' 19 | - CI='true' 20 | - CI_WEB='false' 21 | - CI_PORT='80' 22 | - CI_SSL='true' 23 | - CI_DELAY='30' 24 | - CI_DOCKERENV='LSIO_FIRST_PARTY=true' 25 | - CI_AUTH='' 26 | - CI_WEBPATH='' 27 | -------------------------------------------------------------------------------- /package_versions.txt: -------------------------------------------------------------------------------- 1 | NAME VERSION TYPE 2 | adduser 3.137ubuntu1 deb 3 | apt 2.8.3 deb 4 | apt-utils 2.8.3 deb 5 | base-files 13ubuntu10.2 deb 6 | base-passwd 3.6.3build1 deb 7 | bash 5.2.21-2ubuntu4 deb 8 | bsdutils 1:2.39.3-9ubuntu6.2 deb 9 | ca-certificates 20240203 deb 10 | catatonit 0.1.7-1 deb 11 | coreutils 9.4-3ubuntu6 deb 12 | cron 3.0pl1-184ubuntu2 deb 13 | cron-daemon-common 3.0pl1-184ubuntu2 deb 14 | curl 8.5.0-2ubuntu10.6 deb 15 | dash 0.5.12-6ubuntu5 deb 16 | debconf 1.5.86ubuntu1 deb 17 | debianutils 5.17build1 deb 18 | diffutils 1:3.10-1build1 deb 19 | dirmngr 2.4.4-2ubuntu17.2 deb 20 | dpkg 1.22.6ubuntu6.1 deb 21 | e2fsprogs 1.47.0-2.4~exp1ubuntu4.1 deb 22 | findutils 4.9.0-5build1 deb 23 | gcc-14-base 14.2.0-4ubuntu2~24.04 deb 24 | gnupg 2.4.4-2ubuntu17.2 deb 25 | gnupg-l10n 2.4.4-2ubuntu17.2 deb 26 | gnupg-utils 2.4.4-2ubuntu17.2 deb 27 | gpg 2.4.4-2ubuntu17.2 deb 28 | gpg-agent 2.4.4-2ubuntu17.2 deb 
29 | gpg-wks-client 2.4.4-2ubuntu17.2 deb 30 | gpgconf 2.4.4-2ubuntu17.2 deb 31 | gpgsm 2.4.4-2ubuntu17.2 deb 32 | gpgv 2.4.4-2ubuntu17.2 deb 33 | grep 3.11-4build1 deb 34 | gzip 1.12-1ubuntu3 deb 35 | hostname 3.23+nmu2ubuntu2 deb 36 | init-system-helpers 1.66ubuntu1 deb 37 | jq 1.7.1-3build1 deb 38 | keyboxd 2.4.4-2ubuntu17.2 deb 39 | krb5-locales 1.20.1-6ubuntu2.6 deb 40 | libacl1 2.3.2-1build1.1 deb 41 | libapt-pkg6.0t64 2.8.3 deb 42 | libassuan0 2.5.6-1build1 deb 43 | libattr1 1:2.5.2-1build1.1 deb 44 | libaudit-common 1:3.1.2-2.1build1.1 deb 45 | libaudit1 1:3.1.2-2.1build1.1 deb 46 | libblkid1 2.39.3-9ubuntu6.2 deb 47 | libbrotli1 1.1.0-2build2 deb 48 | libbsd0 0.12.1-1build1.1 deb 49 | libbz2-1.0 1.0.8-5.1build0.1 deb 50 | libc-bin 2.39-0ubuntu8.4 deb 51 | libc6 2.39-0ubuntu8.4 deb 52 | libcap-ng0 0.8.4-2build2 deb 53 | libcap2 1:2.66-5ubuntu2.2 deb 54 | libcom-err2 1.47.0-2.4~exp1ubuntu4.1 deb 55 | libcrypt1 1:4.4.36-4build1 deb 56 | libcurl4t64 8.5.0-2ubuntu10.6 deb 57 | libdb5.3t64 5.3.28+dfsg2-7 deb 58 | libdebconfclient0 0.271ubuntu3 deb 59 | libext2fs2t64 1.47.0-2.4~exp1ubuntu4.1 deb 60 | libffi8 3.4.6-1build1 deb 61 | libgcc-s1 14.2.0-4ubuntu2~24.04 deb 62 | libgcrypt20 1.10.3-2build1 deb 63 | libgmp10 2:6.3.0+dfsg-2ubuntu6.1 deb 64 | libgnutls30t64 3.8.3-1.1ubuntu3.3 deb 65 | libgpg-error0 1.47-3build2.1 deb 66 | libgssapi-krb5-2 1.20.1-6ubuntu2.6 deb 67 | libhogweed6t64 3.9.1-2.2build1.1 deb 68 | libidn2-0 2.3.7-2build1.1 deb 69 | libjq1 1.7.1-3build1 deb 70 | libk5crypto3 1.20.1-6ubuntu2.6 deb 71 | libkeyutils1 1.6.3-3build1 deb 72 | libkrb5-3 1.20.1-6ubuntu2.6 deb 73 | libkrb5support0 1.20.1-6ubuntu2.6 deb 74 | libksba8 1.6.6-1build1 deb 75 | libldap-common 2.6.7+dfsg-1~exp1ubuntu8.2 deb 76 | libldap2 2.6.7+dfsg-1~exp1ubuntu8.2 deb 77 | liblz4-1 1.9.4-1build1.1 deb 78 | liblzma5 5.6.1+really5.4.5-1ubuntu0.2 deb 79 | libmd0 1.1.0-2build1.1 deb 80 | libmount1 2.39.3-9ubuntu6.2 deb 81 | libncursesw6 6.4+20240113-1ubuntu2 deb 82 | libnettle8t64 
3.9.1-2.2build1.1 deb 83 | libnghttp2-14 1.59.0-1ubuntu0.2 deb 84 | libnpth0t64 1.6-3.1build1 deb 85 | libonig5 6.9.9-1build1 deb 86 | libp11-kit0 0.25.3-4ubuntu2.1 deb 87 | libpam-modules 1.5.3-5ubuntu5.1 deb 88 | libpam-modules-bin 1.5.3-5ubuntu5.1 deb 89 | libpam-runtime 1.5.3-5ubuntu5.1 deb 90 | libpam0g 1.5.3-5ubuntu5.1 deb 91 | libpcre2-8-0 10.42-4ubuntu2.1 deb 92 | libproc2-0 2:4.0.4-4ubuntu3.2 deb 93 | libpsl5t64 0.21.2-1.1build1 deb 94 | libreadline8t64 8.2-4build1 deb 95 | librtmp1 2.4+20151223.gitfa8646d.1-2build7 deb 96 | libsasl2-2 2.1.28+dfsg1-5ubuntu3.1 deb 97 | libsasl2-modules 2.1.28+dfsg1-5ubuntu3.1 deb 98 | libsasl2-modules-db 2.1.28+dfsg1-5ubuntu3.1 deb 99 | libseccomp2 2.5.5-1ubuntu3.1 deb 100 | libselinux1 3.5-2ubuntu2.1 deb 101 | libsemanage-common 3.5-1build5 deb 102 | libsemanage2 3.5-1build5 deb 103 | libsepol2 3.5-2build1 deb 104 | libsmartcols1 2.39.3-9ubuntu6.2 deb 105 | libsqlite3-0 3.45.1-1ubuntu2.3 deb 106 | libss2 1.47.0-2.4~exp1ubuntu4.1 deb 107 | libssh-4 0.10.6-2build2 deb 108 | libssl3t64 3.0.13-0ubuntu3.5 deb 109 | libstdc++6 14.2.0-4ubuntu2~24.04 deb 110 | libsystemd0 255.4-1ubuntu8.6 deb 111 | libtasn1-6 4.19.0-3ubuntu0.24.04.1 deb 112 | libtinfo6 6.4+20240113-1ubuntu2 deb 113 | libudev1 255.4-1ubuntu8.6 deb 114 | libunistring5 1.1-2build1.1 deb 115 | libuuid1 2.39.3-9ubuntu6.2 deb 116 | libxxhash0 0.8.2-2build1 deb 117 | libzstd1 1.5.5+dfsg2-2build1.1 deb 118 | locales 2.39-0ubuntu8.4 deb 119 | login 1:4.13+dfsg1-4ubuntu3.2 deb 120 | logsave 1.47.0-2.4~exp1ubuntu4.1 deb 121 | mawk 1.3.4.20240123-1build1 deb 122 | mount 2.39.3-9ubuntu6.2 deb 123 | ncurses-base 6.4+20240113-1ubuntu2 deb 124 | ncurses-bin 6.4+20240113-1ubuntu2 deb 125 | netcat-openbsd 1.226-1ubuntu2 deb 126 | openssl 3.0.13-0ubuntu3.5 deb 127 | passwd 1:4.13+dfsg1-4ubuntu3.2 deb 128 | perl-base 5.38.2-3.2ubuntu0.1 deb 129 | pinentry-curses 1.2.1-3ubuntu5 deb 130 | procps 2:4.0.4-4ubuntu3.2 deb 131 | publicsuffix 20231001.0357-0.1 deb 132 | readline-common 
8.2-4build1 deb 133 | sed 4.9-2build1 deb 134 | sensible-utils 0.0.22 deb 135 | systemd-standalone-sysusers 255.4-1ubuntu8.6 deb 136 | sysvinit-utils 3.08-6ubuntu3 deb 137 | tar 1.35+dfsg-3build1 deb 138 | tzdata 2025b-0ubuntu0.24.04.1 deb 139 | ubuntu-keyring 2023.11.28.1 deb 140 | unminimize 0.2.1 deb 141 | util-linux 2.39.3-9ubuntu6.2 deb 142 | zlib1g 1:1.3.dfsg-3.1ubuntu2.1 deb 143 | -------------------------------------------------------------------------------- /readme-vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # project information 4 | project_name: baseimage-ubuntu 5 | full_custom_readme: | 6 | {% raw -%} 7 | [![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)](https://linuxserver.io) 8 | 9 | ## Contact information:- 10 | 11 | | Type | Address/Details | 12 | | :---: | --- | 13 | | Discord | [Discord](https://linuxserver.io/discord) | 14 | | IRC | `#linuxserver.io` on irc.libera.chat | 15 | | Forum | [Discourse](https://discourse.linuxserver.io/) | 16 | 17 | A custom base image built with [Ubuntu Linux](https://ubuntu.com) and [s6-overlay](https://github.com/just-containers/s6-overlay). 18 | 19 | - Support for using our base images in your own projects is provided on a Reasonable Endeavours basis, please see our [Support Policy](https://www.linuxserver.io/supportpolicy) for details. 20 | - There is no `latest` tag for any of our base images, by design. We often make breaking changes between versions, and we don't publish release notes like we do for the downstream images. 21 | - If you're intending to distribute an image using one of our bases, please read our [docs on container branding](https://docs.linuxserver.io/general/container-branding/) first. 22 | - Ubuntu releases are supported for 5 years, after which we will stop building new base images for that version. 
23 | 24 | The following line is only in this repo for loop testing: 25 | 26 | - { date: "01.01.50:", desc: "I am the release message for this internal repo." } 27 | {%- endraw %} 28 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/ci-service-check/dependencies.d/legacy-services: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/ci-service-check/dependencies.d/legacy-services -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/ci-service-check/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/ci-service-check/up: -------------------------------------------------------------------------------- 1 | echo "[ls.io-init] done." 
-------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-adduser/branding: -------------------------------------------------------------------------------- 1 | ─────────────────────────────────────── 2 | _____ __ __ _____ _____ _____ _____ 3 | | | | | __|_ _| | | 4 | | --| | |__ | | | | | | | | | 5 | |_____|_____|_____| |_| |_____|_|_|_| 6 | _____ __ __ _ __ ____ 7 | | __ | | | | | | \ 8 | | __ -| | | | |__| | | 9 | |_____|_____|_|_____|____/ 10 | 11 | Based on images from linuxserver.io 12 | ─────────────────────────────────────── 13 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-adduser/dependencies.d/init-migrations: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/init-adduser/dependencies.d/init-migrations -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-adduser/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # shellcheck shell=bash 3 | 4 | PUID=${PUID:-911} 5 | PGID=${PGID:-911} 6 | 7 | if [[ -z ${LSIO_READ_ONLY_FS} ]] && [[ -z ${LSIO_NON_ROOT_USER} ]]; then 8 | USERHOME=$(grep abc /etc/passwd | cut -d ":" -f6) 9 | usermod -d "/root" abc 10 | 11 | groupmod -o -g "${PGID}" abc 12 | usermod -o -u "${PUID}" abc 13 | 14 | usermod -d "${USERHOME}" abc 15 | fi 16 | 17 | if { [[ -z ${LSIO_READ_ONLY_FS} ]] && [[ -z ${LSIO_NON_ROOT_USER} ]]; } || [[ ! 
${LSIO_FIRST_PARTY} = "true" ]]; then 18 | cat /etc/s6-overlay/s6-rc.d/init-adduser/branding 19 | else 20 | cat /run/branding 21 | fi 22 | 23 | if [[ -f /donate.txt ]]; then 24 | echo ' 25 | To support the app dev(s) visit:' 26 | cat /donate.txt 27 | fi 28 | echo ' 29 | To support LSIO projects visit: 30 | https://www.linuxserver.io/donate/ 31 | 32 | ─────────────────────────────────────── 33 | GID/UID 34 | ───────────────────────────────────────' 35 | if [[ -z ${LSIO_NON_ROOT_USER} ]]; then 36 | echo " 37 | User UID: $(id -u abc) 38 | User GID: $(id -g abc) 39 | ───────────────────────────────────────" 40 | else 41 | echo " 42 | User UID: $(stat /run -c %u) 43 | User GID: $(stat /run -c %g) 44 | ───────────────────────────────────────" 45 | fi 46 | if [[ -f /build_version ]]; then 47 | cat /build_version 48 | echo ' 49 | ─────────────────────────────────────── 50 | ' 51 | fi 52 | 53 | if [[ -z ${LSIO_READ_ONLY_FS} ]] && [[ -z ${LSIO_NON_ROOT_USER} ]]; then 54 | lsiown abc:abc /app 55 | lsiown abc:abc /config 56 | lsiown abc:abc /defaults 57 | fi 58 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-adduser/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-adduser/up: -------------------------------------------------------------------------------- 1 | /etc/s6-overlay/s6-rc.d/init-adduser/run 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-config: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-config 
-------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-crontab-config: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-crontab-config -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-config-end/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-config-end/up: -------------------------------------------------------------------------------- 1 | # This file doesn't do anything, it's just the end of the downstream image init process 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-config/dependencies.d/init-os-end: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/init-config/dependencies.d/init-os-end -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-config/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-config/up: -------------------------------------------------------------------------------- 1 | # This file doesn't do anything, it's just the start of the downstream image init process 2 | -------------------------------------------------------------------------------- 
/root/etc/s6-overlay/s6-rc.d/init-crontab-config/dependencies.d/init-config: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/init-crontab-config/dependencies.d/init-config -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-crontab-config/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # shellcheck shell=bash 3 | 4 | for cron_user in abc root; do 5 | if [[ -z ${LSIO_READ_ONLY_FS} ]] && [[ -z ${LSIO_NON_ROOT_USER} ]]; then 6 | if [[ -f "/etc/crontabs/${cron_user}" ]]; then 7 | lsiown "${cron_user}":"${cron_user}" "/etc/crontabs/${cron_user}" 8 | crontab -u "${cron_user}" "/etc/crontabs/${cron_user}" 9 | fi 10 | fi 11 | 12 | if [[ -f "/defaults/crontabs/${cron_user}" ]]; then 13 | # make folders 14 | mkdir -p \ 15 | /config/crontabs 16 | 17 | # if crontabs do not exist in config 18 | if [[ ! 
-f "/config/crontabs/${cron_user}" ]]; then 19 | # copy crontab from system 20 | if crontab -l -u "${cron_user}" >/dev/null 2>&1; then 21 | crontab -l -u "${cron_user}" >"/config/crontabs/${cron_user}" 22 | fi 23 | 24 | # if crontabs still do not exist in config (were not copied from system) 25 | # copy crontab from image defaults (using -n, do not overwrite an existing file) 26 | cp -n "/defaults/crontabs/${cron_user}" /config/crontabs/ 27 | fi 28 | 29 | # set permissions and import user crontabs 30 | lsiown "${cron_user}":"${cron_user}" "/config/crontabs/${cron_user}" 31 | crontab -u "${cron_user}" "/config/crontabs/${cron_user}" 32 | fi 33 | done 34 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-crontab-config/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-crontab-config/up: -------------------------------------------------------------------------------- 1 | /etc/s6-overlay/s6-rc.d/init-crontab-config/run 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-custom-files/dependencies.d/init-mods-end: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/init-custom-files/dependencies.d/init-mods-end -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-custom-files/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # shellcheck shell=bash 3 | 4 | # Directories 5 | SCRIPTS_DIR="/custom-cont-init.d" 6 | 7 | # Make sure custom init directory exists and has files in it 8 | 
if [[ -e "${SCRIPTS_DIR}" ]] && [[ -n "$(/bin/ls -A ${SCRIPTS_DIR} 2>/dev/null)" ]]; then 9 | echo "[custom-init] Files found, executing" 10 | for SCRIPT in "${SCRIPTS_DIR}"/*; do 11 | NAME="$(basename "${SCRIPT}")" 12 | if [[ -f "${SCRIPT}" ]]; then 13 | echo "[custom-init] ${NAME}: executing..." 14 | /bin/bash "${SCRIPT}" 15 | echo "[custom-init] ${NAME}: exited $?" 16 | elif [[ ! -f "${SCRIPT}" ]]; then 17 | echo "[custom-init] ${NAME}: is not a file" 18 | fi 19 | done 20 | else 21 | echo "[custom-init] No custom files found, skipping..." 22 | fi 23 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-custom-files/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-custom-files/up: -------------------------------------------------------------------------------- 1 | /etc/s6-overlay/s6-rc.d/init-custom-files/run 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-device-perms/dependencies.d/init-adduser: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/init-device-perms/dependencies.d/init-adduser -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-device-perms/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # shellcheck shell=bash 3 | 4 | if [[ -z ${LSIO_NON_ROOT_USER} ]] && [[ -n ${ATTACHED_DEVICES_PERMS} ]]; then 5 | FILES=$(find ${ATTACHED_DEVICES_PERMS} -print 2>/dev/null) 6 | 7 | for i in ${FILES}; do 8 | FILE_GID=$(stat -c '%g' "${i}") 9 | FILE_UID=$(stat -c '%u' 
"${i}") 10 | # check if user matches device 11 | if id -u abc | grep -qw "${FILE_UID}"; then 12 | echo "**** permissions for ${i} are good ****" 13 | else 14 | # check if group matches and that device has group rw 15 | if id -G abc | grep -qw "${FILE_GID}" && [[ $(stat -c '%A' "${i}" | cut -b 5,6) == "rw" ]]; then 16 | echo "**** permissions for ${i} are good ****" 17 | # check if device needs to be added to group 18 | elif ! id -G abc | grep -qw "${FILE_GID}"; then 19 | # check if group needs to be created 20 | GROUP_NAME=$(getent group "${FILE_GID}" | awk -F: '{print $1}') 21 | if [[ -z "${GROUP_NAME}" ]]; then 22 | GROUP_NAME="group$(head /dev/urandom | tr -dc 'a-z0-9' | head -c4)" 23 | groupadd "${GROUP_NAME}" 24 | groupmod -g "${FILE_GID}" "${GROUP_NAME}" 25 | echo "**** creating group ${GROUP_NAME} with id ${FILE_GID} ****" 26 | fi 27 | echo "**** adding ${i} to group ${GROUP_NAME} with id ${FILE_GID} ****" 28 | usermod -a -G "${GROUP_NAME}" abc 29 | fi 30 | # check if device has group rw 31 | if [[ $(stat -c '%A' "${i}" | cut -b 5,6) != "rw" ]]; then 32 | echo -e "**** The device ${i} does not have group read/write permissions, attempting to fix inside the container. 
****" 33 | chmod g+rw "${i}" 34 | fi 35 | fi 36 | done 37 | fi 38 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-device-perms/type: -------------------------------------------------------------------------------- 1 | oneshot -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-device-perms/up: -------------------------------------------------------------------------------- 1 | /etc/s6-overlay/s6-rc.d/init-device-perms/run -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-envfile/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # shellcheck shell=bash 3 | 4 | if find /run/s6/container_environment/FILE__* -maxdepth 1 > /dev/null 2>&1; then 5 | for FILENAME in /run/s6/container_environment/FILE__*; do 6 | SECRETFILE=$(cat "${FILENAME}") 7 | if [[ -f ${SECRETFILE} ]]; then 8 | FILESTRIP=${FILENAME//FILE__/} 9 | if [[ $(tail -n1 "${SECRETFILE}" | wc -l) != 0 ]]; then 10 | echo "[env-init] Your secret: ${FILENAME##*/}" 11 | echo " contains a trailing newline and may not work as expected" 12 | fi 13 | cat "${SECRETFILE}" >"${FILESTRIP}" 14 | echo "[env-init] ${FILESTRIP##*/} set from ${FILENAME##*/}" 15 | else 16 | echo "[env-init] cannot find secret in ${FILENAME##*/}" 17 | fi 18 | done 19 | fi 20 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-envfile/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-envfile/up: -------------------------------------------------------------------------------- 1 | /etc/s6-overlay/s6-rc.d/init-envfile/run 2 | 
-------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-migrations/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # shellcheck shell=bash 3 | 4 | MIGRATIONS_DIR="/migrations" 5 | MIGRATIONS_HISTORY="/config/.migrations" 6 | 7 | echo "[migrations] started" 8 | 9 | if [[ ! -d ${MIGRATIONS_DIR} ]]; then 10 | echo "[migrations] no migrations found" 11 | exit 12 | fi 13 | 14 | for MIGRATION in $(find ${MIGRATIONS_DIR}/* | sort -n); do 15 | NAME="$(basename "${MIGRATION}")" 16 | if [[ -f ${MIGRATIONS_HISTORY} ]] && grep -Fxq "${NAME}" ${MIGRATIONS_HISTORY}; then 17 | echo "[migrations] ${NAME}: skipped" 18 | continue 19 | fi 20 | echo "[migrations] ${NAME}: executing..." 21 | chmod +x "${MIGRATION}" 22 | # Execute migration script in a subshell to prevent it from modifying the current environment 23 | ("${MIGRATION}") 24 | EXIT_CODE=$? 25 | if [[ ${EXIT_CODE} -ne 0 ]]; then 26 | echo "[migrations] ${NAME}: failed with exit code ${EXIT_CODE}, contact support" 27 | exit "${EXIT_CODE}" 28 | fi 29 | echo "${NAME}" >>${MIGRATIONS_HISTORY} 30 | echo "[migrations] ${NAME}: succeeded" 31 | done 32 | 33 | echo "[migrations] done" 34 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-migrations/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-migrations/up: -------------------------------------------------------------------------------- 1 | /etc/s6-overlay/s6-rc.d/init-migrations/run 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods-end/dependencies.d/init-mods-package-install: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/init-mods-end/dependencies.d/init-mods-package-install -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods-end/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods-end/up: -------------------------------------------------------------------------------- 1 | # This file doesn't do anything, it's just the end of the mod init process 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods-package-install/dependencies.d/init-mods: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/init-mods-package-install/dependencies.d/init-mods -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods-package-install/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods-package-install/up: -------------------------------------------------------------------------------- 1 | /etc/s6-overlay/s6-rc.d/init-mods-package-install/run 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods/dependencies.d/init-config-end: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/init-mods/dependencies.d/init-config-end -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-mods/up: -------------------------------------------------------------------------------- 1 | # This file doesn't do anything, it's just the start of the mod init process 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-adduser: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-adduser -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-device-perms: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-device-perms -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-envfile: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/init-os-end/dependencies.d/init-envfile 
-------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-os-end/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-os-end/up: -------------------------------------------------------------------------------- 1 | # This file doesn't do anything, it's just the end of the mod init process 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-services/dependencies.d/init-custom-files: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/init-services/dependencies.d/init-custom-files -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-services/type: -------------------------------------------------------------------------------- 1 | oneshot 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/init-services/up: -------------------------------------------------------------------------------- 1 | # This file doesn't do anything, it just signals that services can start 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/svc-cron/dependencies.d/init-services: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/svc-cron/dependencies.d/init-services -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/svc-cron/run: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | # shellcheck shell=bash 3 | 4 | if builtin command -v crontab >/dev/null 2>&1 && [[ -n "$(crontab -l -u abc 2>/dev/null || true)" || -n "$(crontab -l -u root 2>/dev/null || true)" ]]; then 5 | if builtin command -v busybox >/dev/null 2>&1 && [[ $(busybox || true) =~ [[:space:]](crond)([,]|$) ]]; then 6 | exec busybox crond -f -S -l 5 7 | elif [[ -f /usr/bin/apt ]] && [[ -f /usr/sbin/cron ]]; then 8 | exec /usr/sbin/cron -f -L 5 9 | else 10 | echo "**** cron not found ****" 11 | sleep infinity 12 | fi 13 | else 14 | sleep infinity 15 | fi 16 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/svc-cron/type: -------------------------------------------------------------------------------- 1 | longrun 2 | -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-adduser: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-adduser -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-config: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-config -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-config-end: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-config-end -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-crontab-config: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-crontab-config -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-custom-files: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-custom-files -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-device-perms: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-device-perms -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-envfile: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-envfile -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-migrations: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-migrations -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods-end: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods-end -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods-package-install: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-mods-package-install -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-os-end: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-os-end -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/init-services: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-services -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user/contents.d/svc-cron: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/user/contents.d/svc-cron -------------------------------------------------------------------------------- /root/etc/s6-overlay/s6-rc.d/user2/contents.d/ci-service-check: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linuxserver/docker-baseimage-ubuntu/facf89574a659fa7646f9ff093fd5eab51a70010/root/etc/s6-overlay/s6-rc.d/user2/contents.d/ci-service-check -------------------------------------------------------------------------------- /sources.list: -------------------------------------------------------------------------------- 1 | deb http://archive.ubuntu.com/ubuntu/ noble main restricted 2 | deb-src http://archive.ubuntu.com/ubuntu/ noble main restricted 3 | deb http://archive.ubuntu.com/ubuntu/ noble-updates main restricted 4 | deb-src http://archive.ubuntu.com/ubuntu/ noble-updates main restricted 5 | deb http://archive.ubuntu.com/ubuntu/ noble universe multiverse 6 | deb-src http://archive.ubuntu.com/ubuntu/ noble universe multiverse 7 | deb http://archive.ubuntu.com/ubuntu/ noble-updates universe multiverse 8 | deb-src http://archive.ubuntu.com/ubuntu/ noble-updates universe multiverse 9 | deb http://archive.ubuntu.com/ubuntu/ noble-security main restricted 10 | deb-src http://archive.ubuntu.com/ubuntu/ noble-security main restricted 11 | deb http://archive.ubuntu.com/ubuntu/ noble-security universe multiverse 12 | deb-src http://archive.ubuntu.com/ubuntu/ noble-security 
universe multiverse 13 | -------------------------------------------------------------------------------- /sources.list.arm: -------------------------------------------------------------------------------- 1 | deb http://ports.ubuntu.com/ubuntu-ports/ noble main restricted multiverse 2 | deb-src http://ports.ubuntu.com/ubuntu-ports/ noble main restricted multiverse 3 | deb http://ports.ubuntu.com/ubuntu-ports/ noble-updates main restricted multiverse 4 | deb-src http://ports.ubuntu.com/ubuntu-ports/ noble-updates main restricted multiverse 5 | deb http://ports.ubuntu.com/ubuntu-ports/ noble universe 6 | deb-src http://ports.ubuntu.com/ubuntu-ports/ noble universe 7 | deb http://ports.ubuntu.com/ubuntu-ports/ noble-updates universe 8 | deb-src http://ports.ubuntu.com/ubuntu-ports/ noble-updates universe 9 | deb http://ports.ubuntu.com/ubuntu-ports/ noble-security main restricted multiverse 10 | deb-src http://ports.ubuntu.com/ubuntu-ports/ noble-security main restricted multiverse 11 | deb http://ports.ubuntu.com/ubuntu-ports/ noble-security universe 12 | deb-src http://ports.ubuntu.com/ubuntu-ports/ noble-security universe 13 | --------------------------------------------------------------------------------