├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── Bug_report.md │ ├── Feature_request.md │ └── config.yml ├── dependabot.yml ├── release.yml └── workflows │ ├── codespell.yml │ ├── community-containers.yml │ ├── dependency-updates.yml │ ├── docker-lint.yml │ ├── helm-release.yml │ ├── imaginary-update.yml │ ├── json-validator.yml │ ├── lint-helm.yml │ ├── lint-php.yml │ ├── lock-threads.yml │ ├── nextcloud-update.yml │ ├── php-deprecation-detector.yml │ ├── playwright.yml │ ├── psalm-update-baseline.yml │ ├── psalm.yml │ ├── shellcheck.yml │ ├── talk.yml │ ├── twig-lint.yml │ ├── update-copyright.yml │ ├── update-helm.yml │ └── update-yaml.yml ├── .gitignore ├── Containers ├── alpine │ └── Dockerfile ├── apache │ ├── Caddyfile │ ├── Dockerfile │ ├── healthcheck.sh │ ├── nextcloud.conf │ ├── start.sh │ └── supervisord.conf ├── borgbackup │ ├── Dockerfile │ ├── backupscript.sh │ ├── borg_excludes │ └── start.sh ├── clamav │ ├── Dockerfile │ ├── healthcheck.sh │ ├── start.sh │ └── supervisord.conf ├── collabora │ ├── Dockerfile │ └── healthcheck.sh ├── docker-socket-proxy │ ├── Dockerfile │ ├── haproxy.cfg │ ├── healthcheck.sh │ └── start.sh ├── domaincheck │ ├── Dockerfile │ ├── lighttpd.conf │ └── start.sh ├── fulltextsearch │ ├── Dockerfile │ └── healthcheck.sh ├── imaginary │ ├── Dockerfile │ ├── healthcheck.sh │ └── start.sh ├── mastercontainer │ ├── Caddyfile │ ├── Dockerfile │ ├── backup-time-file-watcher.sh │ ├── cron.sh │ ├── daily-backup.sh │ ├── healthcheck.sh │ ├── mastercontainer.conf │ ├── session-deduplicator.sh │ ├── start.sh │ └── supervisord.conf ├── nextcloud │ ├── Dockerfile │ ├── config │ │ ├── aio.config.php │ │ ├── apcu.config.php │ │ ├── apps.config.php │ │ ├── proxy.config.php │ │ ├── redis.config.php │ │ ├── reverse-proxy.config.php │ │ ├── s3.config.php │ │ ├── smtp.config.php │ │ └── swift.config.php │ ├── cron.sh │ ├── entrypoint.sh │ ├── healthcheck.sh │ ├── notify-all.sh │ ├── notify.sh │ ├── root.motd │ ├── run-exec-commands.sh │ 
├── start.sh │ ├── supervisord.conf │ └── upgrade.exclude ├── notify-push │ ├── Dockerfile │ ├── healthcheck.sh │ └── start.sh ├── onlyoffice │ ├── Dockerfile │ └── healthcheck.sh ├── postgresql │ ├── Dockerfile │ ├── healthcheck.sh │ ├── init-user-db.sh │ └── start.sh ├── redis │ ├── Dockerfile │ ├── healthcheck.sh │ └── start.sh ├── talk-recording │ ├── Dockerfile │ ├── healthcheck.sh │ ├── recording.conf │ └── start.sh ├── talk │ ├── Dockerfile │ ├── healthcheck.sh │ ├── server.conf.in │ ├── start.sh │ └── supervisord.conf ├── watchtower │ ├── Dockerfile │ └── start.sh └── whiteboard │ ├── Dockerfile │ ├── healthcheck.sh │ └── start.sh ├── LICENSE ├── app ├── .editorconfig ├── appinfo │ └── info.xml ├── composer │ ├── autoload.php │ ├── composer.json │ ├── composer.lock │ └── composer │ │ ├── ClassLoader.php │ │ ├── InstalledVersions.php │ │ ├── LICENSE │ │ ├── autoload_classmap.php │ │ ├── autoload_namespaces.php │ │ ├── autoload_psr4.php │ │ ├── autoload_real.php │ │ ├── autoload_static.php │ │ ├── installed.json │ │ └── installed.php ├── lib │ └── Settings │ │ └── Admin.php ├── readme.md └── templates │ └── admin.php ├── community-containers ├── borgbackup-viewer │ ├── borgbackup-viewer.json │ └── readme.md ├── caddy │ ├── caddy.json │ └── readme.md ├── calcardbackup │ ├── calcardbackup.json │ └── readme.md ├── dlna │ ├── dlna.json │ └── readme.md ├── facerecognition │ ├── facerecognition.json │ └── readme.md ├── fail2ban │ ├── fail2ban.json │ └── readme.md ├── helloworld │ ├── helloworld.json │ └── readme.md ├── jellyfin │ ├── jellyfin.json │ └── readme.md ├── jellyseerr │ ├── jellyseerr.json │ └── readme.md ├── libretranslate │ ├── libretranslate.json │ └── readme.md ├── lldap │ ├── lldap.json │ └── readme.md ├── local-ai │ ├── local-ai.json │ └── readme.md ├── makemkv │ ├── makemkv.json │ └── readme.md ├── memories │ ├── memories.json │ └── readme.md ├── nocodb │ ├── nocodb.json │ └── readme.md ├── npmplus │ ├── npmplus.json │ └── readme.md ├── pi-hole │ 
├── pi-hole.json │ └── readme.md ├── plex │ ├── plex.json │ └── readme.md ├── readme.md ├── scrutiny │ ├── readme.md │ └── scrutiny.json ├── smbserver │ ├── readme.md │ └── smbserver.json ├── stalwart │ ├── readme.md │ └── stalwart.json └── vaultwarden │ ├── readme.md │ └── vaultwarden.json ├── compose.yaml ├── develop.md ├── docker-ipv6-support.md ├── docker-rootless.md ├── local-instance.md ├── manual-install ├── latest.yml ├── readme.md ├── sample.conf └── update-yaml.sh ├── manual-upgrade.md ├── migration.md ├── multiple-instances.md ├── nextcloud-aio-helm-chart ├── Chart.yaml ├── readme.md ├── templates │ ├── nextcloud-aio-apache-deployment.yaml │ ├── nextcloud-aio-apache-persistentvolumeclaim.yaml │ ├── nextcloud-aio-apache-service.yaml │ ├── nextcloud-aio-clamav-deployment.yaml │ ├── nextcloud-aio-clamav-persistentvolumeclaim.yaml │ ├── nextcloud-aio-clamav-service.yaml │ ├── nextcloud-aio-collabora-deployment.yaml │ ├── nextcloud-aio-collabora-service.yaml │ ├── nextcloud-aio-database-deployment.yaml │ ├── nextcloud-aio-database-dump-persistentvolumeclaim.yaml │ ├── nextcloud-aio-database-persistentvolumeclaim.yaml │ ├── nextcloud-aio-database-service.yaml │ ├── nextcloud-aio-elasticsearch-persistentvolumeclaim.yaml │ ├── nextcloud-aio-fulltextsearch-deployment.yaml │ ├── nextcloud-aio-fulltextsearch-service.yaml │ ├── nextcloud-aio-imaginary-deployment.yaml │ ├── nextcloud-aio-imaginary-service.yaml │ ├── nextcloud-aio-namespace-namespace.yaml │ ├── nextcloud-aio-networkpolicy.yaml │ ├── nextcloud-aio-nextcloud-data-persistentvolumeclaim.yaml │ ├── nextcloud-aio-nextcloud-deployment.yaml │ ├── nextcloud-aio-nextcloud-persistentvolumeclaim.yaml │ ├── nextcloud-aio-nextcloud-service.yaml │ ├── nextcloud-aio-nextcloud-trusted-cacerts-persistentvolumeclaim.yaml │ ├── nextcloud-aio-notify-push-deployment.yaml │ ├── nextcloud-aio-notify-push-service.yaml │ ├── nextcloud-aio-onlyoffice-deployment.yaml │ ├── nextcloud-aio-onlyoffice-persistentvolumeclaim.yaml │ 
├── nextcloud-aio-onlyoffice-service.yaml │ ├── nextcloud-aio-redis-deployment.yaml │ ├── nextcloud-aio-redis-persistentvolumeclaim.yaml │ ├── nextcloud-aio-redis-service.yaml │ ├── nextcloud-aio-talk-deployment.yaml │ ├── nextcloud-aio-talk-recording-deployment.yaml │ ├── nextcloud-aio-talk-recording-persistentvolumeclaim.yaml │ ├── nextcloud-aio-talk-recording-service.yaml │ ├── nextcloud-aio-talk-service.yaml │ ├── nextcloud-aio-whiteboard-deployment.yaml │ └── nextcloud-aio-whiteboard-service.yaml ├── update-helm.sh └── values.yaml ├── php ├── README.md ├── composer.json ├── composer.lock ├── containers-schema.json ├── containers.json ├── data │ └── .gitkeep ├── domain-validator.php ├── psalm-baseline.xml ├── psalm.xml ├── public │ ├── automatic_reload.js │ ├── before-unload.js │ ├── containers-form-submit.js │ ├── disable-clamav.js │ ├── disable-collabora.js │ ├── disable-docker-socket-proxy.js │ ├── disable-fulltextsearch.js │ ├── disable-imaginary.js │ ├── disable-onlyoffice.js │ ├── disable-talk-recording.js │ ├── disable-talk.js │ ├── disable-whiteboard.js │ ├── forms.js │ ├── img │ │ ├── favicon.png │ │ ├── jenna-kim-the-globe-dark.webp │ │ ├── jenna-kim-the-globe.webp │ │ └── nextcloud-logo.svg │ ├── index.php │ ├── robots.txt │ ├── second-tab-warning.js │ ├── style.css │ ├── timezone.js │ └── toggle-dark-mode.js ├── session │ └── .gitkeep ├── src │ ├── Auth │ │ ├── AuthManager.php │ │ └── PasswordGenerator.php │ ├── Container │ │ ├── AioVariables.php │ │ ├── Container.php │ │ ├── ContainerEnvironmentVariables.php │ │ ├── ContainerPort.php │ │ ├── ContainerPorts.php │ │ ├── ContainerState.php │ │ ├── ContainerVolume.php │ │ ├── ContainerVolumes.php │ │ └── VersionState.php │ ├── ContainerDefinitionFetcher.php │ ├── Controller │ │ ├── ConfigurationController.php │ │ ├── DockerController.php │ │ └── LoginController.php │ ├── Cron │ │ ├── BackupNotification.php │ │ ├── CheckBackup.php │ │ ├── CheckFreeDiskSpace.php │ │ ├── CreateBackup.php │ │ ├── 
OutdatedNotification.php │ │ ├── StartAndUpdateContainers.php │ │ ├── StartContainers.php │ │ ├── StopContainers.php │ │ ├── UpdateMastercontainer.php │ │ └── UpdateNotification.php │ ├── Data │ │ ├── ConfigurationManager.php │ │ ├── DataConst.php │ │ ├── InvalidSettingConfigurationException.php │ │ └── Setup.php │ ├── DependencyInjection.php │ ├── Docker │ │ ├── DockerActionManager.php │ │ ├── DockerHubManager.php │ │ └── GitHubContainerRegistryManager.php │ ├── Middleware │ │ └── AuthMiddleware.php │ └── Twig │ │ ├── ClassExtension.php │ │ └── CsrfExtension.php ├── templates │ ├── already-installed.twig │ ├── components │ │ └── container-state.twig │ ├── containers.twig │ ├── includes │ │ ├── aio-config.twig │ │ ├── backup-dirs.twig │ │ ├── community-containers.twig │ │ └── optional-containers.twig │ ├── layout.twig │ ├── login.twig │ └── setup.twig └── tests │ ├── .gitignore │ ├── package-lock.json │ ├── package.json │ ├── playwright.config.js │ └── tests │ ├── initial-setup.spec.js │ └── restore-instance.spec.js ├── readme.md ├── reverse-proxy.md └── tests └── QA ├── 001-initial-setup.md ├── 002-new-instance.md ├── 003-automatic-login.md ├── 004-initial-backup.md ├── 010-restore-instance.md ├── 020-backup-and-restore.md ├── 030-aio-password-change.md ├── 040-login-behavior.md ├── 050-optional-addons.md ├── 060-environmental-variables.md ├── 070-timezone-change.md ├── 080-daily-backup-script.md ├── assets └── backup-archive │ └── readme.md └── readme.md /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/Bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: 🐛 Bug report - no questions and no support! 3 | about: Help us improving by reporting a bug - this category is not for questions and also not for support! 
Please use one of the options below for questions and support 4 | labels: 0. Needs triage 5 | --- 6 | 7 | 13 | 14 | 15 | ### Steps to reproduce 16 | 1. 17 | 2. 18 | 3. 19 | 20 | ### Expected behavior 21 | 22 | ### Actual behavior 23 | 24 | 25 | ### Other information 26 | #### Host OS 27 | 28 | #### Output of `sudo docker info` 29 | 30 | #### Docker run command or docker-compose file that you used 31 | 32 | #### Other valuable info 33 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/Feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: 📖 Existing feature/documentation enhancement 3 | about: Suggest an enhancement of an existing feature/documentation - for other types, please use the feature request option below 4 | labels: 0. Needs triage 5 | --- 6 | 7 | 8 | ### Is your feature request related to a problem? Please describe. 9 | 10 | 11 | ### Describe the solution you'd like 12 | 13 | 14 | ### Describe alternatives you've considered 15 | 16 | 17 | ### Additional context 18 | 19 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: 📘 Documentation on Nextcloud AIO 4 | url: https://github.com/nextcloud/all-in-one#faq 5 | about: Please read the docs first before submitting any report or request! 
6 | - name: ⛑️ General questions and support 7 | url: https://help.nextcloud.com/tag/aio 8 | about: For general questions, support and help 9 | - name: 💡 Suggest a new feature or discuss one 10 | url: https://github.com/nextcloud/all-in-one/discussions/categories/ideas 11 | about: For new feature requests and discussion of existing ones 12 | - name: ❓ Questions about Nextcloud AIO 13 | url: https://github.com/nextcloud/all-in-one/discussions/categories/questions 14 | about: For questions specifically about AIO 15 | - name: 💼 Nextcloud Enterprise 16 | url: https://portal.nextcloud.com/ 17 | about: If you are a Nextcloud Enterprise customer, or need Professional support, contact us here so it can be resolved directly by our dedicated engineers more quickly 18 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: ".github/workflows" 5 | schedule: 6 | interval: "daily" 7 | time: "12:00" 8 | open-pull-requests-limit: 10 9 | rebase-strategy: "disabled" 10 | labels: 11 | - 3. to review 12 | - dependencies 13 | - package-ecosystem: composer 14 | directory: "/php/" 15 | schedule: 16 | interval: "daily" 17 | time: "12:00" 18 | open-pull-requests-limit: 10 19 | rebase-strategy: "auto" 20 | labels: 21 | - 3. 
to review 22 | - dependencies 23 | - package-ecosystem: "docker" 24 | directories: 25 | - "/Containers/alpine" 26 | - "/Containers/apache" 27 | - "/Containers/borgbackup" 28 | - "/Containers/clamav" 29 | - "/Containers/collabora" 30 | - "/Containers/docker-socket-proxy" 31 | - "/Containers/domaincheck" 32 | - "/Containers/fulltextsearch" 33 | - "/Containers/imaginary" 34 | - "/Containers/mastercontainer" 35 | - "/Containers/nextcloud" 36 | - "/Containers/notify-push" 37 | - "/Containers/onlyoffice" 38 | - "/Containers/postgresql" 39 | - "/Containers/redis" 40 | - "/Containers/talk" 41 | - "/Containers/talk-recording" 42 | - "/Containers/watchtower" 43 | - "/Containers/whiteboard" 44 | schedule: 45 | interval: "daily" 46 | time: "04:00" 47 | open-pull-requests-limit: 10 48 | rebase-strategy: "disabled" 49 | labels: 50 | - 3. to review 51 | - dependencies 52 | ignore: 53 | - dependency-name: "php" 54 | update-types: ["version-update:semver-major", "version-update:semver-minor"] 55 | - dependency-name: "postgres" 56 | update-types: ["version-update:semver-major"] 57 | - dependency-name: "redis" 58 | update-types: ["version-update:semver-major", "version-update:semver-minor"] 59 | - dependency-name: "elasticsearch" 60 | update-types: ["version-update:semver-major"] 61 | -------------------------------------------------------------------------------- /.github/release.yml: -------------------------------------------------------------------------------- 1 | changelog: 2 | categories: 3 | - title: 🏕 New features and other improvements 4 | labels: 5 | - enhancement 6 | - title: 🐞 Fixed bugs 7 | labels: 8 | - bug 9 | - title: 👒 Updated dependencies 10 | labels: 11 | - dependencies 12 | - title: 📄 Improved documentation 13 | labels: 14 | - documentation 15 | -------------------------------------------------------------------------------- /.github/workflows/codespell.yml: -------------------------------------------------------------------------------- 1 | name: 'Codespell' 2 | 
3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - main 8 | 9 | jobs: 10 | codespell: 11 | name: Check spelling 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Check out code 15 | uses: actions/checkout@v4 16 | - name: Check spelling 17 | uses: codespell-project/actions-codespell@406322ec52dd7b488e48c1c4b82e2a8b3a1bf630 # v2 18 | with: 19 | check_filenames: true 20 | check_hidden: true 21 | -------------------------------------------------------------------------------- /.github/workflows/community-containers.yml: -------------------------------------------------------------------------------- 1 | name: Validate community containers 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - 'community-containers/**' 7 | push: 8 | branches: 9 | - main 10 | paths: 11 | - 'community-containers/**' 12 | 13 | jobs: 14 | validator-community-containers: 15 | name: Validate community containers 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v4 20 | - name: Validate structure 21 | run: | 22 | CONTAINERS="$(find ./community-containers -mindepth 1 -maxdepth 1 -type d)" 23 | mapfile -t CONTAINERS <<< "$CONTAINERS" 24 | for container in "${CONTAINERS[@]}"; do 25 | container="$(echo "$container" | sed 's|./community-containers/||')" 26 | if ! [ -f ./community-containers/"$container"/"$container.json" ]; then 27 | echo ".json file must be named like its parent folder $container" 28 | FAIL=1 29 | fi 30 | if ! [ -f ./community-containers/"$container"/readme.md ]; then 31 | echo "There must be a readme.md file in the folder!" 
32 | FAIL=1 33 | fi 34 | if [ -n "$FAIL" ]; then 35 | exit 1 36 | fi 37 | done 38 | -------------------------------------------------------------------------------- /.github/workflows/docker-lint.yml: -------------------------------------------------------------------------------- 1 | name: Docker Lint 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - 'Containers/**' 7 | push: 8 | branches: 9 | - main 10 | paths: 11 | - 'Containers/**' 12 | 13 | permissions: 14 | contents: read 15 | 16 | concurrency: 17 | group: docker-lint-${{ github.head_ref || github.run_id }} 18 | cancel-in-progress: true 19 | 20 | jobs: 21 | docker-lint: 22 | runs-on: ubuntu-latest 23 | 24 | name: docker-lint 25 | 26 | steps: 27 | - name: Checkout 28 | uses: actions/checkout@v4 29 | 30 | - name: Install hadolint 31 | run: | 32 | sudo wget https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -O /usr/bin/hadolint 33 | sudo chmod +x /usr/bin/hadolint 34 | 35 | - name: run lint 36 | run: | 37 | DOCKERFILES="$(find ./Containers -name Dockerfile)" 38 | mapfile -t DOCKERFILES <<< "$DOCKERFILES" 39 | for file in "${DOCKERFILES[@]}"; do 40 | # DL3018 warning: Pin versions in apk add. Instead of `apk add ` use `apk add =` 41 | # DL4006 warning: Set the SHELL option -o pipefail before RUN with a pipe in it. 
If you are using /bin/sh in an alpine image or if your shell is symlinked to busybox then consider explicitly setting your SHELL to /bin/ash, or disable this check 42 | hadolint "$file" --ignore DL3018 --ignore DL4006 | tee -a ./hadolint.log 43 | done 44 | if grep -q "DL[0-9]\+\|SC[0-9]\+" ./hadolint.log; then 45 | exit 1 46 | fi 47 | -------------------------------------------------------------------------------- /.github/workflows/helm-release.yml: -------------------------------------------------------------------------------- 1 | 2 | name: Helm Chart Releaser 3 | 4 | on: 5 | push: 6 | branches: 7 | - main 8 | paths: 9 | - 'nextcloud-aio-helm-chart/**' 10 | 11 | jobs: 12 | release: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@v4 17 | 18 | - name: Turnstyle 19 | uses: softprops/turnstyle@f9f8ef3f634144b126a09ea5b3bfe51ddebc700f # v2 20 | with: 21 | continue-after-seconds: 180 22 | env: 23 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 24 | 25 | - name: Fetch history 26 | run: git fetch --prune --unshallow 27 | 28 | - name: Configure Git 29 | run: | 30 | git config user.name "$GITHUB_ACTOR" 31 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com" 32 | 33 | # See https://github.com/helm/chart-releaser-action/issues/6 34 | - name: Set up Helm 35 | uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4 36 | with: 37 | version: v3.6.3 38 | 39 | - name: Run Helm Lint 40 | run: | 41 | helm lint ./nextcloud-aio-helm-chart 42 | 43 | - name: Run chart-releaser 44 | uses: helm/chart-releaser-action@cae68fefc6b5f367a0275617c9f83181ba54714f # v1.7.0 45 | with: 46 | mark_as_latest: false 47 | charts_dir: . 
48 | env: 49 | CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 50 | CR_RELEASE_NAME_TEMPLATE: "helm-chart-{{ .Version }}" 51 | CR_SKIP_EXISTING: true 52 | -------------------------------------------------------------------------------- /.github/workflows/imaginary-update.yml: -------------------------------------------------------------------------------- 1 | name: imaginary-update 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: '00 12 * * *' 7 | 8 | jobs: 9 | run_update: 10 | name: update to latest imaginary commit on master branch 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | - name: Run imaginary-update 15 | run: | 16 | # Imaginary 17 | imaginary_version="$( 18 | git ls-remote https://github.com/h2non/imaginary master \ 19 | | cut -f1 \ 20 | | tail -1 21 | )" 22 | sed -i "s|^ENV IMAGINARY_HASH.*$|ENV IMAGINARY_HASH=$imaginary_version|" ./Containers/imaginary/Dockerfile 23 | 24 | - name: Create Pull Request 25 | uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7 26 | with: 27 | commit-message: imaginary-update automated change 28 | signoff: true 29 | title: Imaginary update 30 | body: Automated Imaginary container update 31 | labels: dependencies, 3. to review 32 | milestone: next 33 | branch: imaginary-container-update 34 | -------------------------------------------------------------------------------- /.github/workflows/json-validator.yml: -------------------------------------------------------------------------------- 1 | name: Json Validator 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - '**.json' 7 | push: 8 | branches: 9 | - main 10 | paths: 11 | - '**.json' 12 | 13 | jobs: 14 | json-validator: 15 | name: Json Validator 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v4 20 | - name: Validate Json 21 | run: | 22 | sudo apt-get update 23 | sudo apt-get install python3-venv -y --no-install-recommends 24 | python3 -m venv venv 25 | . 
venv/bin/activate 26 | pip3 install json-spec 27 | if ! json validate --schema-file=php/containers-schema.json --document-file=php/containers.json; then 28 | exit 1 29 | fi 30 | JSON_FILES="$(find ./community-containers -name '*.json')" 31 | mapfile -t JSON_FILES <<< "$JSON_FILES" 32 | for file in "${JSON_FILES[@]}"; do 33 | json validate --schema-file=php/containers-schema.json --document-file="$file" 2>&1 | tee -a ./json-validator.log 34 | done 35 | if grep -q "document does not validate with schema.\|invalid JSONFile" ./json-validator.log; then 36 | exit 1 37 | fi 38 | -------------------------------------------------------------------------------- /.github/workflows/lint-helm.yml: -------------------------------------------------------------------------------- 1 | name: Lint Helm Charts 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | paths: 7 | - 'nextcloud-aio-helm-chart/**' 8 | 9 | jobs: 10 | lint-helm: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v4 15 | with: 16 | fetch-depth: 0 17 | 18 | - name: Install Helm 19 | uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4 20 | with: 21 | version: v3.11.1 22 | 23 | - name: Lint charts 24 | run: helm lint nextcloud-aio-helm-chart 25 | -------------------------------------------------------------------------------- /.github/workflows/lint-php.yml: -------------------------------------------------------------------------------- 1 | # This workflow is provided via the organization template repository 2 | # 3 | # https://github.com/nextcloud/.github 4 | # https://docs.github.com/en/actions/learn-github-actions/sharing-workflows-with-your-organization 5 | 6 | name: Lint php 7 | 8 | on: 9 | pull_request: 10 | paths: 11 | - 'php/**' 12 | push: 13 | branches: 14 | - main 15 | paths: 16 | - 'php/**' 17 | 18 | permissions: 19 | contents: read 20 | 21 | concurrency: 22 | group: lint-php-${{ github.head_ref || github.run_id }} 23 | cancel-in-progress: true 
24 | 25 | jobs: 26 | php-lint: 27 | runs-on: ubuntu-latest 28 | strategy: 29 | matrix: 30 | php-versions: [ "8.4" ] 31 | 32 | name: php-lint 33 | 34 | steps: 35 | - name: Checkout 36 | uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 37 | 38 | - name: Set up php ${{ matrix.php-versions }} 39 | uses: shivammathur/setup-php@cf4cade2721270509d5b1c766ab3549210a39a2a # v2 40 | with: 41 | php-version: ${{ matrix.php-versions }} 42 | coverage: none 43 | ini-file: development 44 | env: 45 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 46 | 47 | - name: Lint 48 | run: cd php && composer run lint 49 | 50 | summary: 51 | permissions: 52 | contents: none 53 | runs-on: ubuntu-latest-low 54 | needs: php-lint 55 | 56 | if: always() 57 | 58 | name: php-lint-summary 59 | 60 | steps: 61 | - name: Summary status 62 | run: if ${{ needs.php-lint.result != 'success' && needs.php-lint.result != 'skipped' }}; then exit 1; fi 63 | -------------------------------------------------------------------------------- /.github/workflows/lock-threads.yml: -------------------------------------------------------------------------------- 1 | name: 'Lock Threads' 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | 7 | permissions: 8 | issues: write 9 | 10 | concurrency: 11 | group: lock 12 | 13 | jobs: 14 | action: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: dessant/lock-threads@1bf7ec25051fe7c00bdd17e6a7cf3d7bfb7dc771 # v5 18 | with: 19 | issue-inactive-days: '14' 20 | process-only: 'issues' 21 | -------------------------------------------------------------------------------- /.github/workflows/php-deprecation-detector.yml: -------------------------------------------------------------------------------- 1 | name: PHP Deprecation Detector 2 | # See https://github.com/wapmorgan/PhpDeprecationDetector 3 | 4 | on: 5 | pull_request: 6 | paths: 7 | - 'php/**' 8 | push: 9 | branches: 10 | - main 11 | paths: 12 | - 'php/**' 13 | 14 | jobs: 15 | phpdd: 16 | name: PHP Deprecation 
Detector 17 | runs-on: ubuntu-latest 18 | steps: 19 | - uses: actions/checkout@v4 20 | - name: Set up php 21 | uses: shivammathur/setup-php@cf4cade2721270509d5b1c766ab3549210a39a2a # v2 22 | with: 23 | php-version: 8.4 24 | extensions: apcu 25 | coverage: none 26 | 27 | - name: Run script 28 | run: | 29 | set -x 30 | cd php 31 | composer install 32 | composer run php-deprecation-detector | tee -i ./phpdd.log 33 | if grep "Total issues:" ./phpdd.log; then 34 | exit 1 35 | fi 36 | -------------------------------------------------------------------------------- /.github/workflows/psalm-update-baseline.yml: -------------------------------------------------------------------------------- 1 | name: Update Psalm baseline 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: '5 4 * * *' 7 | 8 | jobs: 9 | update-psalm-baseline: 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - uses: actions/checkout@v4 14 | 15 | - name: Set up php 16 | uses: shivammathur/setup-php@cf4cade2721270509d5b1c766ab3549210a39a2a # v2 17 | with: 18 | php-version: 8.4 19 | extensions: apcu 20 | coverage: none 21 | 22 | - name: Run script 23 | run: | 24 | set -x 25 | cd php 26 | composer install 27 | composer run psalm:update-baseline 28 | git clean -f lib/composer 29 | git checkout composer.json composer.lock lib/composer 30 | continue-on-error: true 31 | 32 | - name: Create Pull Request 33 | uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7 34 | with: 35 | token: ${{ secrets.COMMAND_BOT_PAT }} 36 | commit-message: Update psalm baseline 37 | committer: GitHub 38 | author: nextcloud-command 39 | signoff: true 40 | branch: automated/noid/psalm-baseline-update 41 | title: '[Automated] Update psalm-baseline.xml' 42 | milestone: next 43 | body: | 44 | Auto-generated update psalm-baseline.xml with fixed psalm warnings 45 | labels: | 46 | 3. 
to review, dependencies 47 | -------------------------------------------------------------------------------- /.github/workflows/psalm.yml: -------------------------------------------------------------------------------- 1 | # This workflow is provided via the organization template repository 2 | # 3 | # https://github.com/nextcloud/.github 4 | # https://docs.github.com/en/actions/learn-github-actions/sharing-workflows-with-your-organization 5 | 6 | name: Static analysis 7 | 8 | on: 9 | pull_request: 10 | paths: 11 | - 'php/**' 12 | push: 13 | branches: 14 | - main 15 | paths: 16 | - 'php/**' 17 | 18 | concurrency: 19 | group: psalm-${{ github.head_ref || github.run_id }} 20 | cancel-in-progress: true 21 | 22 | jobs: 23 | static-analysis: 24 | runs-on: ubuntu-latest 25 | 26 | name: static-psalm-analysis 27 | steps: 28 | - name: Checkout 29 | uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 30 | 31 | - name: Set up php 32 | uses: shivammathur/setup-php@cf4cade2721270509d5b1c766ab3549210a39a2a # v2 33 | with: 34 | php-version: 8.4 35 | extensions: apcu 36 | coverage: none 37 | ini-file: development 38 | env: 39 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 40 | 41 | - name: Install dependencies and run psalm 42 | run: | 43 | set -x 44 | cd php 45 | composer install 46 | composer run psalm 47 | -------------------------------------------------------------------------------- /.github/workflows/shellcheck.yml: -------------------------------------------------------------------------------- 1 | name: Shellcheck 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - '**.sh' 7 | push: 8 | branches: 9 | - main 10 | paths: 11 | - '**.sh' 12 | 13 | jobs: 14 | shellcheck: 15 | name: Check Shell 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v4 19 | - name: Run Shellcheck 20 | uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # v2.0.0 21 | with: 22 | check_together: 'yes' 23 | env: 24 | SHELLCHECK_OPTS: --shell bash 25 
| -------------------------------------------------------------------------------- /.github/workflows/twig-lint.yml: -------------------------------------------------------------------------------- 1 | name: Twig Lint 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - '**.twig' 7 | push: 8 | branches: 9 | - main 10 | paths: 11 | - '**.twig' 12 | 13 | permissions: 14 | contents: read 15 | 16 | concurrency: 17 | group: lint-twig-${{ github.head_ref || github.run_id }} 18 | cancel-in-progress: true 19 | 20 | jobs: 21 | twig-lint: 22 | runs-on: ubuntu-latest 23 | name: twig-lint 24 | 25 | steps: 26 | - name: Checkout 27 | uses: actions/checkout@v4 28 | 29 | - name: Set up php ${{ matrix.php-versions }} 30 | uses: shivammathur/setup-php@cf4cade2721270509d5b1c766ab3549210a39a2a # v2 31 | with: 32 | php-version: 8.4 33 | extensions: apcu 34 | coverage: none 35 | 36 | - name: twig lint 37 | run: | 38 | cd php 39 | composer install 40 | composer run lint:twig 41 | -------------------------------------------------------------------------------- /.github/workflows/update-copyright.yml: -------------------------------------------------------------------------------- 1 | name: Update Copyright 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | jobs: 7 | update-copyright: 8 | name: update copyright 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | -------------------------------------------------------------------------------- /.github/workflows/update-helm.yml: -------------------------------------------------------------------------------- 1 | name: Update Helm Chart 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: '00 12 * * *' 7 | 8 | jobs: 9 | update-helm: 10 | name: update helm chart 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout code 14 | uses: actions/checkout@v4 15 | - name: update helm chart 16 | run: | 17 | set -x 18 | GHCR_TOKEN="$(curl https://ghcr.io/token?scope=repository:nextcloud-releases/nce-php-fpm-mgmt:pull | jq 
'.token' | sed 's|"||g')" 19 | DOCKER_TAG="$(curl -H "Authorization: Bearer ${GHCR_TOKEN}" -L -s 'https://ghcr.io/v2/nextcloud-releases/all-in-one/tags/list?page_size=1024' | jq '.tags' | sed 's|"||g;s|[[:space:]]||g;s|,||g' | grep '^20[0-9_]\+' | grep -v latest | sort -r | head -1)" 20 | export DOCKER_TAG 21 | set +x 22 | if [ -n "$DOCKER_TAG" ] && ! grep -q "aio-nextcloud:$DOCKER_TAG" ./nextcloud-aio-helm-chart/templates/nextcloud-aio-nextcloud-deployment.yaml; then 23 | sudo bash nextcloud-aio-helm-chart/update-helm.sh "$DOCKER_TAG" 24 | fi 25 | - name: Create Pull Request 26 | uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7 27 | with: 28 | commit-message: Helm Chart updates 29 | signoff: true 30 | title: Helm Chart updates 31 | body: Automated Helm Chart updates for the yaml files. It can be merged if it looks good at any time which will automatically trigger a new release of the helm chart. 32 | labels: dependencies, 3. to review 33 | milestone: next 34 | branch: aio-helm-update 35 | token: ${{ secrets.GITHUB_TOKEN }} 36 | -------------------------------------------------------------------------------- /.github/workflows/update-yaml.yml: -------------------------------------------------------------------------------- 1 | name: Update Yaml files 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: '00 12 * * *' 7 | 8 | jobs: 9 | update-yaml: 10 | name: update yaml files 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout code 14 | uses: actions/checkout@v4 15 | - name: update yaml files 16 | run: | 17 | sudo bash manual-install/update-yaml.sh 18 | - name: Create Pull Request 19 | uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7 20 | with: 21 | commit-message: Yaml updates 22 | signoff: true 23 | title: Yaml updates 24 | body: Automated yaml updates for the docker-compose files. Should only be merged shortly before the next latest release. 25 | labels: dependencies, 3. 
to review 26 | milestone: next 27 | branch: aio-yaml-update 28 | token: ${{ secrets.GITHUB_TOKEN }} 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .idea/ 3 | *.iml 4 | 5 | /php/data/* 6 | /php/session/* 7 | !/php/data/.gitkeep 8 | !/php/session/.gitkeep 9 | /php/vendor 10 | 11 | /manual-install/*.conf 12 | !/manual-install/sample.conf 13 | /manual-install/docker-compose.yml 14 | /manual-install/compose.yaml 15 | /manual-install/.env 16 | -------------------------------------------------------------------------------- /Containers/alpine/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:latest 2 | FROM alpine:3.21.3 3 | 4 | RUN set -ex; \ 5 | apk upgrade --no-cache -a 6 | 7 | LABEL org.label-schema.vendor="Nextcloud" 8 | -------------------------------------------------------------------------------- /Containers/apache/Caddyfile: -------------------------------------------------------------------------------- 1 | { 2 | auto_https disable_redirects 3 | 4 | storage file_system { 5 | root /mnt/data/caddy 6 | } 7 | 8 | servers { 9 | # trusted_proxies placeholder 10 | } 11 | 12 | log { 13 | level ERROR 14 | } 15 | } 16 | 17 | https://{$ADDITIONAL_TRUSTED_DOMAIN}:443, 18 | {$PROTOCOL}://{$NC_DOMAIN}:{$APACHE_PORT} { 19 | header -Server 20 | header -X-Powered-By 21 | 22 | # Collabora 23 | route /browser/* { 24 | reverse_proxy {$COLLABORA_HOST}:9980 25 | } 26 | route /hosting/* { 27 | reverse_proxy {$COLLABORA_HOST}:9980 28 | } 29 | route /cool/* { 30 | reverse_proxy {$COLLABORA_HOST}:9980 31 | } 32 | 33 | # Notify Push 34 | route /push/* { 35 | uri strip_prefix /push 36 | reverse_proxy {$NOTIFY_PUSH_HOST}:7867 37 | } 38 | 39 | # Onlyoffice 40 | route /onlyoffice/* { 41 | uri strip_prefix /onlyoffice 42 | reverse_proxy {$ONLYOFFICE_HOST}:80 { 43 | 
header_up X-Forwarded-Host {http.request.hostport}/onlyoffice 44 | header_up X-Forwarded-Proto https 45 | } 46 | } 47 | 48 | # Talk 49 | route /standalone-signaling/* { 50 | uri strip_prefix /standalone-signaling 51 | reverse_proxy {$TALK_HOST}:8081 52 | } 53 | 54 | # Whiteboard 55 | route /whiteboard/* { 56 | uri strip_prefix /whiteboard 57 | reverse_proxy {$WHITEBOARD_HOST}:3002 58 | } 59 | 60 | # Nextcloud 61 | route { 62 | header Strict-Transport-Security max-age=31536000; 63 | reverse_proxy 127.0.0.1:8000 64 | } 65 | redir /.well-known/carddav /remote.php/dav/ 301 66 | redir /.well-known/caldav /remote.php/dav/ 301 67 | 68 | # TLS options 69 | tls { 70 | issuer acme { 71 | disable_http_challenge 72 | } 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /Containers/apache/healthcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | nc -z "$NEXTCLOUD_HOST" 9000 || exit 0 4 | nc -z 127.0.0.1 8000 || exit 1 5 | nc -z 127.0.0.1 "$APACHE_PORT" || exit 1 6 | -------------------------------------------------------------------------------- /Containers/apache/nextcloud.conf: -------------------------------------------------------------------------------- 1 | Listen 8000 2 | 3 | ServerName localhost 4 | 5 | # Add error log 6 | CustomLog /proc/self/fd/1 proxy 7 | LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" proxy 8 | ErrorLog /proc/self/fd/2 9 | ErrorLogFormat "[%t] [%l] [%E] [client: %{X-Forwarded-For}i] [%M] [%{User-Agent}i]" 10 | LogLevel warn 11 | 12 | # PHP match 13 | 14 | SetHandler "proxy:fcgi://${NEXTCLOUD_HOST}:9000" 15 | 16 | 17 | 18 | 19 | 20 | # Enable Brotli compression for js, css and svg files - other plain files are compressed by Nextcloud by default 21 | 22 | AddOutputFilterByType BROTLI_COMPRESS text/javascript application/javascript application/x-javascript text/css image/svg+xml 23 | 
BrotliCompressionQuality 0 24 | 25 | 26 | # Nextcloud dir 27 | DocumentRoot /var/www/html/ 28 | 29 | Options Indexes FollowSymLinks 30 | Require all granted 31 | AllowOverride All 32 | Options FollowSymLinks MultiViews 33 | Satisfy Any 34 | 35 | Dav off 36 | 37 | 38 | # Deny access to .ht files 39 | 40 | Require all denied 41 | 42 | 43 | # See https://httpd.apache.org/docs/current/en/mod/core.html#limitrequestbody 44 | LimitRequestBody ${APACHE_MAX_SIZE} 45 | 46 | # See https://httpd.apache.org/docs/current/mod/core.html#timeout 47 | Timeout ${APACHE_MAX_TIME} 48 | 49 | # See https://httpd.apache.org/docs/current/mod/mod_proxy.html#proxytimeout 50 | ProxyTimeout ${APACHE_MAX_TIME} 51 | 52 | # See https://httpd.apache.org/docs/trunk/mod/core.html#traceenable 53 | TraceEnable Off 54 | 55 | -------------------------------------------------------------------------------- /Containers/apache/supervisord.conf: -------------------------------------------------------------------------------- 1 | [supervisord] 2 | nodaemon=true 3 | 4 | logfile=/var/log/supervisord/supervisord.log 5 | pidfile=/var/run/supervisord/supervisord.pid 6 | childlogdir=/var/log/supervisord/ 7 | logfile_maxbytes=50MB 8 | logfile_backups=10 9 | loglevel=error 10 | 11 | [program:apache] 12 | # Stdout logging is disabled as otherwise the logs are spammed 13 | stdout_logfile=NONE 14 | stderr_logfile=/dev/stderr 15 | stderr_logfile_maxbytes=0 16 | command=apachectl -DFOREGROUND 17 | 18 | [program:caddy] 19 | stdout_logfile=/dev/stdout 20 | stdout_logfile_maxbytes=0 21 | stderr_logfile=/dev/stderr 22 | stderr_logfile_maxbytes=0 23 | command=/usr/bin/caddy run --config /tmp/Caddyfile 24 | -------------------------------------------------------------------------------- /Containers/borgbackup/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:latest 2 | FROM alpine:3.21.3 3 | 4 | RUN set -ex; \ 5 | \ 6 | apk upgrade 
--no-cache -a; \ 7 | apk add --no-cache \ 8 | util-linux-misc \ 9 | bash \ 10 | borgbackup \ 11 | rsync \ 12 | fuse \ 13 | py3-llfuse \ 14 | jq \ 15 | openssh-client 16 | 17 | VOLUME /root 18 | 19 | COPY --chmod=770 *.sh / 20 | COPY borg_excludes / 21 | 22 | ENTRYPOINT ["/start.sh"] 23 | # hadolint ignore=DL3002 24 | USER root 25 | 26 | LABEL com.centurylinklabs.watchtower.enable="false" \ 27 | org.label-schema.vendor="Nextcloud" 28 | ENV BORG_RETENTION_POLICY="--keep-within=7d --keep-weekly=4 --keep-monthly=6" 29 | -------------------------------------------------------------------------------- /Containers/borgbackup/borg_excludes: -------------------------------------------------------------------------------- 1 | # These patterns need to be kept in sync with rsync and find excludes in backupscript.sh, 2 | # which use a different syntax (patterns appear in 3 places in total) 3 | nextcloud_aio_volumes/nextcloud_aio_apache/caddy/ 4 | nextcloud_aio_volumes/nextcloud_aio_mastercontainer/caddy/ 5 | nextcloud_aio_volumes/nextcloud_aio_nextcloud/data/nextcloud.log* 6 | nextcloud_aio_volumes/nextcloud_aio_nextcloud/data/audit.log 7 | nextcloud_aio_volumes/nextcloud_aio_mastercontainer/certs/ 8 | nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/daily_backup_running 9 | nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/session_date_file 10 | nextcloud_aio_volumes/nextcloud_aio_mastercontainer/session/ 11 | nextcloud_aio_volumes/nextcloud_aio_mastercontainer/data/id_borg* -------------------------------------------------------------------------------- /Containers/clamav/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:latest 2 | FROM alpine:3.21.3 3 | 4 | RUN set -ex; \ 5 | apk upgrade --no-cache -a; \ 6 | apk add --no-cache tzdata clamav supervisor bash; \ 7 | mkdir -p /run/clamav /var/log/supervisord /var/run/supervisord; \ 8 | chmod 777 -R /run/clamav /var/log/clamav 
/var/log/supervisord /var/run/supervisord; \ 9 | sed -i "s|#\?MaxDirectoryRecursion.*|MaxDirectoryRecursion 30|g" /etc/clamav/clamd.conf; \ 10 | sed -i "s|#\?MaxFileSize.*|MaxFileSize 2G|g" /etc/clamav/clamd.conf; \ 11 | sed -i "s|#\?PCREMaxFileSize.*|PCREMaxFileSize aio-placeholder|g" /etc/clamav/clamd.conf; \ 12 | sed -i "s|#\?StreamMaxLength.*|StreamMaxLength aio-placeholder|g" /etc/clamav/clamd.conf; \ 13 | sed -i "s|#\?TCPSocket|TCPSocket|g" /etc/clamav/clamd.conf; \ 14 | sed -i "s|^LocalSocket .*|LocalSocket /tmp/clamd.sock|g" /etc/clamav/clamd.conf; \ 15 | freshclam --foreground --stdout 16 | 17 | COPY --chmod=775 start.sh /start.sh 18 | COPY --chmod=775 healthcheck.sh /healthcheck.sh 19 | COPY --chmod=664 supervisord.conf /supervisord.conf 20 | 21 | USER 100 22 | VOLUME /var/lib/clamav 23 | ENTRYPOINT ["/start.sh"] 24 | CMD ["/usr/bin/supervisord", "-c", "/supervisord.conf"] 25 | LABEL com.centurylinklabs.watchtower.enable="false" \ 26 | org.label-schema.vendor="Nextcloud" 27 | HEALTHCHECK --start-period=60s --retries=9 CMD /healthcheck.sh 28 | -------------------------------------------------------------------------------- /Containers/clamav/healthcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$(echo "PING" | nc 127.0.0.1 3310)" != "PONG" ]; then 4 | echo "ERROR: Unable to contact server" 5 | exit 1 6 | fi 7 | 8 | echo "Clamd is up" 9 | exit 0 10 | -------------------------------------------------------------------------------- /Containers/clamav/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sed "s|aio-placeholder|$MAX_SIZE|" /etc/clamav/clamd.conf > /tmp/clamd.conf 4 | 5 | echo "Clamav started" 6 | 7 | exec "$@" 8 | -------------------------------------------------------------------------------- /Containers/clamav/supervisord.conf: -------------------------------------------------------------------------------- 1 | 
[supervisord] 2 | nodaemon=true 3 | 4 | logfile=/var/log/supervisord/supervisord.log 5 | pidfile=/var/run/supervisord/supervisord.pid 6 | childlogdir=/var/log/supervisord/ 7 | logfile_maxbytes=50MB 8 | logfile_backups=10 9 | loglevel=error 10 | 11 | [program:freshclam] 12 | stdout_logfile=/dev/stdout 13 | stdout_logfile_maxbytes=0 14 | stderr_logfile=/dev/stderr 15 | stderr_logfile_maxbytes=0 16 | command=freshclam --foreground --stdout --daemon --daemon-notify=/tmp/clamd.conf 17 | 18 | [program:clamd] 19 | stdout_logfile=/dev/stdout 20 | stdout_logfile_maxbytes=0 21 | stderr_logfile=/dev/stderr 22 | stderr_logfile_maxbytes=0 23 | command=clamd --foreground --config-file=/tmp/clamd.conf 24 | -------------------------------------------------------------------------------- /Containers/collabora/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:latest 2 | # From a file located probably somewhere here: https://github.com/CollaboraOnline/online/blob/master/docker/from-packages/Dockerfile 3 | FROM collabora/code:25.04.2.1.1 4 | 5 | USER root 6 | ARG DEBIAN_FRONTEND=noninteractive 7 | 8 | COPY --chmod=775 healthcheck.sh /healthcheck.sh 9 | 10 | USER 1001 11 | 12 | HEALTHCHECK --start-period=60s --retries=9 CMD /healthcheck.sh 13 | LABEL com.centurylinklabs.watchtower.enable="false" \ 14 | org.label-schema.vendor="Nextcloud" 15 | -------------------------------------------------------------------------------- /Containers/collabora/healthcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Unfortunately, no curl and no nc is installed in the container 4 | # and packages can also not be added as the package list is broken. 5 | # So always exiting 0 for now. 
6 | # nc http://127.0.0.1:9980 || exit 1 7 | exit 0 8 | -------------------------------------------------------------------------------- /Containers/docker-socket-proxy/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:latest 2 | FROM haproxy:3.2.0-alpine 3 | 4 | # hadolint ignore=DL3002 5 | USER root 6 | ENV NEXTCLOUD_HOST=nextcloud-aio-nextcloud 7 | RUN set -ex; \ 8 | apk upgrade --no-cache -a; \ 9 | apk add --no-cache \ 10 | ca-certificates \ 11 | tzdata \ 12 | bash \ 13 | bind-tools; \ 14 | chmod -R 777 /tmp 15 | 16 | COPY --chmod=775 *.sh / 17 | COPY --chmod=664 haproxy.cfg /haproxy.cfg 18 | 19 | ENTRYPOINT ["/start.sh"] 20 | HEALTHCHECK CMD /healthcheck.sh 21 | LABEL com.centurylinklabs.watchtower.enable="false" \ 22 | org.label-schema.vendor="Nextcloud" 23 | -------------------------------------------------------------------------------- /Containers/docker-socket-proxy/healthcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | nc -z "$NEXTCLOUD_HOST" 9001 || exit 0 4 | nc -z 127.0.0.1 2375 || exit 1 5 | -------------------------------------------------------------------------------- /Containers/docker-socket-proxy/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Only start container if nextcloud is accessible 4 | while ! nc -z "$NEXTCLOUD_HOST" 9001; do 5 | echo "Waiting for Nextcloud to start..." 
6 | sleep 5 7 | done 8 | 9 | set -x 10 | IPv4_ADDRESS_NC="$(dig nextcloud-aio-nextcloud IN A +short +search | grep '^[0-9.]\+$' | sort | head -n1)" 11 | HAPROXYFILE="$(sed "s|NC_IPV4_PLACEHOLDER|$IPv4_ADDRESS_NC|" /haproxy.cfg)" 12 | echo "$HAPROXYFILE" > /tmp/haproxy.cfg 13 | 14 | IPv6_ADDRESS_NC="$(dig nextcloud-aio-nextcloud AAAA +short +search | grep '^[0-9a-f:]\+$' | sort | head -n1)" 15 | if [ -n "$IPv6_ADDRESS_NC" ]; then 16 | HAPROXYFILE="$(sed "s|NC_IPV6_PLACEHOLDER|$IPv6_ADDRESS_NC|" /tmp/haproxy.cfg)" 17 | else 18 | HAPROXYFILE="$(sed "s# || { src NC_IPV6_PLACEHOLDER }##g" /tmp/haproxy.cfg)" 19 | fi 20 | echo "$HAPROXYFILE" > /tmp/haproxy.cfg 21 | set +x 22 | 23 | haproxy -f /tmp/haproxy.cfg -db 24 | -------------------------------------------------------------------------------- /Containers/domaincheck/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:latest 2 | FROM alpine:3.21.3 3 | RUN set -ex; \ 4 | apk upgrade --no-cache -a; \ 5 | apk add --no-cache bash lighttpd netcat-openbsd; \ 6 | adduser -S www-data -G www-data; \ 7 | rm -rf /etc/lighttpd/lighttpd.conf; \ 8 | chmod 777 -R /etc/lighttpd; \ 9 | mkdir -p /var/www/domaincheck; \ 10 | chown www-data:www-data -R /var/www; \ 11 | chmod 777 -R /var/www/domaincheck 12 | COPY --chown=www-data:www-data lighttpd.conf /lighttpd.conf 13 | 14 | COPY --chmod=775 start.sh /start.sh 15 | 16 | USER www-data 17 | ENTRYPOINT ["/start.sh"] 18 | 19 | HEALTHCHECK CMD nc -z 127.0.0.1 $APACHE_PORT || exit 1 20 | LABEL com.centurylinklabs.watchtower.enable="false" \ 21 | org.label-schema.vendor="Nextcloud" 22 | -------------------------------------------------------------------------------- /Containers/domaincheck/lighttpd.conf: -------------------------------------------------------------------------------- 1 | server.document-root = "/var/www/domaincheck/" 2 | 3 | server.port = env.APACHE_PORT 4 | 5 | server.username = "www-data" 6 | 
server.groupname = "www-data" 7 | 8 | mimetype.assign = ( 9 | ".html" => "text/html", 10 | ".txt" => "text/plain", 11 | ".jpg" => "image/jpeg", 12 | ".png" => "image/png" 13 | ) 14 | 15 | static-file.exclude-extensions = ( ".fcgi", ".php", ".rb", "~", ".inc" ) 16 | index-file.names = ( "index.html" ) 17 | 18 | $SERVER["socket"] == "ipv6-placeholder" { 19 | server.document-root = "/var/www/domaincheck/" 20 | } 21 | -------------------------------------------------------------------------------- /Containers/domaincheck/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -z "$INSTANCE_ID" ]; then 4 | echo "You need to provide an instance id." 5 | exit 1 6 | fi 7 | 8 | echo "$INSTANCE_ID" > /var/www/domaincheck/index.html 9 | 10 | if [ -z "$APACHE_PORT" ]; then 11 | export APACHE_PORT="443" 12 | fi 13 | 14 | CONF_FILE="$(sed "s|ipv6-placeholder|\[::\]:$APACHE_PORT|" /lighttpd.conf)" 15 | echo "$CONF_FILE" > /etc/lighttpd/lighttpd.conf 16 | 17 | # Check config file 18 | lighttpd -tt -f /etc/lighttpd/lighttpd.conf 19 | 20 | # Run server 21 | lighttpd -D -f /etc/lighttpd/lighttpd.conf 22 | 23 | exec "$@" 24 | -------------------------------------------------------------------------------- /Containers/fulltextsearch/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:latest 2 | # Probably from here https://github.com/elastic/elasticsearch/blob/main/distribution/docker/src/docker/Dockerfile 3 | FROM elasticsearch:8.18.2 4 | 5 | USER root 6 | 7 | ARG DEBIAN_FRONTEND=noninteractive 8 | 9 | # hadolint ignore=DL3008 10 | RUN set -ex; \ 11 | \ 12 | apt-get update; \ 13 | apt-get upgrade -y; \ 14 | apt-get install -y --no-install-recommends \ 15 | tzdata \ 16 | ; \ 17 | rm -rf /var/lib/apt/lists/*; 18 | 19 | COPY --chmod=775 healthcheck.sh /healthcheck.sh 20 | 21 | USER 1000:0 22 | 23 | HEALTHCHECK --interval=10s --timeout=5s 
--start-period=1m --retries=5 CMD /healthcheck.sh 24 | LABEL com.centurylinklabs.watchtower.enable="false" \ 25 | org.label-schema.vendor="Nextcloud" 26 | ENV ES_JAVA_OPTS="-Xms512M -Xmx512M" 27 | -------------------------------------------------------------------------------- /Containers/fulltextsearch/healthcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | nc -z 127.0.0.1 9200 || exit 1 4 | -------------------------------------------------------------------------------- /Containers/imaginary/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:latest 2 | FROM golang:1.24.3-alpine3.21 AS go 3 | 4 | ENV IMAGINARY_HASH=1d4e251cfcd58ea66f8361f8721d7b8cc85002a3 5 | 6 | RUN set -ex; \ 7 | apk upgrade --no-cache -a; \ 8 | apk add --no-cache \ 9 | vips-dev \ 10 | vips-magick \ 11 | vips-heif \ 12 | vips-jxl \ 13 | vips-poppler \ 14 | build-base; \ 15 | go install github.com/h2non/imaginary@"$IMAGINARY_HASH"; 16 | 17 | FROM alpine:3.21.3 18 | RUN set -ex; \ 19 | apk upgrade --no-cache -a; \ 20 | apk add --no-cache \ 21 | tzdata \ 22 | ca-certificates \ 23 | netcat-openbsd \ 24 | vips \ 25 | vips-magick \ 26 | vips-heif \ 27 | vips-jxl \ 28 | vips-poppler \ 29 | ttf-dejavu \ 30 | bash 31 | 32 | COPY --from=go /go/bin/imaginary /usr/local/bin/imaginary 33 | COPY --chmod=775 start.sh /start.sh 34 | COPY --chmod=775 healthcheck.sh /healthcheck.sh 35 | 36 | ENV PORT=9000 37 | 38 | USER 65534 39 | 40 | # https://github.com/h2non/imaginary#memory-issues 41 | ENV MALLOC_ARENA_MAX=2 42 | ENTRYPOINT ["/start.sh"] 43 | 44 | HEALTHCHECK CMD /healthcheck.sh 45 | LABEL com.centurylinklabs.watchtower.enable="false" \ 46 | org.label-schema.vendor="Nextcloud" 47 | -------------------------------------------------------------------------------- /Containers/imaginary/healthcheck.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | nc -z 127.0.0.1 "$PORT" || exit 1 4 | -------------------------------------------------------------------------------- /Containers/imaginary/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "Imaginary has started" 4 | if [ -z "$IMAGINARY_SECRET" ]; then 5 | imaginary -return-size -max-allowed-resolution 222.2 "$@" 6 | else 7 | imaginary -return-size -max-allowed-resolution 222.2 -key "$IMAGINARY_SECRET" "$@" 8 | fi 9 | -------------------------------------------------------------------------------- /Containers/mastercontainer/Caddyfile: -------------------------------------------------------------------------------- 1 | { 2 | # auto_https will create redirects for https://{host}:8443 instead of https://{host} 3 | # https redirects are added manually in the http://:80 block 4 | auto_https disable_redirects 5 | 6 | storage file_system { 7 | root /mnt/docker-aio-config/caddy/ 8 | } 9 | 10 | log { 11 | level ERROR 12 | } 13 | 14 | servers { 15 | protocols h1 h2 h2c 16 | } 17 | 18 | on_demand_tls { 19 | ask http://127.0.0.1:9876/ 20 | } 21 | } 22 | 23 | http://:80 { 24 | redir https://{host}{uri} permanent 25 | } 26 | 27 | https://:8443 { 28 | 29 | reverse_proxy 127.0.0.1:8000 30 | 31 | tls { 32 | on_demand 33 | issuer acme { 34 | disable_tlsalpn_challenge 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /Containers/mastercontainer/backup-time-file-watcher.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | restart_process() { 4 | echo "Restarting cron.sh because daily backup time was set, changed or unset." 
5 | pkill cron.sh 6 | } 7 | 8 | file_present() { 9 | if [ -f "/mnt/docker-aio-config/data/daily_backup_time" ]; then 10 | if [ "$FILE_PRESENT" = 0 ]; then 11 | restart_process 12 | else 13 | if [ -n "$BACKUP_TIME" ] && [ "$(head -1 "/mnt/docker-aio-config/data/daily_backup_time")" != "$BACKUP_TIME" ]; then 14 | restart_process 15 | fi 16 | fi 17 | FILE_PRESENT=1 18 | BACKUP_TIME="$(head -1 "/mnt/docker-aio-config/data/daily_backup_time")" 19 | else 20 | if [ "$FILE_PRESENT" = 1 ]; then 21 | restart_process 22 | fi 23 | FILE_PRESENT=0 24 | fi 25 | } 26 | 27 | while true; do 28 | file_present 29 | sleep 2 30 | done 31 | -------------------------------------------------------------------------------- /Containers/mastercontainer/healthcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -f "/mnt/docker-aio-config/data/configuration.json" ]; then 4 | nc -z 127.0.0.1 80 || exit 1 5 | nc -z 127.0.0.1 8000 || exit 1 6 | nc -z 127.0.0.1 8080 || exit 1 7 | nc -z 127.0.0.1 8443 || exit 1 8 | nc -z 127.0.0.1 9000 || exit 1 9 | nc -z 127.0.0.1 9876 || exit 1 10 | fi 11 | -------------------------------------------------------------------------------- /Containers/mastercontainer/session-deduplicator.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | deduplicate_sessions() { 4 | echo "Deleting duplicate sessions" 5 | find "/mnt/docker-aio-config/session/" -mindepth 1 -exec grep -qv "$NEW_SESSION_TIME" {} \; -delete 6 | } 7 | 8 | compare_times() { 9 | if [ -f "/mnt/docker-aio-config/data/session_date_file" ]; then 10 | unset NEW_SESSION_TIME 11 | NEW_SESSION_TIME="$(cat "/mnt/docker-aio-config/data/session_date_file")" 12 | if [ -n "$NEW_SESSION_TIME" ] && [ -n "$OLD_SESSION_TIME" ] && [ "$NEW_SESSION_TIME" != "$OLD_SESSION_TIME" ]; then 13 | deduplicate_sessions 14 | fi 15 | OLD_SESSION_TIME="$NEW_SESSION_TIME" 16 | fi 17 | } 18 | 19 | while true; do 20 | 
compare_times 21 | sleep 2 22 | done 23 | -------------------------------------------------------------------------------- /Containers/mastercontainer/supervisord.conf: -------------------------------------------------------------------------------- 1 | [supervisord] 2 | nodaemon=true 3 | logfile=/var/log/supervisord/supervisord.log 4 | pidfile=/var/run/supervisord/supervisord.pid 5 | childlogdir=/var/log/supervisord/ 6 | logfile_maxbytes=50MB 7 | logfile_backups=10 8 | loglevel=error 9 | user=root 10 | 11 | [program:php-fpm] 12 | # Stdout logging is disabled as otherwise the logs are spammed 13 | stdout_logfile=NONE 14 | stderr_logfile=/dev/stderr 15 | stderr_logfile_maxbytes=0 16 | command=php-fpm 17 | user=root 18 | 19 | [program:apache] 20 | # Stdout logging is disabled as otherwise the logs are spammed 21 | stdout_logfile=NONE 22 | stderr_logfile=/dev/stderr 23 | stderr_logfile_maxbytes=0 24 | command=httpd -DFOREGROUND 25 | user=root 26 | 27 | [program:caddy] 28 | stdout_logfile=/dev/stdout 29 | stdout_logfile_maxbytes=0 30 | stderr_logfile=/dev/stderr 31 | stderr_logfile_maxbytes=0 32 | command=/usr/bin/caddy run --config /Caddyfile 33 | user=www-data 34 | 35 | [program:cron] 36 | stdout_logfile=/dev/stdout 37 | stdout_logfile_maxbytes=0 38 | stderr_logfile=/dev/stderr 39 | stderr_logfile_maxbytes=0 40 | command=/cron.sh 41 | user=root 42 | 43 | [program:backup-time-file-watcher] 44 | stdout_logfile=/dev/stdout 45 | stdout_logfile_maxbytes=0 46 | stderr_logfile=/dev/stderr 47 | stderr_logfile_maxbytes=0 48 | command=/backup-time-file-watcher.sh 49 | user=root 50 | 51 | [program:session-deduplicator] 52 | stdout_logfile=/dev/stdout 53 | stdout_logfile_maxbytes=0 54 | stderr_logfile=/dev/stderr 55 | stderr_logfile_maxbytes=0 56 | command=/session-deduplicator.sh 57 | user=root 58 | 59 | [program:domain-validator] 60 | # Logging is disabled as otherwise all attempts will be logged which spams the logs 61 | stdout_logfile=NONE 62 | stderr_logfile=NONE 63 | 
command=php -S 127.0.0.1:9876 /var/www/docker-aio/php/domain-validator.php 64 | user=www-data 65 | -------------------------------------------------------------------------------- /Containers/nextcloud/config/aio.config.php: -------------------------------------------------------------------------------- 1 | true, 4 | 'one-click-instance.user-limit' => 100, 5 | ); 6 | -------------------------------------------------------------------------------- /Containers/nextcloud/config/apcu.config.php: -------------------------------------------------------------------------------- 1 | '\OC\Memcache\APCu', 4 | ); 5 | -------------------------------------------------------------------------------- /Containers/nextcloud/config/apps.config.php: -------------------------------------------------------------------------------- 1 | array ( 4 | 0 => array ( 5 | 'path' => '/var/www/html/apps', 6 | 'url' => '/apps', 7 | 'writable' => false, 8 | ), 9 | 1 => array ( 10 | 'path' => '/var/www/html/custom_apps', 11 | 'url' => '/custom_apps', 12 | 'writable' => true, 13 | ), 14 | ), 15 | ); 16 | if (getenv('APPS_ALLOWLIST')) { 17 | $CONFIG['appsallowlist'] = explode(" ", getenv('APPS_ALLOWLIST')); 18 | } 19 | -------------------------------------------------------------------------------- /Containers/nextcloud/config/proxy.config.php: -------------------------------------------------------------------------------- 1 | '\OC\Memcache\Redis', 5 | 'memcache.locking' => '\OC\Memcache\Redis', 6 | 'redis' => array( 7 | 'host' => getenv('REDIS_HOST'), 8 | 'password' => (string) getenv('REDIS_HOST_PASSWORD'), 9 | ), 10 | ); 11 | 12 | if (getenv('REDIS_HOST_PORT')) { 13 | $CONFIG['redis']['port'] = (int) getenv('REDIS_HOST_PORT'); 14 | } elseif (getenv('REDIS_HOST')[0] != '/') { 15 | $CONFIG['redis']['port'] = 6379; 16 | } 17 | 18 | if (getenv('REDIS_DB_INDEX')) { 19 | $CONFIG['redis']['dbindex'] = (int) getenv('REDIS_DB_INDEX'); 20 | } 21 | 22 | if (getenv('REDIS_USER_AUTH') !== false) { 23 | 
$CONFIG['redis']['user'] = str_replace("&auth[]=", "", getenv('REDIS_USER_AUTH')); 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /Containers/nextcloud/config/reverse-proxy.config.php: -------------------------------------------------------------------------------- 1 | array( 9 | 'class' => '\OC\Files\ObjectStore\S3', 10 | 'arguments' => array( 11 | 'bucket' => getenv('OBJECTSTORE_S3_BUCKET'), 12 | 'key' => getenv('OBJECTSTORE_S3_KEY') ?: '', 13 | 'secret' => getenv('OBJECTSTORE_S3_SECRET') ?: '', 14 | 'region' => getenv('OBJECTSTORE_S3_REGION') ?: '', 15 | 'hostname' => getenv('OBJECTSTORE_S3_HOST') ?: '', 16 | 'port' => getenv('OBJECTSTORE_S3_PORT') ?: '', 17 | 'storageClass' => getenv('OBJECTSTORE_S3_STORAGE_CLASS') ?: '', 18 | 'objectPrefix' => getenv("OBJECTSTORE_S3_OBJECT_PREFIX") ? getenv("OBJECTSTORE_S3_OBJECT_PREFIX") : "urn:oid:", 19 | 'autocreate' => (strtolower($autocreate) === 'false' || $autocreate == false) ? false : true, 20 | 'use_ssl' => (strtolower($use_ssl) === 'false' || $use_ssl == false) ? false : true, 21 | // required for some non Amazon S3 implementations 22 | 'use_path_style' => $use_path == true && strtolower($use_path) !== 'false', 23 | // required for older protocol versions 24 | 'legacy_auth' => $use_legacyauth == true && strtolower($use_legacyauth) !== 'false' 25 | ) 26 | ) 27 | ); 28 | 29 | $sse_c_key = getenv('OBJECTSTORE_S3_SSE_C_KEY'); 30 | if ($sse_c_key) { 31 | $CONFIG['objectstore']['arguments']['sse_c_key'] = $sse_c_key; 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /Containers/nextcloud/config/smtp.config.php: -------------------------------------------------------------------------------- 1 | 'smtp', 5 | 'mail_smtphost' => getenv('SMTP_HOST'), 6 | 'mail_smtpport' => getenv('SMTP_PORT') ?: (getenv('SMTP_SECURE') ? 
465 : 25), 7 | 'mail_smtpsecure' => getenv('SMTP_SECURE') ?: '', 8 | 'mail_smtpauth' => getenv('SMTP_NAME') && getenv('SMTP_PASSWORD'), 9 | 'mail_smtpauthtype' => getenv('SMTP_AUTHTYPE') ?: 'LOGIN', 10 | 'mail_smtpname' => getenv('SMTP_NAME') ?: '', 11 | 'mail_from_address' => getenv('MAIL_FROM_ADDRESS'), 12 | 'mail_domain' => getenv('MAIL_DOMAIN'), 13 | ); 14 | 15 | if (getenv('SMTP_PASSWORD')) { 16 | $CONFIG['mail_smtppassword'] = getenv('SMTP_PASSWORD'); 17 | } else { 18 | $CONFIG['mail_smtppassword'] = ''; 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /Containers/nextcloud/config/swift.config.php: -------------------------------------------------------------------------------- 1 | [ 6 | 'class' => 'OC\\Files\\ObjectStore\\Swift', 7 | 'arguments' => [ 8 | 'autocreate' => $autocreate == true && strtolower($autocreate) !== 'false', 9 | 'user' => [ 10 | 'name' => getenv('OBJECTSTORE_SWIFT_USER_NAME'), 11 | 'password' => getenv('OBJECTSTORE_SWIFT_USER_PASSWORD'), 12 | 'domain' => [ 13 | 'name' => (getenv('OBJECTSTORE_SWIFT_USER_DOMAIN')) ?: 'Default', 14 | ], 15 | ], 16 | 'scope' => [ 17 | 'project' => [ 18 | 'name' => getenv('OBJECTSTORE_SWIFT_PROJECT_NAME'), 19 | 'domain' => [ 20 | 'name' => (getenv('OBJECTSTORE_SWIFT_PROJECT_DOMAIN')) ?: 'Default', 21 | ], 22 | ], 23 | ], 24 | 'serviceName' => (getenv('OBJECTSTORE_SWIFT_SERVICE_NAME')) ?: 'swift', 25 | 'region' => getenv('OBJECTSTORE_SWIFT_REGION'), 26 | 'url' => getenv('OBJECTSTORE_SWIFT_URL'), 27 | 'bucket' => getenv('OBJECTSTORE_SWIFT_CONTAINER_NAME'), 28 | ] 29 | ] 30 | ); 31 | } 32 | -------------------------------------------------------------------------------- /Containers/nextcloud/cron.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | wait_for_cron() { 3 | set -x 4 | while [ -n "$(pgrep -f /var/www/html/cron.php)" ]; do 5 | echo "Waiting for cron to stop..." 
6 | sleep 5 7 | done 8 | echo "Cronjob successfully exited." 9 | exit 10 | } 11 | 12 | trap wait_for_cron SIGINT SIGTERM 13 | 14 | while true; do 15 | php -f /var/www/html/cron.php & 16 | sleep 5m & 17 | wait $! 18 | done 19 | -------------------------------------------------------------------------------- /Containers/nextcloud/healthcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Set a default value for POSTGRES_PORT 4 | if [ -z "$POSTGRES_PORT" ]; then 5 | POSTGRES_PORT=5432 6 | fi 7 | 8 | 9 | # POSTGRES_HOST must be set in the containers env vars and POSTGRES_PORT has a default above 10 | # shellcheck disable=SC2153 11 | nc -z "$POSTGRES_HOST" "$POSTGRES_PORT" || exit 0 12 | 13 | if ! nc -z 127.0.0.1 9000; then 14 | exit 1 15 | fi 16 | -------------------------------------------------------------------------------- /Containers/nextcloud/notify-all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ "$EUID" = 0 ]]; then 4 | COMMAND=(sudo -E -u www-data php /var/www/html/occ) 5 | else 6 | COMMAND=(php /var/www/html/occ) 7 | fi 8 | 9 | SUBJECT="$1" 10 | MESSAGE="$2" 11 | 12 | if [ "$("${COMMAND[@]}" config:app:get notifications enabled)" = "no" ]; then 13 | echo "Cannot send notification as notification app is not enabled." 14 | exit 1 15 | fi 16 | 17 | echo "Posting notifications to all users..." 18 | NC_USERS=$("${COMMAND[@]}" user:list | sed 's|^ - ||g' | sed 's|:.*||') 19 | mapfile -t NC_USERS <<< "$NC_USERS" 20 | for user in "${NC_USERS[@]}" 21 | do 22 | echo "Posting '$SUBJECT' to: $user" 23 | "${COMMAND[@]}" notification:generate "$user" "$NC_DOMAIN: $SUBJECT" -l "$MESSAGE" --object-type='update' --object-id="$SUBJECT" 24 | done 25 | 26 | echo "Done!" 
27 | exit 0 -------------------------------------------------------------------------------- /Containers/nextcloud/notify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ "$EUID" = 0 ]]; then 4 | COMMAND=(sudo -E -u www-data php /var/www/html/occ) 5 | else 6 | COMMAND=(php /var/www/html/occ) 7 | fi 8 | 9 | SUBJECT="$1" 10 | MESSAGE="$2" 11 | 12 | if [ "$("${COMMAND[@]}" config:app:get notifications enabled)" = "no" ]; then 13 | echo "Cannot send notification as notification app is not enabled." 14 | exit 1 15 | fi 16 | 17 | echo "Posting notifications to users that are admins..." 18 | NC_USERS=$("${COMMAND[@]}" user:list | sed 's|^ - ||g' | sed 's|:.*||') 19 | mapfile -t NC_USERS <<< "$NC_USERS" 20 | for user in "${NC_USERS[@]}" 21 | do 22 | if "${COMMAND[@]}" user:info "$user" | cut -d "-" -f2 | grep -x -q " admin" 23 | then 24 | NC_ADMIN_USER+=("$user") 25 | fi 26 | done 27 | 28 | for admin in "${NC_ADMIN_USER[@]}" 29 | do 30 | echo "Posting '$SUBJECT' to: $admin" 31 | "${COMMAND[@]}" notification:generate "$admin" "$NC_DOMAIN: $SUBJECT" -l "$MESSAGE" --object-type='update' --object-id="$SUBJECT" 32 | done 33 | 34 | echo "Done!" 35 | exit 0 36 | -------------------------------------------------------------------------------- /Containers/nextcloud/root.motd: -------------------------------------------------------------------------------- 1 | Warning: You have logged in into the Nextcloud container as root user. 2 | See https://github.com/nextcloud/all-in-one#how-to-run-occ-commands if you want to run occ commands. 3 | Apart from that, you can use 'sudo -E -u www-data php occ ' in order to run occ commands. 4 | Of course needs to be substituted with the command that you want to use. 
5 | -------------------------------------------------------------------------------- /Containers/nextcloud/run-exec-commands.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Wait until the apache container is ready 4 | while ! nc -z "$APACHE_HOST" "$APACHE_PORT"; do 5 | echo "Waiting for $APACHE_HOST to become available..." 6 | sleep 15 7 | done 8 | 9 | if [ -n "$NEXTCLOUD_EXEC_COMMANDS" ]; then 10 | echo "#!/bin/bash" > /tmp/nextcloud-exec-commands 11 | echo "$NEXTCLOUD_EXEC_COMMANDS" >> /tmp/nextcloud-exec-commands 12 | if ! grep "one-click-instance" /tmp/nextcloud-exec-commands; then 13 | bash /tmp/nextcloud-exec-commands 14 | rm /tmp/nextcloud-exec-commands 15 | fi 16 | else 17 | # Collabora must work also if using manual-install 18 | if [ "$COLLABORA_ENABLED" = yes ]; then 19 | echo "Activating Collabora config..." 20 | php /var/www/html/occ richdocuments:activate-config 21 | fi 22 | # OnlyOffice must work also if using manual-install 23 | if [ "$ONLYOFFICE_ENABLED" = yes ]; then 24 | echo "Activating OnlyOffice config..." 
25 | php /var/www/html/occ onlyoffice:documentserver --check 26 | fi 27 | fi 28 | 29 | sleep inf 30 | -------------------------------------------------------------------------------- /Containers/nextcloud/supervisord.conf: -------------------------------------------------------------------------------- 1 | # From https://github.com/nextcloud/docker/blob/master/.examples/dockerfiles/full/fpm/supervisord.conf 2 | [supervisord] 3 | nodaemon=true 4 | logfile=/var/log/supervisord/supervisord.log 5 | pidfile=/var/run/supervisord/supervisord.pid 6 | childlogdir=/var/log/supervisord/ 7 | logfile_maxbytes=50MB ; maximum size of logfile before rotation 8 | logfile_backups=10 ; number of backed up logfiles 9 | loglevel=error 10 | user=root 11 | 12 | [program:php-fpm] 13 | stdout_logfile=/dev/stdout 14 | stdout_logfile_maxbytes=0 15 | stderr_logfile=/dev/stderr 16 | stderr_logfile_maxbytes=0 17 | command=php-fpm 18 | user=root 19 | 20 | [program:cron] 21 | stdout_logfile=/dev/stdout 22 | stdout_logfile_maxbytes=0 23 | stderr_logfile=/dev/stderr 24 | stderr_logfile_maxbytes=0 25 | command=/cron.sh 26 | user=www-data 27 | 28 | [program:run-exec-commands] 29 | stdout_logfile=/dev/stdout 30 | stdout_logfile_maxbytes=0 31 | stderr_logfile=/dev/stderr 32 | stderr_logfile_maxbytes=0 33 | command=/run-exec-commands.sh 34 | user=www-data 35 | 36 | # This is a hack but no better solution is there 37 | [program:is-nextcloud-online] 38 | stdout_logfile=/dev/stdout 39 | stdout_logfile_maxbytes=0 40 | stderr_logfile=/dev/stderr 41 | stderr_logfile_maxbytes=0 42 | # Restart the netcat command once a day to ensure that it stays reachable 43 | # See https://github.com/nextcloud/all-in-one/issues/6334 44 | command=timeout 86400 nc -lk 9001 45 | user=www-data 46 | -------------------------------------------------------------------------------- /Containers/nextcloud/upgrade.exclude: -------------------------------------------------------------------------------- 1 | /config/ 2 | /data/ 3 | 
/custom_apps/ 4 | /themes/ 5 | /version.php 6 | -------------------------------------------------------------------------------- /Containers/notify-push/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:latest 2 | FROM alpine:3.21.3 3 | 4 | COPY --chmod=775 start.sh /start.sh 5 | COPY --chmod=775 healthcheck.sh /healthcheck.sh 6 | 7 | RUN set -ex; \ 8 | apk upgrade --no-cache -a; \ 9 | apk add --no-cache \ 10 | ca-certificates \ 11 | netcat-openbsd \ 12 | tzdata \ 13 | bash \ 14 | openssl; \ 15 | # Give root a random password 16 | echo "root:$(openssl rand -base64 12)" | chpasswd; \ 17 | apk del --no-cache \ 18 | openssl; 19 | 20 | USER 33 21 | ENTRYPOINT ["/start.sh"] 22 | 23 | HEALTHCHECK CMD /healthcheck.sh 24 | LABEL com.centurylinklabs.watchtower.enable="false" \ 25 | org.label-schema.vendor="Nextcloud" 26 | -------------------------------------------------------------------------------- /Containers/notify-push/healthcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if ! 
nc -z "$NEXTCLOUD_HOST" 9001; then 4 | exit 0 5 | fi 6 | 7 | nc -z 127.0.0.1 7867 || exit 1 8 | -------------------------------------------------------------------------------- /Containers/onlyoffice/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:latest 2 | # From https://github.com/ONLYOFFICE/Docker-DocumentServer/blob/master/Dockerfile 3 | FROM onlyoffice/documentserver:8.3.3.1 4 | 5 | # USER root is probably used 6 | 7 | COPY --chmod=775 healthcheck.sh /healthcheck.sh 8 | 9 | HEALTHCHECK --start-period=60s --retries=9 CMD /healthcheck.sh 10 | LABEL com.centurylinklabs.watchtower.enable="false" \ 11 | org.label-schema.vendor="Nextcloud" 12 | -------------------------------------------------------------------------------- /Containers/onlyoffice/healthcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | nc -z 127.0.0.1 80 || exit 1 4 | -------------------------------------------------------------------------------- /Containers/postgresql/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:latest 2 | # From https://github.com/docker-library/postgres/blob/master/17/alpine3.21/Dockerfile 3 | FROM postgres:17.5-alpine 4 | 5 | COPY --chmod=775 start.sh /start.sh 6 | COPY --chmod=775 healthcheck.sh /healthcheck.sh 7 | COPY --chmod=775 init-user-db.sh /docker-entrypoint-initdb.d/init-user-db.sh 8 | 9 | RUN set -ex; \ 10 | apk upgrade --no-cache -a; \ 11 | apk add --no-cache \ 12 | bash \ 13 | openssl \ 14 | shadow \ 15 | grep; \ 16 | \ 17 | # We need to use the same gid and uid as on old installations 18 | deluser postgres; \ 19 | groupmod -g 9999 ping; \ 20 | addgroup -g 999 -S postgres; \ 21 | adduser -u 999 -S -D -G postgres -H -h /var/lib/postgresql -s /bin/sh postgres; \ 22 | apk del --no-cache shadow; \ 23 | \ 24 | # Fix default permissions 25 | chown -R 
postgres:postgres /var/lib/postgresql; \ 26 | chown -R postgres:postgres /var/run/postgresql; \ 27 | chmod -R 777 /var/run/postgresql; \ 28 | chown -R postgres:postgres "$PGDATA"; \ 29 | \ 30 | mkdir /mnt/data; \ 31 | chown postgres:postgres /mnt/data; \ 32 | \ 33 | # Give root a random password 34 | echo "root:$(openssl rand -base64 12)" | chpasswd; \ 35 | apk --no-cache del openssl; \ 36 | \ 37 | # Get rid of unused binaries 38 | rm -f /usr/local/bin/gosu /usr/local/bin/su-exec; 39 | 40 | VOLUME /mnt/data 41 | 42 | USER 999 43 | ENTRYPOINT ["/start.sh"] 44 | 45 | HEALTHCHECK CMD /healthcheck.sh 46 | LABEL com.centurylinklabs.watchtower.enable="false" \ 47 | org.label-schema.vendor="Nextcloud" 48 | -------------------------------------------------------------------------------- /Containers/postgresql/healthcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | test -f "/mnt/data/backup-is-running" && exit 0 4 | 5 | psql -d "postgresql://oc_$POSTGRES_USER:$POSTGRES_PASSWORD@127.0.0.1:11000/$POSTGRES_DB" -c "select now()" && exit 0 6 | 7 | psql -d "postgresql://oc_$POSTGRES_USER:$POSTGRES_PASSWORD@127.0.0.1:5432/$POSTGRES_DB" -c "select now()" || exit 1 8 | -------------------------------------------------------------------------------- /Containers/postgresql/init-user-db.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | touch "$DUMP_DIR/initialization.failed" 5 | 6 | psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL 7 | CREATE USER "oc_$POSTGRES_USER" WITH PASSWORD '$POSTGRES_PASSWORD' CREATEDB; 8 | ALTER DATABASE "$POSTGRES_DB" OWNER TO "oc_$POSTGRES_USER"; 9 | GRANT ALL PRIVILEGES ON DATABASE "$POSTGRES_DB" TO "oc_$POSTGRES_USER"; 10 | GRANT ALL PRIVILEGES ON SCHEMA public TO "oc_$POSTGRES_USER"; 11 | EOSQL 12 | 13 | rm "$DUMP_DIR/initialization.failed" 14 | 15 | set +ex 16 | 
-------------------------------------------------------------------------------- /Containers/redis/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:latest 2 | # From https://github.com/docker-library/redis/blob/master/7.2/alpine/Dockerfile 3 | FROM redis:7.2.9-alpine 4 | 5 | COPY --chmod=775 start.sh /start.sh 6 | 7 | RUN set -ex; \ 8 | apk upgrade --no-cache -a; \ 9 | apk add --no-cache openssl bash; \ 10 | \ 11 | # Give root a random password 12 | echo "root:$(openssl rand -base64 12)" | chpasswd; \ 13 | \ 14 | # Get rid of unused binaries 15 | rm -f /usr/local/bin/gosu; 16 | 17 | COPY --chmod=775 healthcheck.sh /healthcheck.sh 18 | 19 | USER 999 20 | ENTRYPOINT ["/start.sh"] 21 | 22 | HEALTHCHECK CMD /healthcheck.sh 23 | LABEL com.centurylinklabs.watchtower.enable="false" \ 24 | org.label-schema.vendor="Nextcloud" 25 | -------------------------------------------------------------------------------- /Containers/redis/healthcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | redis-cli -a "$REDIS_HOST_PASSWORD" PING || exit 1 4 | -------------------------------------------------------------------------------- /Containers/redis/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Show wiki if vm.overcommit is disabled 4 | if [ "$(sysctl -n vm.overcommit_memory)" != "1" ]; then 5 | echo "Memory overcommit is disabled but necessary for safe operation" 6 | echo "See https://github.com/nextcloud/all-in-one/discussions/1731 how to enable overcommit" 7 | fi 8 | 9 | # Run redis with a password if provided 10 | echo "Redis has started" 11 | if [ -n "$REDIS_HOST_PASSWORD" ]; then 12 | exec redis-server --requirepass "$REDIS_HOST_PASSWORD" --loglevel warning 13 | else 14 | exec redis-server --loglevel warning 15 | fi 16 | 17 | exec "$@" 18 | 
-------------------------------------------------------------------------------- /Containers/talk-recording/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:latest 2 | FROM python:3.13.3-alpine3.21 3 | 4 | COPY --chmod=775 start.sh /start.sh 5 | COPY --chmod=775 healthcheck.sh /healthcheck.sh 6 | 7 | ENV RECORDING_VERSION=v0.1 8 | ENV ALLOW_ALL=false 9 | ENV HPB_PROTOCOL=https 10 | ENV NC_PROTOCOL=https 11 | ENV SKIP_VERIFY=false 12 | ENV HPB_PATH=/standalone-signaling/ 13 | 14 | RUN set -ex; \ 15 | apk upgrade --no-cache -a; \ 16 | apk add --no-cache \ 17 | ca-certificates \ 18 | tzdata \ 19 | bash \ 20 | xvfb \ 21 | ffmpeg \ 22 | firefox \ 23 | bind-tools \ 24 | netcat-openbsd \ 25 | git \ 26 | wget \ 27 | shadow \ 28 | pulseaudio \ 29 | openssl \ 30 | build-base \ 31 | linux-headers \ 32 | geckodriver; \ 33 | useradd -d /tmp --system recording -u 122; \ 34 | # Give root a random password 35 | echo "root:$(openssl rand -base64 12)" | chpasswd; \ 36 | git clone --recursive https://github.com/nextcloud/nextcloud-talk-recording --depth=1 --single-branch --branch "$RECORDING_VERSION" /src; \ 37 | python3 -m pip install --no-cache-dir /src; \ 38 | rm -rf /src; \ 39 | touch /etc/recording.conf; \ 40 | chown recording:recording -R \ 41 | /tmp /etc/recording.conf; \ 42 | mkdir -p /conf; \ 43 | chmod 777 /conf; \ 44 | chmod 777 /tmp; \ 45 | apk del --no-cache \ 46 | git \ 47 | wget \ 48 | shadow \ 49 | openssl \ 50 | build-base \ 51 | linux-headers; 52 | 53 | VOLUME /tmp 54 | WORKDIR /tmp 55 | USER 122 56 | ENTRYPOINT ["/start.sh"] 57 | CMD ["python", "-m", "nextcloud.talk.recording", "--config", "/conf/recording.conf"] 58 | 59 | HEALTHCHECK CMD /healthcheck.sh 60 | LABEL com.centurylinklabs.watchtower.enable="false" \ 61 | org.label-schema.vendor="Nextcloud" 62 | -------------------------------------------------------------------------------- /Containers/talk-recording/healthcheck.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | nc -z 127.0.0.1 1234 || exit 1 4 | -------------------------------------------------------------------------------- /Containers/talk-recording/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Variables 4 | if [ -z "$NC_DOMAIN" ]; then 5 | echo "You need to provide the NC_DOMAIN." 6 | exit 1 7 | elif [ -z "$RECORDING_SECRET" ]; then 8 | echo "You need to provide the RECORDING_SECRET." 9 | exit 1 10 | elif [ -z "$INTERNAL_SECRET" ]; then 11 | echo "You need to provide the INTERNAL_SECRET." 12 | exit 1 13 | fi 14 | 15 | if [ -z "$HPB_DOMAIN" ]; then 16 | export HPB_DOMAIN="$NC_DOMAIN" 17 | fi 18 | 19 | # Delete all contents on startup to start fresh 20 | rm -fr /tmp/{*,.*} 21 | 22 | cat << RECORDING_CONF > "/conf/recording.conf" 23 | [logs] 24 | # 30 means Warning 25 | level = 30 26 | 27 | [http] 28 | listen = 0.0.0.0:1234 29 | 30 | [backend] 31 | allowall = ${ALLOW_ALL} 32 | # The secret below is still needed if allowall is set to true, also it doesn't hurt to be here 33 | secret = ${RECORDING_SECRET} 34 | backends = backend-1 35 | skipverify = ${SKIP_VERIFY} 36 | maxmessagesize = 1024 37 | videowidth = 1920 38 | videoheight = 1080 39 | directory = /tmp 40 | 41 | [backend-1] 42 | url = ${NC_PROTOCOL}://${NC_DOMAIN} 43 | secret = ${RECORDING_SECRET} 44 | skipverify = ${SKIP_VERIFY} 45 | 46 | [signaling] 47 | signalings = signaling-1 48 | 49 | [signaling-1] 50 | url = ${HPB_PROTOCOL}://${HPB_DOMAIN}${HPB_PATH} 51 | internalsecret = ${INTERNAL_SECRET} 52 | 53 | [ffmpeg] 54 | # common = ffmpeg -loglevel level+warning -n 55 | # outputaudio = -c:a libopus 56 | # outputvideo = -c:v libvpx -deadline:v realtime -crf 10 -b:v 1M 57 | extensionaudio = .ogg 58 | extensionvideo = .webm 59 | 60 | [recording] 61 | browser = firefox 62 | RECORDING_CONF 63 | 64 | exec "$@" 65 | 
-------------------------------------------------------------------------------- /Containers/talk/healthcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | nc -z 127.0.0.1 8081 || exit 1 4 | nc -z 127.0.0.1 8188 || exit 1 5 | nc -z 127.0.0.1 4222 || exit 1 6 | nc -z 127.0.0.1 "$TALK_PORT" || exit 1 7 | eturnalctl status || exit 1 8 | -------------------------------------------------------------------------------- /Containers/talk/supervisord.conf: -------------------------------------------------------------------------------- 1 | [supervisord] 2 | nodaemon=true 3 | logfile=/var/log/supervisord/supervisord.log 4 | pidfile=/var/run/supervisord/supervisord.pid 5 | childlogdir=/var/log/supervisord/ 6 | logfile_maxbytes=50MB 7 | logfile_backups=10 8 | loglevel=error 9 | 10 | [program:eturnal] 11 | stdout_logfile=/dev/stdout 12 | stdout_logfile_maxbytes=0 13 | stderr_logfile=/dev/stderr 14 | stderr_logfile_maxbytes=0 15 | command=eturnalctl foreground 16 | 17 | [program:nats-server] 18 | stdout_logfile=/dev/stdout 19 | stdout_logfile_maxbytes=0 20 | stderr_logfile=/dev/stderr 21 | stderr_logfile_maxbytes=0 22 | command=nats-server -c /etc/nats.conf 23 | 24 | [program:janus] 25 | stdout_logfile=/dev/stdout 26 | stdout_logfile_maxbytes=0 27 | stderr_logfile=/dev/stderr 28 | stderr_logfile_maxbytes=0 29 | # debug-level 3 means warning 30 | command=janus --config=/usr/local/etc/janus/janus.jcfg --disable-colors --log-stdout --full-trickle --debug-level 3 31 | 32 | [program:signaling] 33 | stdout_logfile=/dev/stdout 34 | stdout_logfile_maxbytes=0 35 | stderr_logfile=/dev/stderr 36 | stderr_logfile_maxbytes=0 37 | command=nextcloud-spreed-signaling -config /conf/signaling.conf 38 | -------------------------------------------------------------------------------- /Containers/watchtower/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:latest 2 
| FROM golang:1.24.3-alpine3.21 AS go 3 | 4 | RUN set -ex; \ 5 | apk upgrade --no-cache -a; \ 6 | apk add --no-cache \ 7 | build-base; \ 8 | go install github.com/containrrr/watchtower@76f9cea516593fabb8ca91ff13de55caa6aa0a8b; 9 | 10 | FROM alpine:3.21.3 11 | 12 | RUN set -ex; \ 13 | apk upgrade --no-cache -a; \ 14 | apk add --no-cache bash ca-certificates tzdata 15 | 16 | COPY --from=go /go/bin/watchtower /watchtower 17 | 18 | COPY --chmod=775 start.sh /start.sh 19 | 20 | # hadolint ignore=DL3002 21 | USER root 22 | 23 | ENTRYPOINT ["/start.sh"] 24 | LABEL com.centurylinklabs.watchtower.enable="false" \ 25 | org.label-schema.vendor="Nextcloud" 26 | -------------------------------------------------------------------------------- /Containers/watchtower/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if socket is available and readable 4 | if ! [ -a "/var/run/docker.sock" ]; then 5 | echo "Docker socket is not available. Cannot continue." 6 | exit 1 7 | elif ! test -r /var/run/docker.sock; then 8 | echo "Docker socket is not readable by the root user. Cannot continue." 9 | exit 1 10 | fi 11 | 12 | if [ -n "$CONTAINER_TO_UPDATE" ]; then 13 | exec /watchtower --cleanup --debug --run-once "$CONTAINER_TO_UPDATE" 14 | else 15 | echo "'CONTAINER_TO_UPDATE' is not set. Cannot update anything." 
16 | exit 1 17 | fi 18 | 19 | exec "$@" 20 | -------------------------------------------------------------------------------- /Containers/whiteboard/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:latest 2 | # Probably from this file: https://github.com/nextcloud/whiteboard/blob/main/Dockerfile 3 | FROM ghcr.io/nextcloud-releases/whiteboard:v1.0.5 4 | 5 | USER root 6 | RUN set -ex; \ 7 | apk upgrade --no-cache -a; \ 8 | apk add --no-cache bash; \ 9 | chmod 777 -R /tmp 10 | USER 65534 11 | 12 | COPY --chmod=775 start.sh /start.sh 13 | COPY --chmod=775 healthcheck.sh /healthcheck.sh 14 | 15 | HEALTHCHECK CMD /healthcheck.sh 16 | 17 | WORKDIR /tmp 18 | 19 | ENTRYPOINT ["/start.sh"] 20 | 21 | LABEL com.centurylinklabs.watchtower.enable="false" \ 22 | org.label-schema.vendor="Nextcloud" 23 | -------------------------------------------------------------------------------- /Containers/whiteboard/healthcheck.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | nc -z "$REDIS_HOST" 6379 || exit 0 4 | nc -z 127.0.0.1 3002 || exit 1 5 | -------------------------------------------------------------------------------- /Containers/whiteboard/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Only start container if nextcloud is accessible 4 | while ! nc -z "$REDIS_HOST" 6379; do 5 | echo "Waiting for redis to start..." 
6 | sleep 5 7 | done 8 | 9 | # Set a default for redis db index 10 | if [ -z "$REDIS_DB_INDEX" ]; then 11 | REDIS_DB_INDEX=0 12 | fi 13 | 14 | export REDIS_URL="redis://$REDIS_USER:$REDIS_HOST_PASSWORD@$REDIS_HOST/$REDIS_DB_INDEX" 15 | 16 | # Run it 17 | exec npm --prefix /app run server:start 18 | -------------------------------------------------------------------------------- /app/.editorconfig: -------------------------------------------------------------------------------- 1 | # https://editorconfig.org 2 | 3 | root = true 4 | 5 | [*] 6 | charset = utf-8 7 | end_of_line = lf 8 | indent_size = 4 9 | indent_style = tab 10 | insert_final_newline = true 11 | trim_trailing_whitespace = true 12 | 13 | [*.feature] 14 | indent_size = 2 15 | indent_style = space 16 | 17 | [*.yml] 18 | indent_size = 2 19 | indent_style = space 20 | -------------------------------------------------------------------------------- /app/appinfo/info.xml: -------------------------------------------------------------------------------- 1 | 2 | 4 | nextcloud-aio 5 | Nextcloud All-in-One 6 | Provides a login link for admins. 7 | Add a link to the admin settings that gives access to the Nextcloud All-in-One admin interface 8 | 0.8.0 9 | agpl 10 | Azul 11 | AllInOne 12 | 13 | monitoring 14 | https://github.com/nextcloud/all-in-one/issues 15 | 16 | 17 | 18 | 19 | 20 | OCA\AllInOne\Settings\Admin 21 | 22 | 23 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /app/composer/autoload.php: -------------------------------------------------------------------------------- 1 | $vendorDir . '/composer/InstalledVersions.php', 10 | 'OCA\\AllInOne\\Settings\\Admin' => $baseDir . '/../lib/Settings/Admin.php', 11 | ); 12 | -------------------------------------------------------------------------------- /app/composer/composer/autoload_namespaces.php: -------------------------------------------------------------------------------- 1 | array($baseDir . 
'/../lib'), 10 | ); 11 | -------------------------------------------------------------------------------- /app/composer/composer/autoload_real.php: -------------------------------------------------------------------------------- 1 | = 50600 && !defined('HHVM_VERSION') && (!function_exists('zend_loader_file_encoded') || !zend_loader_file_encoded()); 30 | if ($useStaticLoader) { 31 | require __DIR__ . '/autoload_static.php'; 32 | 33 | call_user_func(\Composer\Autoload\ComposerStaticInitAllInOne::getInitializer($loader)); 34 | } else { 35 | $classMap = require __DIR__ . '/autoload_classmap.php'; 36 | if ($classMap) { 37 | $loader->addClassMap($classMap); 38 | } 39 | } 40 | 41 | $loader->setClassMapAuthoritative(true); 42 | $loader->register(true); 43 | 44 | return $loader; 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /app/composer/composer/autoload_static.php: -------------------------------------------------------------------------------- 1 | 11 | array ( 12 | 'OCA\\AllInOne\\' => 13, 13 | ), 14 | ); 15 | 16 | public static $prefixDirsPsr4 = array ( 17 | 'OCA\\AllInOne\\' => 18 | array ( 19 | 0 => __DIR__ . '/..' . '/../lib', 20 | ), 21 | ); 22 | 23 | public static $classMap = array ( 24 | 'Composer\\InstalledVersions' => __DIR__ . '/..' . '/composer/InstalledVersions.php', 25 | 'OCA\\AllInOne\\Settings\\Admin' => __DIR__ . '/..' . 
'/../lib/Settings/Admin.php', 26 | ); 27 | 28 | public static function getInitializer(ClassLoader $loader) 29 | { 30 | return \Closure::bind(function () use ($loader) { 31 | $loader->prefixLengthsPsr4 = ComposerStaticInitAllInOne::$prefixLengthsPsr4; 32 | $loader->prefixDirsPsr4 = ComposerStaticInitAllInOne::$prefixDirsPsr4; 33 | $loader->classMap = ComposerStaticInitAllInOne::$classMap; 34 | 35 | }, null, ClassLoader::class); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /app/composer/composer/installed.json: -------------------------------------------------------------------------------- 1 | { 2 | "packages": [], 3 | "dev": true, 4 | "dev-package-names": [] 5 | } 6 | -------------------------------------------------------------------------------- /app/composer/composer/installed.php: -------------------------------------------------------------------------------- 1 | array( 3 | 'pretty_version' => 'dev-master', 4 | 'version' => 'dev-master', 5 | 'type' => 'library', 6 | 'install_path' => __DIR__ . '/../', 7 | 'aliases' => array(), 8 | 'reference' => '1b16a136ebd8f63e09df061d383f34170e2cef35', 9 | 'name' => '__root__', 10 | 'dev' => true, 11 | ), 12 | 'versions' => array( 13 | '__root__' => array( 14 | 'pretty_version' => 'dev-master', 15 | 'version' => 'dev-master', 16 | 'type' => 'library', 17 | 'install_path' => __DIR__ . '/../', 18 | 'aliases' => array(), 19 | 'reference' => '1b16a136ebd8f63e09df061d383f34170e2cef35', 20 | 'dev_requirement' => false, 21 | ), 22 | ), 23 | ); 24 | -------------------------------------------------------------------------------- /app/readme.md: -------------------------------------------------------------------------------- 1 | ## How to develop the app? 2 | 3 | Please note that in order to check if an app is already downloaded 4 | Nextcloud will look for a folder with the same name as the app. 
5 | 6 | Therefore you need to add the app to one of the app directories 7 | naming the directory `nextcloud-aio`. 8 | -------------------------------------------------------------------------------- /app/templates/admin.php: -------------------------------------------------------------------------------- 1 | 5 | * 6 | * @author Azul 7 | * 8 | * This file is licensed under the Affero General Public License version 3 or 9 | * later. See the COPYING file. 10 | */ 11 | /** @var array $_ */ ?> 12 | 17 | -------------------------------------------------------------------------------- /community-containers/borgbackup-viewer/readme.md: -------------------------------------------------------------------------------- 1 | ## Borgbackup Viewer 2 | This container allows to view the local borg repository in a web session. It also allows you to restore files and folders from the backup by using desktop programs in a web browser. 3 | 4 | ### Notes 5 | - After adding and starting the container, you need to visit `https://ip.address.of.this.server:5801` in order to log in with the user `nextcloud` and the password that you can see next to the container in the AIO interface. (The web page uses a self-signed certificate, so you need to accept the warning). 6 | - Then, you should see a terminal. There type in `borg mount /mnt/borgbackup/borg /tmp/borg` to mount the backup archive at `/tmp/borg` inside the container. Afterwards type in `nautilus /tmp/borg` which will show a file explorer and allows you to see all the files. You can then copy files and folders back to their initial mountpoints inside `/nextcloud_aio_volumes/`, `/host_mounts/` and `/docker_volumes/`. ⚠️ Be very carefully while doing that as can break your instance! 7 | - After you are done with the operation, click on the terminal in the background and press `[CTRL]+[c]` multiple times to close any open application. Then run `umount /tmp/borg` to unmount the mountpoint correctly. 
8 | - You can also delete specific archives by running `borg list`, delete a specific archive e.g. via `borg delete --stats --progress "::20220223_174237-nextcloud-aio"` and compact the archives via `borg compact`. After doing so, make sure to update the backup archives list in the AIO interface! You can do so by clicking on the `Check backup integrity` button or `Create backup` button. 9 | - ⚠️ After you are done doing your operations, remove the container for better security again from the stack: https://github.com/nextcloud/all-in-one/tree/main/community-containers#how-to-remove-containers-from-aios-stack 10 | - See https://github.com/nextcloud/all-in-one/tree/main/community-containers#community-containers how to add it to the AIO stack 11 | 12 | ### Repository 13 | https://github.com/szaimen/aio-borgbackup-viewer 14 | 15 | ### Maintainer 16 | https://github.com/szaimen 17 | 18 | -------------------------------------------------------------------------------- /community-containers/caddy/caddy.json: -------------------------------------------------------------------------------- 1 | { 2 | "aio_services_v1": [ 3 | { 4 | "container_name": "nextcloud-aio-caddy", 5 | "display_name": "Caddy with geoblocking", 6 | "documentation": "https://github.com/nextcloud/all-in-one/tree/main/community-containers/caddy", 7 | "image": "ghcr.io/szaimen/aio-caddy", 8 | "image_tag": "v2", 9 | "internal_port": "443", 10 | "restart": "unless-stopped", 11 | "ports": [ 12 | { 13 | "ip_binding": "", 14 | "port_number": "443", 15 | "protocol": "tcp" 16 | }, 17 | { 18 | "ip_binding": "", 19 | "port_number": "443", 20 | "protocol": "udp" 21 | } 22 | ], 23 | "environment": [ 24 | "TZ=%TIMEZONE%", 25 | "NC_DOMAIN=%NC_DOMAIN%", 26 | "APACHE_PORT=%APACHE_PORT%" 27 | ], 28 | "volumes": [ 29 | { 30 | "source": "nextcloud_aio_caddy", 31 | "destination": "/data", 32 | "writeable": true 33 | }, 34 | { 35 | "source": "%NEXTCLOUD_DATADIR%", 36 | "destination": "/nextcloud", 37 | "writeable": false 38 | 
} 39 | ], 40 | "aio_variables": [ 41 | "apache_ip_binding=@INTERNAL", 42 | "apache_port=11000" 43 | ], 44 | "nextcloud_exec_commands": [ 45 | "mkdir '/mnt/ncdata/admin/files/nextcloud-aio-caddy'", 46 | "touch '/mnt/ncdata/admin/files/nextcloud-aio-caddy/allowed-countries.txt'", 47 | "echo 'Scanning nextcloud-aio-caddy folder for admin user...'", 48 | "php /var/www/html/occ files:scan --path='/admin/files/nextcloud-aio-caddy'" 49 | ] 50 | } 51 | ] 52 | } 53 | -------------------------------------------------------------------------------- /community-containers/calcardbackup/calcardbackup.json: -------------------------------------------------------------------------------- 1 | { 2 | "aio_services_v1": [ 3 | { 4 | "container_name": "nextcloud-aio-calcardbackup", 5 | "display_name": "Calendar and contacts backup", 6 | "documentation": "https://github.com/nextcloud/all-in-one/tree/main/community-containers/calcardbackup", 7 | "image": "waja/calcardbackup", 8 | "image_tag": "latest", 9 | "restart": "unless-stopped", 10 | "environment": [ 11 | "CRON_TIME=0 0 * * *", 12 | "INIT_BACKUP=yes", 13 | "BACKUP_DIR=/backup", 14 | "NC_DIR=/nextcloud", 15 | "NC_HOST=%NC_DOMAIN%", 16 | "DB_HOST=nextcloud-aio-database", 17 | "DB_PORT=5432", 18 | "CALCARD_OPTS=-ltm 5" 19 | ], 20 | "volumes": [ 21 | { 22 | "source": "nextcloud_aio_calcardbackup", 23 | "destination": "/backup", 24 | "writeable": true 25 | }, 26 | { 27 | "source": "nextcloud_aio_nextcloud", 28 | "destination": "/nextcloud", 29 | "writeable": false 30 | } 31 | ], 32 | "backup_volumes": [ 33 | "nextcloud_aio_calcardbackup" 34 | ] 35 | } 36 | ] 37 | } -------------------------------------------------------------------------------- /community-containers/calcardbackup/readme.md: -------------------------------------------------------------------------------- 1 | ## calcardbackup 2 | This container packages calcardbackup which is a tool that exports calendars and addressbooks from Nextcloud to .ics and .vcf files and saves 
them to a compressed file. 3 | 4 | ### Notes 5 | - Backups will be created at 00:00 CEST every day. Make sure that this does not conflict with the configured daily backups inside AIO. 6 | - All the exports will be included in AIOs backup solution 7 | - You can find the exports in the nextcloud_aio_calcardbackup volume 8 | - See https://github.com/nextcloud/all-in-one/tree/main/community-containers#community-containers how to add it to the AIO stack 9 | 10 | ### Repository 11 | https://github.com/waja/docker-calcardbackup 12 | 13 | ### Maintainer 14 | https://github.com/pailloM 15 | 16 | -------------------------------------------------------------------------------- /community-containers/dlna/dlna.json: -------------------------------------------------------------------------------- 1 | { 2 | "aio_services_v1": [ 3 | { 4 | "container_name": "nextcloud-aio-dlna", 5 | "display_name": "DLNA", 6 | "documentation": "https://github.com/nextcloud/all-in-one/tree/main/community-containers/dlna", 7 | "image": "thanek/nextcloud-dlna", 8 | "image_tag": "latest", 9 | "internal_port": "host", 10 | "restart": "unless-stopped", 11 | "depends_on": [ 12 | "nextcloud-aio-database" 13 | ], 14 | "environment": [ 15 | "NC_DOMAIN=%NC_DOMAIN%", 16 | "NC_PORT=443", 17 | "NEXTCLOUD_DLNA_SERVER_PORT=9999", 18 | "NEXTCLOUD_DLNA_FRIENDLY_NAME=nextcloud-aio", 19 | "NEXTCLOUD_DATA_DIR=/data", 20 | "NEXTCLOUD_DB_TYPE=postgres", 21 | "NEXTCLOUD_DB_HOST=%AIO_DATABASE_HOST%", 22 | "NEXTCLOUD_DB_PORT=5432", 23 | "NEXTCLOUD_DB_NAME=nextcloud_database", 24 | "NEXTCLOUD_DB_USER=oc_nextcloud", 25 | "NEXTCLOUD_DB_PASS=%DATABASE_PASSWORD%" 26 | ], 27 | "secrets": [ 28 | "DATABASE_PASSWORD" 29 | ], 30 | "volumes": [ 31 | { 32 | "source": "%NEXTCLOUD_DATADIR%", 33 | "destination": "/data", 34 | "writeable": false 35 | } 36 | ] 37 | } 38 | ] 39 | } 40 | -------------------------------------------------------------------------------- /community-containers/dlna/readme.md: 
-------------------------------------------------------------------------------- 1 | ## DLNA server 2 | This container bundles DLNA server for your Nextcloud files to be accessible by the clients in your local network. Simply run the container and look for a new media server `nextcloud-aio` in your local network. 3 | 4 | ### Notes 5 | - This container will work only if the Nextcloud installation is in your home network, it is not suitable for installations on remote servers. 6 | - If you have a firewall like ufw configured, you might need to open at least port 9999 TCP and 1900 UDP first in order to make it work. 7 | - See https://github.com/nextcloud/all-in-one/tree/main/community-containers#community-containers how to add it to the AIO stack 8 | 9 | ### Repository 10 | https://github.com/thanek/nextcloud-dlna 11 | 12 | ### Maintainer 13 | https://github.com/thanek 14 | 15 | -------------------------------------------------------------------------------- /community-containers/fail2ban/fail2ban.json: -------------------------------------------------------------------------------- 1 | { 2 | "aio_services_v1": [ 3 | { 4 | "container_name": "nextcloud-aio-fail2ban", 5 | "display_name": "Fail2ban", 6 | "documentation": "https://github.com/nextcloud/all-in-one/tree/main/community-containers/fail2ban", 7 | "image": "ghcr.io/szaimen/aio-fail2ban", 8 | "image_tag": "v1", 9 | "internal_port": "host", 10 | "restart": "unless-stopped", 11 | "cap_add": [ 12 | "NET_ADMIN", 13 | "NET_RAW" 14 | ], 15 | "environment": [ 16 | "TZ=%TIMEZONE%" 17 | ], 18 | "volumes": [ 19 | { 20 | "source": "nextcloud_aio_nextcloud", 21 | "destination": "/nextcloud", 22 | "writeable": false 23 | }, 24 | { 25 | "source": "nextcloud_aio_vaultwarden_logs", 26 | "destination": "/vaultwarden", 27 | "writeable": false 28 | }, 29 | { 30 | "source": "nextcloud_aio_jellyfin", 31 | "destination": "/jellyfin", 32 | "writeable": false 33 | }, 34 | { 35 | "source": "nextcloud_aio_jellyseerr", 36 | "destination": 
"/jellyseerr", 37 | "writeable": false 38 | } 39 | ] 40 | } 41 | ] 42 | } 43 | -------------------------------------------------------------------------------- /community-containers/fail2ban/readme.md: -------------------------------------------------------------------------------- 1 | ## Fail2ban 2 | This container bundles fail2ban and auto-configures it for you in order to block ip-addresses automatically. It also covers https://github.com/nextcloud/all-in-one/tree/main/community-containers/vaultwarden, https://github.com/nextcloud/all-in-one/tree/main/community-containers/jellyfin, and https://github.com/nextcloud/all-in-one/tree/main/community-containers/jellyseerr, if installed. 3 | 4 | ### Notes 5 | - If you get an error like `"ip6tables v1.8.9 (legacy): can't initialize ip6tables table filter': Table does not exist (do you need to insmod?)"`, you need to enable ip6tables on your host via `sudo modprobe ip6table_filter`. 6 | - If you get an error like `stderr: 'iptables: No chain/target/match by that name.'` and `stderr: 'ip6tables: No chain/target/match by that name.'`, you need to follow https://github.com/szaimen/aio-fail2ban/issues/9#issuecomment-2026898790 in order to resolve this. 7 | - You can unban ip addresses like so for example: `docker exec -it nextcloud-aio-fail2ban fail2ban-client set nextcloud unbanip 203.113.167.162`. 
8 | - See https://github.com/nextcloud/all-in-one/tree/main/community-containers#community-containers how to add it to the AIO stack 9 | 10 | ### Repository 11 | https://github.com/szaimen/aio-fail2ban 12 | 13 | ### Maintainer 14 | https://github.com/szaimen 15 | -------------------------------------------------------------------------------- /community-containers/helloworld/helloworld.json: -------------------------------------------------------------------------------- 1 | { 2 | "aio_services_v1": [ 3 | { 4 | "container_name": "nextcloud-aio-helloworld", 5 | "display_name": "Hello world", 6 | "documentation": "https://github.com/nextcloud/all-in-one/tree/main/community-containers/helloworld", 7 | "image": "ghcr.io/docjyj/aio-helloworld", 8 | "image_tag": "%AIO_CHANNEL%", 9 | "restart": "unless-stopped" 10 | } 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /community-containers/helloworld/readme.md: -------------------------------------------------------------------------------- 1 | ## Hello World 2 | This container is a template for creating a community container. 
3 | 4 | ### Repository 5 | https://github.com/docjyj/aio-helloworld 6 | 7 | ### Maintainer 8 | https://github.com/docjyj 9 | -------------------------------------------------------------------------------- /community-containers/jellyfin/jellyfin.json: -------------------------------------------------------------------------------- 1 | { 2 | "aio_services_v1": [ 3 | { 4 | "container_name": "nextcloud-aio-jellyfin", 5 | "display_name": "Jellyfin", 6 | "documentation": "https://github.com/nextcloud/all-in-one/tree/main/community-containers/jellyfin", 7 | "image": "jellyfin/jellyfin", 8 | "image_tag": "latest", 9 | "internal_port": "host", 10 | "restart": "unless-stopped", 11 | "environment": [ 12 | "TZ=%TIMEZONE%" 13 | ], 14 | "volumes": [ 15 | { 16 | "source": "nextcloud_aio_jellyfin", 17 | "destination": "/config", 18 | "writeable": true 19 | }, 20 | { 21 | "source": "%NEXTCLOUD_DATADIR%", 22 | "destination": "/media", 23 | "writeable": false 24 | }, 25 | { 26 | "source": "%NEXTCLOUD_MOUNT%", 27 | "destination": "%NEXTCLOUD_MOUNT%", 28 | "writeable": true 29 | } 30 | ], 31 | "devices": [ 32 | "/dev/dri" 33 | ], 34 | "enable_nvidia_gpu": true, 35 | "backup_volumes": [ 36 | "nextcloud_aio_jellyfin" 37 | ] 38 | } 39 | ] 40 | } 41 | -------------------------------------------------------------------------------- /community-containers/jellyfin/readme.md: -------------------------------------------------------------------------------- 1 | ## Jellyfin 2 | This container bundles Jellyfin and auto-configures it for you. 3 | 4 | ### Notes 5 | - This container is incompatible with the [Plex](https://github.com/nextcloud/all-in-one/tree/main/community-containers/plex) community container. So make sure that you do not enable both at the same time! 6 | - After adding and starting the container, you can directly visit http://ip.address.of.server:8096/ and access your new Jellyfin instance! 
7 | - This container should usually only be run in home networks as it exposes unencrypted services like DLNA by default which can be disabled via the web interface though. 8 | - In order to access your Jellyfin outside the local network, you have to set up your own reverse proxy. You can set up a reverse proxy following [these instructions](https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md) and [Jellyfin's networking documentation](https://jellyfin.org/docs/general/networking/#running-jellyfin-behind-a-reverse-proxy), OR use the [Caddy](https://github.com/nextcloud/all-in-one/tree/main/community-containers/caddy) community container that will automatically configure `media.$NC_DOMAIN` to redirect to your Jellyfin. 9 | - ⚠️ After the initial start, Jellyfin shows a configuration page to set up the root password, etc. **Be careful to initialize your Jellyfin before adding the DNS record.** 10 | - If you have a firewall like ufw configured, you might need to open all Jellyfin ports in there first in order to make it work. Especially port 8096 is important! 11 | - If you want to secure the installation with fail2ban, you might want to check out https://github.com/nextcloud/all-in-one/tree/main/community-containers/fail2ban 12 | - The data of Jellyfin will be automatically included in AIO's backup solution! 13 | - See [here](https://github.com/nextcloud/all-in-one/tree/main/community-containers#community-containers) how to add it to the AIO stack. 
14 | 15 | 16 | ### Repository 17 | https://github.com/jellyfin/jellyfin 18 | 19 | ### Maintainer 20 | https://github.com/airopi 21 | -------------------------------------------------------------------------------- /community-containers/jellyseerr/jellyseerr.json: -------------------------------------------------------------------------------- 1 | { 2 | "aio_services_v1": [ 3 | { 4 | "container_name": "nextcloud-aio-jellyseerr", 5 | "display_name": "Jellyseerr", 6 | "documentation": "https://github.com/nextcloud/all-in-one/tree/main/community-containers/jellyseerr", 7 | "image": "fallenbagel/jellyseerr", 8 | "image_tag": "latest", 9 | "internal_port": "5055", 10 | "restart": "unless-stopped", 11 | "init": false, 12 | "ports": [ 13 | { 14 | "ip_binding": "%APACHE_IP_BINDING%", 15 | "port_number": "5055", 16 | "protocol": "tcp" 17 | } 18 | ], 19 | "environment": [ 20 | "PORT=5055", 21 | "TZ=%TIMEZONE%" 22 | ], 23 | "volumes": [ 24 | { 25 | "source": "nextcloud_aio_jellyseerr", 26 | "destination": "/app/config", 27 | "writeable": true 28 | } 29 | ], 30 | "backup_volumes": [ 31 | "nextcloud_aio_jellyseerr" 32 | ] 33 | } 34 | ] 35 | } 36 | -------------------------------------------------------------------------------- /community-containers/jellyseerr/readme.md: -------------------------------------------------------------------------------- 1 | ## Jellyseerr 2 | This container bundles Jellyseerr and auto-configures it for you. 3 | 4 | ### Notes 5 | - This container is only intended to be used inside home networks as it uses http for its management page by default. 6 | - After adding and starting the container, you can directly visit `http://ip.address.of.server:5055` and access your new Jellyseerr instance, which can be used to manage Plex, Jellyfin, and Emby. 7 | - In order to access your Jellyseerr outside the local network, you have to set up your own reverse proxy. 
You can set up a reverse proxy following [these instructions](https://github.com/nextcloud/all-in-one/blob/main/reverse-proxy.md) and [Jellyseerr's reverse proxy documentation.](https://docs.jellyseerr.dev/extending-jellyseerr/reverse-proxy), OR use the Caddy community container that will automatically configure requests.$NC_DOMAIN to redirect to your Jellyseerr. Note that it is recommended to [enable CSRF protection in Jellyseerr](https://docs.jellyseerr.dev/using-jellyseerr/settings/general#enable-csrf-protection) for added security if you plan to use Jellyseerr outside the local network, but make sure to read up on it and understand the caveats first. 8 | - If you want to secure the installation with fail2ban, you might want to check out https://github.com/nextcloud/all-in-one/tree/main/community-containers/fail2ban. Note that [enabling the proxy support option in Jellyseerr](https://docs.jellyseerr.dev/using-jellyseerr/settings/general#enable-proxy-support) is required for this to work properly. 9 | - The config of Jellyseerr will be automatically included in AIO's backup solution! 10 | - See [here](https://github.com/nextcloud/all-in-one/tree/main/community-containers#community-containers) how to add it to the AIO stack. 
11 | 12 | ### Repository 13 | https://github.com/Fallenbagel/jellyseerr 14 | 15 | ### Maintainer 16 | https://github.com/Anvil5465 17 | -------------------------------------------------------------------------------- /community-containers/libretranslate/libretranslate.json: -------------------------------------------------------------------------------- 1 | { 2 | "aio_services_v1": [ 3 | { 4 | "container_name": "nextcloud-aio-libretranslate", 5 | "display_name": "LibreTranslate", 6 | "documentation": "https://github.com/nextcloud/all-in-one/tree/main/community-containers/libretranslate", 7 | "image": "ghcr.io/szaimen/aio-libretranslate", 8 | "image_tag": "v1", 9 | "internal_port": "5000", 10 | "restart": "unless-stopped", 11 | "environment": [ 12 | "TZ=%TIMEZONE%" 13 | ], 14 | "volumes": [ 15 | { 16 | "source": "nextcloud_aio_libretranslate_db", 17 | "destination": "/app/db", 18 | "writeable": true 19 | }, 20 | { 21 | "source": "nextcloud_aio_libretranslate_models", 22 | "destination": "/home/libretranslate/.local", 23 | "writeable": true 24 | } 25 | ], 26 | "nextcloud_exec_commands": [ 27 | "php /var/www/html/occ app:install integration_libretranslate", 28 | "php /var/www/html/occ app:enable integration_libretranslate", 29 | "php /var/www/html/occ config:app:set integration_libretranslate host --value='http://nextcloud-aio-libretranslate'", 30 | "php /var/www/html/occ config:app:set integration_libretranslate port --value='5000'" 31 | ] 32 | } 33 | ] 34 | } 35 | -------------------------------------------------------------------------------- /community-containers/libretranslate/readme.md: -------------------------------------------------------------------------------- 1 | ## LibreTranslate 2 | This container bundles LibreTranslate and auto-configures it for you. 3 | 4 | > [!WARNING] 5 | > The LibreTranslate container and app is deprecated! 6 | > Please use the [translate2 app](https://apps.nextcloud.com/apps/translate2) instead. 
7 | > You can activate it by first enabling the Docker-Socket-Proxy in the AIO-interface and then heading over to `https://your-nc-domain.com/settings/apps/tools` and installing and enabling the `Local Machine Translation` app. 8 | 9 | ### Notes 10 | - After the initial startup is done, you might want to change the default language to translate from and to via: 11 | ```bash 12 | # Adjust the values `en` and `de` in commands below accordingly 13 | sudo docker exec --user www-data nextcloud-aio-nextcloud php occ config:app:set integration_libretranslate from_lang --value="en" 14 | sudo docker exec --user www-data nextcloud-aio-nextcloud php occ config:app:set integration_libretranslate to_lang --value="de" 15 | ``` 16 | - See https://github.com/nextcloud/all-in-one/tree/main/community-containers#community-containers how to add it to the AIO stack 17 | 18 | ### Repository 19 | https://github.com/szaimen/aio-libretranslate 20 | 21 | ### Maintainer 22 | https://github.com/szaimen 23 | -------------------------------------------------------------------------------- /community-containers/lldap/lldap.json: -------------------------------------------------------------------------------- 1 | { 2 | "aio_services_v1": [ 3 | { 4 | "container_name": "nextcloud-aio-lldap", 5 | "display_name": "Light LDAP implementation", 6 | "documentation": "https://github.com/nextcloud/all-in-one/tree/main/community-containers/lldap", 7 | "image": "lldap/lldap", 8 | "image_tag": "v0-alpine", 9 | "internal_port": "17170", 10 | "restart": "unless-stopped", 11 | "ports": [ 12 | { 13 | "ip_binding": "%APACHE_IP_BINDING%", 14 | "port_number": "17170", 15 | "protocol": "tcp" 16 | } 17 | ], 18 | "environment": [ 19 | "TZ=%TIMEZONE%", 20 | "UID=65534", 21 | "GID=65534", 22 | "LLDAP_JWT_SECRET=%LLDAP_JWT_SECRET%", 23 | "LLDAP_LDAP_USER_PASS=%LLDAP_LDAP_USER_PASS%", 24 | "LLDAP_LDAP_BASE_DN=%NC_BASE_DN%" 25 | ], 26 | "secrets": [ 27 | "LLDAP_JWT_SECRET", 28 | "LLDAP_LDAP_USER_PASS" 29 | ], 30 | 
"ui_secret": "LLDAP_JWT_SECRET", 31 | "volumes": [ 32 | { 33 | "source": "nextcloud_aio_lldap", 34 | "destination": "/data", 35 | "writeable": true 36 | } 37 | ], 38 | "backup_volumes": [ 39 | "nextcloud_aio_lldap" 40 | ], 41 | "nextcloud_exec_commands": [ 42 | "php /var/www/html/occ app:install user_ldap", 43 | "php /var/www/html/occ app:enable user_ldap" 44 | ] 45 | } 46 | ] 47 | } 48 | -------------------------------------------------------------------------------- /community-containers/local-ai/readme.md: -------------------------------------------------------------------------------- 1 | ## Local AI 2 | This container bundles Local AI and auto-configures it for you. 3 | 4 | ### Notes 5 | - Make sure to have enough storage space available. This container alone needs ~7GB storage. Every model that you add to `models.yaml` will of course use additional space which adds up quite fast. 6 | - After the container was started the first time, you should see a new `nextcloud-aio-local-ai` folder when you open the files app with the default `admin` user. In there you should see a `models.yaml` config file. You can now add models in there. Please refer [here](https://github.com/mudler/LocalAI/blob/master/gallery/index.yaml) where you can get further urls that you can put in there. Afterwards restart all containers from the AIO interface and the models should automatically get downloaded by the local-ai container and activated. 7 | - Example for content of `models.yaml` (if you add all of them, it takes around 10GB additional space): 8 | ```yaml 9 | # Stable Diffusion in NCNN with c++, supported txt2img and img2img 10 | - url: github:mudler/LocalAI/blob/master/gallery/stablediffusion.yaml 11 | name: Stable_diffusion 12 | ``` 13 | - To make it work, you first need to browse `https://your-nc-domain.com/settings/admin/ai` and enable or disable specific features for your models in the openAI settings. Afterwards using the Nextcloud Assistant should work. 
14 | - See [this guide](https://github.com/nextcloud/all-in-one/discussions/5430) for how to improve AI task pickup speed 15 | - See https://github.com/nextcloud/all-in-one/tree/main/community-containers#community-containers how to add it to the AIO stack 16 | 17 | ### Repository 18 | https://github.com/szaimen/aio-local-ai 19 | 20 | ### Maintainer 21 | https://github.com/szaimen 22 | -------------------------------------------------------------------------------- /community-containers/makemkv/readme.md: -------------------------------------------------------------------------------- 1 | ## MakeMKV 2 | This container bundles MakeMKV and auto-configures it for you. 3 | 4 | ### Notes 5 | - This container should only be run in home networks 6 | - ⚠️ This container mounts all devices from the host inside the container in order to be able to access the external DVD/Blu-ray drives which is a security issue. However no better solution was found for the time being. 7 | - This container only works on Linux and not on Docker-Desktop. 8 | - This container requires the [`NEXTCLOUD_MOUNT` variable in AIO to be set](https://github.com/nextcloud/all-in-one?tab=readme-ov-file#how-to-allow-the-nextcloud-container-to-access-directories-on-the-host). Otherwise the output will not be saved correctly. 9 | - After adding and starting the container, you need to visit `https://internal.ip.of.server:5802` in order to log in with the `makemkv` user and the password that you can see next to the container in the AIO interface. (The web page uses a self-signed certificate, so you need to accept the warning). 10 | - After the first login, you can adjust the `/output` directory in the MakeMKV settings to a subdirectory of the root of your chosen `NEXTCLOUD_MOUNT`. (by default `NEXTCLOUD_MOUNT` is mounted to `/output` inside the container. Thus all data is written to the root of it) 11 | - The configured `NEXTCLOUD_DATADIR` is getting mounted to `/storage` inside the container. 
12 | - The config data of MakeMKV will be automatically included in AIOs backup solution! 13 | - ⚠️ After you are done doing your operations, remove the container for better security again from the stack: https://github.com/nextcloud/all-in-one/tree/main/community-containers#how-to-remove-containers-from-aios-stack 14 | - See https://github.com/nextcloud/all-in-one/tree/main/community-containers#community-containers how to add it to the AIO stack 15 | 16 | ### Repository 17 | https://github.com/jlesage/docker-makemkv 18 | 19 | ### Maintainer 20 | https://github.com/szaimen 21 | -------------------------------------------------------------------------------- /community-containers/memories/memories.json: -------------------------------------------------------------------------------- 1 | { 2 | "aio_services_v1": [ 3 | { 4 | "container_name": "nextcloud-aio-memories", 5 | "display_name": "Memories Transcoder", 6 | "documentation": "https://github.com/nextcloud/all-in-one/tree/main/community-containers/memories", 7 | "image": "radialapps/go-vod", 8 | "image_tag": "latest", 9 | "internal_port": "47788", 10 | "restart": "unless-stopped", 11 | "environment": [ 12 | "TZ=%TIMEZONE%", 13 | "NEXTCLOUD_HOST=https://%NC_DOMAIN%" 14 | ], 15 | "volumes": [ 16 | { 17 | "source": "%NEXTCLOUD_DATADIR%", 18 | "destination": "/mnt/ncdata", 19 | "writeable": false 20 | }, 21 | { 22 | "source": "%NEXTCLOUD_MOUNT%", 23 | "destination": "%NEXTCLOUD_MOUNT%", 24 | "writeable": false 25 | } 26 | ], 27 | "devices": [ 28 | "/dev/dri" 29 | ], 30 | "enable_nvidia_gpu": true, 31 | "nextcloud_exec_commands": [ 32 | "php /var/www/html/occ app:install memories", 33 | "php /var/www/html/occ app:enable memories", 34 | "php /var/www/html/occ config:system:set memories.vod.external --value true --type bool", 35 | "php /var/www/html/occ config:system:set memories.vod.connect --value nextcloud-aio-memories:47788" 36 | ] 37 | } 38 | ] 39 | } 40 | 
-------------------------------------------------------------------------------- /community-containers/memories/readme.md: -------------------------------------------------------------------------------- 1 | ## Memories 2 | This container bundles the hardware-transcoding container of memories and auto-configures it for you. 3 | 4 | ### Notes 5 | - In order to actually enable the hardware transcoding, you need to add the following flag to AIO apart from adding this container: https://github.com/nextcloud/all-in-one#how-to-enable-hardware-acceleration-for-nextcloud 6 | - See https://github.com/nextcloud/all-in-one/tree/main/community-containers#community-containers how to add it to the AIO stack 7 | 8 | ### Repository 9 | https://github.com/pulsejet/memories 10 | 11 | ### Maintainer 12 | https://github.com/pulsejet 13 | -------------------------------------------------------------------------------- /community-containers/nocodb/nocodb.json: -------------------------------------------------------------------------------- 1 | { 2 | "aio_services_v1": [ 3 | { 4 | "container_name": "nextcloud-aio-nocodb", 5 | "display_name": "NocoDB", 6 | "documentation": "https://github.com/nextcloud/all-in-one/tree/main/community-containers/nocodb", 7 | "image": "nocodb/nocodb", 8 | "image_tag": "latest", 9 | "internal_port": "10028", 10 | "restart": "unless-stopped", 11 | "ports": [ 12 | { 13 | "ip_binding": "%APACHE_IP_BINDING%", 14 | "port_number": "10028", 15 | "protocol": "tcp" 16 | } 17 | ], 18 | "environment": [ 19 | "NC_AUTH_JWT_SECRET=%NOCODB_JWT_SECRET%", 20 | "NC_PUBLIC_URL=https://tables.%NC_DOMAIN%/", 21 | "NC_DASHBOARD_URL=/", 22 | "NC_ADMIN_EMAIL=admin@noco.db", 23 | "NC_ADMIN_PASS=%NOCODB_USER_PASS%", 24 | "PORT=10028", 25 | "NC_DISABLE_TELE=true" 26 | ], 27 | "secrets": [ 28 | "NOCODB_JWT_SECRET", 29 | "NOCODB_USER_PASS" 30 | ], 31 | "ui_secret": "NOCODB_USER_PASS", 32 | "volumes": [ 33 | { 34 | "source": "nextcloud_aio_nocodb", 35 | "destination": "/usr/app/data", 36 
| "writeable": true 37 | } 38 | ], 39 | "backup_volumes": [ 40 | "nextcloud_aio_nocodb" 41 | ] 42 | } 43 | ] 44 | } 45 | -------------------------------------------------------------------------------- /community-containers/nocodb/readme.md: -------------------------------------------------------------------------------- 1 | > [!NOTE] 2 | > This container is there to compensate for the lack of functionality in Nextcloud Tables. 3 | > 4 | > When Nextcloud Tables V2 is released, I will stop checking for updates, and will no longer fix any potential issues. 5 | > 6 | > Some missing functionality in Nextcloud Tables: 7 | > - Multiple view layout (Gantt, Kanban, Calendar...) 8 | > - Field (Person, Tag, File...) 9 | > - See more here https://github.com/nextcloud/tables/issues/103 10 | 11 | ## NocoDb server 12 | This container bundles NocoDb without synchronization with Nextcloud. 13 | 14 | This is an alternative of **Airtable**. 15 | 16 | ### Notes 17 | - You need to configure a reverse proxy in order to run this container since nocodb needs a dedicated (sub)domain! For that, you might have a look at https://github.com/nextcloud/all-in-one/tree/main/community-containers/caddy. 18 | - Currently, only `tables.$NC_DOMAIN` is supported as subdomain! So if Nextcloud is using `your-domain.com`, nocodb will use `tables.your-domain.com`. 19 | - The data of NocoDb will be automatically included in AIOs backup solution! 20 | - After adding and starting the container, you can log in to the web interface at `https://tables.$NC_DOMAIN/#/signin` with the username `admin@noco.db` and the password that you can see in the AIO interface next to the container. 
21 | - See https://docs.nocodb.com/ for usage of NocoDb 22 | - See https://github.com/nextcloud/all-in-one/tree/main/community-containers#community-containers how to add it to the AIO stack 23 | 24 | ### Repository 25 | https://github.com/nocodb/nocodb 26 | 27 | ### Maintainer 28 | https://github.com/docjyJ 29 | -------------------------------------------------------------------------------- /community-containers/npmplus/npmplus.json: -------------------------------------------------------------------------------- 1 | { 2 | "aio_services_v1": [ 3 | { 4 | "container_name": "nextcloud-aio-npmplus", 5 | "display_name": "NPMplus", 6 | "documentation": "https://github.com/nextcloud/all-in-one/tree/main/community-containers/npmplus", 7 | "image": "ghcr.io/zoeyvid/npmplus", 8 | "image_tag": "latest", 9 | "internal_port": "host", 10 | "restart": "unless-stopped", 11 | "environment": [ 12 | "TZ=%TIMEZONE%", 13 | "NC_AIO=true", 14 | "NC_DOMAIN=%NC_DOMAIN%" 15 | ], 16 | "volumes": [ 17 | { 18 | "source": "nextcloud_aio_npmplus", 19 | "destination": "/data", 20 | "writeable": true 21 | } 22 | ], 23 | "backup_volumes": [ 24 | "nextcloud_aio_npmplus" 25 | ], 26 | "aio_variables": [ 27 | "apache_ip_binding=127.0.0.1", 28 | "apache_port=11000" 29 | ] 30 | } 31 | ] 32 | } 33 | -------------------------------------------------------------------------------- /community-containers/npmplus/readme.md: -------------------------------------------------------------------------------- 1 | ## NPMplus 2 | This container contains a fork of the Nginx Proxy Manager, which is a WebUI for nginx. It will also automatically create a config and cert for AIO. 3 | 4 | ### Notes 5 | - This container is incompatible with the [caddy](https://github.com/nextcloud/all-in-one/tree/main/community-containers/caddy) community container. So make sure that you do not enable both at the same time! 
6 | - Make sure that no other service is using port `443 (tcp/udp)` or `81 (tcp)` on your host as otherwise the containers will fail to start. You can check this with `sudo netstat -tulpn | grep "443\|81"` before installing AIO. 7 | - Please change the default login data first, after you can read inside the logs that the default config for AIO is created and there are no errors. 8 | - After the container was started the first time, please check the logs for errors. Then you can open NPMplus on `https://:81` and change the password. 9 | - The default password is `iArhP1j7p1P6TA92FA2FMbbUGYqwcYzxC4AVEe12Wbi94FY9gNN62aKyF1shrvG4NycjjX9KfmDQiwkLZH1ZDR9xMjiG2QmoHXi` and the default email is `admin@example.org` 10 | - If you want to use NPMplus behind a domain and outside localhost just create a new proxy host inside the NPMplus which proxies to `https`, `127.0.0.1` and port `81` - all other settings should be the same as for the AIO host. 11 | - If you want to set env options from this [compose.yaml](https://github.com/ZoeyVid/NPMplus/blob/develop/compose.yaml), please set them inside the `.env` file which you can find in the `nextcloud_aio_npmplus` volume 12 | - The data (certs, configs, etc.) of NPMplus will be automatically included in AIOs backup solution! 
13 | - **Important:** you always need to enable https for your hosts, since `DISABLE_HTTP` is set to true by default 14 | - See https://github.com/nextcloud/all-in-one/tree/main/community-containers#community-containers how to add it to the AIO stack 15 | 16 | ### Repository and Documentation 17 | https://github.com/ZoeyVid/NPMplus 18 | 19 | ### Maintainer 20 | https://github.com/Zoey2936 21 | -------------------------------------------------------------------------------- /community-containers/pi-hole/pi-hole.json: -------------------------------------------------------------------------------- 1 | { 2 | "aio_services_v1": [ 3 | { 4 | "container_name": "nextcloud-aio-pihole", 5 | "display_name": "Pi-hole", 6 | "documentation": "https://github.com/nextcloud/all-in-one/tree/main/community-containers/pi-hole", 7 | "image": "pihole/pihole", 8 | "image_tag": "latest", 9 | "internal_port": "8573", 10 | "restart": "unless-stopped", 11 | "init": false, 12 | "ports": [ 13 | { 14 | "ip_binding": "", 15 | "port_number": "53", 16 | "protocol": "tcp" 17 | }, 18 | { 19 | "ip_binding": "", 20 | "port_number": "53", 21 | "protocol": "udp" 22 | }, 23 | { 24 | "ip_binding": "", 25 | "port_number": "8573", 26 | "protocol": "tcp" 27 | } 28 | ], 29 | "environment": [ 30 | "TZ=%TIMEZONE%", 31 | "FTLCONF_webserver_api_password=%PIHOLE_WEBPASSWORD%", 32 | "FTLCONF_dns_listeningMode=all", 33 | "FTLCONF_webserver_port=8573" 34 | ], 35 | "volumes": [ 36 | { 37 | "source": "nextcloud_aio_pihole", 38 | "destination": "/etc/pihole", 39 | "writeable": true 40 | }, 41 | { 42 | "source": "nextcloud_aio_pihole_dnsmasq", 43 | "destination": "/etc/dnsmasq.d", 44 | "writeable": true 45 | } 46 | ], 47 | "backup_volumes": [ 48 | "nextcloud_aio_pihole", 49 | "nextcloud_aio_pihole_dnsmasq" 50 | ], 51 | "ui_secret": "PIHOLE_WEBPASSWORD", 52 | "secrets": [ 53 | "PIHOLE_WEBPASSWORD" 54 | ] 55 | } 56 | ] 57 | } 58 | -------------------------------------------------------------------------------- 
/community-containers/pi-hole/readme.md: -------------------------------------------------------------------------------- 1 | ## Pi-hole 2 | This container bundles pi-hole and auto-configures it for you. 3 | 4 | ### Notes 5 | - You should not run this container on a public VPS! It is only intended to run in home networks! 6 | - Make sure that no dns server is already running by checking with `sudo netstat -tulpn | grep 53`. Otherwise the container will not be able to start! 7 | - The DHCP functionality of Pi-hole has been disabled! 8 | - The data of pi-hole will be automatically included in AIOs backup solution! 9 | - After adding and starting the container, you can visit `http://ip.address.of.this.server:8573/admin` in order to log in with the admin key that you can see next to the container in the AIO interface. There you can configure the pi-hole setup. Also you can add local dns records. 10 | - You can configure your home network now to use pi-hole as its dns server by configuring your router. 11 | - Additionally, you can configure the docker daemon to use that by editing `/etc/docker/daemon.json` and adding ` { "dns" : [ "ip.address.of.this.server" , "8.8.8.8" ] } `. 
12 | - See https://github.com/nextcloud/all-in-one/tree/main/community-containers#community-containers how to add it to the AIO stack 13 | 14 | ### Repository 15 | https://github.com/pi-hole/docker-pi-hole 16 | 17 | ### Maintainer 18 | https://github.com/szaimen 19 | -------------------------------------------------------------------------------- /community-containers/plex/plex.json: -------------------------------------------------------------------------------- 1 | { 2 | "aio_services_v1": [ 3 | { 4 | "container_name": "nextcloud-aio-plex", 5 | "display_name": "Plex", 6 | "documentation": "https://github.com/nextcloud/all-in-one/tree/main/community-containers/plex", 7 | "image": "plexinc/pms-docker", 8 | "image_tag": "latest", 9 | "internal_port": "host", 10 | "restart": "unless-stopped", 11 | "environment": [ 12 | "TZ=%TIMEZONE%", 13 | "PLEX_UID=33", 14 | "PLEX_GID=33" 15 | ], 16 | "volumes": [ 17 | { 18 | "source": "nextcloud_aio_plex", 19 | "destination": "/config", 20 | "writeable": true 21 | }, 22 | { 23 | "source": "%NEXTCLOUD_DATADIR%", 24 | "destination": "/data", 25 | "writeable": false 26 | }, 27 | { 28 | "source": "%NEXTCLOUD_MOUNT%", 29 | "destination": "%NEXTCLOUD_MOUNT%", 30 | "writeable": false 31 | } 32 | ], 33 | "devices": [ 34 | "/dev/dri" 35 | ], 36 | "enable_nvidia_gpu": true, 37 | "backup_volumes": [ 38 | "nextcloud_aio_plex" 39 | ] 40 | } 41 | ] 42 | } 43 | -------------------------------------------------------------------------------- /community-containers/plex/readme.md: -------------------------------------------------------------------------------- 1 | ## Plex 2 | This container bundles Plex and auto-configures it for you. 3 | 4 | ### Notes 5 | - This container is incompatible with the [Jellyfin](https://github.com/nextcloud/all-in-one/tree/main/community-containers/jellyfin) community container. So make sure that you do not enable both at the same time! 6 | - This is not working on arm64 since Plex does only provide x64 docker images. 
This container should usually only be run in home networks as it exposes unencrypted services like DLNA by default, which can be disabled via the web interface though.
See https://github.com/szaimen/aio-scrutiny/issues/3 10 | - See https://github.com/nextcloud/all-in-one/tree/main/community-containers#community-containers how to add it to the AIO stack 11 | 12 | ### Repository 13 | https://github.com/szaimen/aio-scrutiny 14 | 15 | ### Maintainer 16 | https://github.com/szaimen 17 | -------------------------------------------------------------------------------- /community-containers/scrutiny/scrutiny.json: -------------------------------------------------------------------------------- 1 | { 2 | "aio_services_v1": [ 3 | { 4 | "container_name": "nextcloud-aio-scrutiny", 5 | "display_name": "Scrutiny", 6 | "documentation": "https://github.com/nextcloud/all-in-one/tree/main/community-containers/scrutiny", 7 | "image": "ghcr.io/szaimen/aio-scrutiny", 8 | "image_tag": "v1", 9 | "internal_port": "8000", 10 | "init": false, 11 | "restart": "unless-stopped", 12 | "ports": [ 13 | { 14 | "ip_binding": "", 15 | "port_number": "8000", 16 | "protocol": "tcp" 17 | } 18 | ], 19 | "cap_add": [ 20 | "SYS_RAWIO", 21 | "SYS_ADMIN" 22 | ], 23 | "environment": [ 24 | "TZ=%TIMEZONE%", 25 | "SCRUTINY_WEB_LISTEN_PORT=8000", 26 | "COLLECTOR_API_ENDPOINT=http://127.0.0.1:8000" 27 | ], 28 | "volumes": [ 29 | { 30 | "source": "nextcloud_aio_scrutiny", 31 | "destination": "/opt/scrutiny/config", 32 | "writeable": true 33 | }, 34 | { 35 | "source": "nextcloud_aio_scrutiny_db", 36 | "destination": "/opt/scrutiny/influxdb", 37 | "writeable": true 38 | }, 39 | { 40 | "source": "/run/udev", 41 | "destination": "/run/udev", 42 | "writeable": false 43 | }, 44 | { 45 | "source": "/dev", 46 | "destination": "/dev", 47 | "writeable": false 48 | } 49 | ], 50 | "backup_volumes": [ 51 | "nextcloud_aio_scrutiny", 52 | "nextcloud_aio_scrutiny_db" 53 | ] 54 | } 55 | ] 56 | } 57 | -------------------------------------------------------------------------------- /community-containers/smbserver/readme.md: 
-------------------------------------------------------------------------------- 1 | ## SMB-server 2 | This container bundles an SMB-server and allows to configure it via a graphical shell script. 3 | 4 | ### Notes 5 | - This container should only be run in home networks 6 | - This container currently only works on amd64. See https://github.com/szaimen/aio-smbserver/issues/3 7 | - After adding and starting the container, you need to visit `https://internal.ip.of.server:5803` in order to log in with the `smbserver` user and the password that you can see next to the container in the AIO interface. (The web page uses a self-signed certificate, so you need to accept the warning). Then type in `bash /smbserver.sh` and you will see a graphical UI for configuring the smb-server interactively. 8 | - The config data of SMB-server will be automatically included in AIOs backup solution! 9 | - See https://github.com/nextcloud/all-in-one/tree/main/community-containers#community-containers how to add it to the AIO stack 10 | 11 | ### Repository 12 | https://github.com/szaimen/aio-smbserver/ 13 | 14 | ### Maintainer 15 | https://github.com/szaimen 16 | -------------------------------------------------------------------------------- /community-containers/stalwart/readme.md: -------------------------------------------------------------------------------- 1 | > [!CAUTION] 2 | > Be aware that the mail server is the most difficult service to deploy. 3 | > 4 | > Do not use this feature as a main mail server or without a redundancy system and without knowledge. 5 | 6 | ## Stalwart mail server 7 | This container bundles stalwart mail server and auto-configures it for you. 8 | 9 | ### Notes 10 | Documentation is available on the container repository. 11 | This documentation is regularly updated and is intended to be as simple and detailed as possible. 12 | Thanks for all your feedback! 
See https://github.com/docjyJ/aio-stalwart#getting-started for getting started with this container.
After this is done, you might disable the admin panel via the reverse proxy by blocking connections to the subdirectory. 10 | - If using the caddy community container, the vaultwarden admin interface can be disabled by creating a `block-vaultwarden-admin` file in the `nextcloud-aio-caddy` folder when you open the Nextcloud files app with the default `admin` user. Afterwards restart all containers from the AIO interface and the admin interface should be disabled! You can unlock the admin interface by removing the file again and afterwards restarting the containers via the AIO interface. 11 | - See https://github.com/nextcloud/all-in-one/tree/main/community-containers#community-containers how to add it to the AIO stack 12 | 13 | ### Repository 14 | https://github.com/dani-garcia/vaultwarden 15 | 16 | ### Maintainer 17 | https://github.com/szaimen 18 | -------------------------------------------------------------------------------- /community-containers/vaultwarden/vaultwarden.json: -------------------------------------------------------------------------------- 1 | { 2 | "aio_services_v1": [ 3 | { 4 | "container_name": "nextcloud-aio-vaultwarden", 5 | "display_name": "Vaultwarden", 6 | "documentation": "https://github.com/nextcloud/all-in-one/tree/main/community-containers/vaultwarden", 7 | "image": "ghcr.io/dani-garcia/vaultwarden", 8 | "image_tag": "alpine", 9 | "internal_port": "8812", 10 | "restart": "unless-stopped", 11 | "ports": [ 12 | { 13 | "ip_binding": "%APACHE_IP_BINDING%", 14 | "port_number": "8812", 15 | "protocol": "tcp" 16 | } 17 | ], 18 | "environment": [ 19 | "TZ=%TIMEZONE%", 20 | "ROCKET_PORT=8812", 21 | "ADMIN_TOKEN=%VAULTWARDEN_ADMIN_TOKEN%", 22 | "DOMAIN=https://bw.%NC_DOMAIN%", 23 | "LOG_FILE=/logs/vaultwarden.log", 24 | "LOG_LEVEL=warn", 25 | "SIGNUPS_VERIFY=true", 26 | "SIGNUPS_ALLOWED=false" 27 | ], 28 | "volumes": [ 29 | { 30 | "source": "nextcloud_aio_vaultwarden", 31 | "destination": "/data", 32 | "writeable": true 33 | }, 34 | { 35 | 
"source": "nextcloud_aio_vaultwarden_logs", 36 | "destination": "/logs", 37 | "writeable": true 38 | } 39 | ], 40 | "backup_volumes": [ 41 | "nextcloud_aio_vaultwarden" 42 | ], 43 | "ui_secret": "VAULTWARDEN_ADMIN_TOKEN", 44 | "secrets": [ 45 | "VAULTWARDEN_ADMIN_TOKEN" 46 | ] 47 | } 48 | ] 49 | } 50 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/Chart.yaml: -------------------------------------------------------------------------------- 1 | name: nextcloud-aio-helm-chart 2 | description: A generated Helm Chart for Nextcloud AIO from Skippbox Kompose 3 | version: 10.15.0 4 | apiVersion: v2 5 | keywords: 6 | - latest 7 | - nextcloud 8 | - helm-chart 9 | - open-source 10 | - cloud 11 | sources: 12 | - https://github.com/nextcloud/all-in-one/tree/main/nextcloud-aio-helm-chart 13 | home: https://github.com/nextcloud/all-in-one/tree/main/nextcloud-aio-helm-chart 14 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-apache-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | io.kompose.service: nextcloud-aio-apache 6 | name: nextcloud-aio-apache 7 | namespace: "{{ .Values.NAMESPACE }}" 8 | spec: 9 | {{- if .Values.STORAGE_CLASS }} 10 | storageClassName: {{ .Values.STORAGE_CLASS }} 11 | {{- end }} 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: {{ .Values.APACHE_STORAGE_SIZE }} 17 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-apache-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.version: 1.36.0 (ae2a39403) 6 | labels: 7 | io.kompose.service: 
nextcloud-aio-apache 8 | name: nextcloud-aio-apache 9 | namespace: "{{ .Values.NAMESPACE }}" 10 | spec: 11 | ipFamilyPolicy: PreferDualStack 12 | type: LoadBalancer 13 | externalTrafficPolicy: Local 14 | ports: 15 | - name: "{{ .Values.APACHE_PORT }}" 16 | port: {{ .Values.APACHE_PORT }} 17 | targetPort: {{ .Values.APACHE_PORT }} 18 | - name: {{ .Values.APACHE_PORT }}-udp 19 | port: {{ .Values.APACHE_PORT }} 20 | protocol: UDP 21 | targetPort: {{ .Values.APACHE_PORT }} 22 | selector: 23 | io.kompose.service: nextcloud-aio-apache 24 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-clamav-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.CLAMAV_ENABLED "yes" }} 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | labels: 6 | io.kompose.service: nextcloud-aio-clamav 7 | name: nextcloud-aio-clamav 8 | namespace: "{{ .Values.NAMESPACE }}" 9 | spec: 10 | {{- if .Values.STORAGE_CLASS }} 11 | storageClassName: {{ .Values.STORAGE_CLASS }} 12 | {{- end }} 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: {{ .Values.CLAMAV_STORAGE_SIZE }} 18 | {{- end }} 19 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-clamav-service.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.CLAMAV_ENABLED "yes" }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | annotations: 6 | kompose.version: 1.36.0 (ae2a39403) 7 | labels: 8 | io.kompose.service: nextcloud-aio-clamav 9 | name: nextcloud-aio-clamav 10 | namespace: "{{ .Values.NAMESPACE }}" 11 | spec: 12 | ipFamilyPolicy: PreferDualStack 13 | ports: 14 | - name: "3310" 15 | port: 3310 16 | targetPort: 3310 17 | selector: 18 | io.kompose.service: nextcloud-aio-clamav 19 | {{- end }} 20 | 
-------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-collabora-service.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.COLLABORA_ENABLED "yes" }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | annotations: 6 | kompose.version: 1.36.0 (ae2a39403) 7 | labels: 8 | io.kompose.service: nextcloud-aio-collabora 9 | name: nextcloud-aio-collabora 10 | namespace: "{{ .Values.NAMESPACE }}" 11 | spec: 12 | ipFamilyPolicy: PreferDualStack 13 | ports: 14 | - name: "9980" 15 | port: 9980 16 | targetPort: 9980 17 | selector: 18 | io.kompose.service: nextcloud-aio-collabora 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-database-dump-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | io.kompose.service: nextcloud-aio-database-dump 6 | name: nextcloud-aio-database-dump 7 | namespace: "{{ .Values.NAMESPACE }}" 8 | spec: 9 | {{- if .Values.STORAGE_CLASS }} 10 | storageClassName: {{ .Values.STORAGE_CLASS }} 11 | {{- end }} 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: {{ .Values.DATABASE_DUMP_STORAGE_SIZE }} 17 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-database-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | io.kompose.service: nextcloud-aio-database 6 | name: nextcloud-aio-database 7 | namespace: "{{ .Values.NAMESPACE }}" 8 | spec: 9 | {{- if .Values.STORAGE_CLASS }} 10 | storageClassName: {{ .Values.STORAGE_CLASS }} 11 | {{- end }} 12 
| accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: {{ .Values.DATABASE_STORAGE_SIZE }} 17 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-database-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.version: 1.36.0 (ae2a39403) 6 | labels: 7 | io.kompose.service: nextcloud-aio-database 8 | name: nextcloud-aio-database 9 | namespace: "{{ .Values.NAMESPACE }}" 10 | spec: 11 | ipFamilyPolicy: PreferDualStack 12 | ports: 13 | - name: "5432" 14 | port: 5432 15 | targetPort: 5432 16 | selector: 17 | io.kompose.service: nextcloud-aio-database 18 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-elasticsearch-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.FULLTEXTSEARCH_ENABLED "yes" }} 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | labels: 6 | io.kompose.service: nextcloud-aio-elasticsearch 7 | name: nextcloud-aio-elasticsearch 8 | namespace: "{{ .Values.NAMESPACE }}" 9 | spec: 10 | {{- if .Values.STORAGE_CLASS }} 11 | storageClassName: {{ .Values.STORAGE_CLASS }} 12 | {{- end }} 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: {{ .Values.ELASTICSEARCH_STORAGE_SIZE }} 18 | {{- end }} 19 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-fulltextsearch-service.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.FULLTEXTSEARCH_ENABLED "yes" }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | annotations: 6 | kompose.version: 1.36.0 (ae2a39403) 7 | labels: 8 | io.kompose.service: 
nextcloud-aio-fulltextsearch 9 | name: nextcloud-aio-fulltextsearch 10 | namespace: "{{ .Values.NAMESPACE }}" 11 | spec: 12 | ipFamilyPolicy: PreferDualStack 13 | ports: 14 | - name: "9200" 15 | port: 9200 16 | targetPort: 9200 17 | selector: 18 | io.kompose.service: nextcloud-aio-fulltextsearch 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-imaginary-service.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.IMAGINARY_ENABLED "yes" }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | annotations: 6 | kompose.version: 1.36.0 (ae2a39403) 7 | labels: 8 | io.kompose.service: nextcloud-aio-imaginary 9 | name: nextcloud-aio-imaginary 10 | namespace: "{{ .Values.NAMESPACE }}" 11 | spec: 12 | ipFamilyPolicy: PreferDualStack 13 | ports: 14 | - name: "9000" 15 | port: 9000 16 | targetPort: 9000 17 | selector: 18 | io.kompose.service: nextcloud-aio-imaginary 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-namespace-namespace.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (ne .Values.NAMESPACE "default") (ne .Values.NAMESPACE_DISABLED "yes") }} 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: "{{ .Values.NAMESPACE }}" 6 | namespace: "{{ .Values.NAMESPACE }}" 7 | {{- if eq (.Values.RPSS_ENABLED | default "no") "yes" }} 8 | labels: 9 | pod-security.kubernetes.io/enforce: restricted 10 | {{- end }} 11 | {{- end }} 12 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.NETWORK_POLICY_ENABLED "yes" }} 2 | # 
https://github.com/ahmetb/kubernetes-network-policy-recipes/blob/master/04-deny-traffic-from-other-namespaces.md 3 | kind: NetworkPolicy 4 | apiVersion: networking.k8s.io/v1 5 | metadata: 6 | namespace: "{{ .Values.NAMESPACE }}" 7 | name: nextcloud-aio-deny-from-other-namespaces 8 | spec: 9 | podSelector: 10 | matchLabels: 11 | policyTypes: 12 | - Ingress 13 | - Egress 14 | ingress: 15 | - from: 16 | - podSelector: {} 17 | egress: 18 | - {} # Allows all egress traffic 19 | --- 20 | apiVersion: networking.k8s.io/v1 21 | kind: NetworkPolicy 22 | metadata: 23 | namespace: "{{ .Values.NAMESPACE }}" 24 | name: nextcloud-aio-webserver-allow 25 | spec: 26 | podSelector: 27 | matchExpressions: 28 | - key: io.kompose.service 29 | operator: In 30 | values: 31 | - nextcloud-aio-apache 32 | policyTypes: 33 | - Ingress 34 | ingress: 35 | - {} # Allows all ingress traffic 36 | {{- end }} 37 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-nextcloud-data-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | io.kompose.service: nextcloud-aio-nextcloud-data 6 | name: nextcloud-aio-nextcloud-data 7 | namespace: "{{ .Values.NAMESPACE }}" 8 | spec: 9 | {{- if .Values.STORAGE_CLASS }} 10 | storageClassName: {{ .Values.STORAGE_CLASS }} 11 | {{- end }} 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: {{ .Values.NEXTCLOUD_DATA_STORAGE_SIZE }} 17 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-nextcloud-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | io.kompose.service: nextcloud-aio-nextcloud 6 | name: 
nextcloud-aio-nextcloud 7 | namespace: "{{ .Values.NAMESPACE }}" 8 | spec: 9 | {{- if .Values.STORAGE_CLASS }} 10 | storageClassName: {{ .Values.STORAGE_CLASS }} 11 | {{- end }} 12 | accessModes: 13 | - ReadWriteMany 14 | resources: 15 | requests: 16 | storage: {{ .Values.NEXTCLOUD_STORAGE_SIZE }} 17 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-nextcloud-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.version: 1.36.0 (ae2a39403) 6 | labels: 7 | io.kompose.service: nextcloud-aio-nextcloud 8 | name: nextcloud-aio-nextcloud 9 | namespace: "{{ .Values.NAMESPACE }}" 10 | spec: 11 | ipFamilyPolicy: PreferDualStack 12 | ports: 13 | - name: "9000" 14 | port: 9000 15 | targetPort: 9000 16 | - name: "9001" 17 | port: 9001 18 | targetPort: 9001 19 | selector: 20 | io.kompose.service: nextcloud-aio-nextcloud 21 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-nextcloud-trusted-cacerts-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | io.kompose.service: nextcloud-aio-nextcloud-trusted-cacerts 6 | name: nextcloud-aio-nextcloud-trusted-cacerts 7 | namespace: "{{ .Values.NAMESPACE }}" 8 | spec: 9 | {{- if .Values.STORAGE_CLASS }} 10 | storageClassName: {{ .Values.STORAGE_CLASS }} 11 | {{- end }} 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: {{ .Values.NEXTCLOUD_TRUSTED_CACERTS_STORAGE_SIZE }} 17 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-notify-push-service.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.version: 1.36.0 (ae2a39403) 6 | labels: 7 | io.kompose.service: nextcloud-aio-notify-push 8 | name: nextcloud-aio-notify-push 9 | namespace: "{{ .Values.NAMESPACE }}" 10 | spec: 11 | ipFamilyPolicy: PreferDualStack 12 | ports: 13 | - name: "7867" 14 | port: 7867 15 | targetPort: 7867 16 | selector: 17 | io.kompose.service: nextcloud-aio-notify-push 18 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-onlyoffice-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.ONLYOFFICE_ENABLED "yes" }} 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | labels: 6 | io.kompose.service: nextcloud-aio-onlyoffice 7 | name: nextcloud-aio-onlyoffice 8 | namespace: "{{ .Values.NAMESPACE }}" 9 | spec: 10 | {{- if .Values.STORAGE_CLASS }} 11 | storageClassName: {{ .Values.STORAGE_CLASS }} 12 | {{- end }} 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: {{ .Values.ONLYOFFICE_STORAGE_SIZE }} 18 | {{- end }} 19 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-onlyoffice-service.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.ONLYOFFICE_ENABLED "yes" }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | annotations: 6 | kompose.version: 1.36.0 (ae2a39403) 7 | labels: 8 | io.kompose.service: nextcloud-aio-onlyoffice 9 | name: nextcloud-aio-onlyoffice 10 | namespace: "{{ .Values.NAMESPACE }}" 11 | spec: 12 | ipFamilyPolicy: PreferDualStack 13 | ports: 14 | - name: "80" 15 | port: 80 16 | targetPort: 80 17 | selector: 18 | io.kompose.service: nextcloud-aio-onlyoffice 19 | {{- end 
}} 20 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-redis-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | io.kompose.service: nextcloud-aio-redis 6 | name: nextcloud-aio-redis 7 | namespace: "{{ .Values.NAMESPACE }}" 8 | spec: 9 | {{- if .Values.STORAGE_CLASS }} 10 | storageClassName: {{ .Values.STORAGE_CLASS }} 11 | {{- end }} 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: {{ .Values.REDIS_STORAGE_SIZE }} 17 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-redis-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.version: 1.36.0 (ae2a39403) 6 | labels: 7 | io.kompose.service: nextcloud-aio-redis 8 | name: nextcloud-aio-redis 9 | namespace: "{{ .Values.NAMESPACE }}" 10 | spec: 11 | ipFamilyPolicy: PreferDualStack 12 | ports: 13 | - name: "6379" 14 | port: 6379 15 | targetPort: 6379 16 | selector: 17 | io.kompose.service: nextcloud-aio-redis 18 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-talk-recording-persistentvolumeclaim.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.TALK_RECORDING_ENABLED "yes" }} 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | labels: 6 | io.kompose.service: nextcloud-aio-talk-recording 7 | name: nextcloud-aio-talk-recording 8 | namespace: "{{ .Values.NAMESPACE }}" 9 | spec: 10 | {{- if .Values.STORAGE_CLASS }} 11 | storageClassName: {{ .Values.STORAGE_CLASS }} 12 | {{- end }} 13 | accessModes: 14 | - 
ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: {{ .Values.TALK_RECORDING_STORAGE_SIZE }} 18 | {{- end }} 19 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-talk-recording-service.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.TALK_RECORDING_ENABLED "yes" }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | annotations: 6 | kompose.version: 1.36.0 (ae2a39403) 7 | labels: 8 | io.kompose.service: nextcloud-aio-talk-recording 9 | name: nextcloud-aio-talk-recording 10 | namespace: "{{ .Values.NAMESPACE }}" 11 | spec: 12 | ipFamilyPolicy: PreferDualStack 13 | ports: 14 | - name: "1234" 15 | port: 1234 16 | targetPort: 1234 17 | selector: 18 | io.kompose.service: nextcloud-aio-talk-recording 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-talk-service.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.TALK_ENABLED "yes" }} 2 | --- 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | annotations: 7 | kompose.version: 1.36.0 (ae2a39403) 8 | labels: 9 | io.kompose.service: nextcloud-aio-talk 10 | name: nextcloud-aio-talk-public 11 | namespace: "{{ .Values.NAMESPACE }}" 12 | spec: 13 | ipFamilyPolicy: PreferDualStack 14 | type: LoadBalancer 15 | externalTrafficPolicy: Local 16 | ports: 17 | - name: "{{ .Values.TALK_PORT }}" 18 | port: {{ .Values.TALK_PORT }} 19 | targetPort: {{ .Values.TALK_PORT }} 20 | - name: {{ .Values.TALK_PORT }}-udp 21 | port: {{ .Values.TALK_PORT }} 22 | protocol: UDP 23 | targetPort: {{ .Values.TALK_PORT }} 24 | selector: 25 | io.kompose.service: nextcloud-aio-talk 26 | --- 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | annotations: 31 | kompose.version: 1.36.0 (ae2a39403) 32 | labels: 33 | io.kompose.service: 
nextcloud-aio-talk 34 | name: nextcloud-aio-talk 35 | namespace: "{{ .Values.NAMESPACE }}" 36 | spec: 37 | ipFamilyPolicy: PreferDualStack 38 | ports: 39 | - name: "8081" 40 | port: 8081 41 | targetPort: 8081 42 | selector: 43 | io.kompose.service: nextcloud-aio-talk 44 | {{- end }} 45 | -------------------------------------------------------------------------------- /nextcloud-aio-helm-chart/templates/nextcloud-aio-whiteboard-service.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.WHITEBOARD_ENABLED "yes" }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | annotations: 6 | kompose.version: 1.36.0 (ae2a39403) 7 | labels: 8 | io.kompose.service: nextcloud-aio-whiteboard 9 | name: nextcloud-aio-whiteboard 10 | namespace: "{{ .Values.NAMESPACE }}" 11 | spec: 12 | ipFamilyPolicy: PreferDualStack 13 | ports: 14 | - name: "3002" 15 | port: 3002 16 | targetPort: 3002 17 | selector: 18 | io.kompose.service: nextcloud-aio-whiteboard 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /php/composer.json: -------------------------------------------------------------------------------- 1 | { 2 | "autoload": { 3 | "psr-4": { 4 | "AIO\\": ["src/"] 5 | } 6 | }, 7 | "require": { 8 | "php": "8.4.*", 9 | "ext-json": "*", 10 | "ext-sodium": "*", 11 | "ext-curl": "*", 12 | "slim/slim": "^4.11", 13 | "php-di/slim-bridge": "^3.3", 14 | "guzzlehttp/guzzle": "^7.5", 15 | "guzzlehttp/psr7": "^2.4", 16 | "http-interop/http-factory-guzzle": "^1.2", 17 | "slim/twig-view": "^3.3", 18 | "slim/csrf": "^1.3", 19 | "ext-apcu": "*" 20 | }, 21 | "require-dev": { 22 | "sserbin/twig-linter": "@dev", 23 | "vimeo/psalm": "^6.0", 24 | "wapmorgan/php-deprecation-detector": "dev-master" 25 | }, 26 | "scripts": { 27 | "dev": [ 28 | "Composer\\Config::disableProcessTimeout", 29 | "php -S localhost:8080 -t public" 30 | ], 31 | "psalm": "psalm --threads=1", 32 | "psalm:update-baseline": "psalm 
--threads=1 --monochrome --no-progress --output-format=text --update-baseline", 33 | "psalm:strict": "psalm --threads=1 --show-info=true", 34 | "lint": "php -l src/*.php src/**/*.php public/index.php", 35 | "lint:twig": "twig-linter lint ./templates", 36 | "php-deprecation-detector": "phpdd scan -n -t 8.4 src/*.php src/**/*.php public/index.php" 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /php/data/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nextcloud/all-in-one/81fec27c1499c9d93cec173d86e361d5d63cf260/php/data/.gitkeep -------------------------------------------------------------------------------- /php/domain-validator.php: -------------------------------------------------------------------------------- 1 | 2 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /php/public/automatic_reload.js: -------------------------------------------------------------------------------- 1 | document.addEventListener("DOMContentLoaded", function(event) { 2 | if (document.hasFocus()) { 3 | // hide reload button if the site reloads automatically 4 | let list = document.getElementsByClassName("reload button"); 5 | for (let i = 0; i < list.length; i++) { 6 | // list[i] is a node with the desired class name 7 | list[i].style.display = 'none'; 8 | } 9 | 10 | // set timeout for reload 11 | setTimeout(function(){ 12 | window.location.reload(1); 13 | }, 5000); 14 | } else { 15 | window.addEventListener("beforeunload", function() { 16 | document.getElementById('overlay').classList.add('loading') 17 | }); 18 | } 19 | }); 20 | -------------------------------------------------------------------------------- /php/public/before-unload.js: -------------------------------------------------------------------------------- 1 | 
window.addEventListener("beforeunload", function() { 2 | document.getElementById('overlay').classList.add('loading') 3 | }); -------------------------------------------------------------------------------- /php/public/disable-clamav.js: -------------------------------------------------------------------------------- 1 | document.addEventListener("DOMContentLoaded", function(event) { 2 | // Clamav 3 | let clamav = document.getElementById("clamav"); 4 | clamav.disabled = true; 5 | }); -------------------------------------------------------------------------------- /php/public/disable-collabora.js: -------------------------------------------------------------------------------- 1 | document.addEventListener("DOMContentLoaded", function(event) { 2 | // Collabora 3 | let collabora = document.getElementById("collabora"); 4 | collabora.disabled = true; 5 | }); -------------------------------------------------------------------------------- /php/public/disable-docker-socket-proxy.js: -------------------------------------------------------------------------------- 1 | document.addEventListener("DOMContentLoaded", function(event) { 2 | // Docker socket proxy 3 | let dockerSocketProxy = document.getElementById("docker-socket-proxy"); 4 | if (dockerSocketProxy) { 5 | dockerSocketProxy.disabled = true; 6 | } 7 | }); 8 | -------------------------------------------------------------------------------- /php/public/disable-fulltextsearch.js: -------------------------------------------------------------------------------- 1 | document.addEventListener("DOMContentLoaded", function(event) { 2 | // Fulltextsearch 3 | let fulltextsearch = document.getElementById("fulltextsearch"); 4 | fulltextsearch.disabled = true; 5 | }); -------------------------------------------------------------------------------- /php/public/disable-imaginary.js: -------------------------------------------------------------------------------- 1 | document.addEventListener("DOMContentLoaded", function(event) { 2 | 
// Imaginary 3 | let imaginary = document.getElementById("imaginary"); 4 | imaginary.disabled = true; 5 | }); -------------------------------------------------------------------------------- /php/public/disable-onlyoffice.js: -------------------------------------------------------------------------------- 1 | document.addEventListener("DOMContentLoaded", function(event) { 2 | // OnlyOffice 3 | let onlyoffice = document.getElementById("onlyoffice"); 4 | if (onlyoffice) { 5 | onlyoffice.disabled = true; 6 | } 7 | }); -------------------------------------------------------------------------------- /php/public/disable-talk-recording.js: -------------------------------------------------------------------------------- 1 | document.addEventListener("DOMContentLoaded", function(event) { 2 | // Talk-recording 3 | document.getElementById("talk-recording").disabled = true; 4 | }); 5 | -------------------------------------------------------------------------------- /php/public/disable-talk.js: -------------------------------------------------------------------------------- 1 | document.addEventListener("DOMContentLoaded", function(event) { 2 | // Talk 3 | let talk = document.getElementById("talk"); 4 | talk.disabled = true; 5 | }); -------------------------------------------------------------------------------- /php/public/disable-whiteboard.js: -------------------------------------------------------------------------------- 1 | document.addEventListener("DOMContentLoaded", function(event) { 2 | // Whiteboard 3 | let whiteboard = document.getElementById("whiteboard"); 4 | whiteboard.disabled = true; 5 | }); 6 | -------------------------------------------------------------------------------- /php/public/img/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nextcloud/all-in-one/81fec27c1499c9d93cec173d86e361d5d63cf260/php/public/img/favicon.png 
-------------------------------------------------------------------------------- /php/public/img/jenna-kim-the-globe-dark.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nextcloud/all-in-one/81fec27c1499c9d93cec173d86e361d5d63cf260/php/public/img/jenna-kim-the-globe-dark.webp -------------------------------------------------------------------------------- /php/public/img/jenna-kim-the-globe.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nextcloud/all-in-one/81fec27c1499c9d93cec173d86e361d5d63cf260/php/public/img/jenna-kim-the-globe.webp -------------------------------------------------------------------------------- /php/public/robots.txt: -------------------------------------------------------------------------------- 1 | User-agent: * 2 | Disallow: / 3 | -------------------------------------------------------------------------------- /php/public/second-tab-warning.js: -------------------------------------------------------------------------------- 1 | const channel = new BroadcastChannel('tab') 2 | 3 | channel.postMessage('second-tab') 4 | // note that listener is added after posting the message 5 | 6 | channel.addEventListener('message', (msg) => { 7 | if (msg.data === 'second-tab') { 8 | // message received from 2nd tab 9 | document.getElementById('overlay').classList.add('loading') 10 | alert('Cannot open multiple instances. 
You can use AIO here by reloading the page.') 11 | } 12 | }); -------------------------------------------------------------------------------- /php/public/timezone.js: -------------------------------------------------------------------------------- 1 | document.addEventListener("DOMContentLoaded", function(event) { 2 | // timezone 3 | let timezone = document.getElementById("timezone"); 4 | if (timezone) { 5 | timezone.value = Intl.DateTimeFormat().resolvedOptions().timeZone 6 | } 7 | }); 8 | -------------------------------------------------------------------------------- /php/public/toggle-dark-mode.js: -------------------------------------------------------------------------------- 1 | // Function to toggle theme 2 | function toggleTheme() { 3 | const currentTheme = document.documentElement.getAttribute('data-theme'); 4 | const newTheme = (currentTheme === 'dark') ? '' : 'dark'; // Toggle between no theme and dark theme 5 | document.documentElement.setAttribute('data-theme', newTheme); 6 | localStorage.setItem('theme', newTheme); 7 | 8 | // Change the icon based on the current theme 9 | const themeIcon = document.getElementById('theme-icon'); 10 | themeIcon.textContent = newTheme === 'dark' ? 
'☀️' : '🌙'; // Switch between moon and sun icons 11 | } 12 | 13 | // Function to immediately apply saved theme without icon update 14 | function applySavedThemeImmediately() { 15 | const savedTheme = localStorage.getItem('theme'); 16 | if (savedTheme === 'dark') { 17 | document.documentElement.setAttribute('data-theme', 'dark'); 18 | } else { 19 | document.documentElement.removeAttribute('data-theme'); // Default to light theme 20 | } 21 | } 22 | 23 | // Function to apply theme-icon update 24 | function setThemeIcon() { 25 | const savedTheme = localStorage.getItem('theme'); 26 | if (savedTheme === 'dark') { 27 | document.getElementById('theme-icon').textContent = '☀️'; // Sun icon for dark mode 28 | } else { 29 | document.getElementById('theme-icon').textContent = '🌙'; // Moon icon for light mode 30 | } 31 | } 32 | 33 | // Immediately apply the saved theme to avoid flickering 34 | applySavedThemeImmediately(); 35 | 36 | // Apply theme when the page loads 37 | document.addEventListener('DOMContentLoaded', setThemeIcon); 38 | -------------------------------------------------------------------------------- /php/session/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nextcloud/all-in-one/81fec27c1499c9d93cec173d86e361d5d63cf260/php/session/.gitkeep -------------------------------------------------------------------------------- /php/src/Auth/AuthManager.php: -------------------------------------------------------------------------------- 1 | configurationManager->GetPassword(), $password); 19 | } 20 | 21 | public function CheckToken(string $token) : bool { 22 | return hash_equals($this->configurationManager->GetToken(), $token); 23 | } 24 | 25 | public function SetAuthState(bool $isLoggedIn) : void { 26 | 27 | if (!$this->IsAuthenticated() && $isLoggedIn === true) { 28 | $date = new DateTime(); 29 | $dateTime = $date->getTimestamp(); 30 | $_SESSION['date_time'] = $dateTime; 31 | 32 | $df = 
disk_free_space(DataConst::GetSessionDirectory()); 33 | if ($df !== false && (int)$df < 10240) { 34 | error_log(DataConst::GetSessionDirectory() . " has only less than 10KB free space. The login might not succeed because of that!"); 35 | } 36 | 37 | file_put_contents(DataConst::GetSessionDateFile(), (string)$dateTime); 38 | } 39 | 40 | $_SESSION[self::SESSION_KEY] = $isLoggedIn; 41 | } 42 | 43 | public function IsAuthenticated() : bool { 44 | return isset($_SESSION[self::SESSION_KEY]) && $_SESSION[self::SESSION_KEY] === true; 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /php/src/Container/AioVariables.php: -------------------------------------------------------------------------------- 1 | variables[] = $variable; 11 | } 12 | 13 | /** 14 | * @return string[] 15 | */ 16 | public function GetVariables() : array { 17 | return $this->variables; 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /php/src/Container/ContainerEnvironmentVariables.php: -------------------------------------------------------------------------------- 1 | variables[] = $variable; 11 | } 12 | 13 | /** 14 | * @return string[] 15 | */ 16 | public function GetVariables() : array { 17 | return $this->variables; 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /php/src/Container/ContainerPort.php: -------------------------------------------------------------------------------- 1 | ports[] = $port; 11 | } 12 | 13 | /** 14 | * @return ContainerPort[] 15 | */ 16 | public function GetPorts() : array { 17 | return $this->ports; 18 | } 19 | } -------------------------------------------------------------------------------- /php/src/Container/ContainerState.php: -------------------------------------------------------------------------------- 1 | volumes[] = $volume; 11 | } 12 | 13 | /** 14 | * @return ContainerVolume[] 15 | */ 16 | public function 
GetVolumes() : array { 17 | return $this->volumes; 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /php/src/Container/VersionState.php: -------------------------------------------------------------------------------- 1 | dockerActionManager->isLoginAllowed()) { 21 | $response->getBody()->write("The login is blocked since Nextcloud is running."); 22 | return $response->withHeader('Location', '/')->withStatus(422); 23 | } 24 | $password = $request->getParsedBody()['password'] ?? ''; 25 | if($this->authManager->CheckCredentials($password)) { 26 | $this->authManager->SetAuthState(true); 27 | return $response->withHeader('Location', '/')->withStatus(201); 28 | } 29 | 30 | $response->getBody()->write("The password is incorrect."); 31 | return $response->withHeader('Location', '/')->withStatus(422); 32 | } 33 | 34 | public function GetTryLogin(Request $request, Response $response, array $args) : Response { 35 | $token = $request->getQueryParams()['token'] ?? 
''; 36 | if($this->authManager->CheckToken($token)) { 37 | $this->authManager->SetAuthState(true); 38 | return $response->withHeader('Location', '/')->withStatus(302); 39 | } 40 | 41 | return $response->withHeader('Location', '/')->withStatus(302); 42 | } 43 | 44 | public function Logout(Request $request, Response $response, array $args) : Response 45 | { 46 | $this->authManager->SetAuthState(false); 47 | return $response 48 | ->withHeader('Location', '/') 49 | ->withStatus(302); 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /php/src/Cron/BackupNotification.php: -------------------------------------------------------------------------------- 1 | get(\AIO\Docker\DockerActionManager::class); 15 | /** @var \AIO\ContainerDefinitionFetcher $containerDefinitionFetcher */ 16 | $containerDefinitionFetcher = $container->get(\AIO\ContainerDefinitionFetcher::class); 17 | 18 | $id = 'nextcloud-aio-nextcloud'; 19 | $nextcloudContainer = $containerDefinitionFetcher->GetContainerById($id); 20 | 21 | $backupExitCode = $dockerActionManger->GetBackupcontainerExitCode(); 22 | 23 | if ($backupExitCode === 0) { 24 | if (getenv('SEND_SUCCESS_NOTIFICATIONS') === "0") { 25 | error_log("Daily backup successful! Only logging successful backup and not sending backup notification since that has been disabled! 
You can get further info by looking at the backup logs in the AIO interface."); 26 | } else { 27 | $dockerActionManger->sendNotification($nextcloudContainer, 'Daily backup successful!', 'You can get further info by looking at the backup logs in the AIO interface.'); 28 | } 29 | } 30 | 31 | if ($backupExitCode > 0) { 32 | $dockerActionManger->sendNotification($nextcloudContainer, 'Daily backup failed!', 'You can get further info by looking at the backup logs in the AIO interface.'); 33 | } 34 | -------------------------------------------------------------------------------- /php/src/Cron/CheckBackup.php: -------------------------------------------------------------------------------- 1 | get(\AIO\Controller\DockerController::class); 15 | 16 | // Stop container and start backup check 17 | $dockerController->checkBackup(); 18 | -------------------------------------------------------------------------------- /php/src/Cron/CheckFreeDiskSpace.php: -------------------------------------------------------------------------------- 1 | get(\AIO\Docker\DockerActionManager::class); 16 | /** @var \AIO\ContainerDefinitionFetcher $containerDefinitionFetcher */ 17 | $containerDefinitionFetcher = $container->get(\AIO\ContainerDefinitionFetcher::class); 18 | 19 | $id = 'nextcloud-aio-nextcloud'; 20 | $nextcloudContainer = $containerDefinitionFetcher->GetContainerById($id); 21 | 22 | $df = disk_free_space(DataConst::GetDataDirectory()); 23 | if ($df !== false && (int)$df < 1024 * 1024 * 1024 * 5) { 24 | error_log("The drive that hosts the mastercontainer volume has less than 5 GB free space. Container updates and backups might not succeed due to that!"); 25 | $dockerActionManger->sendNotification($nextcloudContainer, 'Low on space!', 'The drive that hosts the mastercontainer volume has less than 5 GB free space. 
Container updates and backups might not succeed due to that!'); 26 | } 27 | -------------------------------------------------------------------------------- /php/src/Cron/CreateBackup.php: -------------------------------------------------------------------------------- 1 | get(\AIO\Controller\DockerController::class); 15 | 16 | // Stop container and start backup 17 | $dockerController->startBackup(); 18 | -------------------------------------------------------------------------------- /php/src/Cron/OutdatedNotification.php: -------------------------------------------------------------------------------- 1 | get(\AIO\Docker\DockerActionManager::class); 15 | /** @var \AIO\ContainerDefinitionFetcher $containerDefinitionFetcher */ 16 | $containerDefinitionFetcher = $container->get(\AIO\ContainerDefinitionFetcher::class); 17 | 18 | $id = 'nextcloud-aio-nextcloud'; 19 | $nextcloudContainer = $containerDefinitionFetcher->GetContainerById($id); 20 | 21 | $isNextcloudImageOutdated = $dockerActionManger->isNextcloudImageOutdated(); 22 | 23 | if ($isNextcloudImageOutdated === true) { 24 | $dockerActionManger->sendNotification($nextcloudContainer, 'AIO is outdated!', 'Please open the AIO interface or ask an administrator to update it. 
If you do not want to do it manually each time, you can enable the daily backup feature from the AIO interface which automatically updates all containers.', '/notify-all.sh'); 25 | } 26 | 27 | -------------------------------------------------------------------------------- /php/src/Cron/StartAndUpdateContainers.php: -------------------------------------------------------------------------------- 1 | get(\AIO\Controller\DockerController::class); 18 | 19 | // Start apache 20 | $dockerController->startTopContainer(true); 21 | -------------------------------------------------------------------------------- /php/src/Cron/StartContainers.php: -------------------------------------------------------------------------------- 1 | get(\AIO\Controller\DockerController::class); 18 | 19 | // Start apache 20 | $dockerController->startTopContainer(false); 21 | -------------------------------------------------------------------------------- /php/src/Cron/StopContainers.php: -------------------------------------------------------------------------------- 1 | get(\AIO\Controller\DockerController::class); 15 | 16 | // Start apache 17 | $dockerController->stopTopContainer(); 18 | -------------------------------------------------------------------------------- /php/src/Cron/UpdateMastercontainer.php: -------------------------------------------------------------------------------- 1 | get(\AIO\Controller\DockerController::class); 15 | 16 | # Update the mastercontainer 17 | $dockerController->startWatchtower(); 18 | -------------------------------------------------------------------------------- /php/src/Cron/UpdateNotification.php: -------------------------------------------------------------------------------- 1 | get(\AIO\Docker\DockerActionManager::class); 15 | /** @var \AIO\ContainerDefinitionFetcher $containerDefinitionFetcher */ 16 | $containerDefinitionFetcher = $container->get(\AIO\ContainerDefinitionFetcher::class); 17 | 18 | $id = 'nextcloud-aio-nextcloud'; 19 | 
$nextcloudContainer = $containerDefinitionFetcher->GetContainerById($id); 20 | 21 | $isMastercontainerUpdateAvailable = $dockerActionManger->IsMastercontainerUpdateAvailable(); 22 | $isAnyUpdateAvailable = $dockerActionManger->isAnyUpdateAvailable(); 23 | 24 | if ($isMastercontainerUpdateAvailable === true) { 25 | $dockerActionManger->sendNotification($nextcloudContainer, 'Mastercontainer update available!', 'Please open your AIO interface to update it. If you do not want to do it manually each time, you can enable the daily backup feature from the AIO interface which also automatically updates the mastercontainer.'); 26 | } 27 | 28 | if ($isAnyUpdateAvailable === true) { 29 | $dockerActionManger->sendNotification($nextcloudContainer, 'Container updates available!', 'Please open your AIO interface to update them. If you do not want to do it manually each time, you can enable the daily backup feature from the AIO interface which also automatically updates your containers and your Nextcloud apps.'); 30 | } 31 | -------------------------------------------------------------------------------- /php/src/Data/DataConst.php: -------------------------------------------------------------------------------- 1 | CanBeInstalled()) { 16 | return ''; 17 | } 18 | 19 | $password = $this->passwordGenerator->GeneratePassword(8); 20 | $this->configurationManager->SetPassword($password); 21 | return $password; 22 | } 23 | 24 | public function CanBeInstalled() : bool { 25 | return !file_exists(DataConst::GetConfigFile()); 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /php/src/DependencyInjection.php: -------------------------------------------------------------------------------- 1 | set( 15 | DockerHubManager::class, 16 | new DockerHubManager() 17 | ); 18 | 19 | $container->set( 20 | GitHubContainerRegistryManager::class, 21 | new GitHubContainerRegistryManager() 22 | ); 23 | 24 | $container->set( 25 | 
\AIO\Data\ConfigurationManager::class, 26 | new \AIO\Data\ConfigurationManager() 27 | ); 28 | $container->set( 29 | \AIO\Docker\DockerActionManager::class, 30 | new \AIO\Docker\DockerActionManager( 31 | $container->get(\AIO\Data\ConfigurationManager::class), 32 | $container->get(\AIO\ContainerDefinitionFetcher::class), 33 | $container->get(DockerHubManager::class), 34 | $container->get(GitHubContainerRegistryManager::class) 35 | ) 36 | ); 37 | $container->set( 38 | \AIO\Auth\PasswordGenerator::class, 39 | new \AIO\Auth\PasswordGenerator() 40 | ); 41 | $container->set( 42 | \AIO\Auth\AuthManager::class, 43 | new \AIO\Auth\AuthManager($container->get(\AIO\Data\ConfigurationManager::class)) 44 | ); 45 | $container->set( 46 | \AIO\Data\Setup::class, 47 | new \AIO\Data\Setup( 48 | $container->get(\AIO\Auth\PasswordGenerator::class), 49 | $container->get(\AIO\Data\ConfigurationManager::class) 50 | ) 51 | ); 52 | 53 | return $container; 54 | } 55 | } -------------------------------------------------------------------------------- /php/src/Middleware/AuthMiddleware.php: -------------------------------------------------------------------------------- 1 | getUri()->getPath(), $publicRoutes)) { 28 | if(!$this->authManager->IsAuthenticated()) { 29 | $status = 302; 30 | $headers = ['Location' => '/']; 31 | $response = new Response($status, $headers); 32 | return $response; 33 | } 34 | } 35 | 36 | $response = $handler->handle($request); 37 | return $response; 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /php/src/Twig/ClassExtension.php: -------------------------------------------------------------------------------- 1 | csrf->getTokenNameKey(); 19 | $csrfValueKey = $this->csrf->getTokenValueKey(); 20 | $csrfName = $this->csrf->getTokenName(); 21 | $csrfValue = $this->csrf->getTokenValue(); 22 | 23 | return [ 24 | 'csrf' => [ 25 | 'keys' => [ 26 | 'name' => $csrfNameKey, 27 | 'value' => $csrfValueKey 28 | ], 29 | 'name' => 
$csrfName, 30 | 'value' => $csrfValue 31 | ] 32 | ]; 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /php/templates/already-installed.twig: -------------------------------------------------------------------------------- 1 | {% extends "layout.twig" %} 2 | 3 | {% block body %} 4 | 13 | {% endblock %} 14 | -------------------------------------------------------------------------------- /php/templates/components/container-state.twig: -------------------------------------------------------------------------------- 1 | {# @var c \App\Containers\Container #} 2 |
  • 3 | 4 | {% if c.GetStartingState().value == 'starting' %} 5 | 6 | {{ c.GetDisplayName() }} 7 | (Starting) 8 | {% elseif c.GetRunningState().value == 'running' %} 9 | 10 | {{ c.GetDisplayName() }} 11 | (Running) 12 | {% else %} 13 | 14 | {{ c.GetDisplayName() }} 15 | (Stopped) 16 | {% endif %} 17 | {% if c.GetDocumentation() != '' %} 18 | (docs) 19 | {% endif %} 20 | 21 | {% if c.GetUiSecret() != '' %} 22 |
    23 | Show password for {{ c.GetDisplayName() }} 24 | 25 |
    26 | {% endif %} 27 |
  • -------------------------------------------------------------------------------- /php/templates/includes/backup-dirs.twig: -------------------------------------------------------------------------------- 1 |

    The folder path that you enter must start with / and must not end with /.

    2 |

    An example for Linux is /mnt/backup.

    3 |

    On Synology it could be /volume1/docker/nextcloud/backup.

    4 |

    For macOS it may be /var/backup.

    5 |

    On Windows it might be /run/desktop/mnt/host/c/backup. (This path is equivalent to 'C:\backup' on your Windows host so you need to translate the path accordingly. Hint: the path that you enter needs to start with '/run/desktop/mnt/host/'. Append to that the exact location on your windows host, e.g. 'c/backup' which is equivalent to 'C:\backup'.) ⚠️ Please note: This does not work with external drives like USB or network drives and only with internal drives like SATA or NVME drives.

    6 |

    Another option is to enter a specific volume name here: nextcloud_aio_backupdir. This volume needs to be created beforehand manually by you in order to be able to use it. See this documentation for an example.

    7 | -------------------------------------------------------------------------------- /php/templates/layout.twig: -------------------------------------------------------------------------------- 1 | 2 | 3 | AIO 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 |
    12 | {% block body %}{% endblock %} 13 |
    14 |
    15 |
    16 |
    17 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /php/templates/login.twig: -------------------------------------------------------------------------------- 1 | {% extends "layout.twig" %} 2 | 3 | {% block body %} 4 | 24 | 25 | {% endblock %} 26 | -------------------------------------------------------------------------------- /php/templates/setup.twig: -------------------------------------------------------------------------------- 1 | {% extends "layout.twig" %} 2 | 3 | {% block body %} 4 | 16 | {% endblock %} 17 | -------------------------------------------------------------------------------- /php/tests/.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Playwright 3 | node_modules/ 4 | /test-results/ 5 | /playwright-report/ 6 | /blob-report/ 7 | /playwright/.cache/ 8 | -------------------------------------------------------------------------------- /php/tests/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nextcloud-aio-mastercontainer-tests", 3 | "version": "1.0.0", 4 | "license": "AGPL-3.0-or-later", 5 | "devDependencies": { 6 | "@playwright/test": "^1.51.1" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /php/tests/playwright.config.js: -------------------------------------------------------------------------------- 1 | import { defineConfig, devices } from '@playwright/test' 2 | 3 | /** 4 | * @see https://playwright.dev/docs/test-configuration 5 | */ 6 | export default defineConfig({ 7 | testDir: './tests', 8 | fullyParallel: false, 9 | forbidOnly: !!process.env.CI, 10 | retries: 0, 11 | workers: 1, 12 | reporter: [ 13 | ['list'], 14 | ['html'], 15 | ], 16 | use: { 17 | baseURL: process.env.BASE_URL ?? 
'http://localhost:8080', 18 | trace: 'on', 19 | }, 20 | projects: [ 21 | { 22 | name: 'chromium', 23 | use: { 24 | ...devices['Desktop Chrome'], 25 | ignoreHTTPSErrors: true, 26 | }, 27 | }, 28 | ], 29 | }) 30 | -------------------------------------------------------------------------------- /tests/QA/001-initial-setup.md: -------------------------------------------------------------------------------- 1 | # Initial setup 2 | 3 | - [ ] Verify that after starting the test container, you can access the AIO interface using https://internal.ip.address:8080 4 | - [ ] After clicking the self-signed-certificate warning away, it should show the setup page with an explanation of what AIO is and the initial password and a button that contains a link to the AIO login page 5 | - [ ] After copying the password and clicking on this button, it should open a new tab with the login page 6 | - [ ] The login page should show an input field that allows to enter the AIO password and a `Log in` button 7 | - [ ] After pasting the new password into the input field and clicking on this button, you should be logged in 8 | - [ ] You should now see the containers page and you should see three sections: one general section which explains what AIO is, one `New AIO instance` section and one section that allows to restore the whole AIO instance from backup. 9 | 10 | You can now continue with [002-new-instance.md](./002-new-instance.md) or [010-restore-instance.md](./010-restore-instance.md). 11 | -------------------------------------------------------------------------------- /tests/QA/003-automatic-login.md: -------------------------------------------------------------------------------- 1 | # Automatic login 2 | 3 | - [ ] After you log in to Nextcloud using the provided initial credentials, open https://yourdomain.com/settings/admin/overview 4 | - [ ] There you should see a Nextcloud AIO section and a button that allows to log into the AIO interface. 
5 | - [ ] Clicking on this button should open the AIO interface in a new tab and should automatically log you in 6 | - [ ] All sessions in other tabs that are currently open should be closed (you can verify by reloading all other AIO tabs) 7 | 8 | You can now continue with [004-initial-backup.md](./004-initial-backup.md). -------------------------------------------------------------------------------- /tests/QA/004-initial-backup.md: -------------------------------------------------------------------------------- 1 | # Initial backup 2 | 3 | - [ ] In the Backup and restore section, you should now see an input box where you should type in the path where the backup should get created and some explanation below 4 | - [ ] Enter `/` which should send an error 5 | - [ ] Enter `/mnt/` or `/media/` or `/host_mnt/` or `/var/backups/` should send an error as well 6 | - [ ] Accepted should be `/mnt/backup`, `/media/backup`, `/host_mnt/c/backup` and `/var/backups`. 7 | - [ ] The site should now reload 8 | - [ ] The initial Nextcloud credentials on top of the page that are visible when the containers are running should now be hidden in a details tag 9 | - [ ] In the Backup restore section you should now see a Backup information section with important info like the encryption password, the backup location and more. 10 | - [ ] Also you should see a Backup creation section that contains a `Create backup` button. 11 | - [ ] Clicking on the `Create backup` button should open a window prompt that allows to cancel the operation. 12 | - [ ] Canceling should return to the website, confirming should reveal the big spinner again which should block the website again. 13 | - [ ] After a while you should see the information that Backup container is currently running 14 | - [ ] Below the Containers section you should see the option to `Start containers` again. 
15 | - [ ] After a while and a few automatic reloads (as long as the site is focused), you should be redirected to the usual page and see in the Backup and restore section that the last backup was successful. 16 | - [ ] Below that you should see a details tag that allows to reveal all backup options 17 | 18 | You can now continue with [020-backup-and-restore.md](./020-backup-and-restore.md) -------------------------------------------------------------------------------- /tests/QA/020-backup-and-restore.md: -------------------------------------------------------------------------------- 1 | # Backup and restore 2 | 3 | - [ ] Expanding all backup options in the Backup and restore section should reveal a Backup information section, Backup creation section, Backup check section, Backup restore section and a Daily backup section. 4 | - [ ] The backup restore section should list all available backup archives and list them from most recent to least recent. 5 | - [ ] Clicking on either option of Create backup, Check backup integrity or Restore selected backup should run the corresponding action and report after a while if the last check, backup or restore was successful. 6 | - [ ] Daily backup creation should allow to enter a time in 24h format e.g. `04:00` should be accepted, `24:00` or `dfjlk` not. 7 | - [ ] Submitting a time here should reload the page and reveal at the same place the option to delete the setting again. 8 | - [ ] When the time of the automatic backup has come (you can test it by choosing a time that is e.g. only a minute away), it should automatically log you out (you can verify by reloading) and after you log in again you should see that the automatic backup is currently running. 9 | - [ ] After a while you should see that your containers are starting and in the Backup and restore section you should see that the backup was successful 10 | - [ ] When entering additional backup directories, it should allow e.g. 
`/etc` and `nextcloud_aio_mastercontainer` but not `nextcloud/test`. Running a backup with this should back up these directories/volumes successfully. 11 | 12 | You can now continue with [030-aio-password-change.md](./030-aio-password-change.md) -------------------------------------------------------------------------------- /tests/QA/030-aio-password-change.md: -------------------------------------------------------------------------------- 1 | # AIO password change 2 | 3 | - [ ] In the AIO password change section you should see two input fields. And below the requirements for a new password 4 | - [ ] When entering nothing it should report that you need to enter your current AIO password 5 | - [ ] When entering a wrong password, it should report that to you 6 | - [ ] After entering your current password and leaving the new password empty it should report that you need to enter a new password 7 | - [ ] After entering a new password shorter than 24 characters or with not allowed characters, it should report that the password requirements are not met. 
8 | - [ ] `sdfjlksj` should not be accepted 9 | - [ ] `jdsfklöjiroewoäsadjkfölk` should not be accepted 10 | - [ ] `sdjlfj SDJFLK 32489 sdjklf` should be accepted which should reload the page 11 | 12 | You can now continue with [040-login-behavior.md](./040-login-behavior.md) -------------------------------------------------------------------------------- /tests/QA/040-login-behavior.md: -------------------------------------------------------------------------------- 1 | # Login behavior 2 | 3 | - [ ] When opening the AIO interface in a new tab while the apache container is running, it should report on the login page that Nextcloud is running and you should use the automatic login 4 | - [ ] When the apache container is stopped, you should see here an input field that allows you to enter the AIO password which should log you in 5 | - [ ] Starting and stopping the containers multiple times should every time produce a new token that is used in the admin overview in Nextcloud as link in the button to log you into the AIO interface. (see [003-automatic-login.md](./003-automatic-login.md)) 6 | 7 | You can now continue with [050-optional-addons.md](./050-optional-addons.md) -------------------------------------------------------------------------------- /tests/QA/050-optional-addons.md: -------------------------------------------------------------------------------- 1 | # Optional addons 2 | 3 | - [ ] Close to the bottom of the page in the AIO interface, you should see the optional addons section 4 | - [ ] You should be able to change optional addons when containers are stopped and not change them when containers are running 5 | - [ ] Enabling either of the options should start a new container with the same or comparable name and should also list them in the containers section 6 | - [ ] After all containers are started with the new config active, you should verify that the options were automatically activated/deactivated. 
7 | - [ ] ClamAV by trying to upload a testvirus to Nextcloud https://www.eicar.org/?page_id=3950 8 | - [ ] Collabora by trying to open a .docx or .odt file in Nextcloud 9 | - [ ] Nextcloud Talk by opening the Talk app in Nextcloud, creating a new chat and trying to join a call in this chat. Also verifying in the settings that the HPB and turn server work. 10 | - [ ] Imaginary by having a look if when uploading a new picture in Nextcloud, it adds some log entries to the container 11 | - [ ] Fulltextsearch by trying to search for a heading inside a file in Nextcloud 12 | - [ ] Talk-recording by starting a call and trying to record something 13 | - [ ] When Collabora is enabled, it should show below the Optional Addons section a section where you can change the dictionaries for collabora. `de_DE en_GB en_US es_ES fr_FR it nl pt_BR pt_PT ru` should be a valid setting. E.g. `de.De` not. If already set, it should show a button that allows to remove the setting again. 14 | 15 | You can now continue with [060-environmental-variables.md](./060-environmental-variables.md) -------------------------------------------------------------------------------- /tests/QA/070-timezone-change.md: -------------------------------------------------------------------------------- 1 | # Timezone change 2 | 3 | - [ ] At the very bottom of the page you should see the timezone change section 4 | - [ ] When the containers are stopped, you should be able to change it and set/reset it 5 | - [ ] If not already set, it should show an input field where you can enter a timezone 6 | - [ ] `Europe/Berlin` should be accepted, e.g. 
`Europe Berlin` not 7 | - [ ] When it is set, it should show that it is set to which timezone and display a button that allows to reset it again which does this on a press 8 | - [ ] When it is set, running `date` inside Nextcloud related containers should return the correct timezone 9 | 10 | You can now continue with [080-daily-backup-script.md](./080-daily-backup-script.md) -------------------------------------------------------------------------------- /tests/QA/080-daily-backup-script.md: -------------------------------------------------------------------------------- 1 | # Daily backup script 2 | 3 | The script is delivered within the mastercontainer and allows to run a few things like daily backup and container updates from an external script. 4 | 5 | You can find the documentation on this here which needs to work as documented: https://github.com/nextcloud/all-in-one#how-to-stopstartupdate-containers-or-trigger-the-daily-backup-from-a-script-externally 6 | -------------------------------------------------------------------------------- /tests/QA/assets/backup-archive/readme.md: -------------------------------------------------------------------------------- 1 | # Backup archive 2 | 3 | The backup archive was moved here because of Git LFS limitations: 4 | https://cloud.nextcloud.com/s/m5DF3AjRs72kWKY 5 | -------------------------------------------------------------------------------- /tests/QA/readme.md: -------------------------------------------------------------------------------- 1 | # QA test plans 2 | 3 | In this folder are manual test plans for QA located that allow to manually step through certain features and make sure that everything works as expected. 4 | 5 | For a test instance, you should make sure that all potentially breaking changes are merged, build new containers by following https://github.com/nextcloud/all-in-one/blob/main/develop.md#how-to-build-new-containers, stop a potential old instance, remove it and delete all volumes. 
Afterwards start a new clean test instance by following https://github.com/nextcloud/all-in-one/blob/main/develop.md#developer-channel. 6 | 7 | Best is to start testing with [001-initial-setup.md](./001-initial-setup.md). 8 | --------------------------------------------------------------------------------