├── system_files_overrides
│   ├── gdx
│   │   ├── .gitkeep
│   │   └── usr
│   │       └── share
│   │           └── ublue-os
│   │               └── user-setup.hooks.d
│   │                   └── 30-gdx-vscode.sh
│   ├── x86_64
│   │   └── .gitkeep
│   ├── aarch64-dx
│   │   └── .gitkeep
│   ├── aarch64-gdx
│   │   ├── .gitkeep
│   │   └── usr
│   │       └── share
│   │           └── ublue-os
│   │               ├── gdx-demo
│   │               │   ├── bench-container
│   │               │   │   └── Dockerfile
│   │               │   └── ramalama
│   │               │       ├── ramalama-serve-ampere.py
│   │               │       └── demo-ai-server.py
│   │               └── just
│   │                   └── 66-ampere.just
│   ├── aarch64
│   │   └── .gitkeep
│   ├── dx
│   │   ├── usr
│   │   │   ├── bin
│   │   │   │   └── .gitkeep
│   │   │   └── share
│   │   │       └── ublue-os
│   │   │           ├── user-setup.hooks.d
│   │   │           │   └── 11-vscode.sh
│   │   │           └── privileged-setup.hooks.d
│   │   │               └── 20-dx.sh
│   │   └── etc
│   │       └── skel
│   │           └── .config
│   │               └── Code
│   │                   └── User
│   │                       └── settings.json
│   ├── x86_64-dx
│   │   └── .gitkeep
│   └── x86_64-gdx
│       └── .gitkeep
├── .github
│   ├── FUNDING.yml
│   ├── semantic.yaml
│   ├── CODEOWNERS
│   ├── ISSUE_TEMPLATE
│   │   ├── config.yml
│   │   ├── feature_request.yml
│   │   ├── pin.yml
│   │   └── bug-report.yml
│   ├── workflows
│   │   ├── content-filter.yaml
│   │   ├── build-regular.yml
│   │   ├── build-dx.yml
│   │   ├── build-gdx.yml
│   │   ├── validate-renovate.yaml
│   │   ├── build-regular-hwe.yml
│   │   ├── build-dx-hwe.yml
│   │   ├── generate-changelog-release.yml
│   │   ├── build-iso.yml
│   │   └── reusable-build-image.yml
│   ├── renovate.json5
│   ├── changelog_config.yaml
│   └── changelogs.py
├── CONTRIBUTING.md
├── system_files
│   ├── etc
│   │   ├── ublue-os
│   │   │   ├── bling.json
│   │   │   ├── setup.json
│   │   │   ├── changelog.json
│   │   │   ├── rebase_helper.json
│   │   │   └── system-flatpaks.list
│   │   ├── systemd
│   │   │   └── zram-generator.conf
│   │   ├── environment
│   │   ├── dnf
│   │   │   └── dnf.conf
│   │   ├── rpm-ostreed.conf
│   │   └── firewalld
│   │       └── firewalld.conf
│   └── usr
│       ├── share
│       │   ├── ublue-os
│       │   │   ├── flatpak-overrides
│       │   │   │   └── io.github.kolunmi.Bazaar
│       │   │   ├── firefox-config
│       │   │   │   └── 01-bluefin-global.js
│       │   │   ├── user-setup.hooks.d
│       │   │   │   ├── 99-privileged.sh
│       │   │   │   └── 10-theming.sh
│       │   │   ├── privileged-setup.hooks.d
│       │   │   │   ├── 20-atd-file-fix.sh
│       │   │   │   ├── 10-tailscale.sh
│       │   │   │   └── 99-flatpaks.sh
│       │   │   ├── motd
│       │   │   │   ├── tips
│       │   │   │   │   ├── 20-bluefin.md
│       │   │   │   │   └── 10-tips.md
│       │   │   │   └── template.md
│       │   │   └── system-setup.hooks.d
│       │   │       └── 10-framework.sh
│       │   └── fish
│       │       └── vendor_functions.d
│       │           └── fish_prompt.fish
│       ├── lib
│       │   ├── tmpfiles.d
│       │   │   └── bazaar-flatpak-permission.conf
│       │   ├── systemd
│       │   │   ├── system
│       │   │   │   ├── dconf-update.service
│       │   │   │   ├── ublue-countme.timer
│       │   │   │   └── ublue-countme.service
│       │   │   └── user
│       │   │       └── bazaar.service
│       │   └── firewalld
│       │       └── zones
│       │           └── Workstation.xml
│       └── libexec
│           └── ublue-ts-exit-node
├── image.toml
├── cosign.pub
├── .gitignore
├── artifacthub-repo.yml
├── image-versions.yaml
├── .gitattributes
├── iso.toml
├── Containerfile
├── README.md
├── AGENTS.md
├── LICENSE
└── Justfile
/system_files_overrides/gdx/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/system_files_overrides/x86_64/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/system_files_overrides/aarch64-dx/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/system_files_overrides/aarch64-gdx/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/system_files_overrides/aarch64/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/system_files_overrides/dx/usr/bin/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/system_files_overrides/x86_64-dx/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/system_files_overrides/x86_64-gdx/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | github: [castrojo, tulilirockz]
2 |
--------------------------------------------------------------------------------
/.github/semantic.yaml:
--------------------------------------------------------------------------------
1 | enabled: true
2 | titleOnly: true
3 |
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @castrojo @tulilirockz
2 |
3 | # No owners listed: exempts the pinned-versions file from ownership review
4 | image-versions.yaml
5 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | See here:
2 |
3 | <https://docs.projectbluefin.io/contributing>
4 |
--------------------------------------------------------------------------------
/system_files/etc/ublue-os/bling.json:
--------------------------------------------------------------------------------
1 | {
2 | "bling-cli-name": "bluefin-cli"
3 | }
4 |
--------------------------------------------------------------------------------
/system_files/etc/ublue-os/setup.json:
--------------------------------------------------------------------------------
1 | {
2 | "check-secureboot": false,
3 | "setup-version": 4
4 | }
5 |
--------------------------------------------------------------------------------
/system_files/usr/share/ublue-os/flatpak-overrides/io.github.kolunmi.Bazaar:
--------------------------------------------------------------------------------
1 | [Context]
2 | filesystems=host-etc
3 |
--------------------------------------------------------------------------------
/system_files/etc/systemd/zram-generator.conf:
--------------------------------------------------------------------------------
1 | [zram0]
2 | # Half of RAM, capped at 4096 MB (zram-generator sizes are in megabytes)
3 | zram-size = min(ram / 2, 4096)
4 | compression-algorithm = zstd
5 |
--------------------------------------------------------------------------------
/system_files/etc/ublue-os/changelog.json:
--------------------------------------------------------------------------------
1 | {
2 | "target-url": "https://api.github.com/repos/ublue-os/bluefin-lts/releases/latest"
3 | }
4 |
--------------------------------------------------------------------------------
/system_files/usr/share/ublue-os/firefox-config/01-bluefin-global.js:
--------------------------------------------------------------------------------
1 | // Bluefin Global
2 | pref("gfx.webrender.all", true);
3 | pref("media.ffmpeg.vaapi.enabled", true);
4 |
--------------------------------------------------------------------------------
/image.toml:
--------------------------------------------------------------------------------
1 | [[customizations.user]]
2 | name = "centos"
3 | password = "centos"
4 | groups = ["wheel"]
5 |
6 | [[customizations.filesystem]]
7 | mountpoint = "/"
8 | minsize = "20 GiB"
9 |
--------------------------------------------------------------------------------
/system_files/etc/ublue-os/rebase_helper.json:
--------------------------------------------------------------------------------
1 | {
2 | "dx-helper-enabled": true,
3 | "image-base-name": "bluefin",
4 | "available-tags": ["lts", "stream10"],
5 | "image-date-separator": "-"
6 | }
7 |
--------------------------------------------------------------------------------
/cosign.pub:
--------------------------------------------------------------------------------
1 | -----BEGIN PUBLIC KEY-----
2 | MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEHLRpBfPRYiMl9wb7s6fx47PzzNWu
3 | 3zyJgXhWEvxoOgwv9CpwjbvUwR9qHxNMWkJhuGE6cjDA2hpy1I6NbA+24Q==
4 | -----END PUBLIC KEY-----
5 |
--------------------------------------------------------------------------------
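The key above is the verification half of the repo's signing setup; images pushed to the registry are signed with the matching private key (cosign.key, gitignored below). A minimal verification sketch, assuming the standard cosign CLI and the ghcr.io/ublue-os/bluefin image referenced elsewhere in this repo:

```bash
# Verify a published image against the repo's public key.
# The tag is illustrative; use the variant you actually run.
cosign verify --key cosign.pub ghcr.io/ublue-os/bluefin:lts
```
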
/system_files/etc/environment:
--------------------------------------------------------------------------------
1 | # NOTE: Necessary due to cursor being laggy with VRR on GNOME 47
2 | MUTTER_DEBUG_FORCE_KMS_MODE=simple
3 | # NOTE: Speeds up GNOME Shell animations slightly (a factor below 1 shortens them)
4 | GNOME_SHELL_SLOWDOWN_FACTOR=0.8
5 |
--------------------------------------------------------------------------------
/system_files/usr/share/ublue-os/user-setup.hooks.d/99-privileged.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -euo pipefail
4 |
5 | echo "Running all privileged units"
6 |
7 | pkexec /usr/libexec/ublue-privileged-setup
8 |
--------------------------------------------------------------------------------
/system_files_overrides/dx/etc/skel/.config/Code/User/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "window.titleBarStyle": "custom",
3 | "editor.fontFamily": "'Cascadia Code', 'Droid Sans Mono', 'monospace', monospace",
4 | "update.mode": "none"
5 | }
6 |
--------------------------------------------------------------------------------
/system_files/etc/dnf/dnf.conf:
--------------------------------------------------------------------------------
1 | [main]
2 | gpgcheck=1
3 | installonly_limit=3
4 | clean_requirements_on_remove=True
5 | best=False
6 | skip_if_unavailable=True
7 | countme=true
8 | deltarpm=true
9 | fastestmirror=true
10 | max_parallel_downloads=10
--------------------------------------------------------------------------------
/system_files/usr/lib/tmpfiles.d/bazaar-flatpak-permission.conf:
--------------------------------------------------------------------------------
1 | # This overrides the permissions for bazaar flatpak by default
2 | L /var/lib/flatpak/overrides/io.github.kolunmi.Bazaar - - - - /usr/share/ublue-os/flatpak-overrides/io.github.kolunmi.Bazaar
3 |
--------------------------------------------------------------------------------
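The `L` line above is a tmpfiles.d symlink directive: at boot, systemd-tmpfiles links the mutable Flatpak override path to the read-only copy shipped under /usr/share (dumped earlier in this section). A quick manual check, assuming a booted system with the file installed:

```bash
# Apply this one tmpfiles.d entry and confirm the symlink it creates.
sudo systemd-tmpfiles --create /usr/lib/tmpfiles.d/bazaar-flatpak-permission.conf
readlink /var/lib/flatpak/overrides/io.github.kolunmi.Bazaar
# expected: /usr/share/ublue-os/flatpak-overrides/io.github.kolunmi.Bazaar
```
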
/system_files/usr/lib/systemd/system/dconf-update.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Update the dconf database on boot
3 | Documentation=https://github.com/coreos/rpm-ostree/issues/1944
4 |
5 | [Service]
6 | Type=oneshot
7 | ExecStart=/usr/bin/dconf update
8 |
9 | [Install]
10 | WantedBy=multi-user.target
11 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | cosign.key
2 | output/
3 | *.raw
4 | *.qcow2
5 | __pycache__/
6 | *.pyc
7 |
8 | # IDE
9 | .idea/
10 | *.iml
11 | .vscode/
12 | .zed/
13 |
14 | # Changelog generation temporary files
15 | changelog.md
16 | output.env
17 | github_output.txt
18 |
--------------------------------------------------------------------------------
/system_files/usr/share/ublue-os/privileged-setup.hooks.d/20-atd-file-fix.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 |
3 | source /usr/lib/ublue/setup-services/libsetup.sh
4 |
5 | version-script atd-file-fix-lts privileged 1 || exit 0 # unique key; "tailscale-lts" here collided with 10-tailscale.sh
6 |
7 | set -xeuo pipefail
8 |
9 | # Create the folder for at jobs
10 | mkdir -p /var/spool/at # privileged hooks already run as root via pkexec; sudo is unneeded
--------------------------------------------------------------------------------
/system_files/usr/share/ublue-os/motd/tips/20-bluefin.md:
--------------------------------------------------------------------------------
1 | The Project Bluefin team will use this banner to share important information and occasional tips
2 | Use `Super`-`Space` to run Search Light (Super is your Windows key!)
3 | Support [Bluefin's Paleoartists](https://universal-blue.discourse.group/docs?topic=299)
4 |
5 |
--------------------------------------------------------------------------------
/system_files/usr/share/ublue-os/privileged-setup.hooks.d/10-tailscale.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 |
3 | source /usr/lib/ublue/setup-services/libsetup.sh
4 |
5 | version-script tailscale-lts privileged 1 || exit 0
6 |
7 | set -xeuo pipefail
8 |
9 | tailscale set --operator="$(getent passwd "$PKEXEC_UID" | cut -d: -f1)"
10 |
--------------------------------------------------------------------------------
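The hooks in this repo gate themselves with `version-script NAME SCOPE VERSION || exit 0`; the helper comes from libsetup.sh, which is not part of this dump. A purely hypothetical sketch of the contract these callers rely on (run once per version bump, fail when already done); the marker path and logic are assumptions, not the real implementation:

```bash
# HYPOTHETICAL sketch -- the real helper ships in
# /usr/lib/ublue/setup-services/libsetup.sh (not included in this dump).
# Contract: return non-zero when hook NAME already ran at >= VERSION for
# SCOPE (user/privileged/system); otherwise record VERSION and return 0.
version-script() {
  local name="$1" scope="$2" version="$3"
  local marker="/var/lib/ublue/setup/${scope}/${name}"   # path is illustrative
  if [[ -f "$marker" ]] && (( $(<"$marker") >= version )); then
    return 1
  fi
  mkdir -p "${marker%/*}"
  printf '%s\n' "$version" >"$marker"
}
```
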
/artifacthub-repo.yml:
--------------------------------------------------------------------------------
1 | repositoryID: d31cdc0c-6639-475b-9a51-35af5ca3f235
2 | owners: # (optional, used to claim repository ownership)
3 | - name: Jorge Castro
4 | email: jorge.castro@gmail.com
5 | #ignore: # (optional, packages that should not be indexed by Artifact Hub)
6 | # - name: package1
7 | # - name: package2 # Exact match
8 |
--------------------------------------------------------------------------------
/system_files/usr/lib/systemd/system/ublue-countme.timer:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Weekly DNF countme statistics reporting
3 | Documentation=https://docs.fedoraproject.org/en-US/quick-docs/dnf/#countme
4 |
5 | [Timer]
6 | OnCalendar=weekly
7 | Persistent=true
8 | RandomizedDelaySec=12h
9 |
10 | [Install]
11 | WantedBy=timers.target
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: true
2 | contact_links:
3 | - name: Ask Bluefin
4 | url: https://github.com/ublue-os/bluefin/discussions/categories/ask-bluefin
5 | about: Ask our trained raptor questions about Bluefin
6 | - name: Questions and Feedback
7 | url: https://universal-blue.discourse.group/c/bluefin/6
8 | about: Share tips and help others
--------------------------------------------------------------------------------
/image-versions.yaml:
--------------------------------------------------------------------------------
1 | images:
2 | - name: centos-bootc
3 | image: quay.io/centos-bootc/centos-bootc
4 | tag: c10s
5 | digest: sha256:d8bc830f27034a57d362c83b0273b6b30b74fa98d509753b07a0b37d1d5e567d
6 | - name: common
7 | image: ghcr.io/projectbluefin/common
8 | tag: latest
9 | digest: sha256:d2443dae3b956b5af024e2b4767f20e82a630a47f176391417acecd096e77183
10 |
--------------------------------------------------------------------------------
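These entries pin the exact base and common images a build consumes; Renovate keeps the digests fresh (see renovate.json5 later in this dump, which automerges digest bumps for exactly these two images). Pulling by digest is what makes the pin reproducible, for example:

```bash
# A tag like c10s can drift; the digest cannot.
podman pull quay.io/centos-bootc/centos-bootc@sha256:d8bc830f27034a57d362c83b0273b6b30b74fa98d509753b07a0b37d1d5e567d
```
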
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.yml linguist-detectable=true
2 | *.yml linguist-language=YAML
3 |
4 | *.yaml linguist-detectable=true
5 | *.yaml linguist-language=YAML
6 |
7 | *.just linguist-detectable=true
8 | *.just linguist-documentation=false
9 | *.just linguist-language=Just
10 |
11 | *.json linguist-detectable=true
12 | *.json linguist-documentation=false
13 | *.json linguist-language=JSON
14 |
--------------------------------------------------------------------------------
/.github/workflows/content-filter.yaml:
--------------------------------------------------------------------------------
1 | name: Check for Spammy Issue Comments
2 |
3 | on:
4 | issue_comment:
5 | types: [created, edited]
6 |
7 | permissions:
8 | issues: write
9 |
10 | jobs:
11 | comment-filter:
12 | runs-on: ubuntu-latest
13 | steps:
14 | - name: Comment filter
15 | uses: DecimalTurn/Comment-Filter@9c95bdb06ae1dd6b8185d58f52a07a2a71e19d94 # v0.2.2
16 |
--------------------------------------------------------------------------------
/system_files_overrides/gdx/usr/share/ublue-os/user-setup.hooks.d/30-gdx-vscode.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | source /usr/lib/ublue/setup-services/libsetup.sh
4 |
5 | version-script gdx-vscode-lts user 1 || exit 0
6 |
7 | set -xeuo pipefail
8 |
9 | # cpptools is required by nsight-vscode
10 | code --install-extension ms-vscode.cpptools
11 | code --install-extension NVIDIA.nsight-vscode-edition
12 |
--------------------------------------------------------------------------------
/system_files/usr/lib/systemd/user/bazaar.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Bazaar background service
3 | Documentation=https://github.com/kolunmi/bazaar
4 | After=graphical-session.target
5 | StartLimitBurst=10
6 |
7 | [Service]
8 | Type=oneshot
9 | RemainAfterExit=yes
10 | ExecStart=flatpak run --command=bazaar io.github.kolunmi.Bazaar --no-window
11 | StandardOutput=journal
12 |
13 | [Install]
14 | WantedBy=graphical-session.target
15 |
--------------------------------------------------------------------------------
/system_files/usr/lib/systemd/system/ublue-countme.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=DNF countme statistics reporting
3 | Documentation=https://docs.fedoraproject.org/en-US/quick-docs/dnf/#countme
4 | Wants=network-online.target
5 | After=network-online.target
6 |
7 | [Service]
8 | Type=oneshot
9 | ExecStart=/usr/bin/dnf check-update --quiet
10 | User=root
11 | # Allow the service to succeed even if check-update returns non-zero (which is normal)
12 | SuccessExitStatus=0 1 100
--------------------------------------------------------------------------------
/system_files/etc/rpm-ostreed.conf:
--------------------------------------------------------------------------------
1 | # Entries in this file show the compile time defaults.
2 | # You can change settings by editing this file.
3 | # For option meanings, see rpm-ostreed.conf(5).
4 |
5 | [Daemon]
6 | AutomaticUpdatePolicy=stage
7 |
8 | ##########
9 | # Set this to false to enable local layering with dnf
10 | # This is an unsupported configuration that can lead to upgrade issues
11 | # You should know what you're doing before setting this to `false`
12 | #
13 | # See [future link] for more information
14 | ##########
15 | LockLayering=true
16 |
--------------------------------------------------------------------------------
/system_files/usr/lib/firewalld/zones/Workstation.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <zone>
3 | <short>Workstation</short>
4 | <description>Unsolicited incoming network packets are rejected from port 1 to 1023, except for select network services. Incoming packets that are related to outgoing network connections are accepted. Outgoing network connections are allowed.</description>
5 | <!-- service/port elements were lost in extraction and are not reconstructed here -->
6 | </zone>
--------------------------------------------------------------------------------
/system_files_overrides/dx/usr/share/ublue-os/user-setup.hooks.d/11-vscode.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 |
3 | source /usr/lib/ublue/setup-services/libsetup.sh
4 |
5 | version-script vscode-lts user 1 || exit 0 # exit 0 so an already-run hook is not reported as a failure
6 |
7 | set -x
8 |
9 | # Setup VSCode
10 | if test ! -e "$HOME"/.config/Code/User/settings.json; then
11 | mkdir -p "$HOME"/.config/Code/User
12 | cp -f /etc/skel/.config/Code/User/settings.json "$HOME"/.config/Code/User/settings.json
13 | fi
14 |
15 | code --install-extension ms-vscode-remote.remote-containers
16 | code --install-extension ms-vscode-remote.remote-ssh
17 | code --install-extension ms-azuretools.vscode-containers
18 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.yml:
--------------------------------------------------------------------------------
1 | name: Request a Package
2 | description: Request an RPM package to be included in an image
3 | labels: [package-request]
4 |
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | Thank you for taking the time to fill out this request!
10 | - type: textarea
11 | id: describe-package
12 | attributes:
13 | label: Describe the package
14 | description: Include why you feel this should be on the image
15 | placeholder: Tell us what you need
16 | value: "I'd like to request the package `vim` because ..."
17 | validations:
18 | required: true
--------------------------------------------------------------------------------
/iso.toml:
--------------------------------------------------------------------------------
1 | [customizations.installer.kickstart]
2 | contents = """
3 | %post
4 | bootc switch --mutate-in-place --transport registry --enforce-container-sigpolicy ghcr.io/ublue-os/bluefin:lts
5 | %end
6 | """
7 |
8 | [customizations.installer.modules]
9 | enable = [
10 | "org.fedoraproject.Anaconda.Modules.Storage"
11 | ]
12 | disable = [
13 | "org.fedoraproject.Anaconda.Modules.Network",
14 | "org.fedoraproject.Anaconda.Modules.Security",
15 | "org.fedoraproject.Anaconda.Modules.Services",
16 | "org.fedoraproject.Anaconda.Modules.Users",
17 | "org.fedoraproject.Anaconda.Modules.Subscription",
18 | "org.fedoraproject.Anaconda.Modules.Timezone"
19 | ]
20 |
--------------------------------------------------------------------------------
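iso.toml is an Anaconda installer config in the bootc-image-builder format; its kickstart %post pivots the installed system onto the signed ghcr.io image. A hedged local-build sketch, assuming the stock bootc-image-builder container (paths, tag, and flags are illustrative and may vary by builder version):

```bash
# Build an installer ISO for the published image; output lands in ./output.
mkdir -p output
sudo podman run --rm -it --privileged \
  -v ./iso.toml:/config.toml:ro \
  -v ./output:/output \
  quay.io/centos-bootc/bootc-image-builder:latest \
  --type anaconda-iso \
  ghcr.io/ublue-os/bluefin:lts
```
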
/system_files/usr/share/ublue-os/motd/template.md:
--------------------------------------------------------------------------------
1 | # Welcome to Bluefin LTS
2 |
3 | `%IMAGE_NAME%:%IMAGE_TAG%`
4 |
5 | | Command | Description |
6 | | ------- | ----------- |
7 | | `ujust --choose` | Show available commands |
8 | | `ujust toggle-user-motd` | Toggle this banner on/off |
9 | | `ujust bluefin-cli` | Enable terminal bling |
10 | | `brew help` | Manage command line packages |
11 |
12 | %TIP%
13 |
14 | - [Issues](https://issues.projectbluefin.io)
15 | - [Documentation](https://docs.projectbluefin.io/)
16 | - [Discuss](https://community.projectbluefin.io/)
17 | - [Leave Feedback](https://feedback.projectbluefin.io)
18 |
19 | %KEY_WARN%
20 |
--------------------------------------------------------------------------------
/system_files/usr/share/ublue-os/privileged-setup.hooks.d/99-flatpaks.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 |
3 | source /usr/lib/ublue/setup-services/libsetup.sh
4 |
5 | version-script flatpaks-lts privileged 1 || exit 0
6 |
7 | set -x
8 |
9 | # Set up Firefox default configuration
10 | ARCH=$(arch)
11 | if [ "$ARCH" != "aarch64" ] ; then
12 | mkdir -p "/var/lib/flatpak/extension/org.mozilla.firefox.systemconfig/${ARCH}/stable/defaults/pref"
13 | rm -f "/var/lib/flatpak/extension/org.mozilla.firefox.systemconfig/${ARCH}/stable/defaults/pref/"*bluefin*.js # glob must sit outside the quotes to expand
14 | /usr/bin/cp -rf /usr/share/ublue-os/firefox-config/* "/var/lib/flatpak/extension/org.mozilla.firefox.systemconfig/${ARCH}/stable/defaults/pref/"
15 | fi
16 |
--------------------------------------------------------------------------------
/.github/workflows/build-regular.yml:
--------------------------------------------------------------------------------
1 | name: Build Bluefin LTS
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - main
7 | schedule:
8 | - cron: "0 1 * * TUE" # Every Tuesday at 1am UTC
9 | merge_group:
10 | workflow_dispatch:
11 |
12 | concurrency:
13 | group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
14 | cancel-in-progress: true
15 |
16 | jobs:
17 | build:
18 | uses: ./.github/workflows/reusable-build-image.yml
19 | secrets: inherit
20 | with:
21 | image-name: bluefin
22 | rechunk: ${{ github.event_name != 'pull_request' }}
23 | sbom: ${{ github.event_name != 'pull_request' }}
24 | publish: ${{ github.event_name != 'pull_request' }}
25 |
--------------------------------------------------------------------------------
/.github/workflows/build-dx.yml:
--------------------------------------------------------------------------------
1 | name: Build Bluefin LTS DX
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - main
7 | schedule:
8 | - cron: "0 1 * * TUE" # Every Tuesday at 1am UTC
9 | merge_group:
10 | workflow_dispatch:
11 |
12 | concurrency:
13 | group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
14 | cancel-in-progress: true
15 |
16 | jobs:
17 | build:
18 | uses: ./.github/workflows/reusable-build-image.yml
19 | secrets: inherit
20 | with:
21 | image-name: bluefin-dx
22 | flavor: dx
23 | rechunk: ${{ github.event_name != 'pull_request' }}
24 | sbom: ${{ github.event_name != 'pull_request' }}
25 | publish: ${{ github.event_name != 'pull_request' }}
26 |
--------------------------------------------------------------------------------
/.github/workflows/build-gdx.yml:
--------------------------------------------------------------------------------
1 | name: Build Bluefin LTS GDX
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - main
7 | schedule:
8 | - cron: "0 1 * * TUE" # Every Tuesday at 1am UTC
9 | merge_group:
10 | workflow_dispatch:
11 |
12 | concurrency:
13 | group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
14 | cancel-in-progress: true
15 |
16 | jobs:
17 | build:
18 | uses: ./.github/workflows/reusable-build-image.yml
19 | secrets: inherit
20 | with:
21 | image-name: bluefin-gdx
22 | flavor: gdx
23 | rechunk: ${{ github.event_name != 'pull_request' }}
24 | sbom: ${{ github.event_name != 'pull_request' }}
25 | publish: ${{ github.event_name != 'pull_request' }}
26 |
--------------------------------------------------------------------------------
/.github/renovate.json5:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json",
3 | "extends": [
4 | "config:best-practices",
5 | ],
6 |
7 | "rebaseWhen": "never",
8 |
9 | "packageRules": [
10 | {
11 | "automerge": true,
12 | "matchUpdateTypes": ["pin", "pinDigest"]
13 | },
14 | {
15 | "enabled": false,
16 | "matchUpdateTypes": ["digest", "pin", "pinDigest"],
17 | "matchDepTypes": ["container"],
18 | "matchFileNames": [".github/workflows/**.yaml", ".github/workflows/**.yml"],
19 | },
20 | {
21 | "automerge": true,
22 | "matchUpdateTypes": ["digest"],
23 | "matchDepNames": [
24 | "quay.io/centos-bootc/centos-bootc",
25 | "ghcr.io/projectbluefin/common"
26 | ]
27 | }
28 | ]
29 | }
30 |
--------------------------------------------------------------------------------
/system_files_overrides/dx/usr/share/ublue-os/privileged-setup.hooks.d/20-dx.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | source /usr/lib/ublue/setup-services/libsetup.sh
4 |
5 | version-script dx-usergroups-lts privileged 1 || exit 0
6 |
7 | # Function to append a group entry to /etc/group
8 | append_group() {
9 | local group_name="$1"
10 | if ! grep -q "^$group_name:" /etc/group; then
11 | echo "Appending $group_name to /etc/group"
12 | grep "^$group_name:" /usr/lib/group | tee -a /etc/group >/dev/null
13 | fi
14 | }
15 |
16 | # Setup Groups
17 | append_group docker
18 |
19 | # We don't have incus on the image yet
20 | # append_group incus-admin
21 | # usermod -aG incus-admin $user
22 |
23 | mapfile -t wheelarray < <(getent group wheel | cut -d ":" -f 4 | tr ',' '\n')
24 | for user in "${wheelarray[@]}"; do
25 | if [[ -n "$user" ]]; then usermod -aG docker "$user"; fi # guard against an empty entry when wheel has no members
26 | done
27 |
--------------------------------------------------------------------------------
/system_files/etc/ublue-os/system-flatpaks.list:
--------------------------------------------------------------------------------
1 | be.alexandervanhee.gradia
2 | com.github.rafostar.Clapper
3 | com.github.tchx84.Flatseal
4 | com.mattjakeman.ExtensionManager
5 | io.github.flattool.Ignition
6 | io.github.flattool.Warehouse
7 | io.github.kolunmi.Bazaar
8 | io.github.pwr_solaar.solaar
9 | io.gitlab.adhami3310.Impression
10 | io.missioncenter.MissionCenter
11 | it.mijorus.smile
12 | org.gnome.Calculator
13 | org.gnome.Calendar
14 | org.gnome.Characters
15 | org.gnome.Connections
16 | org.gnome.Contacts
17 | org.gnome.DejaDup
18 | org.gnome.FileRoller
19 | org.gnome.Logs
20 | org.gnome.Loupe
21 | org.gnome.Maps
22 | org.gnome.NautilusPreviewer
23 | org.gnome.Papers
24 | org.gnome.TextEditor
25 | org.gnome.Weather
26 | org.gnome.baobab
27 | org.gnome.clocks
28 | org.gnome.font-viewer
29 | org.gtk.Gtk3theme.adw-gtk3
30 | org.gtk.Gtk3theme.adw-gtk3-dark
31 | org.mozilla.Thunderbird
32 | org.mozilla.firefox
33 | page.tesk.Refine
34 |
--------------------------------------------------------------------------------
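This list feeds Universal Blue's Flatpak provisioning (the installer itself is not part of this dump); every ID is a standard Flathub application ID. A minimal sketch of applying the list by hand, assuming the flathub remote is already configured system-wide:

```bash
# Install every app ID in the list system-wide from Flathub.
xargs -a /etc/ublue-os/system-flatpaks.list \
  flatpak install --system --noninteractive flathub
```
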
/.github/workflows/validate-renovate.yaml:
--------------------------------------------------------------------------------
1 | name: Validate Renovate Config
2 |
3 | on:
4 | pull_request:
5 | paths:
6 | - ".github/renovate.json5"
7 | - ".github/workflows/renovate.yml"
8 | push:
9 | branches:
10 | - main
11 | paths:
12 | - ".github/renovate.json5"
13 | - ".github/workflows/renovate.yml"
14 |
15 | jobs:
16 | validate:
17 | runs-on: ubuntu-latest
18 | steps:
19 | - name: Checkout
20 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
21 |
22 | - name: Setup Node.js
23 | uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6
24 | with:
25 | node-version: latest
26 |
27 | - name: Install dependencies
28 | shell: bash
29 | env:
30 | RENOVATE_VERSION: latest
31 | run: npm install -g renovate@${RENOVATE_VERSION}
32 |
33 | - name: Validate Renovate config
34 | shell: bash
35 | run: renovate-config-validator --strict
36 |
--------------------------------------------------------------------------------
/system_files/usr/share/ublue-os/user-setup.hooks.d/10-theming.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | source /usr/lib/ublue/setup-services/libsetup.sh
4 |
5 | version-script theming-lts user 1 || exit 0
6 |
7 | set -xeuo pipefail
8 |
9 | VEN_ID="$(cat /sys/devices/virtual/dmi/id/chassis_vendor)"
10 | # SYS_ID is used below; define it before first use (set -u aborts on unset vars)
11 | SYS_ID="$(cat /sys/devices/virtual/dmi/id/product_name)"
12 |
13 | if [[ ":Framework:" =~ :$VEN_ID: ]]; then
14 | echo 'Setting Framework logo menu'
15 | dconf write /org/gnome/shell/extensions/Logo-menu/symbolic-icon true
16 | dconf write /org/gnome/shell/extensions/Logo-menu/menu-button-icon-image 31
17 | echo 'Setting touch scroll type'
18 | dconf write /org/gnome/desktop/peripherals/mouse/natural-scroll true
19 | if [[ $SYS_ID == "Laptop ("* ]]; then
20 | echo 'Applying font fix for Framework 13'
21 | dconf write /org/gnome/desktop/interface/text-scaling-factor 1.25
22 | fi
23 | fi
24 |
25 | if [[ ":Thelio Astra:" =~ :$SYS_ID: ]]; then
26 | echo 'Setting Ampere Logo'
27 | dconf write /org/gnome/shell/extensions/Logo-menu/symbolic-icon true
28 | dconf write /org/gnome/shell/extensions/Logo-menu/menu-button-icon-image 32
29 | fi
30 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/pin.yml:
--------------------------------------------------------------------------------
1 | name: Pin Request
2 | description: Ask to pin a package to a specific version in order to avoid regressions
3 | body:
4 | - type: markdown
5 | attributes:
6 | value: |
7 | Thank you for taking the time to fill out this bug report! (She bites sometimes). We can pin packages to older, known working versions if there's an issue with an update in Fedora.
8 | - type: textarea
9 | id: package
10 | attributes:
11 | label: Describe the Package
12 | description: Describe the package you want pinned and why
13 | placeholder: Pin foobar to version 1.2
14 | value: "Package foobar version 1.2 blew up, we need to revert to 1.1"
15 | validations:
16 | required: true
17 | - type: textarea
18 | id: bodhi
19 | attributes:
20 | label: Bodhi Link (Optional)
21 | description: Add the bodhi link to the working version, this is very useful in order to pin a package quickly
22 | placeholder: Bodhi link
23 | value: "Pin to this version please: https://bodhi.fedoraproject.org/updates/FEDORA-2024-45d587348e"
24 | validations:
25 | required: false
26 |
--------------------------------------------------------------------------------
/system_files_overrides/aarch64-gdx/usr/share/ublue-os/gdx-demo/bench-container/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:24.04@sha256:c35e29c9450151419d9448b0fd75374fec4fff364a27f176fb458d472dfc9e54
2 |
3 | RUN apt-get update && apt-get install -y --no-install-recommends \
4 | sysbench \
5 | stress-ng \
6 | p7zip-full \
7 | git \
8 | build-essential \
9 | gfortran \
10 | wget \
11 | libssl-dev \
12 | ca-certificates \
13 | libsdl1.2-dev \
14 | && rm -rf /var/lib/apt/lists/* # Clean up apt cache
15 |
16 | # c-ray
17 | WORKDIR /opt
18 | RUN git clone https://github.com/jtsiomb/c-ray.git && \
19 | cd c-ray && \
20 | make -j$(nproc)
21 |
22 | # x264 (optional, but good) - example of compiling from source
23 | RUN git clone --depth 1 https://code.videolan.org/videolan/x264.git && \
24 | cd x264 && \
25 | ./configure --enable-static --enable-shared --enable-pic --enable-lto --extra-cflags="-march=armv8-a" --extra-ldflags="-march=armv8-a" && \
26 | make -j$(nproc) && \
27 | make install
28 |
29 | # Stream (optional)
30 | WORKDIR /opt
31 | RUN wget https://www.cs.virginia.edu/stream/FTP/Code/stream.c && \
32 | gcc -O3 -fopenmp -DSTREAM_ARRAY_SIZE=100000000 -march=armv8-a -mtune=neoverse-n1 -o stream stream.c -lm
33 |
34 | CMD ["bash"]
--------------------------------------------------------------------------------
/system_files/usr/share/ublue-os/motd/tips/10-tips.md:
--------------------------------------------------------------------------------
1 | Follow us on [@UniversalBlue@fosstodon.org](https://fosstodon.org/@UniversalBlue)
2 | Bluefin is your gateway to Kubernetes `kind create cluster` to [get started](https://kind.sigs.k8s.io/)
3 | Bluefin is your gateway to Cloud Native - find your flock at [landscape.cncf.io](https://l.cncf.io)
4 | Need more in-depth technical information? Check out the [Bluefin Administrator's Guide](https://docs.projectbluefin.io/administration)
5 | Like servers? Check out [ucore](https://github.com/ublue-os/ucore)
6 | Update break something? You can roll back with `sudo bootc rollback`
7 | Use `brew search` and `brew install` to install packages. Bluefin will take care of the updates automatically
8 | Use `Ctrl`-`Alt`-`T` to quickly open a terminal
9 | Tailscale is included, check out [their docs](https://tailscale.com/kb/1017/install)
10 | `ujust --choose` will show you each shortcut and the script it's running
11 | `tldr vim` will give you the basic rundown on commands for a given tool
12 | `ujust rebase-helper` can help you roll back to a specific image, or to a different channel entirely, check the docs for more info
13 | `ujust changelogs` shows a summary of the package changes since the last update
14 | Don't forget to check the [release notes](https://github.com/ublue-os/bluefin/releases)
15 | Help keep Bluefin alive and healthy, consider [donating](https://docs.projectbluefin.io/donations)
16 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug-report.yml:
--------------------------------------------------------------------------------
1 | name: Bug Report
2 | description: Report an issue about using Bluefin
3 | body:
4 | - type: markdown
5 | attributes:
6 | value: |
7 | Thank you for taking the time to fill out this bug report! (She bites sometimes)
8 | - type: textarea
9 | id: describe-bug
10 | attributes:
11 | label: Describe the bug
12 | description: A clear and concise description of what the bug is.
13 | placeholder: Tell us what happened! WE DO NOT SUPPORT REBASING FROM ANOTHER OS! If you went off the beaten path we cannot help you!
14 | value: "When I entered 2 + 2, I got the answer 6."
15 | validations:
16 | required: true
17 | - type: textarea
18 | id: expected-behavior
19 | attributes:
20 | label: What did you expect to happen?
21 | description: A clear and concise description of what you expected to happen.
22 | placeholder: What were you expecting to happen?
23 | value: "I expected 2 + 2 to equal 4, but instead 2 + 2 equaled 6!"
24 | validations:
25 | required: true
26 | - type: textarea
27 | id: version
28 | attributes:
29 | label: Output of `bootc status`
30 | description: Please run `sudo bootc status` and paste the output here, it is the only way we can verify the exact image you are on for debugging purposes.
31 | render: shell
32 | - type: textarea
33 | id: groups
34 | attributes:
35 | label: Output of `groups`
36 | description: Please run `groups` and paste the output here.
37 | render: shell
38 | - type: textarea
39 | id: extra-context
40 | attributes:
41 | label: Extra information or context
42 | description: Add any other context about the problem here.
--------------------------------------------------------------------------------
/Containerfile:
--------------------------------------------------------------------------------
1 | ARG MAJOR_VERSION="${MAJOR_VERSION:-c10s}"
2 | ARG BASE_IMAGE_SHA="${BASE_IMAGE_SHA:-sha256-feea845d2e245b5e125181764cfbc26b6dacfb3124f9c8d6a2aaa4a3f91082ed}"
3 | ARG ENABLE_HWE="${ENABLE_HWE:-0}"
4 | ARG AKMODS_VERSION="${AKMODS_VERSION:-centos-10}"
5 | ARG COMMON_IMAGE_REF
6 | # Upstream mounts akmods-zfs and akmods-nvidia-open; select their tag via AKMODS_VERSION
7 | FROM ghcr.io/ublue-os/akmods-zfs:${AKMODS_VERSION} AS akmods_zfs
8 | FROM ghcr.io/ublue-os/akmods-nvidia-open:${AKMODS_VERSION} AS akmods_nvidia_open
9 | FROM ${COMMON_IMAGE_REF} AS common
10 | FROM scratch AS context
11 |
12 | COPY system_files /files
13 | COPY --from=common /system_files/shared /files
14 | COPY --from=common /system_files/bluefin /files
15 | COPY system_files_overrides /overrides
16 | COPY build_scripts /build_scripts
17 |
18 | ARG MAJOR_VERSION="${MAJOR_VERSION:-c10s}"
19 | FROM quay.io/centos-bootc/centos-bootc:$MAJOR_VERSION
20 |
21 | ARG ENABLE_DX="${ENABLE_DX:-0}"
22 | ARG ENABLE_GDX="${ENABLE_GDX:-0}"
23 | ARG ENABLE_HWE="${ENABLE_HWE:-0}"
24 | ARG IMAGE_NAME="${IMAGE_NAME:-bluefin}"
25 | ARG IMAGE_VENDOR="${IMAGE_VENDOR:-ublue-os}"
26 | ARG MAJOR_VERSION="${MAJOR_VERSION:-lts}"
27 | ARG SHA_HEAD_SHORT="${SHA_HEAD_SHORT:-deadbeef}"
28 |
29 | RUN --mount=type=tmpfs,dst=/opt \
30 | --mount=type=tmpfs,dst=/tmp \
31 | --mount=type=tmpfs,dst=/var \
32 | --mount=type=tmpfs,dst=/boot \
33 | --mount=type=bind,from=akmods_zfs,src=/rpms,dst=/tmp/akmods-zfs-rpms \
34 | --mount=type=bind,from=akmods_zfs,src=/kernel-rpms,dst=/tmp/kernel-rpms \
35 | --mount=type=bind,from=akmods_nvidia_open,src=/rpms,dst=/tmp/akmods-nvidia-open-rpms \
36 | --mount=type=bind,from=context,source=/,target=/run/context \
37 | /run/context/build_scripts/build.sh
38 |
39 | # Makes `/opt` writeable by default
40 | # Needs to be here to make the main image build strict (no /opt there)
41 | RUN rm -rf /opt && ln -s /var/opt /opt
42 |
--------------------------------------------------------------------------------
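Every variant comes out of this single Containerfile, switched by the ENABLE_DX / ENABLE_GDX / ENABLE_HWE args; note that COMMON_IMAGE_REF has no default, so it must always be supplied. A hedged local-build sketch (the tag is illustrative; image-versions.yaml earlier in this dump carries the pinned digests CI uses):

```bash
# Build the DX variant locally; COMMON_IMAGE_REF is mandatory.
podman build \
  --build-arg COMMON_IMAGE_REF=ghcr.io/projectbluefin/common:latest \
  --build-arg ENABLE_DX=1 \
  -t localhost/bluefin-dx:test \
  .
```
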
/system_files/usr/share/fish/vendor_functions.d/fish_prompt.fish:
--------------------------------------------------------------------------------
1 | function fish_prompt --description 'Default prompt with container detection'
2 | set -l last_pipestatus $pipestatus
3 | set -lx __fish_last_status $status # Export for __fish_print_pipestatus.
4 | set -l normal (set_color normal)
5 | set -q fish_color_status
6 | or set -g fish_color_status red
7 | set -g fish_color_user brgreen
8 |
9 | # Color the prompt differently when we're root
10 | set -l color_cwd $fish_color_cwd
11 | set -l suffix '>'
12 | if functions -q fish_is_root_user; and fish_is_root_user
13 | if set -q fish_color_cwd_root
14 | set color_cwd $fish_color_cwd_root
15 | end
16 | set suffix '#'
17 | end
18 |
19 | # Detect if we are in a container
20 | if test -n "$CONTAINER_ID"
21 | set -g prompt_host "[$CONTAINER_ID]"
22 | set -g prefix_icon "📦 "
23 | else
24 | set -g prompt_host "$hostname"
25 | set -g prefix_icon ""
26 | end
27 |
28 | # Write pipestatus
29 | # If the status was carried over (if no command is issued or if `set` leaves the status untouched), don't bold it.
30 | set -l bold_flag --bold
31 | set -q __fish_prompt_status_generation; or set -g __fish_prompt_status_generation $status_generation
32 | if test $__fish_prompt_status_generation = $status_generation
33 | set bold_flag
34 | end
35 | set __fish_prompt_status_generation $status_generation
36 | set -l status_color (set_color $fish_color_status)
37 | set -l statusb_color (set_color $bold_flag $fish_color_status)
38 | set -l prompt_status (__fish_print_pipestatus "[" "]" "|" "$status_color" "$statusb_color" $last_pipestatus)
39 |
40 | echo -n -s $prefix_icon (set_color $fish_color_user) "$USER" $normal "@" $prompt_host' ' (set_color $color_cwd) (prompt_pwd) $normal (fish_vcs_prompt) $normal " "$prompt_status $suffix " "
41 | end
42 |
--------------------------------------------------------------------------------
/system_files/usr/share/ublue-os/system-setup.hooks.d/10-framework.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | source /usr/lib/ublue/setup-services/libsetup.sh
4 |
5 | version-script framework-lts system 1 || exit 0
6 |
7 | set -x
8 |
9 | # GLOBAL
10 | # Hardware identity used by the checks below; these were previously undefined in this script
11 | VEN_ID="$(cat /sys/devices/virtual/dmi/id/chassis_vendor)"
12 | CPU_VENDOR="$(grep -m1 '^vendor_id' /proc/cpuinfo | awk -F': ' '{print $2}')"
13 | KARGS=$(rpm-ostree kargs)
14 | NEEDED_KARGS=()
15 | echo "Current kargs: $KARGS"
16 |
17 | if [[ $KARGS =~ "nomodeset" ]]; then
18 | echo "Removing nomodeset"
19 | NEEDED_KARGS+=("--delete-if-present=nomodeset")
20 | fi
21 |
22 | if [[ ":Framework:" =~ :$VEN_ID: ]]; then
23 | if [[ "GenuineIntel" == "$CPU_VENDOR" ]]; then
24 | if [[ ! $KARGS =~ "hid_sensor_hub" ]]; then
25 | echo "Intel Framework Laptop detected, applying needed keyboard fix"
26 | NEEDED_KARGS+=("--append-if-missing=module_blacklist=hid_sensor_hub")
27 | fi
28 | fi
29 | fi
30 |
31 | #shellcheck disable=SC2128
32 | if [[ -n "$NEEDED_KARGS" ]]; then
33 | echo "Found needed karg changes, applying the following: ${NEEDED_KARGS[*]}"
34 | plymouth display-message --text="Updating kargs - Please wait, this may take a while" || true
35 | rpm-ostree kargs "${NEEDED_KARGS[@]}" --reboot || exit 1 # [@] keeps each flag a separate argument
36 | else
37 | echo "No karg changes needed"
38 | fi
39 |
40 | SYS_ID="$(cat /sys/devices/virtual/dmi/id/product_name)"
41 |
42 | # FRAMEWORK 13 AMD FIXES
43 | if [[ ":Framework:" =~ :$VEN_ID: ]]; then
44 | if [[ $SYS_ID == "Laptop 13 ("* ]]; then
45 | if [[ "AuthenticAMD" == "$CPU_VENDOR" ]]; then
46 | if [[ ! -f /etc/modprobe.d/alsa.conf ]]; then
47 | echo 'Fixing 3.5mm jack'
48 | tee /etc/modprobe.d/alsa.conf <<<"options snd-hda-intel index=1,0 model=auto,dell-headset-multi"
49 | echo 0 | tee /sys/module/snd_hda_intel/parameters/power_save
50 | fi
51 | if [[ ! -f /etc/udev/rules.d/20-suspend-fixes.rules ]]; then
52 | echo 'Fixing suspend issue'
53 | echo "ACTION==\"add\", SUBSYSTEM==\"serio\", DRIVERS==\"atkbd\", ATTR{power/wakeup}=\"disabled\"" >/etc/udev/rules.d/20-suspend-fixes.rules
54 | fi
55 | fi
56 | fi
57 | fi
58 |
--------------------------------------------------------------------------------
/.github/workflows/build-regular-hwe.yml:
--------------------------------------------------------------------------------
1 | name: Build Bluefin LTS HWE
2 |
3 | permissions:
4 | contents: read
5 | packages: write
6 | id-token: write
7 |
8 | on:
9 | pull_request:
10 | branches:
11 | - main
12 | schedule:
13 | - cron: "0 1 * * TUE" # Every Tuesday at 1am UTC
14 | merge_group:
15 | workflow_dispatch:
16 |
17 | concurrency:
18 | group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
19 | cancel-in-progress: true
20 |
21 | jobs:
22 | build:
23 | uses: ./.github/workflows/reusable-build-image.yml
24 | secrets: inherit
25 | permissions:
26 | contents: read
27 | packages: write
28 | id-token: write
29 | with:
30 | image-name: bluefin
31 | rechunk: ${{ github.event_name != 'pull_request' }}
32 | sbom: ${{ github.event_name != 'pull_request' }}
33 | publish: ${{ github.event_name != 'pull_request' }}
34 | hwe: true
35 | tag:
36 | runs-on: ubuntu-latest
37 | needs: build
38 | permissions:
39 | contents: read
40 | packages: write
41 | id-token: write
42 | steps:
43 | - name: Checkout code
44 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
45 |
46 | - name: Log in to GitHub Container Registry
47 | uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3
48 | with:
49 | registry: ghcr.io
50 | username: ${{ github.actor }}
51 | password: ${{ secrets.GITHUB_TOKEN }}
52 |
53 | - name: Pull image from registry
54 | run: |
55 | echo "Pulling hwe image from registry"
56 | docker pull ghcr.io/${{ github.repository_owner }}/bluefin:lts-hwe
57 |
58 | - name: Tag image
59 | id: tag
60 | run: |
61 | echo "Tagging hwe image with testing suffix"
62 | docker tag ghcr.io/${{ github.repository_owner }}/bluefin:lts-hwe ghcr.io/${{ github.repository_owner }}/bluefin:lts-testing
63 |
64 | - name: Push tagged image
65 | if: ${{ github.event_name != 'pull_request' }}
66 | run: |
67 | echo "Pushing tagged image"
68 | docker push ghcr.io/${{ github.repository_owner }}/bluefin:lts-testing
69 |
--------------------------------------------------------------------------------
/.github/workflows/build-dx-hwe.yml:
--------------------------------------------------------------------------------
1 | name: Build Bluefin DX LTS HWE
2 |
3 | permissions:
4 | contents: read
5 | packages: write
6 | id-token: write
7 |
8 | on:
9 | pull_request:
10 | branches:
11 | - main
12 | schedule:
13 | - cron: "0 1 * * TUE" # Every Tuesday at 1am UTC
14 | merge_group:
15 | workflow_dispatch:
16 |
17 | concurrency:
18 | group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
19 | cancel-in-progress: true
20 |
21 | jobs:
22 | build:
23 | uses: ./.github/workflows/reusable-build-image.yml
24 | secrets: inherit
25 | permissions:
26 | contents: read
27 | packages: write
28 | id-token: write
29 | with:
30 | image-name: bluefin-dx
31 | flavor: dx
32 | rechunk: ${{ github.event_name != 'pull_request' }}
33 | sbom: ${{ github.event_name != 'pull_request' }}
34 | publish: ${{ github.event_name != 'pull_request' }}
35 | hwe: true
36 | tag:
37 | runs-on: ubuntu-latest
38 | needs: build
39 | permissions:
40 | contents: read
41 | packages: write
42 | id-token: write
43 | steps:
44 | - name: Checkout code
45 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
46 |
47 | - name: Log in to GitHub Container Registry
48 | uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3
49 | with:
50 | registry: ghcr.io
51 | username: ${{ github.actor }}
52 | password: ${{ secrets.GITHUB_TOKEN }}
53 |
54 | - name: Pull image from registry
55 | run: |
56 | echo "Pulling hwe image from registry"
57 | docker pull ghcr.io/${{ github.repository_owner }}/bluefin-dx:lts-hwe
58 |
59 | - name: Tag image
60 | id: tag
61 | run: |
62 | echo "Tagging hwe image with testing suffix"
63 | docker tag ghcr.io/${{ github.repository_owner }}/bluefin-dx:lts-hwe ghcr.io/${{ github.repository_owner }}/bluefin-dx:lts-testing
64 |
65 | - name: Push tagged image
66 | if: ${{ github.event_name != 'pull_request' }}
67 | run: |
68 | echo "Pushing tagged image"
69 | docker push ghcr.io/${{ github.repository_owner }}/bluefin-dx:lts-testing
70 |
--------------------------------------------------------------------------------
/system_files/etc/firewalld/firewalld.conf:
--------------------------------------------------------------------------------
1 | # firewalld config file
2 |
3 | # default zone
4 | # The default zone used if an empty zone string is used.
5 | # Default: public
6 | DefaultZone=Workstation
7 |
8 | # Minimal mark
9 | # Marks up to this minimum are free for use for example in the direct
10 | # interface. If more free marks are needed, increase the minimum
11 | # Default: 100
12 | #MinimalMark=100
13 |
14 | # Clean up on exit
15 | # If set to no or false the firewall configuration will not get cleaned up
16 | # on exit or stop of firewalld
17 | # Default: yes
18 | #CleanupOnExit=yes
19 |
20 | # Lockdown
21 | # If set to enabled, firewall changes with the D-Bus interface will be limited
22 | # to applications that are listed in the lockdown whitelist.
23 | # The lockdown whitelist file is lockdown-whitelist.xml
24 | # Default: no
25 | #Lockdown=no
26 |
27 | # IPv6_rpfilter
28 | # Performs a reverse path filter test on a packet for IPv6. If a reply to the
29 | # packet would be sent via the same interface that the packet arrived on, the
30 | # packet will match and be accepted, otherwise dropped.
31 | # The rp_filter for IPv4 is controlled using sysctl.
32 | # Default: yes
33 | #IPv6_rpfilter=yes
34 |
35 | # IndividualCalls
36 | # Do not use combined -restore calls, but individual calls. This increases the
37 | # time that is needed to apply changes and to start the daemon, but is good for
38 | # debugging.
39 | # Default: no
40 | #IndividualCalls=no
41 |
42 | # LogDenied
43 | # Add logging rules right before reject and drop rules in the INPUT, FORWARD
44 | # and OUTPUT chains for the default rules and also final reject and drop rules
45 | # in zones. Possible values are: all, unicast, broadcast, multicast and off.
46 | # Default: off
47 | #LogDenied=off
48 |
49 | # AutomaticHelpers
50 | # For the secure use of iptables and connection tracking helpers it is
51 | # recommended to turn AutomaticHelpers off. But this might have side effects on
52 | # other services. This setting can be overridden per zone using the
53 | # AutomaticHelpers zone setting. For more information on helpers and their
54 | # configuration, please have a look at the respective documentation.
55 | # Default: system
56 | #AutomaticHelpers=system
57 |
58 | # FirewallBackend
59 | # Selects the firewall backend implementation.
60 | # Choices are:
61 | # - nftables (default)
62 | # - iptables (iptables, ip6tables, ebtables and ipset)
63 | # Default: nftables
64 | #FirewallBackend=nftables
65 |
66 | # FlushAllOnReload
67 | # Flush all runtime rules on a reload. In previous releases some runtime
68 | # configuration was retained during a reload, namely; interface to zone
69 | # assignment, and direct rules. This was confusing to users. To get the old
70 | # behavior set this to "no".
71 | # Default: yes
72 | #FlushAllOnReload=yes
73 |
74 | # RFC3964_IPv4
75 | # As per RFC 3964, filter IPv6 over IPv4 tunnels (6to4).
76 | # This means we also filter protocol 41 and isatap.
77 | # Default: yes
78 | #RFC3964_IPv4=yes
--------------------------------------------------------------------------------
/system_files_overrides/aarch64-gdx/usr/share/ublue-os/just/66-ampere.just:
--------------------------------------------------------------------------------
1 | export model_name := env("MODEL_NAME", "deepseek-r1:70b")
2 | export model_source := env("SOURCE", "ollama")
3 | export threads := env("THREADS", `sh -c 'echo $(( $(nproc) / 2 ))'`)
4 | export ngl := env("NGL", "0")
5 | export ramalama_image := env("RAMALAMA_IMAGE", "quay.io/ramalama/vulkan:latest")
6 | export script_dir := env("SCRIPT_DIR", "/usr/share/ublue-os/gdx-demo")
7 |
8 | demo-ai-server $ramalama_image=ramalama_image $threads=threads $ngl=ngl:
9 | #!/usr/bin/env bash
10 | python3 ${script_dir}/ramalama/demo-ai-server.py --image $ramalama_image --threads $threads --ngl $ngl
11 |
12 | [group('Just')]
13 | check:
14 | #!/usr/bin/bash
15 | echo "Checking syntax: Justfile"
16 | find . -type f -name "*.just" | while read -r file; do
17 | echo "Checking syntax: $file"
18 | just --unstable --fmt --check -f $file
19 | done
20 | echo "Checking syntax: Justfile"
21 | just --unstable --fmt --check -f Justfile
22 |
23 | [group('Just')]
24 | fix:
25 | #!/usr/bin/bash
26 | echo "Fixing syntax: Justfile"
27 | find . -type f -name "*.just" | while read -r file; do
28 | echo "Fixing syntax: $file"
29 | just --unstable --fmt -f $file
30 | done
31 | echo "Fixing syntax: Justfile"
32 | just --unstable --fmt -f Justfile || { echo "Error: Failed to fix Justfile syntax."; exit 1; }
33 |
34 | _demo-llama-server $ramalama_image=ramalama_image $model_source=model_source $model_name=model_name $threads=threads $ngl=ngl:
35 | #!/usr/bin/env bash
36 | mkdir -p ~/demo && cd ~/demo
37 | python3 ${script_dir}/ramalama/ramalama-serve-ampere.py --image $ramalama_image --threads $threads --ngl $ngl $model_source://$model_name
38 |
39 | demo-deepseek-server:
40 | just _demo-llama-server ramalama_image="quay.io/ramalama/vulkan:latest" model_source="ollama" model_name="deepseek-coder:6.7b-base" threads="96" ngl="0"
41 |
42 | demo-benchmark-sysbench:
43 | #!/usr/bin/env bash
44 | podman image inspect localhost/ampere-benchmarks >/dev/null || podman build -t localhost/ampere-benchmarks ${script_dir}/bench-container
45 | echo "Running sysbench cpu benchmark for 60 seconds"
46 | podman run -it --rm localhost/ampere-benchmarks sysbench cpu --threads=$(nproc) run --time=60
47 |
48 | demo-benchmark-stress-ng:
49 | #!/usr/bin/env bash
50 | podman image inspect localhost/ampere-benchmarks >/dev/null || podman build -t localhost/ampere-benchmarks ${script_dir}/bench-container
51 | echo "Running stress-ng cpu benchmark for 60 seconds"
52 | podman run -it --rm localhost/ampere-benchmarks stress-ng --cpu $(nproc) --cpu-method all --timeout 60s --metrics-brief
53 |
54 | demo-benchmark-7zip:
55 | #!/usr/bin/env bash
56 | podman image inspect localhost/ampere-benchmarks >/dev/null || podman build -t localhost/ampere-benchmarks ${script_dir}/bench-container
57 | echo "Running 7zip benchmark for 60 seconds"
58 | podman run -it --rm localhost/ampere-benchmarks 7z b -mmt$(nproc)
59 |
60 | demo-btop:
61 | #!/usr/bin/env bash
62 | btop
63 |
--------------------------------------------------------------------------------
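These recipes ship on the aarch64 GDX image under /usr/share/ublue-os/just, where the ujust wrapper discovers them (ujust is referenced in the MOTD template above). Invoking one directly with plain just would look like this sketch; on the image itself `ujust demo-benchmark-sysbench` is the usual entry point:

```bash
# Run the sysbench demo recipe straight from the installed .just file.
just -f /usr/share/ublue-os/just/66-ampere.just demo-benchmark-sysbench
```
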
/.github/workflows/generate-changelog-release.yml:
--------------------------------------------------------------------------------
1 | on:
2 | schedule:
3 | - cron: '0 2 * * 2' # Every Tuesday at 02:00 UTC
4 | workflow_call:
5 | inputs:
6 | stream_name:
7 | description: "Release Tag (e.g. stream10, latest)"
8 | type: string
9 | default: "lts"
10 | required: false
11 | force_publish:
12 | description: "Force publish changelog even if tag hasn't changed"
13 | type: boolean
14 | default: false
15 | required: false
16 | workflow_dispatch:
17 | inputs:
18 | handwritten:
19 | description: "Small Changelog about changes in this build"
20 | required: false
21 | type: string
22 | force_publish:
23 | description: "Force publish changelog even if tag hasn't changed"
24 | type: boolean
25 | default: false
26 | required: false
27 | permissions:
28 | contents: write
29 |
30 | name: Generate Release
31 | jobs:
32 | generate-release:
33 | runs-on: ubuntu-latest
34 | strategy:
35 | fail-fast: false
36 | matrix:
37 | version: ["lts"]
38 | steps:
39 | - name: Checkout repository with full history
40 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
41 | with:
42 | fetch-depth: 0 # Fetch all history for proper tag detection
43 |
44 | - name: Setup GitHub CLI
45 | run: |
46 | # GitHub CLI is already available in GitHub Actions runners
47 | gh --version
48 |
49 | - name: Check for new tags and generate changelog
50 | id: changelog
51 | shell: bash
52 | env:
53 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
54 | run: |
55 | # Build Python command with optional arguments
56 | python_args=("${{ matrix.version }}" --ci)
57 |
58 | # Add force flag if requested
59 | if [[ "${{ inputs.force_publish || 'false' }}" == "true" ]]; then
60 | python_args+=(--force)
61 | fi
62 |
63 | # Add handwritten content if provided
64 | if [[ -n "${{ inputs.handwritten }}" ]]; then
65 | echo "${{ inputs.handwritten }}" > handwritten.txt
66 | python_args+=(--handwritten handwritten.txt)
67 | fi
68 |
69 | # Execute the Python script directly
70 | python3 .github/changelogs.py "${python_args[@]}"
71 |
72 | - name: Create Release
73 | if: steps.changelog.outputs.SKIP_CHANGELOG != 'true'
74 | uses: softprops/action-gh-release@a06a81a03ee405af7f2048a818ed3f03bbf83c7b # v2
75 | with:
76 | name: ${{ steps.changelog.outputs.CHANGELOG_TITLE }}
77 | tag_name: ${{ steps.changelog.outputs.CHANGELOG_TAG }}
78 | body_path: ${{ steps.changelog.outputs.CHANGELOG_PATH }}
79 | make_latest: true
80 |
81 | - name: Log skip reason
82 | if: steps.changelog.outputs.SKIP_CHANGELOG == 'true'
83 | run: |
84 | echo "Changelog generation was skipped - release already exists for the current tag"
85 | echo "Use the 'force_publish' option to regenerate if needed"
86 |
87 | - name: Upload changelog artifacts
88 | if: steps.changelog.outputs.SKIP_CHANGELOG != 'true'
89 | uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
90 | with:
91 | name: changelog-${{ matrix.version }}
92 | path: |
93 | ./output.env
94 | ./changelog.md
95 | retention-days: 30
96 |
97 |
--------------------------------------------------------------------------------
/.github/changelog_config.yaml:
--------------------------------------------------------------------------------
1 | # Changelog Generator Configuration
2 | # This file contains all constants, templates, and configuration for the changelog generator
3 |
4 | # Basic configuration
5 | os_name: "bluefin-lts"
6 | targets: ["lts", "dx", "gdx"]
7 | registry_url: "ghcr.io/ublue-os"
8 |
9 | # Package blacklist - packages to exclude from changelog
10 | package_blacklist:
11 | - "firefox"
12 | - "firefox-langpacks"
13 | - "thunderbird"
14 | - "thunderbird-langpacks"
15 | - "akmods-keys"
16 | - "akmods"
17 | - "kmod"
18 | - "glibc"
19 | - "glibc-common"
20 | - "glibc-gconv-extra"
21 | - "glibc-langpack"
22 | - "glibc-minimal-langpack"
23 |
24 | # Image variants for different targets
25 | image_variants:
26 | - ""
27 | - "-dx"
28 | - "-gdx"
29 |
30 | # Regex patterns
31 | patterns:
32 | centos: "\\.el\\d\\d"
33 | start_pattern: "{target}\\.\\d\\d\\d+"
34 |
35 | # Template patterns for package changes
36 | templates:
37 | pattern_add: "\n| ✨ | {name} | | {version} |"
38 | pattern_change: "\n| 🔄 | {name} | {prev} | {new} |"
39 | pattern_remove: "\n| ❌ | {name} | {version} | |"
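  # Rendered, the three row patterns above produce markdown table rows such as
  # the following (package names and versions are purely illustrative):
  #   | ✨ | cosign |  | 2.4.1 |
  #   | 🔄 | podman | 5.3.1 | 5.3.2 |
  #   | ❌ | toolbox | 0.0.99 |  |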
40 | pattern_pkgrel_changed: "{prev} ➡️ {new}"
41 | pattern_pkgrel: "{version}"
42 | common_pattern: "### {title}\n| | Name | Previous | New |\n| --- | --- | --- | --- |{changes}\n\n"
43 | commits_format: "### Commits\n| Hash | Subject |\n| --- | --- |{commits}\n\n"
44 | commit_format: "\n| **[{short}](https://github.com/ublue-os/bluefin-lts/commit/{githash})** | {subject} |"
45 | changelog_title: "{os} {tag}: {pretty}"
46 | handwritten_placeholder: "This is an automatically generated changelog for release `{curr}`."
47 | changelog_format: |
48 | {handwritten}
49 |
50 | From previous `{target}` version `{prev}` there have been the following changes. **One package per new version shown.**
51 |
52 | ### Major packages
53 | | Name | Version |
54 | | --- | --- |
55 | | **Kernel** | {pkgrel:kernel} |
56 | | **HWE Kernel** | {pkgrel:kernel-hwe} |
57 | | **GNOME** | {pkgrel:gnome-control-center-filesystem} |
58 | | **Mesa** | {pkgrel:mesa-filesystem} |
59 | | **Podman** | {pkgrel:podman} |
60 |
61 | ### Major DX packages
62 | | Name | Version |
63 | | --- | --- |
64 | | **Docker** | {pkgrel:docker-ce} |
65 | | **VSCode** | {pkgrel:code} |
66 | | **Ramalama** | {pkgrel:ramalama} |
67 |
68 | ### Major GDX packages
69 | | Name | Version |
70 | | --- | --- |
71 | | **Nvidia** | {pkgrel:nvidia-driver} |
72 | | **CUDA** | {pkgrel:nvidia-driver-cuda} |
73 |
74 | {changes}
75 |
76 | ### How to rebase
77 | For current users, type the following to rebase to this version:
78 | ```bash
79 | # Get Image Name
80 | IMAGE_NAME=$(jq -r '.["image-name"]' < /usr/share/ublue-os/image-info.json)
81 |
82 | # For this Stream
83 | sudo bootc switch --enforce-container-sigpolicy ghcr.io/ublue-os/$IMAGE_NAME:{target}
84 |
85 | # For this Specific Image:
86 | sudo bootc switch --enforce-container-sigpolicy ghcr.io/ublue-os/$IMAGE_NAME:{curr}
87 | ```
88 |
89 | ### Documentation
90 | Be sure to read the [documentation](https://docs.projectbluefin.io/lts) for more information
91 | on how to use your cloud native system.
92 |
93 | # Section names for different image types
94 | sections:
95 | all: "All Images"
96 | base: "Base Images"
97 | dx: "[Developer Experience Images](https://docs.projectbluefin.io/bluefin-dx)"
98 | gdx: "[Graphical Developer Experience Images](https://docs.projectbluefin.io/gdx)"
99 |
100 | # Default configuration values
101 | defaults:
102 | retries: 3
103 | retry_wait: 5
104 | timeout_seconds: 30
105 | output_file: "changelog.md"
106 | env_output_file: "output.env"
107 | enable_commits: true
108 |
--------------------------------------------------------------------------------
/.github/workflows/build-iso.yml:
--------------------------------------------------------------------------------
1 | name: Build ISO
2 |
3 | on:
4 | pull_request:
5 | workflow_dispatch:
6 | inputs:
7 | build_combinations:
8 | description: 'JSON array of builds. Ex: [{"image_suffix":"","tag_suffix":"-hwe"}]'
9 | required: true
10 | default: '[{"image_suffix":"","tag_suffix":""},{"image_suffix":"","tag_suffix":"-hwe"},{"image_suffix":"-gdx","tag_suffix":""}]'
11 | type: string
12 | upload-to-cloudflare:
13 | description: "Upload to Cloudflare"
14 | required: false
15 | default: false
16 | type: boolean
17 |
18 | env:
19 | IMAGE_REGISTRY: "ghcr.io/${{ github.repository_owner }}"
20 | IMAGE_NAME: "bluefin"
21 | DEFAULT_TAG: "lts"
22 |
23 | concurrency:
24 | group: ${{ github.workflow }}-${{ github.ref || github.run_id }}
25 | cancel-in-progress: true
26 |
27 | jobs:
28 | determine-matrix:
29 | name: Determine Build Matrix
30 | runs-on: ubuntu-latest
31 | outputs:
32 | matrix: ${{ steps.set-matrix.outputs.matrix }}
33 | steps:
34 | - name: Set Build Matrix
35 | id: set-matrix
36 | run: |
37 | # For pull requests (or any non-manual trigger), use the hardcoded default.
38 | if [ "${{ github.event_name }}" != "workflow_dispatch" ]; then
39 | echo 'matrix={"include":[{"image_suffix":"","tag_suffix":""},{"image_suffix":"","tag_suffix":"-hwe"},{"image_suffix":"-gdx","tag_suffix":"-hwe"}]}' >> $GITHUB_OUTPUT
40 | # For manual runs, use the JSON provided in the workflow_dispatch input.
41 | else
42 | JSON_INPUT='${{ inputs.build_combinations }}'
43 | echo "matrix={\"include\":$JSON_INPUT}" >> $GITHUB_OUTPUT
44 | fi
45 | build:
46 | needs: determine-matrix
47 | strategy:
48 | fail-fast: false
49 | matrix: ${{ fromJson(needs.determine-matrix.outputs.matrix) }}
50 |
51 | name: Build ISO for bluefin${{ matrix.image_suffix }}:lts${{ matrix.tag_suffix }}
52 |
53 | runs-on: ubuntu-24.04
54 | permissions:
55 | contents: read
56 | packages: read
57 | id-token: write
58 |
59 | steps:
60 | - name: Checkout
61 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
62 |
63 | - name: Define Build Variables
64 | id: vars
65 | run: |
66 | echo "FULL_IMAGE_NAME=${{ env.IMAGE_NAME }}${{ matrix.image_suffix }}" >> $GITHUB_ENV
67 | echo "FULL_TAG_NAME=${{ env.DEFAULT_TAG }}${{ matrix.tag_suffix }}" >> $GITHUB_ENV
68 | echo "ARTIFACT_NAME=${{ env.IMAGE_NAME }}${{ matrix.image_suffix }}-${{ env.DEFAULT_TAG }}${{ matrix.tag_suffix }}-iso" >> $GITHUB_ENV
69 | - name: Build ISO
70 | id: build-iso
71 | uses: osbuild/bootc-image-builder-action@019bb59c5100ecec4e78c9e94e18a840110f7a0b # v0.0.2
72 | with:
73 | config-file: ./iso.toml
74 | image: ${{ env.IMAGE_REGISTRY }}/${{ env.FULL_IMAGE_NAME }}:${{ env.FULL_TAG_NAME }}
75 | types: |
76 | iso
77 | - name: Upload to Job Artifacts
78 | if: github.event.inputs.upload-to-cloudflare == 'false' || github.event_name != 'workflow_dispatch'
79 | uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
80 | with:
81 | name: ${{ env.ARTIFACT_NAME }}
82 | if-no-files-found: error
83 | path: ${{ steps.build-iso.outputs.output-directory }}
84 |
85 | - name: Upload to CloudFlare
86 | if: github.event.inputs.upload-to-cloudflare == 'true'
87 | env:
88 | RCLONE_CONFIG_R2_TYPE: s3
89 | RCLONE_CONFIG_R2_PROVIDER: Cloudflare
90 | RCLONE_CONFIG_R2_ACCESS_KEY_ID: ${{ secrets.R2_ACCESS_KEY_ID }}
91 | RCLONE_CONFIG_R2_SECRET_ACCESS_KEY: ${{ secrets.R2_SECRET_ACCESS_KEY }}
92 | RCLONE_CONFIG_R2_REGION: auto
93 | RCLONE_CONFIG_R2_ENDPOINT: ${{ secrets.R2_ENDPOINT }}
94 | SOURCE_DIR: ${{ steps.build-iso.outputs.output-directory }}
95 | run: |
96 | sudo apt-get update
97 | sudo apt-get install -y rclone
98 | rclone copy "$SOURCE_DIR" "R2:bluefin/${{ env.ARTIFACT_NAME }}"
99 |
--------------------------------------------------------------------------------
/system_files_overrides/aarch64-gdx/usr/share/ublue-os/gdx-demo/ramalama/ramalama-serve-ampere.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import subprocess
4 | import argparse
5 | import re
6 |
7 | def main():
8 | parser = argparse.ArgumentParser(description="Run ramalama serve, extract podman command, remove GPU options, and execute.")
9 | parser.add_argument("model", help="Model to serve (e.g., huggingface://...)")
10 | parser.add_argument("--image", help="Image to use for the container")
11 | parser.add_argument("--authfile", help="Path of the authentication file")
12 | parser.add_argument("--device", help="Device to leak into the running container")
13 | parser.add_argument("-n", "--name", help="Name of container in which the Model will be run")
14 | parser.add_argument("--ngl", type=int, help="Number of layers to offload to the GPU, if available")
15 | parser.add_argument("--privileged", action="store_true", help="Give extended privileges to container")
16 | parser.add_argument("--pull", choices=["always", "missing", "never", "newer"], default="newer", help="Pull image policy")
17 | parser.add_argument("--seed", type=int, help="Override random seed")
18 | parser.add_argument("--temp", type=float, default=0.8, help="Temperature of the response from the AI model")
19 | parser.add_argument("--tls-verify", action="store_true", help="Require HTTPS and verify certificates when contacting registries")
20 | parser.add_argument("-c", "--ctx-size", type=int, default=2048, help="Size of the prompt context (0 = loaded from model)")
21 | parser.add_argument("--network", "--net", help="Set the network mode for the container")
22 | parser.add_argument("-d", "--detach", action="store_true", help="Run the container in detached mode")
23 | parser.add_argument("--host", default="0.0.0.0", help="IP address to listen")
24 | parser.add_argument("--generate", choices=["quadlet", "kube", "quadlet/kube"], help="Generate configuration format")
25 | parser.add_argument("-p", "--port", type=int, help="Port for AI Model server to listen on")
26 | parser.add_argument("-t", "--threads", type=int, help="Number of threads for llama.cpp")
27 |
28 | args = parser.parse_args()
29 |
30 | ramalama_args = ["ramalama", "--dryrun"]
31 | if args.image:
32 | ramalama_args.extend(["--image", args.image])
33 | ramalama_args.extend(["serve"])
34 | if args.authfile:
35 | ramalama_args.extend(["--authfile", args.authfile])
36 | if args.device:
37 | ramalama_args.extend(["--device", args.device])
38 | if args.name:
39 | ramalama_args.extend(["--name", args.name])
40 |     if args.ngl is not None:
41 | ramalama_args.extend(["--ngl", str(args.ngl)])
42 | if args.privileged:
43 | ramalama_args.append("--privileged")
44 | if args.pull:
45 | ramalama_args.extend(["--pull", args.pull])
47 |     if args.seed is not None:
48 |         ramalama_args.extend(["--seed", str(args.seed)])
49 |     if args.temp is not None:
50 |         ramalama_args.extend(["--temp", str(args.temp)])
51 |     if args.tls_verify:
52 |         ramalama_args.append("--tls-verify")
53 |     if args.ctx_size is not None:
54 |         ramalama_args.extend(["-c", str(args.ctx_size)])
54 | if args.network:
55 | ramalama_args.extend(["--network", args.network])
56 | if args.detach:
57 | ramalama_args.append("-d")
58 | if args.host:
59 | ramalama_args.extend(["--host", args.host])
60 | if args.generate:
61 | ramalama_args.extend(["--generate", args.generate])
62 | if args.port:
63 | ramalama_args.extend(["-p", str(args.port)])
64 |     # "-t" is the short form of "--threads"; pass the thread count
65 |     # to llama.cpp only once
66 |     if args.threads is not None:
67 |         ramalama_args.extend(["-t", str(args.threads)])
68 |
69 | ramalama_args.append(args.model)
70 |
71 | try:
72 | result = subprocess.run(ramalama_args, capture_output=True, text=True, check=True)
73 | output = result.stdout
74 | except subprocess.CalledProcessError as e:
75 | print(f"Error running ramalama: {e.stderr}")
76 | return
77 |
78 | podman_command_match = re.search(r"podman run.*", output)
79 | if not podman_command_match:
80 | print("Error: Could not extract podman command.")
81 | return
82 |
83 | podman_command = podman_command_match.group(0)
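    # Strip the GPU passthrough options that ramalama adds so the container
    # runs CPU-only (this demo targets systems without an NVIDIA GPU)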
84 | modified_command = re.sub(r" --device nvidia.com/gpu=all -e CUDA_VISIBLE_DEVICES=0", "", podman_command)
85 |
86 | print("Executing modified podman command:")
87 | print(modified_command)
88 |
89 | try:
90 | subprocess.Popen(["xdg-open", "http://localhost:8080"])
91 |         subprocess.run(modified_command, shell=True, check=True)
92 | except subprocess.CalledProcessError as e:
93 | print(f"Error running podman: {e}")
94 |
95 | if __name__ == "__main__":
96 | main()
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Bluefin LTS
2 | *Achillobator giganticus*
3 |
4 | [](https://github.com/ublue-os/bluefin-lts/actions/workflows/build-regular.yml)
5 | [](https://app.codacy.com/gh/ublue-os/bluefin-lts/dashboard?utm_source=gh&utm_medium=referral&utm_content=&utm_campaign=Badge_grade)
6 | [](https://www.bestpractices.dev/projects/10098)
7 | [](https://artifacthub.io/packages/container/bluefin/bluefin)
8 | [](https://deepwiki.com/ublue-os/bluefin-lts) [
](https://github.com/ublue-os/bluefin)
9 |
10 | Larger, more lethal [Bluefin](https://projectbluefin.io). `bluefin:lts` is built on CentOS Stream 10.
11 |
12 | 
13 |
14 | ## Instructions
15 |
16 | Check [the documentation](https://docs.projectbluefin.io/lts) for the latest instructions.
17 |
18 | ## Metrics
19 |
20 | 
21 |
--------------------------------------------------------------------------------
/system_files_overrides/aarch64-gdx/usr/share/ublue-os/gdx-demo/ramalama/demo-ai-server.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import subprocess
4 | import json
5 | import os, shutil
6 | import argparse
7 |
8 | def demo_ai_server():
9 | """Interactively select an AI model and start llama-server."""
10 |
11 | parser = argparse.ArgumentParser(description="Interactively select an AI model and start llama-server.",
12 | formatter_class=argparse.RawTextHelpFormatter)
13 | parser.add_argument("--image", type=str, default=os.environ.get("ramalama_image", "quay.io/ramalama/vulkan:latest"),
14 | help="Docker image to use for llama-server (default: quay.io/ramalama/vulkan:latest, can also be set via RAMALAMA_IMAGE environment variable)")
15 | parser.add_argument("--threads", type=int, default=None,
16 | help="Number of threads to use (default: number of cores / 2, can also be set via THREADS environment variable)")
17 | parser.add_argument("--ngl", type=int, default=None,
18 | help="Number of layers to offload to GPU (default: 0, can also be set via NGL environment variable)")
19 | args = parser.parse_args()
20 |
21 | try:
22 | # Get the list of models in JSON format using ramalama
23 | process = subprocess.run(["ramalama", "list", "--json"], capture_output=True, text=True, check=True)
24 | models_json_str = process.stdout
25 | except subprocess.CalledProcessError as e:
26 | print("Error: Failed to get model list from 'ramalama list --json'.")
27 | print(f"Return code: {e.returncode}")
28 | print(f"Stdout: {e.stdout}")
29 | print(f"Stderr: {e.stderr}")
30 | print("Please check ramalama installation and model setup.")
31 | return 1
32 | except FileNotFoundError:
33 | print("Error: 'ramalama' command not found. Please ensure ramalama CLI is installed and in your PATH.")
34 | return 1
35 |
36 | if not models_json_str:
37 | print("No models found by ramalama. Please add models using 'ramalama pull ...'.")
38 | return 1
39 |
40 | try:
41 | models_json = json.loads(models_json_str)
42 | except json.JSONDecodeError as e:
43 | print("Error: Failed to parse JSON output from 'ramalama list --json'.")
44 | print(f"JSONDecodeError: {e}")
45 | print("Output from ramalama list --json was:")
46 | print(models_json_str)
47 | return 1
48 |
49 | if not models_json:
50 | print("No models found by ramalama (after JSON parsing). Please add models using 'ramalama pull ...'.")
51 | return 1
52 |
53 | model_array = []
54 | for item in models_json:
55 | model_name_full = item.get("name")
56 | if not model_name_full:
57 | print("Error: Model entry missing 'name' field in ramalama list output.")
58 | return 1
59 |
60 | source = "ollama" # Default source
61 | model_name = model_name_full
62 |
63 | if model_name_full.startswith("huggingface://"):
64 | source = "huggingface"
65 | model_name = model_name_full[len("huggingface://"):]
66 | elif model_name_full.startswith("ollama://"):
67 | source = "ollama"
68 | model_name = model_name_full[len("ollama://"):]
69 | elif model_name_full.startswith("oci://"):
70 | source = "oci"
71 | model_name = model_name_full[len("oci://"):]
72 |
73 | model_array.append({"source": source, "model_name": model_name, "original_name": model_name_full})
74 |
75 | if not model_array:
76 | print("No valid models found with recognized source prefixes (huggingface://, ollama://, oci://) or default source.")
77 | return 1
78 |
79 | selected_original_name = None
80 | if subprocess.run(["command", "-v", "fzf"], capture_output=True).returncode == 0:
81 | # Use fzf for interactive selection
82 | print("Using fzf for interactive model selection.")
83 | display_models = "\n".join([model["original_name"] for model in model_array])
84 | try:
85 | fzf_process = subprocess.run(["fzf", "--height", "40%", "--border", "--ansi", "--prompt", "Select a model: "],
86 | input=display_models, capture_output=True, text=True, check=True)
87 | selected_original_name = fzf_process.stdout.strip()
88 | except subprocess.CalledProcessError as e:
89 | if e.returncode == 130: # fzf returns 130 when user exits with Ctrl+C
90 | print("No model selected using fzf.")
91 | return 1
92 | else:
93 | print(f"Error running fzf: Return code: {e.returncode}, Stderr: {e.stderr}")
94 | # Fallback to list selection instead of exiting, if fzf fails for other reasons.
95 | print("Falling back to simple list selection due to fzf error.")
96 | selected_original_name = None # Ensure fallback happens
97 | except FileNotFoundError:
98 | print("Error: fzf command not found, but command -v fzf succeeded earlier. This is unexpected.")
99 | print("Falling back to simple list selection.")
100 | selected_original_name = None # Ensure fallback happens
101 |
102 |
103 | if not selected_original_name:
104 | # Fallback to simple numbered list selection
105 | print("fzf not found or failed. Falling back to simple list selection.")
106 | print("Available models:")
107 | for index, model in enumerate(model_array):
108 | print(f"{index + 1}) {model['original_name']}")
109 |
110 | while True:
111 | try:
112 | selected_index = int(input(f"Select model number (1-{len(model_array)}): "))
113 | if 1 <= selected_index <= len(model_array):
114 | selected_original_name = model_array[selected_index - 1]["original_name"]
115 | break
116 | else:
117 | print("Invalid selection number. Please try again.")
118 | except ValueError:
119 | print("Invalid input. Please enter a number.")
120 |
121 | if not selected_original_name:
122 | print("No model selected.")
123 | return 1
124 |
125 | selected_model_source = None
126 | selected_model_name = None
127 | for model in model_array:
128 | if model["original_name"] == selected_original_name:
129 | selected_model_source = model["source"]
130 | selected_model_name = model["model_name"]
131 | break
132 |
133 | if not selected_model_source or not selected_model_name:
134 | print("Error: Could not find selected model details in parsed model array.")
135 | return 1
136 |
137 |     threads = str(args.threads) if args.threads is not None else os.environ.get("THREADS")
138 | if not threads:
139 | try:
140 | nproc_output = subprocess.run(["nproc"], capture_output=True, text=True, check=True).stdout.strip()
141 | num_cores = int(nproc_output)
142 | threads = str(num_cores // 2)
143 | except (subprocess.CalledProcessError, ValueError, FileNotFoundError):
144 | threads = "4" # Default threads if nproc fails
145 |
146 |     ngl = str(args.ngl) if args.ngl is not None else os.environ.get("NGL")
147 |     if not ngl:
148 |         ngl = "0" # Default to 0 to show off the CPU
149 |
150 | ramalama_image = args.image
151 |
152 | print(f"Starting llama-server with image: {ramalama_image} source: {selected_model_source}, model: {selected_model_name}, threads: {threads} ngl: {ngl}")
153 |
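    # Delegate the actual server launch to the private "_demo-llama-server" just
    # recipe; the positional arguments are image, source, model, threads, and ngl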
154 | try:
155 | subprocess.run(["just", "_demo-llama-server", ramalama_image, selected_model_source, selected_model_name, threads, ngl], check=True, cwd=os.getcwd())
156 | print(f"Started llama-server with source: {selected_model_source}, model: {selected_model_name}.")
157 | except subprocess.CalledProcessError as e:
158 | print("Error: Failed to start llama-server using 'just _demo-llama-server'.")
159 | print(f"Return code: {e.returncode}")
160 | print(f"Stdout: {e.stdout}")
161 | print(f"Stderr: {e.stderr}")
162 | print("Please check the error messages and podman logs (if applicable).")
163 | return 1
164 | except FileNotFoundError:
165 | print("Error: 'just' command not found. Please ensure just is installed and in your PATH.")
166 | return 1
167 |
168 | return 0 # Success
169 |
170 | if __name__ == "__main__":
171 | exit(demo_ai_server())
--------------------------------------------------------------------------------
/AGENTS.md:
--------------------------------------------------------------------------------
1 | # Bluefin LTS
2 |
3 | Bluefin LTS is a container-based operating system image built on CentOS Stream 10 using bootc technology. It creates bootable container images that can be converted to disk images, ISOs, and VM images.
4 |
5 | Always reference these instructions first and fall back to search or bash commands only when you encounter unexpected information that does not match the info here.
6 |
7 | ## Working Effectively
8 |
9 | ### Prerequisites and Setup
10 | - **CRITICAL**: Ensure `just` command runner is installed.
11 | Check with `which just`. If missing, install to `~/.local/bin`:
12 | ```bash
13 | mkdir -p ~/.local/bin
14 | wget -qO- "https://github.com/casey/just/releases/download/1.34.0/just-1.34.0-x86_64-unknown-linux-musl.tar.gz" | tar --no-same-owner -C ~/.local/bin -xz just
15 | export PATH="$HOME/.local/bin:$PATH"
16 | ```
17 | - Ensure podman is available: `which podman` (should be present)
18 | - Verify git is available: `which git`
19 |
20 | ### Build Commands - NEVER CANCEL BUILDS
21 | - **Build container image**: `just build [IMAGE_NAME] [TAG] [DX] [GDX] [HWE]`
22 | - Defaults: `just build` is equivalent to `just build bluefin lts 0 0 0`
23 | - Takes 45-90 minutes. NEVER CANCEL. Set timeout to 120+ minutes.
24 | - Example: `just build bluefin lts 0 0 0` (basic build)
25 | - Example: `just build bluefin lts 1 0 0` (with DX - developer tools)
26 | - Example: `just build bluefin lts 0 1 0` (with GDX - GPU/AI tools)
27 | - **Build VM images**:
28 | - `just build-qcow2` - Converts *existing* container image to QCOW2 (45-90 minutes)
29 | - `just rebuild-qcow2` - Builds container image THEN converts to QCOW2 (90-180 minutes)
30 | - `just build-iso` - ISO installer image (45-90 minutes)
31 | - `just build-raw` - RAW disk image (45-90 minutes)
32 | - NEVER CANCEL any build command. Set timeout to 120+ minutes.
33 |
34 | ### Validation and Testing
35 | - **ALWAYS run syntax checks before making changes**:
36 | - `just check` - validates Just syntax (takes <30 seconds)
37 | - `just lint` - runs shellcheck on all shell scripts (takes <10 seconds)
38 | - `just format` - formats shell scripts with shfmt (takes <10 seconds)
39 | - **Build validation workflow**:
40 | 1. Always run `just check` before committing changes
41 | 2. Always run `just lint` before committing changes
42 | 3. Test build with `just build bluefin lts` (120+ minute timeout)
43 | 4. Test VM creation with `just build-qcow2` if modifying VM-related code
44 |
45 | ### Running Virtual Machines
46 | - **Run VM from built images**:
47 | - `just run-vm-qcow2` - starts QCOW2 VM with web console on http://localhost:8006
48 | - `just run-vm-iso` - starts ISO installer VM
49 | - `just spawn-vm` - uses systemd-vmspawn for VM management
50 | - **NEVER run VMs in CI environments** - they require KVM/graphics support
51 |
52 | ## Build System Architecture
53 |
54 | ### Key Build Variants
55 | - **Regular**: Basic Bluefin LTS (`just build bluefin lts 0 0 0`)
56 | - **DX**: Developer Experience with VSCode, Docker, development tools (`just build bluefin lts 1 0 0`)
57 | - **GDX**: GPU Developer Experience with CUDA, AI tools (`just build bluefin lts 0 1 0`)
58 | - **HWE**: Hardware Enablement for newer hardware (`just build bluefin lts 0 0 1`)
59 |
60 | ### Core Build Process
61 | 1. **Container Build**: Uses Containerfile with CentOS Stream 10 base
62 | 2. **Build Scripts**: Located in `build_scripts/` directory
63 | 3. **System Overrides**: Architecture and variant-specific files in `system_files_overrides/`
64 | 4. **Bootc Conversion**: Container images converted to bootable formats via Bootc Image Builder
65 |
66 | ### Build Timing Expectations
67 | - **Container builds**: 45-90 minutes (timeout: 120+ minutes)
68 | - **VM image builds**: 45-90 minutes (timeout: 120+ minutes)
69 | - **Syntax checks**: <30 seconds
70 | - **Linting**: <10 seconds
71 | - **Git operations**: <5 seconds
72 |
73 | ## Repository Structure
74 |
75 | ### Key Directories
76 | - `build_scripts/` - Build automation and package installation scripts
77 | - `system_files/` - Base system configuration files
78 | - `system_files_overrides/` - Variant-specific overrides (dx, gdx, arch-specific)
79 | - `.github/workflows/` - CI/CD automation (60-minute timeout configured)
80 | - `Justfile` - Primary build automation (13KB+ file with all commands)
81 |
82 | ### Important Files
83 | - `Containerfile` - Main container build definition
84 | - `image.toml` - VM image build configuration
85 | - `iso.toml` - ISO build configuration
86 | - `Justfile` - Build command definitions (use `just --list` to see all)
87 |
88 | ## Common Development Tasks
89 |
90 | ### Making Changes to Build Scripts
91 | 1. Edit files in `build_scripts/` for package changes
92 | 2. Edit `system_files_overrides/[variant]/` for variant-specific changes
93 | 3. Always run `just lint` before committing
94 | 4. Test with full build: `just build bluefin lts` (120+ minute timeout)
95 |
96 | ### Adding New Packages
97 | - Edit `build_scripts/20-packages.sh` for base packages (see the sketch after this list)
98 | - Use variant-specific overrides in `build_scripts/overrides/[variant]/`
99 | - Use architecture-specific overrides in `build_scripts/overrides/[arch]/`
100 | - Use combined overrides in `build_scripts/overrides/[arch]/[variant]/`
101 | - Package installation uses dnf/rpm package manager
102 |
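As a minimal sketch, a base-package addition in `build_scripts/20-packages.sh` might look like this (the package name is illustrative and the script's exact structure may differ):

```bash
# Hypothetical: add one extra base package to the image
dnf -y install htop
```
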
103 | ### Modifying System Configuration
104 | - Base configs: `system_files/`
105 | - Variant configs: `system_files_overrides/[variant]/`
106 | - Architecture-specific: `system_files_overrides/[arch]/`
107 | - Combined: `system_files_overrides/[arch]-[variant]/` (see the example after this list)
108 |
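For example, a configuration file that should ship only on aarch64 GDX images would go in the combined override tree (file name and path below are illustrative):

```bash
# Hypothetical: ship an extra config only on aarch64 GDX builds
mkdir -p system_files_overrides/aarch64-gdx/etc/myapp
cp myapp.conf system_files_overrides/aarch64-gdx/etc/myapp/myapp.conf
```
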
109 | ## GitHub Actions Integration
110 |
111 | ### CI Build Process
112 | - **Timeout**: 60 minutes configured in reusable-build-image.yml
113 | - **Platforms**: amd64, arm64
114 | - **Validation**: Runs `just check` before building
115 | - **Build Command**: `sudo just build [IMAGE] [TAG] [DX] [GDX] [HWE]`
116 |
117 | ### Available Workflows
118 | - `build-regular.yml` - Standard Bluefin LTS build
119 | - `build-dx.yml` - Developer Experience variant
120 | - `build-gdx.yml` - GPU Developer Experience variant
121 | - `build-iso.yml` - ISO installer builds
122 |
123 | ## Validation Scenarios
124 |
125 | ### After Making Changes
126 | 1. **Syntax validation**: `just check && just lint`
127 | 2. **Build test**: `just build bluefin lts` (full 120+ minute build)
128 | 3. **VM test**: `just build-qcow2` (if modifying VM components)
129 | 4. **Manual testing**: Run VM and verify basic OS functionality
130 |
131 | ### Code Quality Requirements
132 | - All shell scripts must pass shellcheck (`just lint`)
133 | - Just syntax must be valid (`just check`)
134 | - CI builds must complete within 60 minutes
135 | - Always test the specific variant you're modifying (dx, gdx, regular)
136 |
137 | ## Common Commands Reference
138 |
139 | ```bash
140 | # Essential validation (run before every commit)
141 | just check # <30 seconds
142 | just lint # <10 seconds
143 |
144 | # Core builds (NEVER CANCEL - 120+ minute timeout)
145 | just build bluefin lts # Standard build
146 | just build bluefin lts 1 0 0 # With DX (developer tools)
147 | just build bluefin lts 0 1 0 # With GDX (GPU/AI tools)
148 |
149 | # VM images (NEVER CANCEL - 120+ minute timeout)
150 | just build-qcow2 # QCOW2 VM image
151 | just build-iso # ISO installer
152 | just build-raw # Raw disk image
153 |
154 | # Development utilities
155 | just --list # Show all available commands
156 | just clean # Clean build artifacts
157 | git status # Check repository state
158 | ```
159 |
160 | ## Critical Reminders
161 |
162 | - **NEVER CANCEL builds or long-running commands** - they may take 45-90 minutes
163 | - **ALWAYS set 120+ minute timeouts** for build commands
164 | - **ALWAYS run `just check && just lint`** before committing changes
165 | - **This is an OS image project**, not a traditional application
166 | - **Internet access may be limited** in some build environments
167 | - **VM functionality requires KVM/graphics support** - not available in all CI environments
168 |
169 | ## Build Failures and Debugging
170 |
171 | ### Common Issues
172 | - **Network timeouts**: Build pulls packages from CentOS repositories
173 | - **Disk space**: Container builds require significant space (clean with `just clean`)
174 | - **Permission errors**: Some commands require sudo/root access
175 | - **Missing dependencies**: Ensure just, podman, git are installed
176 |
177 | ### Recovery Steps
178 | 1. Clean build artifacts: `just clean`
179 | 2. Verify tools: `which just podman git`
180 | 3. Check syntax: `just check && just lint`
181 | 4. Retry with full timeout: `just build bluefin lts` (120+ minutes)
182 |
183 | Never attempt to fix builds by canceling and restarting - let them complete or fail naturally.
184 |
185 | ## Other Rules that are Important to the Maintainers
186 |
187 | - Ensure that [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/#specification) are used and enforced for every commit and pull request title (see the example after this list).
188 | - Always be surgical and use the least amount of code; the project strives to be easy to maintain.
189 | - Documentation for this project exists in @projectbluefin/documentation
190 | - Bluefin and Bluefin GTS exist in @ublue-os/bluefin
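
For example, a compliant commit or pull request title might look like this (type, scope, and subject are illustrative):

```text
feat(gdx): add ampere benchmark recipe
```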
191 |
192 | ## Attribution Requirements
193 |
194 | AI agents must disclose what tool and model they are using in the "Assisted-by" commit footer:
195 |
196 | ```text
197 | Assisted-by: [Model Name] via [Tool Name]
198 | ```
199 |
200 | Example:
201 |
202 | ```text
203 | Assisted-by: Claude 3.5 Sonnet via GitHub Copilot
204 | ```
205 |
--------------------------------------------------------------------------------
/system_files/usr/libexec/ublue-ts-exit-node:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Tailscale exit node picker
4 |
5 | ERROR_TEXT="...this should not have happened, please report a bug at https://github.com/ublue-os/packages"
6 |
7 | # --- Prerequisite Checks ---
8 | if ! command -v gum &> /dev/null; then
9 | echo "gum could not be found. Please install gum: https://github.com/charmbracelet/gum"
10 | exit 1
11 | fi
12 | if ! command -v tailscale &> /dev/null; then
13 | echo "tailscale could not be found. Please install tailscale."
14 | exit 1
15 | fi
16 |
17 | # --- AWK Script to get unique Country Keywords for Stage 1 ---
18 | AWK_UNIQUE_COUNTRY_KEYWORDS='
19 | NR > 1 {
20 | country_keyword = $3;
21 | if (country_keyword != "" && country_keyword != "-") {
22 | keywords[country_keyword] = 1;
23 | } else if (country_keyword == "-") {
24 | keywords["1. Exit Nodes not on Mullvad"] = 1;
25 | }
26 | }
27 | END {
28 | for (k in keywords) { print k; }
29 | }
30 | '
31 |
32 | # --- AWK Script to Format Nodes for Gum Display (Stage 2) ---
33 | AWK_FORMAT_NODES_FOR_GUM='
34 | # Process lines that are data lines: Skip header (NR>1), first field is IP, second field (hostname) contains a dot.
35 | NR > 1 && $1 ~ /^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$/ && $2 ~ /\./ {
36 | hostname = $2;
37 | description = "";
38 |
39 | if (NF > 3) {
40 | for (i = 3; i < NF; i++) {
41 | description = description $i " ";
42 | }
43 | sub(/[[:space:]]+$/, "", description);
44 | }
45 |
46 | clean_desc = description;
47 | if (clean_desc == "- -" || clean_desc == "-") {
48 | clean_desc = "";
49 | } else {
50 | if (clean_desc ~ /[[:space:]]-$/) { clean_desc = substr(clean_desc, 1, length(clean_desc)-2); }
51 | if (clean_desc ~ /^- /) { clean_desc = substr(clean_desc, 3); }
52 | gsub(/ +/, " ", clean_desc);
53 | gsub(/^ | $/, "", clean_desc);
54 | }
55 |
56 | if (clean_desc == "" || clean_desc == hostname) {
57 | printf "%s\n", hostname;
58 | } else {
59 | printf "%s (%s)\n", clean_desc, hostname;
60 | }
61 | }
62 | '
63 |
64 | # --- Fetch initial node list ONCE ---
65 | # This will be used for displaying current status and for Stage 1 options
66 | initial_node_list_raw=$(tailscale exit-node list 2>&1)
67 | ts_initial_list_status=$?
68 |
69 | # --- Display Current Exit Node (derived from initial_node_list_raw) ---
70 | current_display_node="None" # Default if no node is selected or list is empty
71 | if [ $ts_initial_list_status -eq 0 ] && [[ -n "$initial_node_list_raw" ]] && ! (echo "$initial_node_list_raw" | grep -q "No exit nodes found"); then
72 | # Find the line where the last field is "selected", skip header with NR > 1
73 | selected_node_line=$(echo "$initial_node_list_raw" | awk 'NR > 1 && $NF == "selected" {print; exit}')
74 |
75 | if [[ -n "$selected_node_line" ]]; then
76 | selected_hostname=$(echo "$selected_node_line" | awk '{print $2}')
77 | selected_ip=$(echo "$selected_node_line" | awk '{print $1}')
78 |
79 | # Attempt to get Country and City for a richer display
80 | location_info=$(echo "$selected_node_line" | awk '{
81 | desc = "";
82 | for (i = 3; i < NF; i++) { # Iterate from 3rd field to one before last (STATUS)
83 | # Only add if not "-"
84 | if ($i != "-") {
85 | desc = desc $i " ";
86 | }
87 | }
88 | sub(/[[:space:]]+$/, "", desc); # Remove trailing space
89 | # If after processing, desc is empty (e.g. both country/city were "-"), make it empty
90 | if (desc == "" || desc == "-") { desc = "" }
91 | print desc
92 | }')
93 |
94 | if [[ -n "$location_info" ]]; then
95 | current_display_node="$selected_hostname ($location_info, IP: $selected_ip)"
96 | else
97 | current_display_node="$selected_hostname (IP: $selected_ip)"
98 | fi
99 | fi
100 | else echo "No exit nodes found on your Tailnet please add some or enable Mullvad VPN for your account at https://login.tailscale.com/admin/settings/general/mullvad"
101 | fi
102 |
103 | echo "This is a tool to quickly set/change your Exit Node for Tailscale"
104 |
105 | gum style --border double --border-foreground 212 --padding "0 1" \
106 | "Current Tailscale Exit Node: $(gum style --bold "$current_display_node")"
107 | echo
108 |
109 |
110 | # --- Stage 1: Select Country Keyword or Action ---
111 | gum style --bold "Select Country Keyword or Action"
112 |
113 | unique_country_keywords=$(echo "$initial_node_list_raw" | awk "$AWK_UNIQUE_COUNTRY_KEYWORDS" | sort -u)
114 |
115 | stage1_options_array=()
116 | stage1_options_array+=("0. Turn off Exit Node")
117 |
118 | if [[ -n "$unique_country_keywords" ]]; then
119 | while IFS= read -r keyword; do
120 | stage1_options_array+=("$keyword")
121 | done <<< "$unique_country_keywords"
122 | fi
123 |
124 | if [ ${#stage1_options_array[@]} -le 2 ] && ! (echo "$initial_node_list_raw" | grep -q "No exit nodes found") ; then
125 | gum style --faint "(No specific country keywords could be extracted for filtering Stage 1)"
126 | fi
127 |
128 | printf -v stage1_options_string '%s\n' "${stage1_options_array[@]}"
129 | stage1_options_string=${stage1_options_string%?}
130 |
131 | selected_stage1_choice=$(echo -e "$stage1_options_string" | gum filter \
132 | --placeholder="Select a country keyword to filter by, or an action..." \
133 | --height="${GUM_FILTER_HEIGHT_STAGE1:-12}" \
134 | --prompt="❯ " --indicator="◉" --selected-prefix="✅ " \
135 | --header="Stage 1: Filter target or Action" \
136 | --strict)
137 | gum_stage1_exit_code=$?
138 |
139 | if [ $gum_stage1_exit_code -ne 0 ]; then
140 | gum style --faint "Stage 1 selection cancelled. Exiting."
141 | exit 0
142 | fi
143 |
144 | # --- Stage 2: Select Specific Node from (Potentially) Filtered List ---
145 | nodes_for_stage2_raw_output=""
146 | header_for_stage2=""
147 |
148 | if [[ "$selected_stage1_choice" == "0. Turn off Exit Node" ]]; then
149 | gum spin --show-output --spinner dot --title "Turning off Tailscale exit node..." -- tailscale set --exit-node=
150 | if [ $? -eq 0 ]; then gum style "✅ Tailscale exit node turned off."; else gum style --error "❌ Failed to turn off exit node."; fi
151 | exit 0
152 | fi
153 |
154 | gum style --bold "Select Specific Exit Node"
155 |
156 | if [[ "$selected_stage1_choice" == "1. Exit Nodes not on Mullvad" ]]; then
157 | header_for_stage2=$(echo "$initial_node_list_raw" | head -n1)
158 | body_no_country=$(echo "$initial_node_list_raw" | awk 'NR > 1 && $3 == "-"')
159 | if [[ -n "$body_no_country" ]]; then
160 | nodes_for_stage2_raw_output="$header_for_stage2"$'\n'"$body_no_country"
161 | else
162 | nodes_for_stage2_raw_output="$header_for_stage2"
163 | fi
164 | else
165 | # User selected a specific country keyword, use tailscale's filter
166 | # Corrected gum style flag from --dim to --faint
167 | gum style --faint "Fetching nodes for filter: '$selected_stage1_choice'..."
168 |
169 | nodes_for_stage2_raw_output=$(tailscale exit-node list --filter="$selected_stage1_choice" 2>&1)
170 | ts_filter_status=$?
171 |
172 | if [[ $ts_filter_status -ne 0 ]]; then
173 | gum style --error "Failed to fetch filtered exit node list for '$selected_stage1_choice'."
174 | echo "$nodes_for_stage2_raw_output" >&2
175 | exit 1
176 | fi
177 | fi
178 |
179 | formatted_nodes_for_stage2="" # Initialize
180 | if [[ -n "$nodes_for_stage2_raw_output" && "$nodes_for_stage2_raw_output" != *"No exit nodes found"* ]]; then
181 | # Check if there's more than just a header (or if it's an empty valid list)
182 | if (echo "$nodes_for_stage2_raw_output" | awk 'NR > 1 {found=1; exit} END{exit !found}'); then
183 | formatted_nodes_for_stage2=$(echo "$nodes_for_stage2_raw_output" | awk "$AWK_FORMAT_NODES_FOR_GUM")
184 | else
185 | # Contains only header or is an error/empty message we already logged
186 | gum style --faint "No actual node data to format for '$selected_stage1_choice' $ERROR_TEXT"
187 | fi
188 | else
189 | gum style --warning "No exit nodes found matching the filter: '$selected_stage1_choice'."
190 | fi
191 |
192 |
193 | if [[ -z "$formatted_nodes_for_stage2" ]]; then
194 | gum style --faint "No exit nodes matched the filter"
195 | fi
196 |
197 | final_node_selection=$(echo -e "$formatted_nodes_for_stage2" | gum filter \
198 | --placeholder="Select exit node..." \
199 | --height="${GUM_FILTER_HEIGHT_STAGE2:-15}" \
200 | --prompt="❯ " --indicator="◉" --selected-prefix="✅ " \
201 | --header="Stage 2: Node Selection" \
202 | --strict)
203 | gum_stage2_exit_code=$?
204 |
205 | if [ $gum_stage2_exit_code -ne 0 ]; then
206 | gum style --faint "Selection cancelled. Exiting."
207 | exit 0
208 | fi
209 |
210 | # --- Set Exit Node ---
211 | if [[ -z "$final_node_selection" ]]; then
212 | gum style --error "No node selected. Exiting."
213 | exit 1
214 | fi
215 |
216 | actual_hostname=""
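# Gum entries are either "hostname" or "Description (hostname)"; extract the
# hostname from the parenthesized suffix when present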
217 | if [[ "$final_node_selection" =~ ^(.*)[[:space:]]\(([^()]+)\)$ ]]; then
218 | actual_hostname="${BASH_REMATCH[2]}"
219 | else
220 | actual_hostname="$final_node_selection"
221 | fi
222 |
223 | if [[ -z "$actual_hostname" ]]; then
224 | gum style --error "Could not extract a valid hostname from '$final_node_selection'. Exiting."
225 | exit 1
226 | fi
227 |
228 | gum spin --show-output --spinner dot --title "Setting Tailscale exit node to '$actual_hostname'..." -- tailscale set --exit-node="$actual_hostname"
229 | if [ $? -eq 0 ]; then
230 | gum style "✅ Tailscale exit node set to $(gum style --bold "$actual_hostname")."
231 | else
232 | gum style --error "❌ Failed to set exit node to $(gum style --bold "$actual_hostname")."
233 | fi
234 |
235 | exit 0
236 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/Justfile:
--------------------------------------------------------------------------------
1 | export repo_organization := env("GITHUB_REPOSITORY_OWNER", "ublue-os")
2 | export image_name := env("IMAGE_NAME", "bluefin")
3 | export centos_version := env("CENTOS_VERSION", "stream10")
4 | export default_tag := env("DEFAULT_TAG", "lts")
5 | export bib_image := env("BIB_IMAGE", "quay.io/centos-bootc/bootc-image-builder:latest")
6 | export coreos_stable_version := env("COREOS_STABLE_VERSION", "42")
7 | export common_image := env("COMMON_IMAGE", "ghcr.io/projectbluefin/common:latest")
8 |
9 | alias build-vm := build-qcow2
10 | alias rebuild-vm := rebuild-qcow2
11 | alias run-vm := run-vm-qcow2
12 |
13 | [private]
14 | default:
15 | @just --list
16 |
17 | # Check Just Syntax
18 | [group('Just')]
19 | check:
20 | #!/usr/bin/env bash
21 | find . -type f -name "*.just" | while read -r file; do
22 | echo "Checking syntax: $file"
23 |         just --unstable --fmt --check -f "$file"
24 | done
25 | echo "Checking syntax: Justfile"
26 | just --unstable --fmt --check -f Justfile
27 |
28 | # Fix Just Syntax
29 | [group('Just')]
30 | fix:
31 | #!/usr/bin/env bash
32 | find . -type f -name "*.just" | while read -r file; do
33 | echo "Checking syntax: $file"
34 | just --unstable --fmt -f $file
35 | done
36 | echo "Checking syntax: Justfile"
37 | just --unstable --fmt -f Justfile || { exit 1; }
38 |
39 | # Clean Repo
40 | [group('Utility')]
41 | clean:
42 | #!/usr/bin/env bash
43 | set -eoux pipefail
44 | touch _build
45 | find *_build* -exec rm -rf {} \;
46 | rm -f previous.manifest.json
47 | rm -f changelog.md
48 | rm -f output.env
49 |
50 | # Sudo Clean Repo
51 | [group('Utility')]
52 | [private]
53 | sudo-clean:
54 | just sudoif just clean
55 |
56 | # sudoif bash function
57 | [group('Utility')]
58 | [private]
59 | sudoif command *args:
60 | #!/usr/bin/env bash
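    # Run the command as root: directly when already root, via sudo --askpass
    # when a graphical askpass helper is available, otherwise via plain sudo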
61 | function sudoif(){
62 | if [[ "${UID}" -eq 0 ]]; then
63 | "$@"
64 | elif [[ "$(command -v sudo)" && -n "${SSH_ASKPASS:-}" ]] && [[ -n "${DISPLAY:-}" || -n "${WAYLAND_DISPLAY:-}" ]]; then
65 | /usr/bin/sudo --askpass "$@" || exit 1
66 | elif [[ "$(command -v sudo)" ]]; then
67 | /usr/bin/sudo "$@" || exit 1
68 | else
69 | exit 1
70 | fi
71 | }
72 | sudoif {{ command }} {{ args }}
73 |
74 | # This Justfile recipe builds a container image using Podman.
75 | #
76 | # Arguments:
77 | # $target_image - The tag you want to apply to the image (default: bluefin).
78 | # $tag - The tag for the image (default: lts).
79 | # $dx - Enable DX (default: "0").
80 | # $gdx - Enable GDX (default: "0").
81 | #
82 | # DX:
83 | # Developer Experience (DX) is a feature that allows you to install the latest developer tools for your system.
84 | # Packages include VScode, Docker, Distrobox, and more.
85 | # GDX: https://docs.projectbluefin.io/gdx/
86 | # GPU Developer Experience (GDX) creates a base as an AI and Graphics platform.
87 | # Installs Nvidia drivers, CUDA, and other tools.
88 | #
89 | # The script constructs the version string using the tag and the current date.
90 | # If the git working directory is clean, it also includes the short SHA of the current HEAD.
91 | #
92 | # just build $target_image $tag $dx $gdx $hwe
93 | #
94 | # Example usage:
95 | # just build bluefin lts 1 0 1
96 | #
97 | # This will build an image 'bluefin:lts' with DX and HWE enabled.
98 | #
99 |
100 | # Build the image using the specified parameters
101 | build $target_image=image_name $tag=default_tag $dx="0" $gdx="0" $hwe="0":
102 | #!/usr/bin/env bash
103 |
104 | # Get Version
105 | ver="${tag}-${centos_version}.$(date +%Y%m%d)"
106 |
107 | common_image_sha=$(yq -r '.images[] | select(.name == "common") | .digest' image-versions.yaml)
108 | common_image_ref="${common_image}@${common_image_sha}"
109 |
110 | BUILD_ARGS=()
111 | BUILD_ARGS+=("--build-arg" "COMMON_IMAGE_REF=${common_image_ref}")
112 | BUILD_ARGS+=("--build-arg" "COMMON_IMAGE=${common_image}")
113 | BUILD_ARGS+=("--build-arg" "COMMON_IMAGE_SHA=${common_image_sha}")
114 | BUILD_ARGS+=("--build-arg" "MAJOR_VERSION=${centos_version}")
115 | BUILD_ARGS+=("--build-arg" "IMAGE_NAME=${image_name}")
116 | BUILD_ARGS+=("--build-arg" "IMAGE_VENDOR=${repo_organization}")
117 | BUILD_ARGS+=("--build-arg" "ENABLE_DX=${dx}")
118 | BUILD_ARGS+=("--build-arg" "ENABLE_GDX=${gdx}")
119 | BUILD_ARGS+=("--build-arg" "ENABLE_HWE=${hwe}")
120 | # Select akmods source tag for mounted ZFS/NVIDIA images
121 | if [[ "${hwe}" -eq "1" ]]; then
122 | BUILD_ARGS+=("--build-arg" "AKMODS_VERSION=coreos-stable-${coreos_stable_version}")
123 | else
124 | BUILD_ARGS+=("--build-arg" "AKMODS_VERSION=centos-10")
125 | fi
126 | if [[ -z "$(git status -s)" ]]; then
127 | BUILD_ARGS+=("--build-arg" "SHA_HEAD_SHORT=$(git rev-parse --short HEAD)")
128 | fi
129 |
130 | echo "Building image ${target_image}:${tag} with args: ${BUILD_ARGS[*]}"
131 | podman build \
132 | "${BUILD_ARGS[@]}" \
133 | --pull=newer \
134 | --tag "${target_image}:${tag}" \
135 | .
136 |
137 | # Command: rootful_load_image
138 | # Description: This script checks if the current user is root or running under sudo. If not, it attempts to resolve the image tag using podman inspect.
139 | # If the image is found, it loads it into rootful podman. If the image is not found, it pulls it from the repository.
140 | #
141 | # Parameters:
142 | # $target_image - The name of the target image to be loaded or pulled.
143 | # $tag - The tag of the target image to be loaded or pulled. Default is 'default_tag'.
144 | #
145 | # Example usage:
146 | #   rootful_load_image my_image latest
147 | #
148 | # Steps:
149 | # 1. Check if the script is already running as root or under sudo.
150 | #   2. Check if the target image is in the non-root podman container storage.
151 | #   3. If the image is found, load it into rootful podman using podman image scp.
152 | #   4. If the image is not found, pull it from the remote repository into rootful podman.
153 |
154 | rootful_load_image $target_image=image_name $tag=default_tag:
155 | #!/usr/bin/env bash
156 | set -eoux pipefail
157 |
158 | # Check if already running as root or under sudo
159 | if [[ -n "${SUDO_USER:-}" || "${UID}" -eq "0" ]]; then
160 | echo "Already root or running under sudo, no need to load image from user podman."
161 | exit 0
162 | fi
163 |
164 | # Try to resolve the image tag using podman inspect
165 | set +e
166 | resolved_tag=$(podman inspect -t image "${target_image}:${tag}" | jq -r '.[].RepoTags.[0]')
167 | return_code=$?
168 | set -e
169 |
170 | if [[ $return_code -eq 0 ]]; then
171 | # If the image is found, load it into rootful podman
172 | ID=$(just sudoif podman images --filter reference="${target_image}:${tag}" --format "'{{ '{{.ID}}' }}'")
173 | if [[ -z "$ID" ]]; then
174 | # If the image ID is not found, copy the image from user podman to root podman
175 | COPYTMP=$(mktemp -p "${PWD}" -d -t _build_podman_scp.XXXXXXXXXX)
176 | just sudoif TMPDIR=${COPYTMP} podman image scp ${UID}@localhost::"${target_image}:${tag}" root@localhost::"${target_image}:${tag}"
177 | rm -rf "${COPYTMP}"
178 | fi
179 | else
180 | # If the image is not found, pull it from the repository
181 | just sudoif podman pull "${target_image}:${tag}"
182 | fi
183 |
184 | # Build a bootc bootable image using Bootc Image Builder (BIB)
185 | # Converts a container image to a bootable image
186 | # Parameters:
187 | # target_image: The name of the image to build (ex. localhost/fedora)
188 | # tag: The tag of the image to build (ex. latest)
189 | # type: The type of image to build (ex. qcow2, raw, iso)
190 | # config: The configuration file to use for the build (default: image.toml)
191 |
192 | # Example: just _build-bib localhost/fedora latest qcow2 image.toml
193 | _build-bib $target_image $tag $type $config:
194 | #!/usr/bin/env bash
195 | set -euo pipefail
196 |
197 | mkdir -p "output"
198 |
199 | echo "Cleaning up previous build"
200 | if [[ $type == iso ]]; then
201 | sudo rm -rf "output/bootiso" || true
202 | else
203 | sudo rm -rf "output/${type}" || true
204 | fi
205 |
206 | args="--type ${type} "
207 | args+="--use-librepo=True"
208 |
209 | if [[ $target_image == localhost/* ]]; then
210 | args+=" --local"
211 | fi
212 |
213 | just sudoif podman run \
214 | --rm \
215 | -it \
216 | --privileged \
217 | --pull=newer \
218 | --net=host \
219 | --security-opt label=type:unconfined_t \
220 | -v $(pwd)/${config}:/config.toml:ro \
221 | -v $(pwd)/output:/output \
222 | -v /var/lib/containers/storage:/var/lib/containers/storage \
223 | "${bib_image}" \
224 | ${args} \
225 | "${target_image}:${tag}"
226 |
227 | sudo chown -R $USER:$USER output
228 |
229 | # Builds the image from the Containerfile with Podman, then creates a bootable image
230 | # Parameters:
231 | # target_image: The name of the image to build (ex. localhost/fedora)
232 | # tag: The tag of the image to build (ex. latest)
233 | # type: The type of image to build (ex. qcow2, raw, iso)
234 | #   config: The configuration file to use for the build (default: image.toml)
235 |
236 | # Example: just _rebuild-bib localhost/fedora latest qcow2 image.toml
237 | _rebuild-bib $target_image $tag $type $config: (build target_image tag) && (_build-bib target_image tag type config)
238 |
239 | # Build a QCOW2 virtual machine image
240 | [group('Build Virtual Machine Image')]
241 | build-qcow2 $target_image=("localhost/" + image_name) $tag=default_tag: && (_build-bib target_image tag "qcow2" "image.toml")
242 |
243 | # Build a RAW virtual machine image
244 | [group('Build Virtual Machine Image')]
245 | build-raw $target_image=("localhost/" + image_name) $tag=default_tag: && (_build-bib target_image tag "raw" "image.toml")
246 |
247 | # Build an ISO virtual machine image
248 | [group('Build Virtual Machine Image')]
249 | build-iso $target_image=("localhost/" + image_name) $tag=default_tag: && (_build-bib target_image tag "iso" "iso.toml")
250 |
251 | # Rebuild a QCOW2 virtual machine image
252 | [group('Build Virtual Machine Image')]
253 | rebuild-qcow2 $target_image=("localhost/" + image_name) $tag=default_tag: && (_rebuild-bib target_image tag "qcow2" "image.toml")
254 |
255 | # Rebuild a RAW virtual machine image
256 | [group('Build Virtual Machine Image')]
257 | rebuild-raw $target_image=("localhost/" + image_name) $tag=default_tag: && (_rebuild-bib target_image tag "raw" "image.toml")
258 |
259 | # Rebuild an ISO virtual machine image
260 | [group('Build Virtual Machine Image')]
261 | rebuild-iso $target_image=("localhost/" + image_name) $tag=default_tag: && (_rebuild-bib target_image tag "iso" "iso.toml")
262 |
263 | # Run a virtual machine with the specified image type and configuration
264 | _run-vm $target_image $tag $type $config:
265 | #!/usr/bin/env bash
266 | set -eoux pipefail
267 |
268 | # Determine the image file based on the type
269 | image_file="output/${type}/disk.${type}"
270 | if [[ $type == iso ]]; then
271 | image_file="output/bootiso/install.iso"
272 | fi
273 |
274 | # Build the image if it does not exist
275 | if [[ ! -f "${image_file}" ]]; then
276 | just "build-${type}" "$target_image" "$tag"
277 | fi
278 |
279 | # Determine an available port to use
280 | port=8006
281 | while grep -q :${port} <<< $(ss -tunalp); do
282 | port=$(( port + 1 ))
283 | done
284 | echo "Using Port: ${port}"
285 | echo "Connect to http://localhost:${port}"
286 |
287 | # Set up the arguments for running the VM
288 | run_args=()
289 | run_args+=(--rm --privileged)
290 | run_args+=(--pull=newer)
291 | run_args+=(--publish "127.0.0.1:${port}:8006")
292 | run_args+=(--env "CPU_CORES=4")
293 | run_args+=(--env "RAM_SIZE=4G")
294 | run_args+=(--env "DISK_SIZE=64G")
295 | run_args+=(--env "TPM=Y")
296 | run_args+=(--env "GPU=Y")
297 | run_args+=(--device=/dev/kvm)
298 | run_args+=(--volume "${PWD}/${image_file}":"/boot.${type}")
299 | run_args+=(docker.io/qemux/qemu)
300 |
301 | # Run the VM and open the browser to connect
302 | podman run "${run_args[@]}" &
303 | xdg-open http://localhost:${port}
304 | fg "%podman"
305 |
306 | # Run a virtual machine from a QCOW2 image
307 | [group('Run Virtual Machine')]
308 | run-vm-qcow2 $target_image=("localhost/" + image_name) $tag=default_tag: && (_run-vm target_image tag "qcow2" "image.toml")
309 |
310 | # Run a virtual machine from a RAW image
311 | [group('Run Virtual Machine')]
312 | run-vm-raw $target_image=("localhost/" + image_name) $tag=default_tag: && (_run-vm target_image tag "raw" "image.toml")
313 |
314 | # Run a virtual machine from an ISO
315 | [group('Run Virtual Machine')]
316 | run-vm-iso $target_image=("localhost/" + image_name) $tag=default_tag: && (_run-vm target_image tag "iso" "iso.toml")
317 |
318 | # Run a virtual machine using systemd-vmspawn
319 | [group('Run Virtual Machine')]
320 | spawn-vm rebuild="0" type="qcow2" ram="6G":
321 | #!/usr/bin/env bash
322 |
323 | set -euo pipefail
324 |
325 | [ "{{ rebuild }}" -eq 1 ] && echo "Rebuilding the ISO" && just build-vm {{ rebuild }} {{ type }}
326 |
327 | systemd-vmspawn \
328 | -M "achillobator" \
329 | --console=gui \
330 | --cpus=2 \
331 | --ram=$(echo {{ ram }}| /usr/bin/numfmt --from=iec) \
332 | --network-user-mode \
333 | --vsock=false --pass-ssh-key=false \
334 | -i ./output/**/*.{{ type }}
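335 |     # Usage sketch (illustrative): `just spawn-vm 1 qcow2 8G` rebuilds the
336 |     # qcow2 image first, then boots it with 8G of RAM.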
335 |
336 | ##########################
337 | # 'customize-iso-build' #
338 | ##########################
339 | # Description:
340 | # Enables the manual customization of the osbuild manifest before running the ISO build
341 | #
342 | # Mount the configuration file and output directory
343 | # Clear the entrypoint to run the custom command
344 |
345 | # Run osbuild with the specified parameters
346 | customize-iso-build:
347 | sudo podman run \
348 | --rm -it \
349 | --privileged \
350 | --pull=newer \
351 | --net=host \
352 | --security-opt label=type:unconfined_t \
353 |     -v $(pwd)/iso.toml:/config.toml:ro \
354 | -v $(pwd)/output:/output \
355 | -v /var/lib/containers/storage:/var/lib/containers/storage \
356 | --entrypoint "" \
357 | "${bib_image}" \
358 | osbuild --store /store --output-directory /output /output/manifest-iso.json --export bootiso
359 |
360 | ##########################
361 | # 'patch-iso-branding' #
362 | ##########################
363 | # Description:
364 | #   Creates a custom-branded ISO image, as per https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/anaconda_customization_guide/sect-iso-images#sect-product-img
365 | # Parameters:
366 | # override: A flag to determine if the final ISO should replace the original ISO (default is 0).
367 | # iso_path: The path to the original ISO file.
368 | #   Runs a Podman container with a CentOS Stream image. Installs the 'lorax' package (which provides 'mkksiso') inside the container. Creates a compressed 'product.img'
369 | #   from the branding images in the 'iso_files' directory. Uses 'mkksiso' to add the 'product.img' to the original ISO and creates 'final.iso'
370 | #   in the output directory. If 'override' is not 0, replaces the original ISO with the newly created 'final.iso'.
371 |
372 | # Applies custom branding to an ISO image.
373 | patch-iso-branding override="0" iso_path="output/bootiso/install.iso":
374 | #!/usr/bin/env bash
375 | podman run \
376 | --rm \
377 | -it \
378 | --pull=newer \
379 | --privileged \
380 | -v ./output:/output \
381 | -v ./iso_files:/iso_files \
382 | quay.io/centos/centos:stream10 \
383 | bash -c 'dnf install -y lorax && \
384 | mkdir /images && cd /iso_files/product && find . | cpio -c -o | gzip -9cv > /images/product.img && cd / \
385 | && mkksiso --add images --volid bluefin-boot /{{ iso_path }} /output/final.iso'
386 |
387 | if [ {{ override }} -ne 0 ] ; then
388 | mv output/final.iso {{ iso_path }}
389 | fi
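390 |     # Usage sketch (illustrative): `just patch-iso-branding 1` brands the default
391 |     # ISO and, because override is non-zero, replaces it in place.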
390 |
391 | # Runs shell check on all Bash scripts
392 | lint:
393 | /usr/bin/find . -iname "*.sh" -type f -exec shellcheck "{}" ';'
394 |
395 | # Runs shfmt on all Bash scripts
396 | format:
397 | /usr/bin/find . -iname "*.sh" -type f -exec shfmt --write "{}" ';'
398 |
--------------------------------------------------------------------------------
/.github/workflows/reusable-build-image.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Build Image
3 | on:
4 | workflow_call:
5 | inputs:
6 | image-name:
7 | description: "The name of the image to build"
8 | required: true
9 | type: string
10 | image-desc:
11 | description: "The description of the image to build"
12 | required: false
13 | type: string
14 | default: "Bluefin LTS, built on CentOS Stream with bootc"
15 | flavor:
16 | description: "The flavor of the image to build"
17 | required: false
18 | type: string
19 | default: ""
20 | platforms:
21 | description: "The platforms to build the image for"
22 | required: false
23 | type: string
24 | default: "amd64,arm64"
25 | centos-version:
26 | description: "The version of CentOS to build the image on"
27 | required: false
28 | type: string
29 | default: "stream10"
30 | rechunk:
31 | description: "Rechunk the build"
32 | required: false
33 | type: boolean
34 | default: true
35 | sbom:
36 | description: "Generate/publish SBOMs for the artifacts"
37 | required: false
38 | type: boolean
39 | default: true
40 | cleanup_runner:
41 | description: "Use the ublue cleanup action to clean up the runner before running the build"
42 | required: false
43 | type: boolean
44 | default: true
45 | hwe:
46 | description: "Enable experimental things during builds, such as gnome backport. Will build an extra `-hwe` tag."
47 | required: false
48 | type: boolean
49 | default: false
50 | publish:
51 | description: "Publish this image"
52 | required: false
53 | type: boolean
54 | # default: ${{ github.event_name != 'pull_request' }}
55 | default: true
56 | secrets:
57 | SIGNING_SECRET:
58 | description: "The private key used to sign the image"
59 | required: false
60 |
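61 | # Illustrative caller sketch (names and values are examples only):
62 | #
63 | #   jobs:
64 | #     build:
65 | #       uses: ./.github/workflows/reusable-build-image.yml
66 | #       with:
67 | #         image-name: bluefin
68 | #         flavor: dx
69 | #       secrets:
70 | #         SIGNING_SECRET: ${{ secrets.SIGNING_SECRET }}
71 |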
61 | env:
62 | IMAGE_NAME: ${{ inputs.image-name }}
63 | IMAGE_DESC: ${{ inputs.image-desc }}
64 | IMAGE_REGISTRY: "ghcr.io/${{ github.repository_owner }}"
65 | DEFAULT_TAG: "lts"
66 | CENTOS_VERSION: ${{ inputs.centos-version }}
67 | PLATFORMS: ${{ inputs.platforms }}
68 |
69 | jobs:
70 | generate_matrix:
71 | runs-on: ubuntu-latest
72 | outputs:
73 | matrix: ${{ steps.set-matrix.outputs.matrix }}
74 | steps:
75 | - name: Set matrix
76 | id: set-matrix
77 | env:
78 | PLATFORMS: "${{ inputs.platforms }}"
79 | ENABLE_HWE: "${{ inputs.hwe }}"
80 | run: |
81 |           # Turn the comma-separated string into a list
82 | platforms=()
83 | IFS=',' read -r -a platforms <<< "${PLATFORMS}"
84 |
85 | MATRIX="{\"include\":[]}"
86 | for platform in "${platforms[@]}"; do
87 | MATRIX="$(echo "${MATRIX}" | jq ".include += [{\"platform\": \"${platform}\"}]")"
88 | done
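89 |           # e.g. "amd64,arm64" -> {"include":[{"platform":"amd64"},{"platform":"arm64"}]}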
89 | echo "matrix=$(echo "${MATRIX}" | jq -c '.')" >> $GITHUB_OUTPUT
90 |
91 | build_push:
92 | name: Build and push image
93 | runs-on: ${{ matrix.platform == 'amd64' && 'ubuntu-24.04' || 'ubuntu-24.04-arm' }}
94 | timeout-minutes: 60
95 | needs: generate_matrix
96 | strategy:
97 | fail-fast: false
98 | matrix: ${{fromJson(needs.generate_matrix.outputs.matrix)}}
99 | permissions:
100 | contents: read
101 | packages: write
102 | id-token: write
103 | outputs:
104 | image_tag: ${{ steps.build-image.outputs.image_tag }}
105 |
106 | steps:
107 | - name: Install dependencies
108 | if: matrix.platform == 'arm64'
109 | run: |
110 | sudo apt update -y
111 | sudo apt install -y \
112 | podman
113 |
114 | - name: Checkout
115 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6
116 |
117 | - name: Maximize build space
118 | if: ${{ matrix.platform != 'arm64' && inputs.cleanup_runner }}
119 | uses: ublue-os/container-storage-action@main
120 | with:
121 | target-dir: /var/lib/containers
122 |
123 | - name: Setup Just
124 | uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3
125 |
126 | - name: Check Just Syntax
127 | shell: bash
128 | run: just check
129 |
130 | - name: Build Image
131 | id: build-image
132 | shell: bash
133 | env:
134 | FLAVOR: ${{ inputs.flavor }}
135 | HWE: ${{ inputs.hwe }}
136 | run: |
137 | set -x
138 | just=$(which just)
139 |
140 | ENABLE_HWE=0
141 | ENABLE_GDX=0
142 | ENABLE_DX=0
143 | if [[ "${HWE}" == "true" ]] ; then
144 | export DEFAULT_TAG="${DEFAULT_TAG}-hwe"
145 | echo "DEFAULT_TAG=${DEFAULT_TAG}" >> "${GITHUB_ENV}"
146 | ENABLE_HWE=1
147 | fi
148 | if [[ "${FLAVOR}" =~ "gdx" ]] ; then
149 | ENABLE_GDX=1
150 | fi
151 | if [[ "${FLAVOR}" =~ "dx" ]] ; then
152 | ENABLE_DX=1
153 | fi
154 |
155 | sudo $just build "${IMAGE_NAME}" "${DEFAULT_TAG}" "${ENABLE_DX}" "${ENABLE_GDX}" "${ENABLE_HWE}"
156 | echo "image_tag=${DEFAULT_TAG}" >> "${GITHUB_OUTPUT}"
157 |
158 | - name: Setup Syft
159 | id: setup-syft
160 | if: ${{ inputs.sbom && inputs.publish }}
161 | uses: anchore/sbom-action/download-syft@a930d0ac434e3182448fe678398ba5713717112a # v0
162 |
163 | - name: Generate SBOM
164 | id: generate-sbom
165 | if: ${{ inputs.sbom && inputs.publish }}
166 | env:
167 | IMAGE: ${{ env.IMAGE_NAME }}
168 | DEFAULT_TAG: ${{ env.DEFAULT_TAG }}
169 | SYFT_CMD: ${{ steps.setup-syft.outputs.cmd }}
170 | run: |
171 | sudo systemctl start podman.socket
172 | OUTPUT_PATH="$(mktemp -d)/sbom.json"
173 | export SYFT_PARALLELISM=$(($(nproc)*2))
174 | sudo "$SYFT_CMD" "${IMAGE}:${DEFAULT_TAG}" -o "spdx-json=${OUTPUT_PATH}"
175 | echo "OUTPUT_PATH=${OUTPUT_PATH}" >> "${GITHUB_OUTPUT}"
176 |
177 | - name: Run Rechunker
178 | if: ${{ inputs.rechunk && inputs.publish }}
179 | id: rechunk
180 | uses: hhd-dev/rechunk@5fbe1d3a639615d2548d83bc888360de6267b1a2 # v1.2.4
181 | with:
182 | rechunk: ghcr.io/hhd-dev/rechunk:v1.2.1
183 | ref: localhost/${{ env.IMAGE_NAME }}:${{ env.DEFAULT_TAG }}
184 | prev-ref: ${{ env.IMAGE_REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.DEFAULT_TAG }}
185 | skip_compression: true
186 | version: ${{ env.CENTOS_VERSION }}
187 |
188 | - name: Load Image
189 | if: ${{ inputs.publish }}
190 | id: load
191 | env:
192 | RECHUNK_RAN: ${{ inputs.rechunk }}
193 | RECHUNK_REF: ${{ steps.rechunk.outputs.ref }}
194 | RECHUNK_LOCATION: ${{ steps.rechunk.outputs.location }}
195 | run: |
196 | if [ "${RECHUNK_RAN}" == "true" ] ; then
197 | IMAGE="$(podman pull "${RECHUNK_REF}")"
198 | sudo rm -rf "${RECHUNK_LOCATION}"
199 | else
200 | IMAGE="localhost/${IMAGE_NAME}:${DEFAULT_TAG}"
201 | fi
202 | podman image tag "${IMAGE}" "${IMAGE_REGISTRY}/${IMAGE_NAME}:${DEFAULT_TAG}"
203 |
204 | IMAGE="${IMAGE_REGISTRY}/${IMAGE_NAME}:${DEFAULT_TAG}"
205 | IMAGE_DIGEST="$(podman image inspect --format '{{.Digest}}' "${IMAGE}")"
206 | echo "image=${IMAGE}" >> "${GITHUB_OUTPUT}"
207 | echo "digest=${IMAGE_DIGEST}" >> "${GITHUB_OUTPUT}"
208 |
209 | - name: Login to GitHub Container Registry
210 | if: ${{ inputs.publish }}
211 | env:
212 | REGISTRY: ghcr.io
213 | run: |
214 | echo "${{ secrets.GITHUB_TOKEN }}" | podman login -u "${{ github.actor }}" --password-stdin "${REGISTRY}"
215 | echo "${{ secrets.GITHUB_TOKEN }}" | docker login -u "${{ github.actor }}" --password-stdin "${REGISTRY}"
216 |
217 | - name: Push to GHCR
218 | if: ${{ inputs.publish }}
219 | id: push
220 | env:
221 | IMAGE_REGISTRY: ${{ env.IMAGE_REGISTRY }}
222 | IMAGE_NAME: ${{ env.IMAGE_NAME }}
223 | IMAGE_DIGEST: ${{ steps.load.outputs.digest }}
224 | PLATFORM: ${{ matrix.platform }}
225 | HWE: ${{ inputs.hwe }}
226 | MAX_RETRIES: 3
227 | run: |
228 | set -x
229 | podman tag "${IMAGE_REGISTRY}/${IMAGE_NAME}:${DEFAULT_TAG}" "${IMAGE_REGISTRY}/${IMAGE_NAME}:${DEFAULT_TAG}-${PLATFORM}"
230 | for i in $(seq "${MAX_RETRIES}"); do
231 | podman push --digestfile=/tmp/digestfile "${IMAGE_REGISTRY}/${IMAGE_NAME}:${DEFAULT_TAG}-${PLATFORM}" && break || sleep $((5 * i));
232 | done
233 | REMOTE_IMAGE_DIGEST=$(cat /tmp/digestfile)
234 | echo "remote_image_digest=${REMOTE_IMAGE_DIGEST}" >> $GITHUB_OUTPUT
235 |
236 | - name: Install Cosign
237 | uses: sigstore/cosign-installer@7e8b541eb2e61bf99390e1afd4be13a184e9ebc5 # v3.10.1
238 | if: ${{ inputs.publish }}
239 |
240 | - name: Sign Image
241 | if: ${{ inputs.publish }}
242 | run: |
243 | IMAGE_FULL="${IMAGE_REGISTRY}/${IMAGE_NAME}"
244 | cosign sign -y --key env://COSIGN_PRIVATE_KEY ${IMAGE_FULL}@${{ steps.push.outputs.remote_image_digest }}
245 | env:
246 | TAGS: ${{ steps.push.outputs.digest }}
247 | COSIGN_EXPERIMENTAL: false
248 | COSIGN_PRIVATE_KEY: ${{ secrets.SIGNING_SECRET }}
249 |
250 | - name: Add SBOM Attestation
251 | if: ${{ inputs.sbom }}
252 | env:
253 | IMAGE: ${{ env.IMAGE_REGISTRY }}/${{ env.IMAGE_NAME }}
254 | DIGEST: ${{ steps.push.outputs.remote_image_digest }}
255 | COSIGN_PRIVATE_KEY: ${{ secrets.SIGNING_SECRET }}
256 | SBOM_OUTPUT: ${{ steps.generate-sbom.outputs.OUTPUT_PATH }}
257 | run: |
258 | cd "$(dirname "$SBOM_OUTPUT")"
259 |
260 | # Compress the SBOM and create the predicate
261 | TYPE="urn:ublue-os:attestation:spdx+json+zstd:v1"
262 | zstd -19 "./sbom.json" -o "./sbom.json.zst"
263 | BASE64_SBOM_FILE="payload.b64"
264 | base64 "./sbom.json.zst" | tr -d '\n' > "${BASE64_SBOM_FILE}"
265 | PREDICATE_FILE="payload.json"
266 | jq -n \
267 | --arg compression "zstd" \
268 | --arg mediaType "application/spdx+json" \
269 | --rawfile payload "${BASE64_SBOM_FILE}" \
270 | '{compression: $compression, mediaType: $mediaType, payload: $payload}' \
271 | > "$PREDICATE_FILE"
272 | rm -f "${BASE64_SBOM_FILE}"
273 |
274 | # Create the attestation
275 | cosign attest -y \
276 | --predicate "${PREDICATE_FILE}" \
277 | --type $TYPE \
278 | --key env://COSIGN_PRIVATE_KEY \
279 | "${IMAGE}@${DIGEST}"
280 |
281 | - name: Create Job Outputs
282 | if: ${{ inputs.publish }}
283 | env:
284 | IMAGE_NAME: ${{ env.IMAGE_NAME }}
285 | PLATFORM: ${{ matrix.platform }}
286 | HWE: ${{ inputs.hwe }}
287 | DIGEST: ${{ steps.push.outputs.remote_image_digest }}
288 | run: |
289 | mkdir -p /tmp/outputs/digests
290 | echo "${DIGEST}" > "/tmp/outputs/digests/${IMAGE_NAME}-${HWE}-${PLATFORM}.txt"
291 |
292 | - name: Upload Output Artifacts
293 | if: ${{ inputs.publish }}
294 | uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6
295 | with:
296 | name: ${{ env.IMAGE_NAME }}-${{ matrix.platform }}
297 | retention-days: 1
298 | if-no-files-found: error
299 | path: |
300 | /tmp/outputs/digests/*.txt
301 |
302 | manifest:
303 | name: Create ${{ inputs.image-name }}:${{ inputs.centos-version }} Manifest
304 | runs-on: ubuntu-latest
305 | if: always()
306 | needs:
307 | - generate_matrix
308 | - build_push
309 | container:
310 | image: cgr.dev/chainguard/wolfi-base:latest
311 | options: --privileged --security-opt seccomp=unconfined
312 | permissions:
313 | contents: read
314 | packages: write
315 | id-token: write
316 | outputs:
317 | image: ${{ steps.push_manifest.outputs.IMAGE }}
318 | digest: ${{ steps.push_manifest.outputs.DIGEST }}
319 | steps:
320 | - name: Install dependencies
321 | run: |
322 | apk add jq git podman uutils bash conmon crun netavark fuse-overlayfs libstdc++
323 | ln -sf /bin/bash /bin/sh
324 | mkdir -p /etc/containers
325 | echo '{"default":[{"type":"insecureAcceptAnything"}]}' | jq . > /etc/containers/policy.json
326 |
327 | - name: Exit on failure
328 | env:
329 | JOBS: ${{ toJson(needs) }}
330 | run: |
331 | echo "Job status:"
332 | echo $JOBS | jq -r 'to_entries[] | " - \(.key): \(.value.result)"'
333 |
334 | for i in $(echo "${JOBS}" | jq -r 'to_entries[] | .value.result'); do
335 | if [ "$i" != "success" ] && [ "$i" != "skipped" ]; then
336 | echo ""
337 | echo "Status check not okay!"
338 | exit 1
339 | fi
340 | done
341 |
342 | - name: Get current date
343 | id: date
344 | run: |
345 |           # Should generate a timestamp like the one defined in the ArtifactHub documentation,
346 |           # e.g. 2022-02-08T15:38:15Z
347 | # https://artifacthub.io/docs/topics/repositories/container-images/
348 | # https://linux.die.net/man/1/date
349 | echo "date=$(date -u +%Y\-%m\-%d\T%H\:%M\:%S\Z)" >> $GITHUB_OUTPUT
350 |
351 | - name: Extract numbers from input
352 | id: extract-numbers
353 | env:
354 | CENTOS_VERSION: ${{ env.CENTOS_VERSION }}
355 | run: |
356 | numbers_only=$(echo "${CENTOS_VERSION}" | tr -cd '0-9')
357 | echo "CENTOS_VERSION_NUMBER=${numbers_only}" >> "${GITHUB_ENV}"
358 |
359 | - name: Image Metadata
360 | uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5
361 | id: metadata
362 | with:
363 | tags: |
364 | type=raw,value=${{ env.DEFAULT_TAG }}
365 | type=raw,value=${{ env.DEFAULT_TAG }}.{{date 'YYYYMMDD'}}
366 | type=raw,value=${{ env.CENTOS_VERSION }}
367 | type=raw,value=${{ env.CENTOS_VERSION }}.{{date 'YYYYMMDD'}}
368 | type=raw,value=${{ env.CENTOS_VERSION_NUMBER }}
369 | type=raw,value=${{ env.CENTOS_VERSION_NUMBER }}.{{date 'YYYYMMDD'}}
370 | type=ref,event=pr
371 | flavor: |
372 | ${{ inputs.hwe && 'suffix=-hwe' || '' }}
373 | labels: |
374 | bluefin.commit=${{ github.sha }}
375 | io.artifacthub.package.readme-url=https://raw.githubusercontent.com/${{ github.repository_owner }}/${{ env.IMAGE_NAME }}/refs/heads/main/README.md
376 | org.opencontainers.image.created=${{ steps.date.outputs.date }}
377 | org.opencontainers.image.description=${{ env.IMAGE_DESC }}
378 | org.opencontainers.image.documentation=https://docs.projectbluefin.io
379 | org.opencontainers.image.source=https://github.com/${{ github.repository_owner }}/${{ env.IMAGE_NAME }}/blob/main/Containerfile
380 | org.opencontainers.image.title=${{ env.IMAGE_NAME }}
381 | org.opencontainers.image.url=https://projectbluefin.io
382 | org.opencontainers.image.vendor=${{ github.repository_owner }}
383 | org.opencontainers.image.version=${{ env.CENTOS_VERSION }}
384 | io.artifacthub.package.deprecated=false
385 | io.artifacthub.package.keywords=bootc,centos,bluefin,ublue,universal-blue
386 | io.artifacthub.package.license=Apache-2.0
387 | io.artifacthub.package.logo-url=https://avatars.githubusercontent.com/u/120078124?s=200&v=4
388 | io.artifacthub.package.maintainers=[{\"name\":\"tulilirockz\",\"email\":\"tulilirockz@outlook.com\"},{\"name\":\"castrojo\",\"email\":\"jorge.castro@gmail.com\"}]
389 | io.artifacthub.package.prerelease=true
390 | containers.bootc=1
391 |
392 | - name: Fetch Build Outputs
393 | if: ${{ inputs.publish }}
394 | uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7
395 | with:
396 | pattern: ${{ env.IMAGE_NAME }}-*
397 | merge-multiple: true
398 | path: /tmp/artifacts
399 |
400 | - name: Load Outputs
401 | if: ${{ inputs.publish }}
402 | id: load-outputs
403 | run: |
404 | DIGESTS_JSON="$(jq -n '{}')"
405 | for digest_file in /tmp/artifacts/*.txt; do
406 | # Extract the platform from the file name
407 | PLATFORM="$(basename "${digest_file}" | rev | cut -d'-' -f1 | rev | cut -d'.' -f1)"
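408 |             # e.g. /tmp/artifacts/bluefin-false-arm64.txt -> PLATFORM="arm64"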
408 | DIGEST="$(cat "${digest_file}")"
409 | # Add the platform and digest to the JSON object
410 | DIGESTS_JSON="$(echo "${DIGESTS_JSON}" | jq --arg key "${PLATFORM}" --arg value "${DIGEST}" '. + {($key): $value}')"
411 | done
412 | echo "DIGESTS_JSON=$(echo "$DIGESTS_JSON" | jq -c '.')" >> "${GITHUB_OUTPUT}"
413 |
414 | - name: Create Manifest
415 | if: ${{ inputs.publish }}
416 | id: create-manifest
417 | env:
418 | IMAGE_REGISTRY: ${{ env.IMAGE_REGISTRY }}
419 | IMAGE_NAME: ${{ env.IMAGE_NAME }}
420 | run: |
421 | podman manifest create ${IMAGE_REGISTRY}/${IMAGE_NAME}
422 | echo "MANIFEST=${IMAGE_REGISTRY}/${IMAGE_NAME}" >> $GITHUB_OUTPUT
423 |
424 | - name: Populate Manifest
425 | if: ${{ inputs.publish }}
426 | env:
427 | MANIFEST: ${{ steps.create-manifest.outputs.MANIFEST }}
428 | DIGESTS_JSON: ${{ steps.load-outputs.outputs.DIGESTS_JSON }}
429 | LABELS: ${{ steps.metadata.outputs.labels }}
430 | PLATFORMS: "${{ inputs.platforms }}"
431 | run: |
432 | DIGESTS=$(echo "$DIGESTS_JSON" | jq -c '.')
433 |           # Turn the comma-separated string into a list
434 | platforms=()
435 | IFS=',' read -r -a platforms <<< "${PLATFORMS}"
436 |           for platform in "${platforms[@]}"; do
437 | digest="$(echo $DIGESTS | jq -r ".$platform")"
438 | echo "Adding ${IMAGE_REGISTRY}/${IMAGE_NAME}@${digest} for ${platform}"
439 | podman manifest add "${MANIFEST}" "${IMAGE_REGISTRY}/${IMAGE_NAME}@${digest}" --arch "${platform}"
440 | done
441 |
442 | # Apply the labels to the manifest (separated by newlines)
443 | while IFS= read -r label; do
444 | echo "Applying label ${label} to manifest"
445 | podman manifest annotate --index --annotation "$label" "${MANIFEST}"
446 | done <<< "${LABELS}"
447 |
448 | - name: Login to GitHub Container Registry
449 | if: ${{ inputs.publish }}
450 | env:
451 | REGISTRY: ghcr.io
452 | run: |
453 | echo "${{ secrets.GITHUB_TOKEN }}" | podman login -u "${{ github.actor }}" --password-stdin "${REGISTRY}"
454 |
455 | - name: Push Manifest
456 | if: github.event_name != 'pull_request'
457 | id: push_manifest
458 | env:
459 | MANIFEST: ${{ steps.create-manifest.outputs.MANIFEST }}
460 | TAGS: ${{ steps.metadata.outputs.tags }}
461 | IMAGE_REGISTRY: ${{ env.IMAGE_REGISTRY }}
462 | IMAGE_NAME: ${{ env.IMAGE_NAME }}
463 | run: |
464 | while IFS= read -r tag; do
465 | podman manifest push --all=false --digestfile=/tmp/digestfile $MANIFEST $IMAGE_REGISTRY/$IMAGE_NAME:$tag
466 | done <<< "$TAGS"
467 |
468 | DIGEST=$(cat /tmp/digestfile)
469 | echo "DIGEST=$DIGEST" >> $GITHUB_OUTPUT
470 | echo "IMAGE=$IMAGE_REGISTRY/$IMAGE_NAME" >> $GITHUB_OUTPUT
471 |
472 |   # Cosign throws errors when run inside the Wolfi container for one reason or another
473 |   # so we move this to a separate job that runs on Ubuntu
474 | sign:
475 | needs: manifest
476 | if: github.event_name != 'pull_request'
477 | runs-on: ubuntu-latest
478 | permissions:
479 | contents: read
480 | packages: write
481 | id-token: write
482 | steps:
483 | - name: Login to GitHub Container Registry
484 | if: ${{ inputs.publish }}
485 | env:
486 | REGISTRY: ghcr.io
487 | run: |
488 | echo "${{ secrets.GITHUB_TOKEN }}" | podman login -u "${{ github.actor }}" --password-stdin "${REGISTRY}"
489 | cat ${XDG_RUNTIME_DIR}/containers/auth.json > ~/.docker/config.json
490 |
491 | - name: Install Cosign
492 | uses: sigstore/cosign-installer@7e8b541eb2e61bf99390e1afd4be13a184e9ebc5 # v3.10.1
493 |
494 | - name: Sign Manifest
495 | env:
496 | DIGEST: ${{ needs.manifest.outputs.digest }}
497 | IMAGE: ${{ needs.manifest.outputs.image }}
498 | COSIGN_EXPERIMENTAL: false
499 | COSIGN_PRIVATE_KEY: ${{ secrets.SIGNING_SECRET }}
500 | run: |
501 | cosign sign -y --key env://COSIGN_PRIVATE_KEY "${IMAGE}@${DIGEST}"
502 |
--------------------------------------------------------------------------------
/.github/changelogs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | Changelog generation script for Bluefin LTS container images.
4 |
5 | This script generates changelogs by comparing container image manifests
6 | and extracting package differences between versions.
7 | """
8 |
9 | import argparse
10 | import json
11 | import logging
12 | import os
13 | import re
14 | import subprocess
15 | import sys
16 | import time
17 | import yaml
18 | from collections import defaultdict
19 | from dataclasses import dataclass
20 | from pathlib import Path
21 | from typing import Any, Dict, List, Optional, Set, Tuple
22 |
23 | # Configure logging
24 | logging.basicConfig(
25 | level=logging.INFO,
26 | format='%(asctime)s - %(levelname)s - %(message)s',
27 | datefmt='%Y-%m-%d %H:%M:%S'
28 | )
29 | logger = logging.getLogger(__name__)
30 |
31 |
32 | @dataclass
33 | class Config:
34 | """Configuration loaded from YAML file"""
35 | os_name: str
36 | targets: List[str]
37 | registry_url: str
38 | package_blacklist: List[str]
39 | image_variants: List[str]
40 | patterns: Dict[str, str]
41 | templates: Dict[str, str]
42 | sections: Dict[str, str]
43 |     defaults: Dict[str, Any]
44 |
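45 | # Illustrative shape of the YAML consumed by load_config(). Keys mirror the
46 | # Config dataclass; the values below are examples only -- the real ones live
47 | # in .github/changelog_config.yaml:
48 | #
49 | #   os_name: "Bluefin LTS"
50 | #   targets: ["lts"]
51 | #   registry_url: "ghcr.io/ublue-os"
52 | #   package_blacklist: []
53 | #   image_variants: ["", "-dx", "-gdx"]
54 | #   patterns: {centos: "...", start_pattern: "..."}
55 | #   templates: {changelog_title: "...", changelog_format: "...", pattern_add: "..."}
56 | #   sections: {all: "...", base: "...", dx: "..."}
57 | #   defaults: {retries: 3, retry_wait: 5, timeout_seconds: 60, enable_commits: false}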
45 |
46 | def load_config(config_path: str = ".github/changelog_config.yaml") -> Config:
47 | """Load configuration from YAML file"""
48 | try:
49 | with open(config_path, 'r') as f:
50 | data = yaml.safe_load(f)
51 | return Config(**data)
52 | except FileNotFoundError:
53 |         logger.error(f"Configuration file not found: {config_path}")
54 |         sys.exit(1)
55 |     except yaml.YAMLError as e:
56 |         logger.error(f"Error parsing YAML configuration: {e}")
57 |         sys.exit(1)
58 |     except Exception as e:
59 |         logger.error(f"Error loading configuration: {e}")
60 | sys.exit(1)
61 |
62 |
63 | class ChangelogError(Exception):
64 | """Custom exception for changelog generation errors."""
65 | pass
66 |
67 |
68 | class ManifestFetchError(ChangelogError):
69 | """Exception raised when manifest fetching fails."""
70 | pass
71 |
72 |
73 | class TagDiscoveryError(ChangelogError):
74 | """Exception raised when tag discovery fails."""
75 | pass
76 |
77 | # Regex patterns, templates, and section titles are loaded from the YAML config;
78 | # the patterns are compiled once in ChangelogGenerator.__init__ for reuse.
80 |
81 | class GitHubReleaseError(ChangelogError):
82 | """Error related to GitHub release operations."""
83 | pass
84 |
85 |
86 | def check_github_release_exists(tag: str) -> bool:
87 | """Check if a GitHub release already exists for the given tag."""
88 | try:
89 | result = subprocess.run(
90 | ["gh", "release", "view", tag],
91 | capture_output=True,
92 | text=True,
93 | timeout=30
94 | )
95 | return result.returncode == 0
96 | except (subprocess.TimeoutExpired, FileNotFoundError):
97 | logger.warning("GitHub CLI not available or timeout - skipping release check")
98 | return False
99 |
100 |
101 | def get_last_published_release_tag() -> Optional[str]:
102 | """Get the tag of the last published GitHub release."""
103 | try:
104 | result = subprocess.run(
105 | ["gh", "release", "list", "--limit", "1", "--json", "tagName", "--jq", ".[0].tagName"],
106 | capture_output=True,
107 | text=True,
108 | timeout=30
109 | )
110 | if result.returncode == 0 and result.stdout.strip():
111 | return result.stdout.strip()
112 | return None
113 | except (subprocess.TimeoutExpired, FileNotFoundError):
114 | logger.warning("GitHub CLI not available or timeout - cannot get last published release")
115 | return None
116 |
117 |
118 | def write_github_output(output_file: str, variables: Dict[str, str]) -> None:
119 | """Write variables to GitHub Actions output file."""
120 | try:
121 | with open(output_file, 'a', encoding='utf-8') as f:
122 | for key, value in variables.items():
123 | f.write(f"{key}={value}\n")
124 | logger.info(f"Written {len(variables)} variables to GitHub output: {output_file}")
125 | except Exception as e:
126 | logger.error(f"Failed to write GitHub output: {e}")
127 |
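128 | # Usage sketch (key names are illustrative):
129 | #   write_github_output(os.environ["GITHUB_OUTPUT"], {"TITLE": title, "TAG": curr})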
128 |
129 | class ChangelogGenerator:
130 | """Main class for generating changelogs from container manifests."""
131 |
132 | def __init__(self, config: Optional[Config] = None):
133 | """Initialize the changelog generator with configuration."""
134 | self.config = config or load_config()
135 | self._manifest_cache: Dict[str, Dict[str, Any]] = {}
136 |
137 | # Compile regex patterns from config
138 | self.centos_pattern = re.compile(self.config.patterns["centos"])
139 | self.start_patterns = {
140 | target: re.compile(self.config.patterns["start_pattern"].format(target=target))
141 | for target in self.config.targets
142 | }
143 |
144 | def get_images(self, target: str) -> List[Tuple[str, str]]:
145 | """Generate image names and experiences for a given target."""
146 | images = []
147 | base_name = "bluefin" # Base image name is always "bluefin"
148 |
149 | for experience in self.config.image_variants:
150 | img = base_name
151 |
152 | if "-hwe" in target:
153 | images.append((img, target))
154 | break
155 |
156 | # Add experience suffix if it's not empty
157 | if experience: # experience is like "", "-dx", "-gdx"
158 | img += experience
159 |
160 | images.append((img, target)) # Use target instead of experience
161 | return images
162 |
163 | def _run_skopeo_command(self, image_url: str) -> Optional[bytes]:
164 | """Run skopeo inspect command with retries."""
165 | for attempt in range(self.config.defaults["retries"]):
166 | try:
167 | result = subprocess.run(
168 | ["skopeo", "inspect", image_url],
169 | check=True,
170 | stdout=subprocess.PIPE,
171 | stderr=subprocess.PIPE,
172 | timeout=self.config.defaults["timeout_seconds"]
173 | )
174 | return result.stdout
175 | except subprocess.CalledProcessError as e:
176 | logger.warning(f"Failed to get {image_url} (exit code {e.returncode}), "
177 | f"retrying in {self.config.defaults['retry_wait']} seconds "
178 | f"({attempt + 1}/{self.config.defaults['retries']})")
179 | if e.stderr:
180 | logger.error(f"Error: {e.stderr.decode().strip()}")
181 | except subprocess.TimeoutExpired:
182 | logger.warning(f"Timeout getting {image_url}, "
183 | f"retrying in {self.config.defaults['retry_wait']} seconds "
184 | f"({attempt + 1}/{self.config.defaults['retries']})")
185 | except Exception as e:
186 | logger.warning(f"Unexpected error getting {image_url}: {e}, "
187 | f"retrying in {self.config.defaults['retry_wait']} seconds "
188 | f"({attempt + 1}/{self.config.defaults['retries']})")
189 |
190 | if attempt < self.config.defaults["retries"] - 1:
191 | time.sleep(self.config.defaults["retry_wait"])
192 |
193 | return None
194 |
195 | def get_manifests(self, target: str) -> Dict[str, Any]:
196 | """Fetch container manifests for all image variants."""
197 | # Check cache first
198 | if target in self._manifest_cache:
199 | logger.info(f"Using cached manifest for {target}")
200 | return self._manifest_cache[target]
201 |
202 | manifests = {}
203 | images = self.get_images(target)
204 |
205 | logger.info(f"Fetching manifests for {len(images)} images with target '{target}'")
206 | for i, (img, _) in enumerate(images, 1):
207 | logger.info(f"Getting {img}:{target} manifest ({i}/{len(images)})")
208 | image_url = f"docker://{self.config.registry_url}/{img}:{target}"
209 |
210 | output = self._run_skopeo_command(image_url)
211 | if output is None:
212 | logger.error(f"Failed to get {img}:{target} after {self.config.defaults['retries']} attempts")
213 | continue
214 |
215 | try:
216 | manifests[img] = json.loads(output)
217 | except json.JSONDecodeError as e:
218 | logger.error(f"Failed to parse JSON for {img}:{target}: {e}")
219 | continue
220 |
221 | if not manifests:
222 | raise ManifestFetchError(f"Failed to fetch any manifests for target '{target}'")
223 |
224 | # Cache the result
225 | self._manifest_cache[target] = manifests
226 | return manifests
227 |
228 |
229 | def get_tags(self, target: str, manifests: Dict[str, Any], previous_tag: Optional[str] = None) -> Tuple[str, str]:
230 | """Extract previous and current tags from manifests."""
231 | if not manifests:
232 | raise TagDiscoveryError("No manifests provided for tag discovery")
233 |
234 | # Find the current tag from manifests
235 | tags = set()
236 | first_manifest = next(iter(manifests.values()))
237 |
238 | for tag in first_manifest["RepoTags"]:
239 | # Tags ending with .0 should not exist
240 | if tag.endswith(".0"):
241 | continue
242 | if re.match(self.start_patterns[target], tag):
243 | tags.add(tag)
244 |
245 | # Filter tags that exist in all manifests
246 | for manifest in manifests.values():
247 | tags = {tag for tag in tags if tag in manifest["RepoTags"]}
248 |
249 | sorted_tags = sorted(tags)
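250 |         # Lexicographic sorting suffices here: tags for a target share a prefix
251 |         # and end in a fixed-width date (e.g. "lts.20250101" < "lts.20250108").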
250 | if len(sorted_tags) < 1:
251 | raise TagDiscoveryError(
252 | f"No tags found for target '{target}'. "
253 | f"Available tags: {sorted_tags}"
254 | )
255 |
256 | current_tag = sorted_tags[-1] # Latest tag
257 |
258 | # Use provided previous_tag or fall back to automatic detection
259 | if previous_tag:
260 | logger.info(f"Using provided previous tag: {previous_tag}")
261 | prev_tag = previous_tag
262 | else:
263 | if len(sorted_tags) < 2:
264 | raise TagDiscoveryError(
265 | f"Insufficient tags found for target '{target}' and no previous tag provided. "
266 | f"Found {len(sorted_tags)} tags, need at least 2 or explicit previous tag. "
267 | f"Available tags: {sorted_tags}"
268 | )
269 | prev_tag = sorted_tags[-2] # Second latest tag
270 | logger.info(f"Auto-detected previous tag: {prev_tag}")
271 |
272 | logger.info(f"Found {len(sorted_tags)} tags for target '{target}'")
273 | logger.info(f"Comparing {prev_tag} -> {current_tag}")
274 | return prev_tag, current_tag
275 |
276 | def get_packages(self, manifests: Dict[str, Any]) -> Dict[str, Dict[str, str]]:
277 | """Extract package information from manifests."""
278 | packages = {}
279 | for img, manifest in manifests.items():
280 | try:
281 | rechunk_info = manifest["Labels"].get("dev.hhd.rechunk.info")
282 | if not rechunk_info:
283 | logger.warning(f"No rechunk info found for {img}")
284 | continue
285 |
286 | packages[img] = json.loads(rechunk_info)["packages"]
287 | logger.debug(f"Extracted {len(packages[img])} packages for {img}")
288 | except (KeyError, json.JSONDecodeError, TypeError) as e:
289 | logger.error(f"Failed to get packages for {img}: {e}")
290 | return packages
291 |
292 |
293 | def get_package_groups(self, target: str, prev: Dict[str, Any],
294 | manifests: Dict[str, Any]) -> Tuple[List[str], Dict[str, List[str]]]:
295 | """Categorize packages into common and variant-specific groups."""
296 | common = set()
297 | others = {k: set() for k in self.config.sections.keys()}
298 |
299 | npkg = self.get_packages(manifests)
300 | ppkg = self.get_packages(prev)
301 |
302 | keys = set(npkg.keys()) | set(ppkg.keys())
303 | pkg = defaultdict(set)
304 | for k in keys:
305 | pkg[k] = set(npkg.get(k, {})) | set(ppkg.get(k, {}))
306 |
307 | # Find common packages
308 | first = True
309 | for img, experience in self.get_images(target):
310 | if img not in pkg:
311 | continue
312 |
313 | if first:
314 | common.update(pkg[img])
315 | else:
316 | common.intersection_update(pkg[img])
317 | first = False
318 |
319 | # Find other packages
320 | for t, other in others.items():
321 | first = True
322 | for img, experience in self.get_images(target):
323 | if img not in pkg:
324 | continue
325 |
326 | if t == "base" and experience != "base":
327 | continue
328 | if t == "dx" and experience != "dx":
329 | continue
330 |
331 | if first:
332 | other.update(p for p in pkg[img] if p not in common)
333 | else:
334 | other.intersection_update(pkg[img])
335 | first = False
336 |
337 | return sorted(common), {k: sorted(v) for k, v in others.items()}
338 |
339 | def get_versions(self, manifests: Dict[str, Any]) -> Dict[str, str]:
340 | """Extract package versions from manifests."""
341 | versions = {}
342 | pkgs = self.get_packages(manifests)
343 | for img_pkgs in pkgs.values():
344 | for pkg, version in img_pkgs.items():
345 | versions[pkg] = re.sub(self.centos_pattern, "", version)
346 | return versions
347 |
348 |
349 | def calculate_changes(self, pkgs: List[str], prev: Dict[str, str],
350 | curr: Dict[str, str]) -> str:
351 | """Calculate package changes between versions."""
352 | added = []
353 | changed = []
354 | removed = []
355 |
356 | blacklist_ver = {curr.get(v) for v in self.config.package_blacklist if curr.get(v)}
357 |
358 | for pkg in pkgs:
359 | # Clean up changelog by removing mentioned packages
360 | if pkg in self.config.package_blacklist:
361 | continue
362 | if pkg in curr and curr.get(pkg) in blacklist_ver:
363 | continue
364 | if pkg in prev and prev.get(pkg) in blacklist_ver:
365 | continue
366 |
367 | if pkg not in prev:
368 | added.append(pkg)
369 | elif pkg not in curr:
370 | removed.append(pkg)
371 | elif prev[pkg] != curr[pkg]:
372 | changed.append(pkg)
373 |
374 |             # Track both current and previous versions so later duplicates are skipped
375 | if pkg in curr:
376 | blacklist_ver.add(curr[pkg])
377 | if pkg in prev:
378 | blacklist_ver.add(prev[pkg])
379 |
380 | logger.info(f"Package changes: {len(added)} added, {len(changed)} changed, {len(removed)} removed")
381 |
382 | output = ""
383 | for pkg in added:
384 | output += self.config.templates["pattern_add"].format(name=pkg, version=curr[pkg])
385 | for pkg in changed:
386 | output += self.config.templates["pattern_change"].format(name=pkg, prev=prev[pkg], new=curr[pkg])
387 | for pkg in removed:
388 | output += self.config.templates["pattern_remove"].format(name=pkg, version=prev[pkg])
389 |
390 | return output
391 |
392 | def get_commits(self, prev_manifests: Dict[str, Any],
393 | manifests: Dict[str, Any], target: str, workdir: Optional[str] = None) -> str:
394 | """Extract commit information between versions."""
395 | # Check if commits are enabled in configuration
396 | if not self.config.defaults.get("enable_commits", False):
397 | logger.debug("Commit extraction disabled in configuration")
398 | return ""
399 |
400 | if not workdir:
401 | logger.warning("No workdir provided, skipping commit extraction")
402 | return ""
403 |
404 | try:
405 | # Get commit hashes from container manifests
406 | start = self._get_commit_hash(prev_manifests)
407 | finish = self._get_commit_hash(manifests)
408 |
409 | if not start or not finish:
410 | logger.warning("Missing commit hashes, skipping commit extraction")
411 | return ""
412 |
413 | if start == finish:
414 | logger.info("Same commit hash for both versions, no commits to show")
415 | return ""
416 |
417 | logger.info(f"Extracting commits from {start[:7]} to {finish[:7]}")
418 |
419 | # Use git log with commit hashes from container manifests
420 | commits = subprocess.run(
421 | ["git", "-C", workdir, "log", "--pretty=format:%H %h %s",
422 | f"{start}..{finish}"],
423 | check=True,
424 | stdout=subprocess.PIPE,
425 | stderr=subprocess.PIPE,
426 | timeout=30
427 | ).stdout.decode("utf-8")
428 |
429 | output = ""
430 | commit_count = 0
431 | for commit in commits.split("\n"):
432 | if not commit.strip():
433 | continue
434 |
435 | parts = commit.split(" ", 2)
436 | if len(parts) < 3:
437 | logger.debug(f"Skipping malformed commit line: {commit}")
438 | continue
439 |
440 | githash, short, subject = parts
441 |
442 | # Skip merge commits and chore commits
443 | if subject.lower().startswith(("merge", "chore")):
444 | continue
445 |
446 | output += self.config.templates["commit_format"].format(
447 | short=short, subject=subject, githash=githash
448 | )
449 | commit_count += 1
450 |
451 | logger.info(f"Found {commit_count} relevant commits")
452 | return self.config.templates["commits_format"].format(commits=output) if output else ""
453 |
454 | except subprocess.CalledProcessError as e:
455 | # Check if the error is due to unknown revision (commit not in repo)
456 | stderr_output = e.stderr.decode() if e.stderr else ""
457 | if "unknown revision" in stderr_output.lower() or "bad revision" in stderr_output.lower():
458 | logger.warning(f"Container commit hashes not found in git repository - trying timestamp-based approach")
459 | logger.debug(f"Git error: {stderr_output}")
460 | return self._get_commits_by_timestamp(prev_manifests, manifests, workdir)
461 | else:
462 | logger.warning(f"Git command failed: {stderr_output}")
463 | return ""
464 | except subprocess.TimeoutExpired:
465 | logger.error("Git command timed out")
466 | return ""
467 | except Exception as e:
468 | logger.warning(f"Failed to get commits: {e}")
469 | return ""
470 |
471 | def _get_commits_by_timestamp(self, prev_manifests: Dict[str, Any],
472 | manifests: Dict[str, Any], workdir: str) -> str:
473 | """Get commits using container timestamps as fallback."""
474 | try:
475 |             # `re` is already imported at module level; only the datetime helpers are needed here
476 |             from datetime import datetime, timedelta
477 |
478 | # Get container creation timestamps
479 | prev_timestamp = self._get_container_timestamp(prev_manifests)
480 | curr_timestamp = self._get_container_timestamp(manifests)
481 |
482 | logger.debug(f"Container timestamps: prev={prev_timestamp}, curr={curr_timestamp}")
483 |
484 | if not prev_timestamp or not curr_timestamp:
485 | logger.warning("Missing container timestamps for commit correlation")
486 | return ""
487 |
488 | # Parse ISO 8601 timestamps
489 | def parse_timestamp(ts):
490 |                 # Normalize to a form datetime.fromisoformat() accepts
491 |                 ts = re.sub(r'\.\d+', '', ts)  # Remove microseconds
492 |                 ts = ts.replace('Z', '+00:00')  # Handle the Z timezone suffix
493 |                 return datetime.fromisoformat(ts)
494 |
495 | prev_dt = parse_timestamp(prev_timestamp)
496 | curr_dt = parse_timestamp(curr_timestamp)
497 |
498 | logger.info(f"Searching commits between {prev_dt.strftime('%Y-%m-%d %H:%M')} and {curr_dt.strftime('%Y-%m-%d %H:%M')}")
499 |
500 | # Add some buffer time to account for build delays
501 | start_time = prev_dt - timedelta(hours=2)
502 | end_time = curr_dt + timedelta(hours=2)
503 |
504 | logger.debug(f"Git time range: {start_time.strftime('%Y-%m-%d %H:%M')} to {end_time.strftime('%Y-%m-%d %H:%M')}")
505 |
506 | # Use git log with date range
507 | git_cmd = [
508 | "git", "-C", workdir, "log",
509 | "--pretty=format:%H %h %s %ci",
510 | f"--since={start_time.strftime('%Y-%m-%d %H:%M')}",
511 | f"--until={end_time.strftime('%Y-%m-%d %H:%M')}",
512 | "--no-merges"
513 | ]
514 |
515 | logger.debug(f"Git command: {' '.join(git_cmd)}")
516 |
517 | commits = subprocess.run(git_cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=30).stdout.decode("utf-8")
518 |
519 | logger.debug(f"Raw git output: {commits}")
520 |
521 | output = ""
522 | commit_count = 0
523 | for commit in commits.split("\n"):
524 | if not commit.strip():
525 | continue
526 |
527 | # Parse git output format: "hash short_hash subject date timezone"
528 | parts = commit.split(" ")
529 | if len(parts) < 4:
530 | logger.debug(f"Skipping malformed commit line: {commit}")
531 | continue
532 |
533 | githash = parts[0]
534 | short = parts[1]
535 | # Everything from index 2 up to the date (which has format YYYY-MM-DD)
536 | subject_parts = []
537 | for i, part in enumerate(parts[2:], 2):
538 | if part.startswith("2025-") or part.startswith("2024-"): # Date part
539 | break
540 | subject_parts.append(part)
541 |
542 | subject = " ".join(subject_parts)
543 |
544 | # Skip some chore commits but include dependency updates
545 | if (subject.lower().startswith("chore") and
546 | not any(keyword in subject.lower() for keyword in ["deps", "update", "bump"])):
547 | continue
548 |
549 | output += self.config.templates["commit_format"].format(
550 | short=short, subject=subject, githash=githash
551 | )
552 | commit_count += 1
553 |
554 | logger.info(f"Found {commit_count} commits in timestamp range")
555 | result = self.config.templates["commits_format"].format(commits=output) if output else ""
556 | logger.debug(f"Timestamp commit result: {result}")
557 | return result
558 |
559 | except Exception as e:
560 | logger.warning(f"Timestamp-based commit search failed: {e}")
561 | return ""
562 |
563 | def _get_commit_hash(self, manifests: Dict[str, Any]) -> str:
564 | """Extract commit hash from manifest labels."""
565 | if not manifests:
566 | return ""
567 |
568 | manifest = next(iter(manifests.values()))
569 | labels = manifest.get("Labels", {})
570 |
571 | # Try different label keys for commit hash
572 | commit_hash = (labels.get("org.opencontainers.image.revision") or
573 | labels.get("ostree.commit") or
574 | labels.get("org.opencontainers.image.source") or "")
575 |
576 | logger.debug(f"Available labels: {list(labels.keys())}")
577 | logger.debug(f"Extracted commit hash: {commit_hash}")
578 |
579 | return commit_hash
580 |
581 | def _get_container_timestamp(self, manifests: Dict[str, Any]) -> str:
582 | """Extract creation timestamp from manifest."""
583 | if not manifests:
584 | return ""
585 |
586 | manifest = next(iter(manifests.values()))
587 | return manifest.get("Labels", {}).get(
588 | "org.opencontainers.image.created",
589 | manifest.get("Created", "")
590 | )
591 |
592 | def get_hwe_kernel_change(self, prev: str, curr: str, target: str) -> Tuple[Optional[str], Optional[str]]:
593 | """Get HWE kernel version changes."""
594 | try:
595 | logger.info(f"Fetching HWE manifests for {curr}-hwe and {prev}-hwe...")
596 | hwe_curr_manifest = self.get_manifests(curr + "-hwe")
597 | hwe_prev_manifest = self.get_manifests(prev + "-hwe")
598 |
599 | # If either manifest is empty, return None values
600 | if not hwe_curr_manifest or not hwe_prev_manifest:
601 | logger.warning("One or both HWE manifests are empty")
602 | return (None, None)
603 |
604 | hwe_curr_versions = self.get_versions(hwe_curr_manifest)
605 | hwe_prev_versions = self.get_versions(hwe_prev_manifest)
606 |
607 | curr_kernel = hwe_curr_versions.get("kernel")
608 | prev_kernel = hwe_prev_versions.get("kernel")
609 | logger.debug(f"HWE kernel versions: {prev_kernel} -> {curr_kernel}")
610 |
611 | return (curr_kernel, prev_kernel)
612 | except Exception as e:
613 | logger.error(f"Failed to get HWE kernel versions: {e}")
614 | return (None, None)
615 |
616 | def _generate_pretty_version(self, manifests: Dict[str, Any], curr: str) -> str:
617 | """Generate a pretty version string if not provided."""
618 | try:
619 | finish = self._get_commit_hash(manifests)
620 | except Exception as e:
621 | logger.error(f"Failed to get finish hash: {e}")
622 | finish = ""
623 |
624 |         try:
625 |             linux = next(iter(manifests.values()))["Labels"]["ostree.linux"]
626 |             start = linux.find(".el") + 3
627 |             el_version = linux[start:start+2]  # EL major version, e.g. "10" from "...el10..."
628 |         except Exception as e:
629 |             logger.error(f"Failed to get linux version: {e}")
630 |             el_version = ""
631 |
632 |         # Strip any trailing ".N" suffix and the leading target or major-version prefix from curr
633 |         curr_pretty = re.sub(r"\.\d{1,2}$", "", curr)
634 |         curr_pretty = re.sub(r"^[a-z]+.|^[0-9]+\.", "", curr_pretty)
635 |
636 |         pretty = curr_pretty + " (c" + el_version + "s"
637 | if finish:
638 | pretty += ", #" + finish[:7]
639 | pretty += ")"
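640 |         # Illustrative result: curr "lts.20250101" with EL major 10 and HEAD
641 |         # abc1234 yields "20250101 (c10s, #abc1234)"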
640 |
641 | return pretty
642 |
643 | def _process_template_variables(self, changelog: str, prev: str, curr: str,
644 | hwe_kernel_version: Optional[str],
645 | hwe_prev_kernel_version: Optional[str],
646 | versions: Dict[str, str],
647 | prev_versions: Dict[str, str]) -> str:
648 | """Process all template variable replacements in the changelog."""
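649 |         # Illustrative: a "{pkgrel:kernel}" placeholder becomes either the
650 |         # current version or a "prev -> new" style diff; the exact rendering
651 |         # comes from the pattern_pkgrel* templates in the config.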
649 | # Handle HWE kernel version
650 | if hwe_kernel_version == hwe_prev_kernel_version:
651 | changelog = changelog.replace(
652 | "{pkgrel:kernel-hwe}",
653 | self.config.templates["pattern_pkgrel"].format(version=hwe_kernel_version or "N/A")
654 | )
655 | else:
656 | changelog = changelog.replace(
657 | "{pkgrel:kernel-hwe}",
658 | self.config.templates["pattern_pkgrel_changed"].format(
659 | prev=hwe_prev_kernel_version or "N/A",
660 | new=hwe_kernel_version or "N/A"
661 | ),
662 | )
663 |
664 | # Replace package version templates
665 | for pkg, version in versions.items():
666 | template = f"{{pkgrel:{pkg}}}"
667 | if pkg not in prev_versions or prev_versions[pkg] == version:
668 | replacement = self.config.templates["pattern_pkgrel"].format(version=version)
669 | else:
670 | replacement = self.config.templates["pattern_pkgrel_changed"].format(
671 | prev=prev_versions[pkg], new=version
672 | )
673 | changelog = changelog.replace(template, replacement)
674 |
675 | # Replace any remaining unreplaced template variables with "N/A"
676 | changelog = re.sub(r'\{pkgrel:[^}]+\}', 'N/A', changelog)
677 | return changelog
678 |
679 | def _generate_changes_section(self, prev_manifests: Dict[str, Any],
680 | manifests: Dict[str, Any], target: str, workdir: Optional[str],
681 | common: List[str], others: Dict[str, List[str]],
682 | prev_versions: Dict[str, str],
683 | versions: Dict[str, str]) -> str:
684 | """Generate the changes section of the changelog."""
685 | changes = ""
686 |
687 | # Add package changes first
688 | common_changes = self.calculate_changes(common, prev_versions, versions)
689 | if common_changes:
690 | changes += self.config.templates["common_pattern"].format(title=self.config.sections["all"], changes=common_changes)
691 |
692 | for k, v in others.items():
693 | chg = self.calculate_changes(v, prev_versions, versions)
694 | if chg:
695 | changes += self.config.templates["common_pattern"].format(title=self.config.sections[k], changes=chg)
696 |
697 | # Add commits section after all package changes
698 | commits_result = self.get_commits(prev_manifests, manifests, target, workdir)
699 | logger.debug(f"Commits result from get_commits: '{commits_result}'")
700 | changes += commits_result
701 |
702 | return changes
703 |
704 | def generate_changelog(self, handwritten: Optional[str], target: str,
705 | pretty: Optional[str], workdir: Optional[str],
706 | prev_manifests: Dict[str, Any],
707 | manifests: Dict[str, Any],
708 | previous_tag: Optional[str] = None) -> Tuple[str, str]:
709 | """Generate the complete changelog."""
710 | logger.info(f"Generating changelog for target '{target}'")
711 |
712 | try:
713 | # Get package data
714 | common, others = self.get_package_groups(target, prev_manifests, manifests)
715 | versions = self.get_versions(manifests)
716 | prev_versions = self.get_versions(prev_manifests)
717 |
718 | # Get tags and versions
719 | prev, curr = self.get_tags(target, manifests, previous_tag)
720 | logger.info(f"Tags: {prev} -> {curr}")
721 |
722 | hwe_kernel_version, hwe_prev_kernel_version = self.get_hwe_kernel_change(
723 | prev, curr, target
724 | )
725 |
726 | # Generate title
727 | version = target.capitalize()
728 | if target in self.config.targets:
729 | version = version.upper()
730 |
731 | if not pretty:
732 | pretty = self._generate_pretty_version(manifests, curr)
733 |
734 | title = self.config.templates["changelog_title"].format_map(
735 | defaultdict(str, os=self.config.os_name, tag=version, pretty=pretty)
736 | )
737 |
738 | # Process base template
739 | changelog = self.config.templates["changelog_format"]
740 | changelog = (
741 | changelog.replace("{handwritten}",
742 | handwritten if handwritten else self.config.templates["handwritten_placeholder"].format(curr=curr))
743 | .replace("{target}", target)
744 | .replace("{prev}", prev)
745 | .replace("{curr}", curr)
746 | )
747 |
748 | # Process template variables
749 | changelog = self._process_template_variables(
750 | changelog, prev, curr, hwe_kernel_version, hwe_prev_kernel_version,
751 | versions, prev_versions
752 | )
753 |
754 | # Generate and insert changes section
755 | changes = self._generate_changes_section(
756 | prev_manifests, manifests, target, workdir, common, others,
757 | prev_versions, versions
758 | )
759 | changelog = changelog.replace("{changes}", changes)
760 |
761 | logger.info("Changelog generated successfully")
762 | return title, changelog
763 |
764 | except Exception as e:
765 | logger.error(f"Failed to generate changelog: {e}")
766 | raise ChangelogError(f"Changelog generation failed: {e}") from e
767 |
768 |
769 | def setup_argument_parser() -> argparse.ArgumentParser:
770 | """Set up the command line argument parser."""
771 | parser = argparse.ArgumentParser(
772 | description="Generate changelogs for Bluefin LTS container images",
773 | formatter_class=argparse.RawDescriptionHelpFormatter,
774 | epilog="""
775 | Examples:
776 | # Simple usage
777 | %(prog)s lts
778 |
779 | # CI/CD usage (recommended for GitHub Actions)
780 | %(prog)s lts --ci
781 |
782 | # With custom options
783 | %(prog)s lts --workdir /path/to/git/repo --verbose
784 | %(prog)s lts --pretty "Custom Version" --handwritten notes.txt
785 | """
786 | )
787 |
788 | # Required arguments
789 | parser.add_argument("target", help="Target tag to generate changelog for")
790 |
791 | # Optional arguments
792 | parser.add_argument("--pretty", help="Custom subject for the changelog")
793 | parser.add_argument("--workdir", help="Git directory for commit extraction")
794 | parser.add_argument("--handwritten", help="Path to handwritten changelog content")
795 | parser.add_argument("--previous-tag", help="Previous tag to compare against (overrides automatic detection)")
796 |
797 | # Output control
798 | parser.add_argument("--verbose", "-v", action="store_true",
799 | help="Enable verbose logging")
800 | parser.add_argument("--dry-run", action="store_true",
801 | help="Generate changelog but don't write files")
802 |
803 | # Release management options
804 | parser.add_argument("--check-release", action="store_true",
805 | help="Check if release already exists before generating changelog")
806 | parser.add_argument("--force", action="store_true",
807 | help="Generate changelog even if release already exists")
808 | parser.add_argument("--github-output",
809 | help="Path to GitHub Actions output file for setting variables")
810 | parser.add_argument("--ci", action="store_true",
811 | help="Enable CI/CD mode (equivalent to --check-release --workdir . --github-output $GITHUB_OUTPUT)")
812 |
813 | return parser
814 |
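# The --ci flag is shorthand for the defaults applied in main(); assuming the
# runner exports $GITHUB_OUTPUT, these two invocations behave identically:
#
#   changelogs.py lts --ci
#   changelogs.py lts --check-release --workdir . --github-output "$GITHUB_OUTPUT"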
815 |
816 | def validate_arguments(args: argparse.Namespace) -> None:
817 | """Validate command line arguments."""
818 | # Validate workdir if provided
819 | if args.workdir and not Path(args.workdir).is_dir():
820 | raise ValueError(f"Workdir does not exist or is not a directory: {args.workdir}")
821 |
822 | # Validate handwritten content if provided
823 | if args.handwritten:
824 | handwritten_path = Path(args.handwritten)
825 | if not handwritten_path.exists():
826 | raise ValueError(f"Handwritten changelog file not found: {args.handwritten}")
827 |
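# Validation failures raise ValueError, which main() reports through its
# generic exception handler and exits with status 1, e.g. (hypothetical path):
#   changelogs.py lts --workdir /nonexistent
#   -> "Workdir does not exist or is not a directory: /nonexistent"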
828 |
829 | def main() -> None:
830 | """Main entry point for the changelog generator."""
831 | parser = setup_argument_parser()
832 | args = parser.parse_args()
833 |
834 | try:
835 | # Validate arguments
836 | validate_arguments(args)
837 |
838 | # Handle CI mode - apply common CI/CD defaults
839 | if args.ci:
840 | args.check_release = True
841 | if not args.workdir:
842 | args.workdir = "."
843 | if not args.github_output and os.getenv('GITHUB_OUTPUT'):
844 | args.github_output = os.getenv('GITHUB_OUTPUT')
845 |
846 | # Configure logging based on verbosity
847 | if args.verbose:
848 | logging.getLogger().setLevel(logging.DEBUG)
849 |
850 |         # Strip any ref prefix (refs/tags/, refs/heads/, refs/remotes/, ...)
851 | target = args.target.split('/')[-1]
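        # e.g. "refs/tags/lts" -> "lts"; a bare "lts" passes through unchanged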
852 | logger.info(f"Processing target: {target}")
853 |
854 | # Create configuration with defaults
855 | config = load_config()
856 |
857 | # Load handwritten content if provided
858 | handwritten = None
859 | if args.handwritten:
860 | handwritten_path = Path(args.handwritten)
861 | handwritten = handwritten_path.read_text(encoding='utf-8')
862 | logger.info(f"Loaded handwritten content from {args.handwritten}")
863 |
864 | # Create generator and process
865 | generator = ChangelogGenerator(config)
866 |
867 | logger.info("Fetching current manifests...")
868 | manifests = generator.get_manifests(target)
869 |
870 | # Determine previous tag - use provided one or auto-detect
871 | if args.previous_tag:
872 | prev = args.previous_tag
873 | logger.info(f"Using provided previous tag: {prev}")
874 | else:
875 | prev, curr = generator.get_tags(target, manifests)
876 | logger.info(f"Auto-detected previous tag: {prev}")
877 |
878 |         # Always derive the current tag from the manifests, keeping prev as resolved above
879 | _, curr = generator.get_tags(target, manifests, prev)
880 | logger.info(f"Current tag: {curr}")
881 |
882 | # Check if release already exists (if requested)
883 | if args.check_release and not args.force:
884 | if check_github_release_exists(curr):
885 | logger.info(f"Release already exists for tag {curr}. Skipping changelog generation.")
886 | if args.github_output:
887 | write_github_output(args.github_output, {
888 | "SKIP_CHANGELOG": "true",
889 | "CHANGELOG_TAG": curr,
890 | "EXISTING_RELEASE": "true"
891 | })
892 | return
893 | else:
894 | logger.info(f"No existing release found for {curr}. Generating changelog.")
895 |
896 | # Use last published release as previous tag if not specified and check-release is enabled
897 | if args.check_release and not args.previous_tag:
898 | last_published = get_last_published_release_tag()
899 | if last_published:
900 | prev = last_published
901 | logger.info(f"Using last published release as previous tag: {prev}")
902 |
903 | logger.info("Fetching previous manifests...")
904 | prev_manifests = generator.get_manifests(prev)
905 |
906 | logger.info("Generating changelog...")
907 | title, changelog = generator.generate_changelog(
908 | handwritten, target, args.pretty, args.workdir,
909 |             prev_manifests, manifests, prev  # pass the resolved tag so it matches prev_manifests
910 | )
911 |
912 |         if not args.verbose:  # quiet runs get a concise stdout summary
913 | print(f"Changelog Title: {title}")
914 | print(f"Tag: {curr}")
915 |
916 | # Write output files unless dry-run
917 | if not args.dry_run:
918 | # Use paths from config
919 | changelog_path = Path(config.defaults["output_file"])
920 | changelog_path.write_text(changelog, encoding='utf-8')
921 | logger.info(f"Changelog written to {changelog_path}")
922 |
923 | output_path = Path(config.defaults["env_output_file"])
924 | output_content = f'TITLE="{title}"\nTAG={curr}\n'
925 | output_path.write_text(output_content, encoding='utf-8')
926 | logger.info(f"Environment variables written to {output_path}")
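            # Resulting env file shape (values hypothetical):
            #   TITLE="Bluefin LTS (Example)"
            #   TAG=lts-example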
927 |
928 | # Write GitHub Actions output if requested
929 | if args.github_output:
930 | write_github_output(args.github_output, {
931 | "SKIP_CHANGELOG": "false",
932 | "CHANGELOG_TAG": curr,
933 | "CHANGELOG_TITLE": title,
934 | "CHANGELOG_PATH": str(changelog_path.absolute()),
935 | "EXISTING_RELEASE": "false"
936 | })
937 | else:
938 | logger.info("Dry run - no files written")
939 |
940 | except (ChangelogError, TagDiscoveryError, ManifestFetchError) as e:
941 | logger.error(f"Changelog generation failed: {e}")
942 | sys.exit(1)
943 | except KeyboardInterrupt:
944 | logger.info("Operation cancelled by user")
945 | sys.exit(130)
946 | except Exception as e:
947 | logger.error(f"Unexpected error: {e}")
948 | if args.verbose:
949 | import traceback
950 | traceback.print_exc()
951 | sys.exit(1)
952 |
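# Exit codes: 0 on success (including a clean skip when the release already
# exists), 1 on changelog or unexpected errors, 130 on Ctrl-C (128 + SIGINT).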
953 |
954 | if __name__ == "__main__":
955 | main()
956 |
--------------------------------------------------------------------------------