├── .gitattributes ├── .github ├── CODEOWNERS ├── FUNDING.yml ├── ISSUE_TEMPLATE │ └── pin.yml ├── changelogs.py ├── renovate.json5 ├── semantic.yaml └── workflows │ ├── build-dx.yml │ ├── build-gdx.yml │ ├── build-regular-testing.yml │ ├── build-regular.yml │ ├── content-filter.yaml │ ├── generate-changelog-release.yml │ ├── reusable-build-image.yml │ └── validate-renovate.yaml ├── .gitignore ├── Containerfile ├── Justfile ├── LICENSE ├── README.md ├── artifacthub-repo.yml ├── build_scripts ├── 00-workarounds.sh ├── 10-packages-image-base.sh ├── 20-packages.sh ├── 26-packages-post.sh ├── 40-services.sh ├── 90-image-info.sh ├── build.sh ├── cleanup.sh ├── overrides │ ├── aarch64 │ │ ├── dx │ │ │ └── .gitkeep │ │ ├── gdx │ │ │ ├── .gitkeep │ │ │ └── gdx-demo-just.sh │ │ └── hwe │ │ │ └── .gitkeep │ ├── dx │ │ ├── 00-packages.sh │ │ ├── 05-dconf.sh │ │ ├── 20-services.sh │ │ ├── 30-flatpak.sh │ │ └── 90-image-info.sh │ ├── gdx │ │ ├── 05-pixi.sh │ │ ├── 20-nvidia.sh │ │ ├── 30-packages.sh │ │ └── 90-image-info.sh │ └── x86_64 │ │ ├── dx │ │ └── .gitkeep │ │ └── gdx │ │ └── .gitkeep └── scripts │ └── image-info-set ├── cosign.pub ├── image-versions.yaml ├── image.toml ├── system_files ├── etc │ ├── environment │ ├── rpm-ostreed.conf │ ├── systemd │ │ └── zram-generator.conf │ └── ublue-os │ │ ├── bling.json │ │ ├── changelog.json │ │ ├── fastfetch.json │ │ ├── rebase_helper.json │ │ ├── setup.json │ │ └── system-flatpaks.list └── usr │ ├── lib │ └── systemd │ │ └── system │ │ └── dconf-update.service │ └── share │ ├── fish │ └── vendor_functions.d │ │ └── fish_prompt.fish │ └── ublue-os │ ├── firefox-config │ └── 01-bluefin-global.js │ ├── just │ ├── 10-update.just │ └── 61-lts-custom.just │ ├── motd │ ├── template.md │ └── tips │ │ ├── 10-tips.md │ │ └── 20-bluefin.md │ ├── privileged-setup.hooks.d │ ├── 10-tailscale.sh │ └── 99-flatpaks.sh │ ├── system-setup.hooks.d │ └── 10-framework.sh │ └── user-setup.hooks.d │ ├── 10-theming.sh │ └── 99-privileged.sh └── 
system_files_overrides ├── aarch64-dx └── .gitkeep ├── aarch64-gdx ├── .gitkeep └── usr │ └── share │ └── ublue-os │ ├── gdx-demo │ ├── bench-container │ │ └── Dockerfile │ └── ramalama │ │ ├── demo-ai-server.py │ │ └── ramalama-serve-ampere.py │ └── just │ └── 66-ampere.just ├── aarch64 ├── .gitkeep ├── etc │ ├── profile.d │ │ └── pixi.sh │ └── ublue-os │ │ └── bling.json └── usr │ └── share │ └── ublue-os │ └── bling │ └── bluefin-cli.pixi.list ├── dx ├── etc │ └── skel │ │ └── .config │ │ └── Code │ │ └── User │ │ └── settings.json └── usr │ ├── bin │ └── .gitkeep │ └── share │ └── ublue-os │ ├── privileged-setup.hooks.d │ └── 20-dx.sh │ └── user-setup.hooks.d │ └── 11-vscode.sh ├── gdx ├── .gitkeep └── usr │ └── share │ └── ublue-os │ └── user-setup.hooks.d │ └── 30-gdx-vscode.sh ├── x86_64-dx └── .gitkeep ├── x86_64-gdx └── .gitkeep └── x86_64 └── .gitkeep /.gitattributes: -------------------------------------------------------------------------------- 1 | *.yml linguist-detectable=true 2 | *.yml linguist-language=YAML 3 | 4 | *.yaml linguist-detectable=true 5 | *.yaml linguist-language=YAML 6 | 7 | *.just linguist-detectable=true 8 | *.just linguist-documentation=false 9 | *.just linguist-language=Just 10 | 11 | *.json linguist-detectable=true 12 | *.json linguist-documentation=false 13 | *.json linguist-language=JSON 14 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @castrojo @tulilirockz 2 | 3 | image-versions.yml 4 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: [castrojo, tulilirockz] 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/pin.yml: 
-------------------------------------------------------------------------------- 1 | name: Pin request 2 | description: Ask to pin a package to a specific version 3 | body: 4 | - type: markdown 5 | attributes: 6 | value: | 7 | Thank you for taking the time to fill out this bug report! (She bites sometimes) 8 | - type: textarea 9 | id: package 10 | attributes: 11 | label: Describe the Package 12 | description: Describe the package you want pinned and why 13 | placeholder: Pin foobar to version 1.2 14 | value: "Package foobar version 1.2 blew up, we need to revert to 1.1" 15 | validations: 16 | required: true 17 | - type: textarea 18 | id: bodhi 19 | attributes: 20 | label: Bodhi Link (Optional) 21 | description: Add the bodhi link to the working version, this is very useful in order to pin a package quickly 22 | placeholder: Bodhi link 23 | value: "Pin to this version please: https://bodhi.fedoraproject.org/updates/FEDORA-2024-45d587348e" 24 | validations: 25 | required: false 26 | -------------------------------------------------------------------------------- /.github/changelogs.py: -------------------------------------------------------------------------------- 1 | from itertools import product 2 | import subprocess 3 | import json 4 | import time 5 | from typing import Any 6 | import re 7 | from collections import defaultdict 8 | 9 | REGISTRY = "docker://ghcr.io/ublue-os/" 10 | 11 | IMAGE_MATRIX = { 12 | "experience": ["base", "dx", "gdx"], 13 | "de": ["gnome"], 14 | "image_flavor": ["main"], 15 | } 16 | 17 | RETRIES = 3 18 | RETRY_WAIT = 5 19 | FEDORA_PATTERN = re.compile(r"\.fc\d\d") 20 | START_PATTERN = lambda target: re.compile(rf"{target}-\d\d\d+") 21 | 22 | PATTERN_ADD = "\n| ✨ | {name} | | {version} |" 23 | PATTERN_CHANGE = "\n| 🔄 | {name} | {prev} | {new} |" 24 | PATTERN_REMOVE = "\n| ❌ | {name} | {version} | |" 25 | PATTERN_PKGREL_CHANGED = "{prev} ➡️ {new}" 26 | PATTERN_PKGREL = "{version}" 27 | COMMON_PAT = "### All Images\n| | Name | Previous | New 
|\n| --- | --- | --- | --- |{changes}\n\n" 28 | OTHER_NAMES = { 29 | "base": "### Base Images\n| | Name | Previous | New |\n| --- | --- | --- | --- |{changes}\n\n", 30 | "dx": "### [Developer Experience Images](https://docs.projectbluefin.io/bluefin-dx)\n| | Name | Previous | New |\n| --- | --- | --- | --- |{changes}\n\n", 31 | "gdx": "### [Graphical Developer Experience Images](https://docs.projectbluefin.io/gdx)\n| | Name | Previous | New |\n| --- | --- | --- | --- |{changes}\n\n", 32 | "gnome": "### [Bluefin LTS Images](https://docs.projectbluefin.io/lts)\n| | Name | Previous | New |\n| --- | --- | --- | --- |{changes}\n\n", 33 | "nvidia": "### Nvidia Images\n| | Name | Previous | New |\n| --- | --- | --- | --- |{changes}\n\n", 34 | } 35 | 36 | COMMITS_FORMAT = "### Commits\n| Hash | Subject |\n| --- | --- |{commits}\n\n" 37 | COMMIT_FORMAT = "\n| **[{short}](https://github.com/ublue-os/bluefin-lts/commit/{githash})** | {subject} |" 38 | 39 | CHANGELOG_TITLE = "{tag}: {pretty}" 40 | CHANGELOG_FORMAT = """\ 41 | {handwritten} 42 | 43 | From previous `{target}` version `{prev}` there have been the following changes. 
**One package per new version shown.** 44 | 45 | ### Major packages 46 | | Name | Version | 47 | | --- | --- | 48 | | **Kernel** | {pkgrel:kernel} | 49 | | **GNOME** | {pkgrel:gnome-control-center-filesystem} | 50 | | **Mesa** | {pkgrel:mesa-filesystem} | 51 | | **Podman** | {pkgrel:podman} | 52 | | **Nvidia** | {pkgrel:nvidia-driver} | 53 | 54 | ### Major DX packages 55 | | Name | Version | 56 | | --- | --- | 57 | | **Docker** | {pkgrel:docker-ce} | 58 | | **VSCode** | {pkgrel:code} | 59 | | **Ramalama** | {pkgrel:python3-ramalama} | 60 | 61 | {changes} 62 | 63 | ### How to rebase 64 | For current users, type the following to rebase to this version: 65 | ```bash 66 | # Get Image Name 67 | IMAGE_NAME=$(jq -r '.["image-name"]' < /usr/share/ublue-os/image-info.json) 68 | 69 | # For this Stream 70 | sudo bootc switch --enforce-container-sigpolicy ghcr.io/ublue-os/$IMAGE_NAME:{target} 71 | 72 | # For this Specific Image: 73 | sudo bootc switch --enforce-container-sigpolicy ghcr.io/ublue-os/$IMAGE_NAME:{curr} 74 | ``` 75 | 76 | ### Documentation 77 | Be sure to read the [documentation](https://docs.projectbluefin.io/lts) for more information 78 | on how to use your cloud native system. 
79 | """ 80 | HANDWRITTEN_PLACEHOLDER = """\ 81 | This is an automatically generated changelog for release `{curr}`.""" 82 | 83 | BLACKLIST_VERSIONS = [ 84 | "kernel", 85 | "gnome-control-center-filesystem", 86 | "mesa-filesystem", 87 | "podman", 88 | "docker-ce", 89 | "incus", 90 | "vscode", 91 | "nvidia-driver" 92 | ] 93 | 94 | 95 | def get_images(target: str): 96 | matrix = IMAGE_MATRIX 97 | 98 | for experience, de, image_flavor in product(*matrix.values()): 99 | img = "" 100 | if de == "gnome": 101 | img += "bluefin" 102 | 103 | if experience == "dx": 104 | img += "-dx" 105 | 106 | if experience == "gdx": 107 | img += "-gdx" 108 | 109 | if image_flavor != "main": 110 | img += "-" 111 | img += image_flavor 112 | 113 | yield img, experience, de, image_flavor 114 | 115 | 116 | def get_manifests(target: str): 117 | out = {} 118 | imgs = list(get_images(target)) 119 | for j, (img, _, _, _) in enumerate(imgs): 120 | output = None 121 | print(f"Getting {img}:{target} manifest ({j+1}/{len(imgs)}).") 122 | for i in range(RETRIES): 123 | try: 124 | output = subprocess.run( 125 | ["skopeo", "inspect", REGISTRY + img + ":" + target], 126 | check=True, 127 | stdout=subprocess.PIPE, 128 | ).stdout 129 | break 130 | except subprocess.CalledProcessError: 131 | print( 132 | f"Failed to get {img}:{target}, retrying in {RETRY_WAIT} seconds ({i+1}/{RETRIES})" 133 | ) 134 | time.sleep(RETRY_WAIT) 135 | if output is None: 136 | print(f"Failed to get {img}:{target}, skipping") 137 | continue 138 | out[img] = json.loads(output) 139 | return out 140 | 141 | 142 | def get_tags(target: str, manifests: dict[str, Any]): 143 | tags = set() 144 | 145 | first = next(iter(manifests.values())) 146 | for tag in first["RepoTags"]: 147 | # Tags ending with .0 should not exist 148 | if tag.endswith(".0"): 149 | continue 150 | if re.match(START_PATTERN(target), tag): 151 | tags.add(tag) 152 | 153 | for manifest in manifests.values(): 154 | for tag in list(tags): 155 | if tag not in 
def get_packages(manifests: dict[str, Any]):
    """Extract the per-image package map from skopeo manifests.

    Each manifest is expected to carry a ``dev.hhd.rechunk.info`` label whose
    JSON payload has a ``packages`` object (package name -> version). Images
    with a missing or unparsable label are reported on stdout and omitted
    from the result.
    """
    result: dict[str, Any] = {}
    for img, manifest in manifests.items():
        try:
            raw = manifest["Labels"]["dev.hhd.rechunk.info"]
            result[img] = json.loads(raw)["packages"]
        except Exception as e:
            # Best effort: a broken label should not abort the changelog.
            print(f"Failed to get packages for {img}:\n{e}")
    return result
def calculate_changes(pkgs: list[str], prev: dict[str, str], curr: dict[str, str]):
    """Render markdown table rows for package differences between releases.

    For every package in *pkgs*, classify it as added, changed, or removed
    between the *prev* and *curr* version maps and emit one row per package
    (added rows first, then changed, then removed). Packages listed in
    ``BLACKLIST_VERSIONS`` — and any package whose version was already shown
    by an earlier row — are skipped, so only one package per new version
    appears in the output.
    """
    add_rows: list[str] = []
    change_rows: list[str] = []
    remove_rows: list[str] = []

    # Versions already covered by the hand-picked "major packages" table.
    seen_versions = {curr.get(name, None) for name in BLACKLIST_VERSIONS}

    for name in pkgs:
        # Clearup changelog by removing mentioned packages.
        if name in BLACKLIST_VERSIONS:
            continue
        if name in curr and curr.get(name, None) in seen_versions:
            continue
        if name in prev and prev.get(name, None) in seen_versions:
            continue

        if name not in prev:
            add_rows.append(PATTERN_ADD.format(name=name, version=curr[name]))
        elif name not in curr:
            remove_rows.append(PATTERN_REMOVE.format(name=name, version=prev[name]))
        elif prev[name] != curr[name]:
            change_rows.append(
                PATTERN_CHANGE.format(name=name, prev=prev[name], new=curr[name])
            )

        # Remember both sides so later packages at the same version are elided.
        seen_versions.add(curr.get(name, None))
        seen_versions.add(prev.get(name, None))

    return "".join(add_rows + change_rows + remove_rows)
def generate_changelog(
    handwritten: str | None,
    target: str,
    pretty: str | None,
    workdir: str,
    prev_manifests,
    manifests,
):
    """Build the release title and markdown changelog body for *target*.

    Compares the previous and current manifests (tags, package versions,
    git commits) and fills in the ``CHANGELOG_FORMAT`` template.

    Args:
        handwritten: Optional hand-written intro; falls back to
            ``HANDWRITTEN_PLACEHOLDER`` when empty.
        target: The stream tag being released (e.g. "lts").
        pretty: Optional human-readable release subject; generated from the
            current tag and image labels when not supplied.
        workdir: Git checkout used to list commits between the two revisions.
        prev_manifests: skopeo manifests of the previous release.
        manifests: skopeo manifests of the current release.

    Returns:
        A ``(title, changelog)`` tuple of rendered markdown strings.
    """
    common, others = get_package_groups(target, prev_manifests, manifests)
    versions = get_versions(manifests)
    prev_versions = get_versions(prev_manifests)

    prev, curr = get_tags(target, manifests)

    if not pretty:
        # Generate pretty version since we dont have it
        try:
            # Git revision the current image was built from.
            finish: str = next(iter(manifests.values()))["Labels"][
                "org.opencontainers.image.revision"
            ]
        except Exception as e:
            print(f"Failed to get finish hash:\n{e}")
            finish = ""
        try:
            # NOTE(review): despite the name, this extracts the two digits
            # after ".el" from the kernel string (EL/CentOS major version,
            # e.g. "el10" -> "10"), not a Fedora version — confirm intent.
            linux: str = next(iter(manifests.values()))["Labels"][
                "ostree.linux"
            ]
            start = linux.find(".el") + 3
            fedora_version = linux[start:start + 2]
        except Exception as e:
            print(f"Failed to get linux version:\n{e}")
            fedora_version = ""

        # Remove .0 from curr
        curr_pretty = re.sub(r"\.\d{1,2}$", "", curr)
        # Remove target- from curr
        curr_pretty = re.sub(rf"^[a-z]+-|^[0-9]+-", "", curr_pretty)
        if target == "stable-daily":
            curr_pretty = re.sub(rf"^[a-z]+-", "", curr_pretty)
        # Prefix the EL major version if the tag does not already carry it.
        if not fedora_version + "." in curr_pretty:
            curr_pretty = fedora_version + "." + curr_pretty
        # e.g. "Lts (c10.20250101s, #abcdef0)" — "c…s" styles it as a
        # CentOS Stream version.
        pretty = target.capitalize()
        pretty += " (c" + curr_pretty + "s"
        if finish:
            pretty += ", #" + finish[:7]
        pretty += ")"

    # defaultdict(str) keeps format_map from raising on missing keys.
    title = CHANGELOG_TITLE.format_map(defaultdict(str, tag=curr, pretty=pretty))

    changelog = CHANGELOG_FORMAT

    if target == "gts":
        # NOTE(review): drops line index 9 of CHANGELOG_FORMAT (the Mesa row
        # of the "Major packages" table, by current line count) for the gts
        # stream — confirm the index still matches if the template changes.
        changelog = changelog.splitlines()
        del changelog[9]
        changelog = '\n'.join(changelog)

    changelog = (
        changelog.replace("{handwritten}", handwritten if handwritten else HANDWRITTEN_PLACEHOLDER)
        .replace("{target}", target)
        .replace("{prev}", prev)
        .replace("{curr}", curr)
    )

    # Fill each "{pkgrel:<name>}" slot: plain version when unchanged,
    # "old ➡️ new" when the package moved between releases.
    for pkg, v in versions.items():
        if pkg not in prev_versions or prev_versions[pkg] == v:
            changelog = changelog.replace(
                "{pkgrel:" + pkg + "}", PATTERN_PKGREL.format(version=v)
            )
        else:
            changelog = changelog.replace(
                "{pkgrel:" + pkg + "}",
                PATTERN_PKGREL_CHANGED.format(prev=prev_versions[pkg], new=v),
            )

    # Assemble the "{changes}" section: commit log first, then the
    # all-images table, then one table per image group that has changes.
    changes = ""
    changes += get_commits(prev_manifests, manifests, workdir)
    common = calculate_changes(common, prev_versions, versions)
    if common:
        changes += COMMON_PAT.format(changes=common)
    for k, v in others.items():
        chg = calculate_changes(v, prev_versions, versions)
        if chg:
            changes += OTHER_NAMES[k].format(changes=chg)

    changelog = changelog.replace("{changes}", changes)

    return title, changelog
help="Handwritten changelog") 426 | args = parser.parse_args() 427 | 428 | # Remove refs/tags, refs/heads, refs/remotes e.g. 429 | # Tags cannot include / anyway. 430 | target = args.target.split('/')[-1] 431 | 432 | manifests = get_manifests(target) 433 | prev, curr = get_tags(target, manifests) 434 | print(f"Previous tag: {prev}") 435 | print(f" Current tag: {curr}") 436 | 437 | prev_manifests = get_manifests(prev) 438 | title, changelog = generate_changelog( 439 | args.handwritten, 440 | target, 441 | args.pretty, 442 | args.workdir, 443 | prev_manifests, 444 | manifests, 445 | ) 446 | 447 | print(f"Changelog:\n# {title}\n{changelog}") 448 | print(f"\nOutput:\nTITLE=\"{title}\"\nTAG={curr}") 449 | 450 | with open(args.changelog, "w") as f: 451 | f.write(changelog) 452 | 453 | with open(args.output, "w") as f: 454 | f.write(f'TITLE="{title}"\nTAG={curr}\n') 455 | 456 | 457 | if __name__ == "__main__": 458 | main() 459 | -------------------------------------------------------------------------------- /.github/renovate.json5: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:best-practices", 5 | ], 6 | 7 | "rebaseWhen": "never", 8 | 9 | "packageRules": [ 10 | { 11 | "automerge": true, 12 | "matchUpdateTypes": ["pin", "pinDigest"] 13 | }, 14 | { 15 | "enabled": false, 16 | "matchUpdateTypes": ["digest", "pinDigest", "pin"], 17 | "matchDepTypes": ["container"], 18 | "matchFileNames": [".github/workflows/**.yaml", ".github/workflows/**.yml"], 19 | }, 20 | { 21 | "automerge": true, 22 | "matchUpdateTypes": ["digest"], 23 | "matchDepNames": ["quay.io/centos-bootc/centos-bootc"], 24 | } 25 | ] 26 | } 27 | -------------------------------------------------------------------------------- /.github/semantic.yaml: -------------------------------------------------------------------------------- 1 | enabled: true 2 | titleOnly: true 3 | 
-------------------------------------------------------------------------------- /.github/workflows/build-dx.yml: -------------------------------------------------------------------------------- 1 | name: Build Bluefin LTS DX 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | schedule: 8 | - cron: "0 1 * * TUE" # Every Tuesday at 1am UTC 9 | merge_group: 10 | workflow_dispatch: 11 | 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.ref || github.run_id }} 14 | cancel-in-progress: true 15 | 16 | jobs: 17 | build: 18 | uses: ./.github/workflows/reusable-build-image.yml 19 | secrets: inherit 20 | with: 21 | image-name: bluefin-dx 22 | flavor: dx 23 | rechunk: ${{ github.event_name != 'pull_request' }} 24 | sbom: ${{ github.event_name != 'pull_request' }} 25 | publish: ${{ github.event_name != 'pull_request' }} 26 | -------------------------------------------------------------------------------- /.github/workflows/build-gdx.yml: -------------------------------------------------------------------------------- 1 | name: Build Bluefin LTS GDX 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | schedule: 8 | - cron: "0 1 * * TUE" # Every Tuesday at 1am UTC 9 | merge_group: 10 | workflow_dispatch: 11 | 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.ref || github.run_id }} 14 | cancel-in-progress: true 15 | 16 | jobs: 17 | build: 18 | uses: ./.github/workflows/reusable-build-image.yml 19 | secrets: inherit 20 | with: 21 | image-name: bluefin-gdx 22 | flavor: gdx 23 | rechunk: ${{ github.event_name != 'pull_request' }} 24 | sbom: ${{ github.event_name != 'pull_request' }} 25 | publish: ${{ github.event_name != 'pull_request' }} 26 | -------------------------------------------------------------------------------- /.github/workflows/build-regular-testing.yml: -------------------------------------------------------------------------------- 1 | name: Build Bluefin LTS Testing 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | 
schedule: 8 | - cron: "0 1 * * TUE" # Every Tuesday at 1am UTC 9 | merge_group: 10 | workflow_dispatch: 11 | 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.ref || github.run_id }} 14 | cancel-in-progress: true 15 | 16 | jobs: 17 | build: 18 | uses: ./.github/workflows/reusable-build-image.yml 19 | secrets: inherit 20 | with: 21 | image-name: bluefin 22 | rechunk: ${{ github.event_name != 'pull_request' }} 23 | sbom: ${{ github.event_name != 'pull_request' }} 24 | publish: ${{ github.event_name != 'pull_request' }} 25 | testing: true 26 | -------------------------------------------------------------------------------- /.github/workflows/build-regular.yml: -------------------------------------------------------------------------------- 1 | name: Build Bluefin LTS 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | schedule: 8 | - cron: "0 1 * * TUE" # Every Tuesday at 1am UTC 9 | merge_group: 10 | workflow_dispatch: 11 | 12 | concurrency: 13 | group: ${{ github.workflow }}-${{ github.ref || github.run_id }} 14 | cancel-in-progress: true 15 | 16 | jobs: 17 | build: 18 | uses: ./.github/workflows/reusable-build-image.yml 19 | secrets: inherit 20 | with: 21 | image-name: bluefin 22 | rechunk: ${{ github.event_name != 'pull_request' }} 23 | sbom: ${{ github.event_name != 'pull_request' }} 24 | publish: ${{ github.event_name != 'pull_request' }} 25 | -------------------------------------------------------------------------------- /.github/workflows/content-filter.yaml: -------------------------------------------------------------------------------- 1 | name: Check for Spammy Issue Comments 2 | 3 | on: 4 | issue_comment: 5 | types: [created, edited] 6 | 7 | permissions: 8 | issues: write 9 | 10 | jobs: 11 | comment-filter: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Comment filter 15 | uses: DecimalTurn/Comment-Filter@9c95bdb06ae1dd6b8185d58f52a07a2a71e19d94 # v0.2.2 16 | 
-------------------------------------------------------------------------------- /.github/workflows/generate-changelog-release.yml: -------------------------------------------------------------------------------- 1 | on: 2 | schedule: 3 | - cron: '05 11 * * *' # 11:05am UTC every day 4 | workflow_call: 5 | inputs: 6 | stream_name: 7 | description: "Release Tag (e.g. stream10, latest)" 8 | type: string 9 | default: "lts" 10 | required: false 11 | workflow_dispatch: 12 | inputs: 13 | handwritten: 14 | description: "Small Changelog about changes in this build" 15 | permissions: 16 | contents: write 17 | 18 | name: Generate Release 19 | jobs: 20 | generate-release: 21 | runs-on: ubuntu-latest 22 | strategy: 23 | fail-fast: false 24 | matrix: 25 | version: ["lts"] 26 | steps: 27 | - name: Checkout last 500 commits (for changelogs to work) 28 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 29 | with: 30 | fetch-depth: 500 31 | 32 | - name: Generate Release Text 33 | id: generate-release-text 34 | shell: bash 35 | run: | 36 | python3 .github/changelogs.py --workdir . 
"${{ matrix.version }}" ./output.env ./changelog.md --handwritten "${{ inputs.handwritten }}" 37 | source ./output.env 38 | echo "title=${TITLE}" >> $GITHUB_OUTPUT 39 | echo "tag=${TAG}" >> $GITHUB_OUTPUT 40 | 41 | - name: Create Release 42 | uses: softprops/action-gh-release@da05d552573ad5aba039eaac05058a918a7bf631 # v2 43 | if: (github.event.schedule == '50 5 * * 0' || contains(fromJson('["workflow_dispatch", "workflow_call","schedule"]'), github.event_name)) 44 | with: 45 | name: ${{ steps.generate-release-text.outputs.title }} 46 | tag_name: ${{ steps.generate-release-text.outputs.tag }} 47 | body_path: ./changelog.md 48 | make_latest: true 49 | 50 | -------------------------------------------------------------------------------- /.github/workflows/reusable-build-image.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Build Image 3 | on: 4 | workflow_call: 5 | inputs: 6 | image-name: 7 | description: "The name of the image to build" 8 | required: true 9 | type: string 10 | image-desc: 11 | description: "The description of the image to build" 12 | required: false 13 | type: string 14 | default: "Bluefin LTS, built on CentOS Stream with bootc" 15 | flavor: 16 | description: "The flavor of the image to build" 17 | required: false 18 | type: string 19 | default: "" 20 | platforms: 21 | description: "The platforms to build the image for" 22 | required: false 23 | type: string 24 | default: "amd64,arm64" 25 | centos-version: 26 | description: "The version of CentOS to build the image on" 27 | required: false 28 | type: string 29 | default: "stream10" 30 | rechunk: 31 | description: "Rechunk the build" 32 | required: false 33 | type: boolean 34 | default: true 35 | sbom: 36 | description: "Generate/publish SBOMs for the artifacts" 37 | required: false 38 | type: boolean 39 | default: true 40 | cleanup_runner: 41 | description: "Use the ublue cleanup action to clean up the runner before running the build" 42 | 
required: false 43 | type: boolean 44 | default: true 45 | testing: 46 | description: "Enable experimental things during builds, such as gnome backport. Will build an extra `-testing` tag." 47 | required: false 48 | type: boolean 49 | default: false 50 | publish: 51 | description: "Publish this image" 52 | required: false 53 | type: boolean 54 | # default: ${{ github.event_name != 'pull_request' }} 55 | default: true 56 | secrets: 57 | SIGNING_SECRET: 58 | description: "The private key used to sign the image" 59 | required: false 60 | 61 | env: 62 | IMAGE_NAME: ${{ inputs.image-name }} 63 | IMAGE_DESC: ${{ inputs.image-desc }} 64 | IMAGE_REGISTRY: "ghcr.io/${{ github.repository_owner }}" 65 | DEFAULT_TAG: "lts" 66 | CENTOS_VERSION: ${{ inputs.centos-version }} 67 | PLATFORMS: ${{ inputs.platforms }} 68 | 69 | jobs: 70 | generate_matrix: 71 | runs-on: ubuntu-latest 72 | outputs: 73 | matrix: ${{ steps.set-matrix.outputs.matrix }} 74 | steps: 75 | - name: Set matrix 76 | id: set-matrix 77 | env: 78 | PLATFORMS: "${{ inputs.platforms }}" 79 | ENABLE_TESTING: "${{ inputs.testing }}" 80 | run: | 81 | # turn the comma separated string into a list 82 | platforms=() 83 | IFS=',' read -r -a platforms <<< "${PLATFORMS}" 84 | 85 | MATRIX="{\"include\":[]}" 86 | for platform in "${platforms[@]}"; do 87 | MATRIX="$(echo "${MATRIX}" | jq ".include += [{\"platform\": \"${platform}\"}]")" 88 | done 89 | echo "matrix=$(echo "${MATRIX}" | jq -c '.')" >> $GITHUB_OUTPUT 90 | 91 | build_push: 92 | name: Build and push image 93 | runs-on: ${{ matrix.platform == 'amd64' && 'ubuntu-24.04' || 'ubuntu-24.04-arm' }} 94 | timeout-minutes: 60 95 | needs: generate_matrix 96 | strategy: 97 | fail-fast: false 98 | matrix: ${{fromJson(needs.generate_matrix.outputs.matrix)}} 99 | permissions: 100 | contents: read 101 | packages: write 102 | id-token: write 103 | outputs: 104 | image_tag: ${{ steps.build-image.outputs.image_tag }} 105 | 106 | steps: 107 | - name: Install dependencies 108 | if: 
matrix.platform == 'arm64' 109 | run: | 110 | sudo apt update -y 111 | sudo apt install -y \ 112 | podman 113 | 114 | - name: Checkout 115 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 116 | 117 | - name: Maximize build space 118 | if: ${{ matrix.platform != 'arm64' && inputs.cleanup_runner }} 119 | uses: ublue-os/remove-unwanted-software@cc0becac701cf642c8f0a6613bbdaf5dc36b259e # v9 120 | with: 121 | remove-codeql: true 122 | 123 | - name: Setup Just 124 | uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3 125 | 126 | - name: Check Just Syntax 127 | shell: bash 128 | run: just check 129 | 130 | - name: Build Image 131 | id: build-image 132 | shell: bash 133 | env: 134 | FLAVOR: ${{ inputs.flavor }} 135 | TESTING: ${{ inputs.testing }} 136 | run: | 137 | set -x 138 | just=$(which just) 139 | 140 | ENABLE_TESTING=0 141 | ENABLE_GDX=0 142 | ENABLE_DX=0 143 | if [[ "${TESTING}" == "true" ]] ; then 144 | export DEFAULT_TAG="${DEFAULT_TAG}-testing" 145 | echo "DEFAULT_TAG=${DEFAULT_TAG}" >> "${GITHUB_ENV}" 146 | ENABLE_TESTING=1 147 | fi 148 | if [[ "${FLAVOR}" =~ "gdx" ]] ; then 149 | ENABLE_GDX=1 150 | fi 151 | if [[ "${FLAVOR}" =~ "dx" ]] ; then 152 | ENABLE_DX=1 153 | fi 154 | 155 | sudo $just build "${IMAGE_NAME}" "${DEFAULT_TAG}" "${ENABLE_DX}" "${ENABLE_GDX}" "${ENABLE_TESTING}" 156 | echo "image_tag=${DEFAULT_TAG}" >> "${GITHUB_OUTPUT}" 157 | 158 | - name: Setup Syft 159 | id: setup-syft 160 | if: ${{ inputs.sbom && inputs.publish }} 161 | uses: anchore/sbom-action/download-syft@e11c554f704a0b820cbf8c51673f6945e0731532 # v0 162 | 163 | - name: Generate SBOM 164 | id: generate-sbom 165 | if: ${{ inputs.sbom && inputs.publish }} 166 | env: 167 | IMAGE: ${{ env.IMAGE_NAME }} 168 | DEFAULT_TAG: ${{ env.DEFAULT_TAG }} 169 | SYFT_CMD: ${{ steps.setup-syft.outputs.cmd }} 170 | run: | 171 | sudo systemctl start podman.socket 172 | OUTPUT_PATH="$(mktemp -d)/sbom.json" 173 | export SYFT_PARALLELISM=$(($(nproc)*2)) 174 | 
sudo "$SYFT_CMD" "${IMAGE}:${DEFAULT_TAG}" -o "spdx-json=${OUTPUT_PATH}" 175 | echo "OUTPUT_PATH=${OUTPUT_PATH}" >> "${GITHUB_OUTPUT}" 176 | 177 | - name: Run Rechunker 178 | if: ${{ inputs.rechunk && inputs.publish }} 179 | id: rechunk 180 | uses: hhd-dev/rechunk@ca77507401f8700bb0b25ebecbbf980a078cd180 # v1.2.2 181 | with: 182 | rechunk: ghcr.io/hhd-dev/rechunk:v1.2.1 183 | ref: localhost/${{ env.IMAGE_NAME }}:${{ env.DEFAULT_TAG }} 184 | prev-ref: ${{ env.IMAGE_REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.DEFAULT_TAG }} 185 | skip_compression: true 186 | version: ${{ env.CENTOS_VERSION }} 187 | 188 | - name: Load Image 189 | if: ${{ inputs.publish }} 190 | id: load 191 | env: 192 | RECHUNK_RAN: ${{ inputs.rechunk }} 193 | RECHUNK_REF: ${{ steps.rechunk.outputs.ref }} 194 | RECHUNK_LOCATION: ${{ steps.rechunk.outputs.location }} 195 | run: | 196 | if [ "${RECHUNK_RAN}" == "true" ] ; then 197 | IMAGE="$(podman pull "${RECHUNK_REF}")" 198 | sudo rm -rf "${RECHUNK_LOCATION}" 199 | else 200 | IMAGE="localhost/${IMAGE_NAME}:${DEFAULT_TAG}" 201 | fi 202 | podman image tag "${IMAGE}" "${IMAGE_REGISTRY}/${IMAGE_NAME}:${DEFAULT_TAG}" 203 | 204 | IMAGE="${IMAGE_REGISTRY}/${IMAGE_NAME}:${DEFAULT_TAG}" 205 | IMAGE_DIGEST="$(podman image inspect --format '{{.Digest}}' "${IMAGE}")" 206 | echo "image=${IMAGE}" >> "${GITHUB_OUTPUT}" 207 | echo "digest=${IMAGE_DIGEST}" >> "${GITHUB_OUTPUT}" 208 | 209 | - name: Login to GitHub Container Registry 210 | if: ${{ inputs.publish }} 211 | env: 212 | REGISTRY: ghcr.io 213 | run: | 214 | echo "${{ secrets.GITHUB_TOKEN }}" | podman login -u "${{ github.actor }}" --password-stdin "${REGISTRY}" 215 | echo "${{ secrets.GITHUB_TOKEN }}" | docker login -u "${{ github.actor }}" --password-stdin "${REGISTRY}" 216 | 217 | - name: Push to GHCR 218 | if: ${{ inputs.publish }} 219 | id: push 220 | env: 221 | IMAGE_REGISTRY: ${{ env.IMAGE_REGISTRY }} 222 | IMAGE_NAME: ${{ env.IMAGE_NAME }} 223 | IMAGE_DIGEST: ${{ steps.load.outputs.digest }} 224 | 
PLATFORM: ${{ matrix.platform }} 225 | MAX_RETRIES: 3 226 | run: | 227 | set -x 228 | podman tag "${IMAGE_REGISTRY}/${IMAGE_NAME}:${DEFAULT_TAG}" "${IMAGE_REGISTRY}/${IMAGE_NAME}:${DEFAULT_TAG}-${PLATFORM}" 229 | for i in $(seq "${MAX_RETRIES}"); do 230 | podman push --digestfile=/tmp/digestfile "${IMAGE_REGISTRY}/${IMAGE_NAME}:${DEFAULT_TAG}-${PLATFORM}" && break || sleep $((5 * i)); 231 | done 232 | REMOTE_IMAGE_DIGEST=$(cat /tmp/digestfile) 233 | echo "remote_image_digest=${REMOTE_IMAGE_DIGEST}" >> $GITHUB_OUTPUT 234 | 235 | - name: Install Cosign 236 | uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2 237 | if: ${{ inputs.publish }} 238 | 239 | - name: Sign Image 240 | if: ${{ inputs.publish }} 241 | run: | 242 | IMAGE_FULL="${IMAGE_REGISTRY}/${IMAGE_NAME}" 243 | cosign sign -y --key env://COSIGN_PRIVATE_KEY ${IMAGE_FULL}@${{ steps.push.outputs.remote_image_digest }} 244 | env: 245 | TAGS: ${{ steps.push.outputs.digest }} 246 | COSIGN_EXPERIMENTAL: false 247 | COSIGN_PRIVATE_KEY: ${{ secrets.SIGNING_SECRET }} 248 | 249 | - name: Add SBOM Attestation 250 | if: ${{ inputs.sbom }} 251 | env: 252 | IMAGE: ${{ env.IMAGE_REGISTRY }}/${{ env.IMAGE_NAME }} 253 | DIGEST: ${{ steps.push.outputs.remote_image_digest }} 254 | COSIGN_PRIVATE_KEY: ${{ secrets.SIGNING_SECRET }} 255 | SBOM_OUTPUT: ${{ steps.generate-sbom.outputs.OUTPUT_PATH }} 256 | run: | 257 | cd "$(dirname "$SBOM_OUTPUT")" 258 | 259 | # Compress the SBOM and create the predicate 260 | TYPE="urn:ublue-os:attestation:spdx+json+zstd:v1" 261 | zstd -19 "./sbom.json" -o "./sbom.json.zst" 262 | BASE64_SBOM_FILE="payload.b64" 263 | base64 "./sbom.json.zst" | tr -d '\n' > "${BASE64_SBOM_FILE}" 264 | PREDICATE_FILE="payload.json" 265 | jq -n \ 266 | --arg compression "zstd" \ 267 | --arg mediaType "application/spdx+json" \ 268 | --rawfile payload "${BASE64_SBOM_FILE}" \ 269 | '{compression: $compression, mediaType: $mediaType, payload: $payload}' \ 270 | > "$PREDICATE_FILE" 271 | rm -f 
"${BASE64_SBOM_FILE}" 272 | 273 | # Create the attestation 274 | cosign attest -y \ 275 | --predicate "${PREDICATE_FILE}" \ 276 | --type $TYPE \ 277 | --key env://COSIGN_PRIVATE_KEY \ 278 | "${IMAGE}@${DIGEST}" 279 | 280 | - name: Create Job Outputs 281 | if: ${{ inputs.publish }} 282 | env: 283 | IMAGE_NAME: ${{ env.IMAGE_NAME }} 284 | PLATFORM: ${{ matrix.platform }} 285 | TESTING: ${{ inputs.testing }} 286 | DIGEST: ${{ steps.push.outputs.remote_image_digest }} 287 | run: | 288 | mkdir -p /tmp/outputs/digests 289 | echo "${DIGEST}" > "/tmp/outputs/digests/${IMAGE_NAME}-${TESTING}-${PLATFORM}.txt" 290 | 291 | - name: Upload Output Artifacts 292 | if: ${{ inputs.publish }} 293 | uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 294 | with: 295 | name: ${{ env.IMAGE_NAME }}-${{ matrix.platform }} 296 | retention-days: 1 297 | if-no-files-found: error 298 | path: | 299 | /tmp/outputs/digests/*.txt 300 | 301 | manifest: 302 | name: Create ${{ inputs.image-name }}:${{ inputs.centos-version }} Manifest 303 | runs-on: ubuntu-latest 304 | if: always() 305 | needs: 306 | - generate_matrix 307 | - build_push 308 | container: 309 | image: cgr.dev/chainguard/wolfi-base:latest 310 | options: --privileged --security-opt seccomp=unconfined 311 | permissions: 312 | contents: read 313 | packages: write 314 | id-token: write 315 | outputs: 316 | image: ${{ steps.push_manifest.outputs.IMAGE }} 317 | digest: ${{ steps.push_manifest.outputs.DIGEST }} 318 | steps: 319 | - name: Install dependencies 320 | run: | 321 | apk add jq git podman uutils bash conmon crun netavark fuse-overlayfs 322 | ln -sf /bin/bash /bin/sh 323 | mkdir -p /etc/containers 324 | echo '{"default":[{"type":"insecureAcceptAnything"}]}' | jq . 
> /etc/containers/policy.json 325 | 326 | - name: Exit on failure 327 | env: 328 | JOBS: ${{ toJson(needs) }} 329 | run: | 330 | echo "Job status:" 331 | echo $JOBS | jq -r 'to_entries[] | " - \(.key): \(.value.result)"' 332 | 333 | for i in $(echo "${JOBS}" | jq -r 'to_entries[] | .value.result'); do 334 | if [ "$i" != "success" ] && [ "$i" != "skipped" ]; then 335 | echo "" 336 | echo "Status check not okay!" 337 | exit 1 338 | fi 339 | done 340 | 341 | - name: Get current date 342 | id: date 343 | run: | 344 | # Should generate a timestamp like what is defined on the ArtifactHub documentation 345 | # E.G: 2022-02-08T15:38:15Z' 346 | # https://artifacthub.io/docs/topics/repositories/container-images/ 347 | # https://linux.die.net/man/1/date 348 | echo "date=$(date -u +%Y\-%m\-%d\T%H\:%M\:%S\Z)" >> $GITHUB_OUTPUT 349 | 350 | - name: Extract numbers from input 351 | id: extract-numbers 352 | env: 353 | CENTOS_VERSION: ${{ env.CENTOS_VERSION }} 354 | run: | 355 | numbers_only=$(echo "${CENTOS_VERSION}" | tr -cd '0-9') 356 | echo "CENTOS_VERSION_NUMBER=${numbers_only}" >> "${GITHUB_ENV}" 357 | 358 | - name: Image Metadata 359 | uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5 360 | id: metadata 361 | with: 362 | tags: | 363 | type=raw,value=${{ env.DEFAULT_TAG }} 364 | type=raw,value=${{ env.DEFAULT_TAG }}.{{date 'YYYYMMDD'}} 365 | type=raw,value=${{ env.CENTOS_VERSION }} 366 | type=raw,value=${{ env.CENTOS_VERSION }}.{{date 'YYYYMMDD'}} 367 | type=raw,value=${{ env.CENTOS_VERSION_NUMBER }} 368 | type=raw,value=${{ env.CENTOS_VERSION_NUMBER }}.{{date 'YYYYMMDD'}} 369 | type=ref,event=pr 370 | flavor: | 371 | ${{ inputs.testing && 'suffix=-testing' || '' }} 372 | labels: | 373 | io.artifacthub.package.readme-url=https://raw.githubusercontent.com/${{ github.repository_owner }}/${{ env.IMAGE_NAME }}/refs/heads/main/README.md 374 | org.opencontainers.image.created=${{ steps.date.outputs.date }} 375 | org.opencontainers.image.description=${{ 
env.IMAGE_DESC }} 376 | org.opencontainers.image.documentation=https://docs.projectbluefin.io 377 | org.opencontainers.image.source=https://github.com/${{ github.repository_owner }}/${{ env.IMAGE_NAME }}/blob/main/Containerfile 378 | org.opencontainers.image.title=${{ env.IMAGE_NAME }} 379 | org.opencontainers.image.url=https://projectbluefin.io 380 | org.opencontainers.image.vendor=${{ github.repository_owner }} 381 | org.opencontainers.image.version=${{ env.CENTOS_VERSION }} 382 | io.artifacthub.package.deprecated=false 383 | io.artifacthub.package.keywords=bootc,centos,bluefin,ublue,universal-blue 384 | io.artifacthub.package.license=Apache-2.0 385 | io.artifacthub.package.logo-url=https://avatars.githubusercontent.com/u/120078124?s=200&v=4 386 | io.artifacthub.package.maintainers=[{\"name\":\"tulilirockz\",\"email\":\"tulilirockz@outlook.com\"},{\"name\":\"castrojo\",\"email\":\"jorge.castro@gmail.com\"}] 387 | io.artifacthub.package.prerelease=true 388 | containers.bootc=1 389 | 390 | - name: Fetch Build Outputs 391 | if: ${{ inputs.publish }} 392 | uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 393 | with: 394 | pattern: ${{ env.IMAGE_NAME }}-* 395 | merge-multiple: true 396 | path: /tmp/artifacts 397 | 398 | - name: Load Outputs 399 | if: ${{ inputs.publish }} 400 | id: load-outputs 401 | run: | 402 | DIGESTS_JSON="$(jq -n '{}')" 403 | for digest_file in /tmp/artifacts/*.txt; do 404 | # Extract the platform from the file name 405 | PLATFORM="$(basename "${digest_file}" | rev | cut -d'-' -f1 | rev | cut -d'.' -f1)" 406 | DIGEST="$(cat "${digest_file}")" 407 | # Add the platform and digest to the JSON object 408 | DIGESTS_JSON="$(echo "${DIGESTS_JSON}" | jq --arg key "${PLATFORM}" --arg value "${DIGEST}" '. 
+ {($key): $value}')" 409 | done 410 | echo "DIGESTS_JSON=$(echo "$DIGESTS_JSON" | jq -c '.')" >> "${GITHUB_OUTPUT}" 411 | 412 | - name: Create Manifest 413 | if: ${{ inputs.publish }} 414 | id: create-manifest 415 | env: 416 | IMAGE_REGISTRY: ${{ env.IMAGE_REGISTRY }} 417 | IMAGE_NAME: ${{ env.IMAGE_NAME }} 418 | run: | 419 | podman manifest create ${IMAGE_REGISTRY}/${IMAGE_NAME} 420 | echo "MANIFEST=${IMAGE_REGISTRY}/${IMAGE_NAME}" >> $GITHUB_OUTPUT 421 | 422 | - name: Populate Manifest 423 | if: ${{ inputs.publish }} 424 | env: 425 | MANIFEST: ${{ steps.create-manifest.outputs.MANIFEST }} 426 | DIGESTS_JSON: ${{ steps.load-outputs.outputs.DIGESTS_JSON }} 427 | LABELS: ${{ steps.metadata.outputs.labels }} 428 | PLATFORMS: "${{ inputs.platforms }}" 429 | run: | 430 | DIGESTS=$(echo "$DIGESTS_JSON" | jq -c '.') 431 | # turn the comma separated string into a list 432 | platforms=() 433 | IFS=',' read -r -a platforms <<< "${PLATFORMS}" 434 | for platform in ${platforms[@]}; do 435 | digest="$(echo $DIGESTS | jq -r ".$platform")" 436 | echo "Adding ${IMAGE_REGISTRY}/${IMAGE_NAME}@${digest} for ${platform}" 437 | podman manifest add "${MANIFEST}" "${IMAGE_REGISTRY}/${IMAGE_NAME}@${digest}" --arch "${platform}" 438 | done 439 | 440 | # Apply the labels to the manifest (separated by newlines) 441 | while IFS= read -r label; do 442 | echo "Applying label ${label} to manifest" 443 | podman manifest annotate --index --annotation "$label" "${MANIFEST}" 444 | done <<< "${LABELS}" 445 | 446 | - name: Login to GitHub Container Registry 447 | if: ${{ inputs.publish }} 448 | env: 449 | REGISTRY: ghcr.io 450 | run: | 451 | echo "${{ secrets.GITHUB_TOKEN }}" | podman login -u "${{ github.actor }}" --password-stdin "${REGISTRY}" 452 | 453 | - name: Push Manifest 454 | if: github.event_name != 'pull_request' 455 | id: push_manifest 456 | env: 457 | MANIFEST: ${{ steps.create-manifest.outputs.MANIFEST }} 458 | TAGS: ${{ steps.metadata.outputs.tags }} 459 | IMAGE_REGISTRY: ${{ 
env.IMAGE_REGISTRY }} 460 | IMAGE_NAME: ${{ env.IMAGE_NAME }} 461 | run: | 462 | while IFS= read -r tag; do 463 | podman manifest push --all=false --digestfile=/tmp/digestfile $MANIFEST $IMAGE_REGISTRY/$IMAGE_NAME:$tag 464 | done <<< "$TAGS" 465 | 466 | DIGEST=$(cat /tmp/digestfile) 467 | echo "DIGEST=$DIGEST" >> $GITHUB_OUTPUT 468 | echo "IMAGE=$IMAGE_REGISTRY/$IMAGE_NAME" >> $GITHUB_OUTPUT 469 | 470 | # Cosign throws errors when ran inside the Fedora container for one reason or another 471 | # so we move this to another step in order to run on Ubuntu 472 | sign: 473 | needs: manifest 474 | if: github.event_name != 'pull_request' 475 | runs-on: ubuntu-latest 476 | permissions: 477 | contents: read 478 | packages: write 479 | id-token: write 480 | steps: 481 | - name: Login to GitHub Container Registry 482 | if: ${{ inputs.publish }} 483 | env: 484 | REGISTRY: ghcr.io 485 | run: | 486 | echo "${{ secrets.GITHUB_TOKEN }}" | podman login -u "${{ github.actor }}" --password-stdin "${REGISTRY}" 487 | cat ${XDG_RUNTIME_DIR}/containers/auth.json > ~/.docker/config.json 488 | 489 | - name: Install Cosign 490 | uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2 491 | 492 | - name: Sign Manifest 493 | env: 494 | DIGEST: ${{ needs.manifest.outputs.digest }} 495 | IMAGE: ${{ needs.manifest.outputs.image }} 496 | COSIGN_EXPERIMENTAL: false 497 | COSIGN_PRIVATE_KEY: ${{ secrets.SIGNING_SECRET }} 498 | run: | 499 | cosign sign -y --key env://COSIGN_PRIVATE_KEY "${IMAGE}@${DIGEST}" 500 | -------------------------------------------------------------------------------- /.github/workflows/validate-renovate.yaml: -------------------------------------------------------------------------------- 1 | name: Validate Renovate Config 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - ".github/renovate.json5" 7 | - ".github/workflows/renovate.yml" 8 | push: 9 | branches: 10 | - main 11 | paths: 12 | - ".github/renovate.json5" 13 | - ".github/workflows/renovate.yml" 
14 | 15 | jobs: 16 | validate: 17 | runs-on: ubuntu-latest 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 21 | 22 | - name: Setup Node.js 23 | uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 24 | with: 25 | node-version: latest 26 | 27 | - name: Install dependencies 28 | shell: bash 29 | env: 30 | RENOVATE_VERSION: latest 31 | run: npm install -g renovate@${RENOVATE_VERSION} 32 | 33 | - name: Validate Renovate config 34 | shell: bash 35 | run: renovate-config-validator --strict 36 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | cosign.key 2 | output/ 3 | *.raw 4 | *.qcow2 5 | -------------------------------------------------------------------------------- /Containerfile: -------------------------------------------------------------------------------- 1 | ARG MAJOR_VERSION="${MAJOR_VERSION:-c10s}" 2 | ARG BASE_IMAGE_SHA="${BASE_IMAGE_SHA:-sha256-feea845d2e245b5e125181764cfbc26b6dacfb3124f9c8d6a2aaa4a3f91082ed}" 3 | FROM scratch as context 4 | 5 | COPY system_files /files 6 | COPY system_files_overrides /overrides 7 | COPY build_scripts /build_scripts 8 | 9 | ARG MAJOR_VERSION="${MAJOR_VERSION:-c10s}" 10 | FROM quay.io/centos-bootc/centos-bootc:$MAJOR_VERSION 11 | 12 | ARG ENABLE_DX="${ENABLE_DX:-0}" 13 | ARG ENABLE_GDX="${ENABLE_GDX:-0}" 14 | ARG ENABLE_TESTING="${ENABLE_TESTING:-0}" 15 | ARG IMAGE_NAME="${IMAGE_NAME:-bluefin}" 16 | ARG IMAGE_VENDOR="${IMAGE_VENDOR:-ublue-os}" 17 | ARG MAJOR_VERSION="${MAJOR_VERSION:-lts}" 18 | ARG SHA_HEAD_SHORT="${SHA_HEAD_SHORT:-deadbeef}" 19 | 20 | RUN --mount=type=tmpfs,dst=/opt \ 21 | --mount=type=tmpfs,dst=/tmp \ 22 | --mount=type=tmpfs,dst=/var \ 23 | --mount=type=tmpfs,dst=/boot \ 24 | --mount=type=bind,from=context,source=/,target=/run/context \ 25 | /run/context/build_scripts/build.sh 26 | 27 | # Makes `/opt` 
writeable by default 28 | # Needs to be here to make the main image build strict (no /opt there) 29 | RUN rm -rf /opt && ln -s /var/opt /opt 30 | -------------------------------------------------------------------------------- /Justfile: -------------------------------------------------------------------------------- 1 | export repo_organization := env("GITHUB_REPOSITORY_OWNER", "ublue-os") 2 | export image_name := env("IMAGE_NAME", "bluefin") 3 | export centos_version := env("CENTOS_VERSION", "stream10") 4 | export default_tag := env("DEFAULT_TAG", "lts") 5 | export bib_image := env("BIB_IMAGE", "quay.io/centos-bootc/bootc-image-builder:latest") 6 | 7 | alias build-vm := build-qcow2 8 | alias rebuild-vm := rebuild-qcow2 9 | alias run-vm := run-vm-qcow2 10 | 11 | [private] 12 | default: 13 | @just --list 14 | 15 | # Check Just Syntax 16 | [group('Just')] 17 | check: 18 | #!/usr/bin/env bash 19 | find . -type f -name "*.just" | while read -r file; do 20 | echo "Checking syntax: $file" 21 | just --unstable --fmt --check -f $file 22 | done 23 | echo "Checking syntax: Justfile" 24 | just --unstable --fmt --check -f Justfile 25 | 26 | # Fix Just Syntax 27 | [group('Just')] 28 | fix: 29 | #!/usr/bin/env bash 30 | find . 
-type f -name "*.just" | while read -r file; do 31 | echo "Checking syntax: $file" 32 | just --unstable --fmt -f $file 33 | done 34 | echo "Checking syntax: Justfile" 35 | just --unstable --fmt -f Justfile || { exit 1; } 36 | 37 | # Clean Repo 38 | [group('Utility')] 39 | clean: 40 | #!/usr/bin/env bash 41 | set -eoux pipefail 42 | touch _build 43 | find *_build* -exec rm -rf {} \; 44 | rm -f previous.manifest.json 45 | rm -f changelog.md 46 | rm -f output.env 47 | 48 | # Sudo Clean Repo 49 | [group('Utility')] 50 | [private] 51 | sudo-clean: 52 | just sudoif just clean 53 | 54 | # sudoif bash function 55 | [group('Utility')] 56 | [private] 57 | sudoif command *args: 58 | #!/usr/bin/env bash 59 | function sudoif(){ 60 | if [[ "${UID}" -eq 0 ]]; then 61 | "$@" 62 | elif [[ "$(command -v sudo)" && -n "${SSH_ASKPASS:-}" ]] && [[ -n "${DISPLAY:-}" || -n "${WAYLAND_DISPLAY:-}" ]]; then 63 | /usr/bin/sudo --askpass "$@" || exit 1 64 | elif [[ "$(command -v sudo)" ]]; then 65 | /usr/bin/sudo "$@" || exit 1 66 | else 67 | exit 1 68 | fi 69 | } 70 | sudoif {{ command }} {{ args }} 71 | 72 | # This Justfile recipe builds a container image using Podman. 73 | # 74 | # Arguments: 75 | # $target_image - The tag you want to apply to the image (default: bluefin). 76 | # $tag - The tag for the image (default: lts). 77 | # $dx - Enable DX (default: "0"). 78 | # $gdx - Enable GDX (default: "0"). 79 | # 80 | # DX: 81 | # Developer Experience (DX) is a feature that allows you to install the latest developer tools for your system. 82 | # Packages include VScode, Docker, Distrobox, and more. 83 | # GDX: https://docs.projectbluefin.io/gdx/ 84 | # GPU Developer Experience (GDX) creates a base as an AI and Graphics platform. 85 | # Installs Nvidia drivers, CUDA, and other tools. 86 | # 87 | # The script constructs the version string using the tag and the current date. 88 | # If the git working directory is clean, it also includes the short SHA of the current HEAD. 
89 | # 90 | # just build $target_image $tag $dx $gdx 91 | # 92 | # Example usage: 93 | # just build bluefin lts 1 0 1 94 | # 95 | # This will build an image 'bluefin:lts' with DX and GDX enabled. 96 | # 97 | 98 | # Build the image using the specified parameters 99 | build $target_image=image_name $tag=default_tag $dx="0" $gdx="0" $testing="0": 100 | #!/usr/bin/env bash 101 | 102 | # Get Version 103 | ver="${tag}-${centos_version}.$(date +%Y%m%d)" 104 | 105 | BUILD_ARGS=() 106 | BUILD_ARGS+=("--build-arg" "MAJOR_VERSION=${centos_version}") 107 | BUILD_ARGS+=("--build-arg" "IMAGE_NAME=${image_name}") 108 | BUILD_ARGS+=("--build-arg" "IMAGE_VENDOR=${repo_organization}") 109 | BUILD_ARGS+=("--build-arg" "ENABLE_DX=${dx}") 110 | BUILD_ARGS+=("--build-arg" "ENABLE_GDX=${gdx}") 111 | BUILD_ARGS+=("--build-arg" "ENABLE_TESTING=${testing}") 112 | if [[ -z "$(git status -s)" ]]; then 113 | BUILD_ARGS+=("--build-arg" "SHA_HEAD_SHORT=$(git rev-parse --short HEAD)") 114 | fi 115 | 116 | podman build \ 117 | "${BUILD_ARGS[@]}" \ 118 | --pull=newer \ 119 | --tag "${target_image}:${tag}" \ 120 | . 121 | 122 | # Command: _rootful_load_image 123 | # Description: This script checks if the current user is root or running under sudo. If not, it attempts to resolve the image tag using podman inspect. 124 | # If the image is found, it loads it into rootful podman. If the image is not found, it pulls it from the repository. 125 | # 126 | # Parameters: 127 | # $target_image - The name of the target image to be loaded or pulled. 128 | # $tag - The tag of the target image to be loaded or pulled. Default is 'default_tag'. 129 | # 130 | # Example usage: 131 | # _rootful_load_image my_image latest 132 | # 133 | # Steps: 134 | # 1. Check if the script is already running as root or under sudo. 135 | # 2. Check if target image is in the non-root podman container storage) 136 | # 3. If the image is found, load it into rootful podman using podman scp. 137 | # 4. 
If the image is not found, pull it from the remote repository into rootful podman. 138 | 139 | _rootful_load_image $target_image=image_name $tag=default_tag: 140 | #!/usr/bin/env bash 141 | set -eoux pipefail 142 | 143 | # Check if already running as root or under sudo 144 | if [[ -n "${SUDO_USER:-}" || "${UID}" -eq "0" ]]; then 145 | echo "Already root or running under sudo, no need to load image from user podman." 146 | exit 0 147 | fi 148 | 149 | # Try to resolve the image tag using podman inspect 150 | set +e 151 | resolved_tag=$(podman inspect -t image "${target_image}:${tag}" | jq -r '.[].RepoTags.[0]') 152 | return_code=$? 153 | set -e 154 | 155 | if [[ $return_code -eq 0 ]]; then 156 | # If the image is found, load it into rootful podman 157 | ID=$(just sudoif podman images --filter reference="${target_image}:${tag}" --format "'{{ '{{.ID}}' }}'") 158 | if [[ -z "$ID" ]]; then 159 | # If the image ID is not found, copy the image from user podman to root podman 160 | COPYTMP=$(mktemp -p "${PWD}" -d -t _build_podman_scp.XXXXXXXXXX) 161 | just sudoif TMPDIR=${COPYTMP} podman image scp ${UID}@localhost::"${target_image}:${tag}" root@localhost::"${target_image}:${tag}" 162 | rm -rf "${COPYTMP}" 163 | fi 164 | else 165 | # If the image is not found, pull it from the repository 166 | just sudoif podman pull "${target_image}:${tag}" 167 | fi 168 | 169 | # Build a bootc bootable image using Bootc Image Builder (BIB) 170 | # Converts a container image to a bootable image 171 | # Parameters: 172 | # target_image: The name of the image to build (ex. localhost/fedora) 173 | # tag: The tag of the image to build (ex. latest) 174 | # type: The type of image to build (ex.
qcow2, raw, iso) 175 | # config: The configuration file to use for the build (default: image.toml) 176 | 177 | # Example: just _rebuild-bib localhost/fedora latest qcow2 image.toml 178 | _build-bib $target_image $tag $type $config: (_rootful_load_image target_image tag) 179 | #!/usr/bin/env bash 180 | set -euo pipefail 181 | 182 | mkdir -p "output" 183 | 184 | echo "Cleaning up previous build" 185 | if [[ $type == iso ]]; then 186 | sudo rm -rf "output/bootiso" || true 187 | else 188 | sudo rm -rf "output/${type}" || true 189 | fi 190 | 191 | args="--type ${type} " 192 | args+="--use-librepo=True" 193 | 194 | if [[ $target_image == localhost/* ]]; then 195 | args+=" --local" 196 | fi 197 | 198 | sudo podman run \ 199 | --rm \ 200 | -it \ 201 | --privileged \ 202 | --pull=newer \ 203 | --net=host \ 204 | --security-opt label=type:unconfined_t \ 205 | -v $(pwd)/${config}:/config.toml:ro \ 206 | -v $(pwd)/output:/output \ 207 | -v /var/lib/containers/storage:/var/lib/containers/storage \ 208 | "${bib_image}" \ 209 | ${args} \ 210 | "${target_image}:${tag}" 211 | 212 | sudo chown -R $USER:$USER output 213 | 214 | # Podman build's the image from the Containerfile and creates a bootable image 215 | # Parameters: 216 | # target_image: The name of the image to build (ex. localhost/fedora) 217 | # tag: The tag of the image to build (ex. latest) 218 | # type: The type of image to build (ex. 
qcow2, raw, iso) 219 | # config: The configuration file to use for the build (default: image.toml) 220 | 221 | # Example: just _rebuild-bib localhost/fedora latest qcow2 image.toml 222 | _rebuild-bib $target_image $tag $type $config: (build target_image tag) && (_build-bib target_image tag type config) 223 | 224 | # Build a QCOW2 virtual machine image 225 | [group('Build Virtual Machine Image')] 226 | build-qcow2 $target_image=("localhost/" + image_name) $tag=default_tag: && (_build-bib target_image tag "qcow2" "image.toml") 227 | 228 | # Build a RAW virtual machine image 229 | [group('Build Virtual Machine Image')] 230 | build-raw $target_image=("localhost/" + image_name) $tag=default_tag: && (_build-bib target_image tag "raw" "image.toml") 231 | 232 | # Build an ISO virtual machine image 233 | [group('Build Virtual Machine Image')] 234 | build-iso $target_image=("localhost/" + image_name) $tag=default_tag: && (_build-bib target_image tag "iso" "iso.toml") 235 | 236 | # Rebuild a QCOW2 virtual machine image 237 | [group('Build Virtual Machine Image')] 238 | rebuild-qcow2 $target_image=("localhost/" + image_name) $tag=default_tag: && (_rebuild-bib target_image tag "qcow2" "image.toml") 239 | 240 | # Rebuild a RAW virtual machine image 241 | [group('Build Virtual Machine Image')] 242 | rebuild-raw $target_image=("localhost/" + image_name) $tag=default_tag: && (_rebuild-bib target_image tag "raw" "image.toml") 243 | 244 | # Rebuild an ISO virtual machine image 245 | [group('Build Virtual Machine Image')] 246 | rebuild-iso $target_image=("localhost/" + image_name) $tag=default_tag: && (_rebuild-bib target_image tag "iso" "iso.toml") 247 | 248 | # Run a virtual machine with the specified image type and configuration 249 | _run-vm $target_image $tag $type $config: 250 | #!/usr/bin/env bash 251 | set -eoux pipefail 252 | 253 | # Determine the image file based on the type 254 | image_file="output/${type}/disk.${type}" 255 | if [[ $type == iso ]]; then 256 |
image_file="output/bootiso/install.iso" 257 | fi 258 | 259 | # Build the image if it does not exist 260 | if [[ ! -f "${image_file}" ]]; then 261 | just "build-${type}" "$target_image" "$tag" 262 | fi 263 | 264 | # Determine an available port to use 265 | port=8006 266 | while grep -q :${port} <<< $(ss -tunalp); do 267 | port=$(( port + 1 )) 268 | done 269 | echo "Using Port: ${port}" 270 | echo "Connect to http://localhost:${port}" 271 | 272 | # Set up the arguments for running the VM 273 | run_args=() 274 | run_args+=(--rm --privileged) 275 | run_args+=(--pull=newer) 276 | run_args+=(--publish "127.0.0.1:${port}:8006") 277 | run_args+=(--env "CPU_CORES=4") 278 | run_args+=(--env "RAM_SIZE=8G") 279 | run_args+=(--env "DISK_SIZE=64G") 280 | run_args+=(--env "TPM=Y") 281 | run_args+=(--env "GPU=Y") 282 | run_args+=(--device=/dev/kvm) 283 | run_args+=(--volume "${PWD}/${image_file}":"/boot.${type}") 284 | run_args+=(docker.io/qemux/qemu-docker) 285 | 286 | # Run the VM and open the browser to connect 287 | podman run "${run_args[@]}" & 288 | xdg-open http://localhost:${port} 289 | fg "%podman" 290 | 291 | # Run a virtual machine from a QCOW2 image 292 | [group('Run Virtual Machine')] 293 | run-vm-qcow2 $target_image=("localhost/" + image_name) $tag=default_tag: && (_run-vm target_image tag "qcow2" "image.toml") 294 | 295 | # Run a virtual machine from a RAW image 296 | [group('Run Virtual Machine')] 297 | run-vm-raw $target_image=("localhost/" + image_name) $tag=default_tag: && (_run-vm target_image tag "raw" "image.toml") 298 | 299 | # Run a virtual machine from an ISO 300 | [group('Run Virtual Machine')] 301 | run-vm-iso $target_image=("localhost/" + image_name) $tag=default_tag: && (_run-vm target_image tag "iso" "iso.toml") 302 | 303 | # Run a virtual machine using systemd-vmspawn 304 | [group('Run Virtual Machine')] 305 | spawn-vm rebuild="0" type="qcow2" ram="6G": 306 | #!/usr/bin/env bash 307 | 308 | set -euo pipefail 309 | 310 | [ "{{ rebuild }}" -eq 1 ] && echo
"Rebuilding the ISO" && just build-vm {{ rebuild }} {{ type }} 311 | 312 | systemd-vmspawn \ 313 | -M "achillobator" \ 314 | --console=gui \ 315 | --cpus=2 \ 316 | --ram=$(echo {{ ram }}| /usr/bin/numfmt --from=iec) \ 317 | --network-user-mode \ 318 | --vsock=false --pass-ssh-key=false \ 319 | -i ./output/**/*.{{ type }} 320 | 321 | ########################## 322 | # 'customize-iso-build' # 323 | ########################## 324 | # Description: 325 | # Enables the manual customization of the osbuild manifest before running the ISO build 326 | # 327 | # Mount the configuration file and output directory 328 | # Clear the entrypoint to run the custom command 329 | 330 | # Run osbuild with the specified parameters 331 | customize-iso-build: 332 | sudo podman run \ 333 | --rm -it \ 334 | --privileged \ 335 | --pull=newer \ 336 | --net=host \ 337 | --security-opt label=type:unconfined_t \ 338 | -v $(pwd)/iso.toml:/config.toml:ro \ 339 | -v $(pwd)/output:/output \ 340 | -v /var/lib/containers/storage:/var/lib/containers/storage \ 341 | --entrypoint "" \ 342 | "${bib_image}" \ 343 | osbuild --store /store --output-directory /output /output/manifest-iso.json --export bootiso 344 | 345 | ########################## 346 | # 'patch-iso-branding' # 347 | ########################## 348 | # Description: 349 | # creates a custom branded ISO image. As per https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/7/html/anaconda_customization_guide/sect-iso-images#sect-product-img 350 | # Parameters: 351 | # override: A flag to determine if the final ISO should replace the original ISO (default is 0). 352 | # iso_path: The path to the original ISO file. 353 | # Runs a Podman container with Fedora image. Installs 'lorax' and 'mkksiso' tools inside the container. Creates a compressed 'product.img' 354 | # from the Branding images in the 'iso_files' directory. Uses 'mkksiso' to add the 'product.img' to the original ISO and creates 'final.iso' 355 | # in the output directory.
If 'override' is not 0, replaces the original ISO with the newly created 'final.iso'. 356 | 357 | # applies custom branding to an ISO image. 358 | patch-iso-branding override="0" iso_path="output/bootiso/install.iso": 359 | #!/usr/bin/env bash 360 | podman run \ 361 | --rm \ 362 | -it \ 363 | --pull=newer \ 364 | --privileged \ 365 | -v ./output:/output \ 366 | -v ./iso_files:/iso_files \ 367 | quay.io/centos/centos:stream10 \ 368 | bash -c 'dnf install -y lorax && \ 369 | mkdir /images && cd /iso_files/product && find . | cpio -c -o | gzip -9cv > /images/product.img && cd / \ 370 | && mkksiso --add images --volid bluefin-boot /{{ iso_path }} /output/final.iso' 371 | 372 | if [ {{ override }} -ne 0 ] ; then 373 | mv output/final.iso {{ iso_path }} 374 | fi 375 | 376 | # Runs shell check on all Bash scripts 377 | lint: 378 | /usr/bin/find . -iname "*.sh" -type f -exec shellcheck "{}" ';' 379 | 380 | # Runs shfmt on all Bash scripts 381 | format: 382 | /usr/bin/find . -iname "*.sh" -type f -exec shfmt --write "{}" ';' 383 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # UP FOR ADOPTION 2 | The Bluefin team has decided to not pursue a Bluefin LTS. This project will not be graduating out of beta. This project is up for adoption, please let us know if you are interested in rebranding this and pursuing the idea. We will continue to publish builds to give our beta testers time to migrate but we recommend doing so quickly. 
3 | 4 | # Bluefin LTS 5 | *Achillobator giganticus* 6 | 7 | [![Build Bluefin LTS](https://github.com/ublue-os/bluefin-lts/actions/workflows/build-regular.yml/badge.svg)](https://github.com/ublue-os/bluefin-lts/actions/workflows/build-regular.yml) 8 | [![Codacy Badge](https://app.codacy.com/project/badge/Grade/13d42ded3cf54250a71ad05aca7d5961)](https://app.codacy.com/gh/ublue-os/bluefin-lts/dashboard?utm_source=gh&utm_medium=referral&utm_content=&utm_campaign=Badge_grade) 9 | [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/10098/badge)](https://www.bestpractices.dev/projects/10098) 10 | [![Bluefin LTS on ArtifactHub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/bluefin)](https://artifacthub.io/packages/container/bluefin/bluefin) 11 | 12 | Larger, more lethal [Bluefin](https://projectbluefin.io). `bluefin:lts` is built on CentOS Stream 10. 13 | 14 | ![image](https://github.com/user-attachments/assets/2e160934-44e6-4aee-b2b8-accb3bcf0a41) 15 | 16 | ## Instructions 17 | 18 | Check [the documentation](https://docs.projectbluefin.io/lts) for the latest instructions. 
19 | 20 | ## Metrics 21 | 22 | ![Alt](https://repobeats.axiom.co/api/embed/3e29c59ccd003fe1939ce0bdfccdee2b14203541.svg "Repobeats analytics image") 23 | -------------------------------------------------------------------------------- /artifacthub-repo.yml: -------------------------------------------------------------------------------- 1 | repositoryID: d31cdc0c-6639-475b-9a51-35af5ca3f235 2 | owners: # (optional, used to claim repository ownership) 3 | - name: Jorge Castro 4 | email: jorge.castro@gmail.com 5 | #ignore: # (optional, packages that should not be indexed by Artifact Hub) 6 | # - name: package1 7 | # - name: package2 # Exact match 8 | -------------------------------------------------------------------------------- /build_scripts/00-workarounds.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -xeuo pipefail 4 | 5 | # This is a bucket list. We want to not have anything in this file at all. 6 | 7 | # Enable the same compose repos during our build that the centos-bootc image 8 | # uses during its build. This avoids downgrading packages in the image that 9 | # have strict NVR requirements. 10 | curl --retry 3 -Lo "/etc/yum.repos.d/compose.repo" "https://gitlab.com/redhat/centos-stream/containers/bootc/-/raw/c${MAJOR_VERSION_NUMBER}s/cs.repo" 11 | sed -i \ 12 | -e "s@- (BaseOS|AppStream)@& - Compose@" \ 13 | -e "s@\(baseos\|appstream\)@&-compose@" \ 14 | /etc/yum.repos.d/compose.repo 15 | cat /etc/yum.repos.d/compose.repo 16 | -------------------------------------------------------------------------------- /build_scripts/10-packages-image-base.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -xeuo pipefail 4 | 5 | # This is the base for a minimal GNOME system on CentOS Stream. 
6 | 7 | # This thing slows down downloads A LOT for no reason 8 | dnf remove -y subscription-manager 9 | 10 | dnf -y install centos-release-hyperscale-kernel 11 | dnf config-manager --set-disabled "centos-hyperscale,centos-hyperscale-kernel" 12 | dnf --enablerepo="centos-hyperscale" --enablerepo="centos-hyperscale-kernel" -y update kernel 13 | 14 | if [ "${ENABLE_TESTING}" == "1" ] ; then 15 | # GNOME 48 backport COPR 16 | dnf copr enable -y "@centoshyperscale/c${MAJOR_VERSION_NUMBER}s-gnome-48" 17 | dnf -y install glib2 18 | dnf copr enable -y jreilly1821/packages 19 | dnf -y install xdg-desktop-portal xdg-desktop-portal-gnome 20 | dnf copr disable -y jreilly1821/packages 21 | fi 22 | 23 | dnf -y install 'dnf-command(versionlock)' 24 | # This fixes a lot of skew issues on GDX because kernel-devel wont update then 25 | dnf versionlock add kernel kernel-devel kernel-devel-matched kernel-core kernel-modules kernel-modules-core kernel-modules-extra kernel-uki-virt 26 | 27 | dnf -y install "https://dl.fedoraproject.org/pub/epel/epel-release-latest-${MAJOR_VERSION_NUMBER}.noarch.rpm" 28 | dnf config-manager --set-enabled crb 29 | 30 | # Multimidia codecs 31 | dnf config-manager --add-repo=https://negativo17.org/repos/epel-multimedia.repo 32 | dnf config-manager --set-disabled epel-multimedia 33 | dnf -y install --enablerepo=epel-multimedia \ 34 | ffmpeg libavcodec @multimedia gstreamer1-plugins-{bad-free,bad-free-libs,good,base} lame{,-libs} libjxl ffmpegthumbnailer 35 | 36 | # `dnf group info Workstation` without GNOME 37 | dnf group install -y --nobest \ 38 | -x rsyslog* \ 39 | -x cockpit \ 40 | -x cronie* \ 41 | -x crontabs \ 42 | -x PackageKit \ 43 | -x PackageKit-command-not-found \ 44 | "Common NetworkManager submodules" \ 45 | "Core" \ 46 | "Fonts" \ 47 | "Guest Desktop Agents" \ 48 | "Hardware Support" \ 49 | "Printing Client" \ 50 | "Standard" \ 51 | "Workstation product core" 52 | 53 | # Minimal GNOME group. 
("Multimedia" adds most of the packages from the GNOME group. This should clear those up too.) 54 | # In order to reproduce this, get the packages with `dnf group info GNOME`, install them manually with dnf install and see all the packages that are already installed. 55 | # Other than that, I've removed a few packages we didnt want, those being a few GUI applications. 56 | dnf -y install \ 57 | -x PackageKit \ 58 | -x PackageKit-command-not-found \ 59 | -x gnome-software-fedora-langpacks \ 60 | "NetworkManager-adsl" \ 61 | "centos-backgrounds" \ 62 | "gdm" \ 63 | "gnome-bluetooth" \ 64 | "gnome-color-manager" \ 65 | "gnome-control-center" \ 66 | "gnome-initial-setup" \ 67 | "gnome-remote-desktop" \ 68 | "gnome-session-wayland-session" \ 69 | "gnome-settings-daemon" \ 70 | "gnome-shell" \ 71 | "gnome-software" \ 72 | "gnome-user-docs" \ 73 | "gvfs-fuse" \ 74 | "gvfs-goa" \ 75 | "gvfs-gphoto2" \ 76 | "gvfs-mtp" \ 77 | "gvfs-smb" \ 78 | "libsane-hpaio" \ 79 | "nautilus" \ 80 | "orca" \ 81 | "ptyxis" \ 82 | "sane-backends-drivers-scanners" \ 83 | "xdg-desktop-portal-gnome" \ 84 | "xdg-user-dirs-gtk" \ 85 | "yelp-tools" 86 | 87 | dnf -y install \ 88 | plymouth \ 89 | plymouth-system-theme \ 90 | fwupd \ 91 | systemd-{resolved,container,oomd} \ 92 | libcamera{,-{v4l2,gstreamer,tools}} 93 | 94 | # This package adds "[systemd] Failed Units: *" to the bashrc startup 95 | dnf -y remove console-login-helper-messages 96 | -------------------------------------------------------------------------------- /build_scripts/20-packages.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -xeuo pipefail 4 | 5 | dnf -y remove \ 6 | setroubleshoot 7 | 8 | dnf -y install \ 9 | -x gnome-extensions-app \ 10 | system-reinstall-bootc \ 11 | gnome-disk-utility \ 12 | distrobox \ 13 | fastfetch \ 14 | fpaste \ 15 | gnome-shell-extension-{dash-to-dock,caffeine} \ 16 | just \ 17 | powertop \ 18 | tuned-ppd \ 19 | fzf \ 20 | glow \ 21 | 
wl-clipboard \ 22 | gum \ 23 | jetbrains-mono-fonts-all \ 24 | buildah \ 25 | btrfs-progs \ 26 | xhost 27 | 28 | # Everything that depends on external repositories should be after this. 29 | # Make sure to set them as disabled and enable them only when you are going to use their packages. 30 | # We do, however, leave crb and EPEL enabled by default. 31 | 32 | dnf config-manager --add-repo "https://pkgs.tailscale.com/stable/centos/${MAJOR_VERSION_NUMBER}/tailscale.repo" 33 | dnf config-manager --set-disabled "tailscale-stable" 34 | # FIXME: tailscale EPEL10 request: https://bugzilla.redhat.com/show_bug.cgi?id=2349099 35 | dnf -y --enablerepo "tailscale-stable" install \ 36 | tailscale 37 | 38 | dnf -y copr enable ublue-os/packages 39 | dnf -y copr disable ublue-os/packages 40 | dnf -y --enablerepo copr:copr.fedorainfracloud.org:ublue-os:packages swap \ 41 | centos-logos bluefin-logos 42 | 43 | dnf -y --enablerepo copr:copr.fedorainfracloud.org:ublue-os:packages install \ 44 | -x bluefin-logos \ 45 | -x bluefin-readymade-config \ 46 | ublue-os-just \ 47 | ublue-os-luks \ 48 | ublue-os-signing \ 49 | ublue-os-udev-rules \ 50 | ublue-os-update-services \ 51 | ublue-{motd,fastfetch,bling,rebase-helper,setup-services,polkit-rules,brew} \ 52 | uupd \ 53 | bluefin-* 54 | 55 | # Upstream ublue-os-signing bug, we are using /usr/etc for the container signing and bootc gets mad at this 56 | # FIXME: remove this once https://github.com/ublue-os/packages/issues/245 is closed 57 | cp -avf /usr/etc/. 
/etc 58 | rm -rvf /usr/etc 59 | 60 | dnf -y copr enable ublue-os/staging 61 | dnf -y copr disable ublue-os/staging 62 | dnf -y --enablerepo copr:copr.fedorainfracloud.org:ublue-os:staging install \ 63 | gnome-shell-extension-{appindicator,blur-my-shell,search-light,logo-menu,gsconnect} 64 | 65 | dnf -y copr enable che/nerd-fonts "centos-stream-${MAJOR_VERSION_NUMBER}-$(arch)" 66 | dnf -y copr disable che/nerd-fonts 67 | dnf -y --enablerepo "copr:copr.fedorainfracloud.org:che:nerd-fonts" install \ 68 | nerd-fonts 69 | 70 | # This is required so homebrew works indefinitely. 71 | # Symlinking it makes it so whenever another GCC version gets released it will break if the user has updated it without- 72 | # the homebrew package getting updated through our builds. 73 | # We could get some kind of static binary for GCC but this is the cleanest and most tested alternative. This Sucks. 74 | dnf -y --setopt=install_weak_deps=False install gcc 75 | 76 | if [ "${ENABLE_TESTING}" == "1" ] ; then 77 | # We need the fedora (LATEST_MAJOR) builds because f41 and el10 namespaces under copr arent customizeable so we cant build using the g48 backport 78 | # Using the f(LATEST_MAJOR) should provide the dependencies we need just fine. 
79 | FEDORA_MAJOR_SPOOF=42 80 | dnf config-manager --add-repo "https://copr.fedorainfracloud.org/coprs/ublue-os/staging/repo/fedora-${FEDORA_MAJOR_SPOOF}/ublue-os-staging-fedora-${FEDORA_MAJOR_SPOOF}.repo" 81 | REPO_FILE="/etc/yum.repos.d/ublue-os-staging-fedora-${FEDORA_MAJOR_SPOOF}.repo" 82 | sed -i "s/\:staging/&:fedora/" $REPO_FILE 83 | sed -i "s/\$releasever/$FEDORA_MAJOR_SPOOF/" $REPO_FILE 84 | dnf config-manager --set-disabled "copr:copr.fedorainfracloud.org:ublue-os:staging:fedora" 85 | dnf -y \ 86 | --enablerepo "copr:copr.fedorainfracloud.org:ublue-os:staging" \ 87 | --enablerepo "copr:copr.fedorainfracloud.org:ublue-os:staging:fedora" \ 88 | install bazaar 89 | fi 90 | -------------------------------------------------------------------------------- /build_scripts/26-packages-post.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -xeuo pipefail 4 | 5 | # Fancy CentOS icon on the fastfetch 6 | sed -i "s/󰣛//g" /usr/share/ublue-os/fastfetch.jsonc 7 | 8 | # Fix 1969 date getting returned on Fastfetch (upstream issue) 9 | # FIXME: check if this issue is fixed upstream at some point. 
(28-02-2025) https://github.com/ostreedev/ostree/issues/1469 10 | sed -i -e "s@ls -alct /@&var/log@g" /usr/share/ublue-os/fastfetch.jsonc 11 | 12 | # Automatic wallpaper changing by month 13 | HARDCODED_RPM_MONTH="12" 14 | sed -i "/picture-uri/ s/${HARDCODED_RPM_MONTH}/$(date +%m)/" "/usr/share/glib-2.0/schemas/zz0-bluefin-modifications.gschema.override" 15 | glib-compile-schemas /usr/share/glib-2.0/schemas 16 | 17 | # Required for bluefin faces to work without conflicting with a ton of packages 18 | rm -f /usr/share/pixmaps/faces/* || echo "Expected directory deletion to fail" 19 | mv /usr/share/pixmaps/faces/bluefin/* /usr/share/pixmaps/faces 20 | rm -rf /usr/share/pixmaps/faces/bluefin 21 | 22 | # This should only be enabled on `-dx` 23 | sed -i "/^show-boxbuddy=.*/d" /etc/dconf/db/distro.d/04-bluefin-logomenu-extension 24 | sed -i "/^show-boxbuddy=.*/d" /usr/share/glib-2.0/schemas/zz0-bluefin-modifications.gschema.override 25 | sed -i "/.*io.github.dvlv.boxbuddyrs.*/d" /etc/ublue-os/system-flatpaks.list 26 | 27 | # Offline Bluefin documentation 28 | curl --retry 3 -Lo /tmp/bluefin.pdf https://github.com/ublue-os/bluefin-docs/releases/download/0.1/bluefin.pdf 29 | install -Dm0644 -t /usr/share/doc/bluefin/ /tmp/bluefin.pdf 30 | 31 | # Add Flathub by default 32 | mkdir -p /etc/flatpak/remotes.d 33 | curl --retry 3 -o /etc/flatpak/remotes.d/flathub.flatpakrepo "https://dl.flathub.org/repo/flathub.flatpakrepo" 34 | 35 | # move the custom just 36 | mv /usr/share/ublue-os/just/61-lts-custom.just /usr/share/ublue-os/just/60-custom.just 37 | 38 | # Generate initramfs image after installing Bluefin branding because of Plymouth subpackage 39 | # Add resume module so that hibernation works 40 | echo "add_dracutmodules+=\" resume \"" >/etc/dracut.conf.d/resume.conf 41 | KERNEL_SUFFIX="" 42 | QUALIFIED_KERNEL="$(rpm -qa | grep -P 'kernel-(|'"$KERNEL_SUFFIX"'-)(\d+\.\d+\.\d+)' | sed -E 's/kernel-(|'"$KERNEL_SUFFIX"'-)//' | tail -n 1)" 43 | /usr/bin/dracut --no-hostonly 
--kver "$QUALIFIED_KERNEL" --reproducible --zstd -v --add ostree -f "/lib/modules/$QUALIFIED_KERNEL/initramfs.img" 44 | -------------------------------------------------------------------------------- /build_scripts/40-services.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -xeuo pipefail 4 | 5 | sed -i 's|uupd|& --disable-module-distrobox|' /usr/lib/systemd/system/uupd.service 6 | 7 | # Enable sleep then hibernation by DEFAULT! 8 | sed -i 's/#HandleLidSwitch=.*/HandleLidSwitch=suspend-then-hibernate/g' /usr/lib/systemd/logind.conf 9 | sed -i 's/#HandleLidSwitchDocked=.*/HandleLidSwitchDocked=suspend-then-hibernate/g' /usr/lib/systemd/logind.conf 10 | sed -i 's/#HandleLidSwitchExternalPower=.*/HandleLidSwitchExternalPower=suspend-then-hibernate/g' /usr/lib/systemd/logind.conf 11 | sed -i 's/#SleepOperation=.*/SleepOperation=suspend-then-hibernate/g' /usr/lib/systemd/logind.conf 12 | systemctl enable brew-setup.service 13 | systemctl enable gdm.service 14 | systemctl enable fwupd.service 15 | systemctl enable rpm-ostree-countme.service 16 | systemctl --global enable podman-auto-update.timer 17 | systemctl enable rpm-ostree-countme.service 18 | systemctl disable rpm-ostree.service 19 | systemctl enable dconf-update.service 20 | systemctl disable mcelog.service 21 | systemctl enable tailscaled.service 22 | systemctl enable uupd.timer 23 | systemctl enable ublue-system-setup.service 24 | systemctl --global enable ublue-user-setup.service 25 | systemctl mask bootc-fetch-apply-updates.timer bootc-fetch-apply-updates.service 26 | systemctl enable check-sb-key.service 27 | 28 | # Disable lastlog display on previous failed login in GDM (This makes logins slow) 29 | authselect enable-feature with-silent-lastlog 30 | 31 | # Enable polkit rules for fingerprint sensors via fprintd 32 | authselect enable-feature with-fingerprint 33 | 34 | sed -i -e "s@PrivateTmp=.*@PrivateTmp=no@g" 
/usr/lib/systemd/system/systemd-resolved.service 35 | # FIXME: this does not yet work, the resolution service fails for somer reason 36 | # enable systemd-resolved for proper name resolution 37 | systemctl enable systemd-resolved.service 38 | -------------------------------------------------------------------------------- /build_scripts/90-image-info.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -xeuo pipefail 4 | 5 | IMAGE_REF="ostree-image-signed:docker://ghcr.io/${IMAGE_VENDOR}/${IMAGE_NAME}" 6 | IMAGE_INFO="/usr/share/ublue-os/image-info.json" 7 | IMAGE_FLAVOR="main" 8 | 9 | cat >$IMAGE_INFO <> /usr/share/ublue-os/just/60-custom.just 7 | -------------------------------------------------------------------------------- /build_scripts/overrides/aarch64/hwe/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ublue-os/bluefin-lts/93e8fa13d5f972371019a5a6bc7dbe2d6e59dc2a/build_scripts/overrides/aarch64/hwe/.gitkeep -------------------------------------------------------------------------------- /build_scripts/overrides/dx/00-packages.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -xeuo pipefail 4 | 5 | dnf install -y \ 6 | python3-ramalama 7 | 8 | # VSCode on the base image! 
9 | dnf config-manager --add-repo "https://packages.microsoft.com/yumrepos/vscode" 10 | dnf config-manager --set-disabled packages.microsoft.com_yumrepos_vscode 11 | dnf -y --enablerepo packages.microsoft.com_yumrepos_vscode --nogpgcheck install code 12 | 13 | dnf config-manager --add-repo "https://download.docker.com/linux/centos/docker-ce.repo" 14 | dnf config-manager --set-disabled docker-ce-stable 15 | dnf -y --enablerepo docker-ce-stable install \ 16 | docker-ce \ 17 | docker-ce-cli \ 18 | containerd.io \ 19 | docker-buildx-plugin \ 20 | docker-compose-plugin 21 | 22 | dnf -y --enablerepo copr:copr.fedorainfracloud.org:ublue-os:packages install \ 23 | libvirt \ 24 | libvirt-daemon-kvm \ 25 | libvirt-nss \ 26 | virt-install \ 27 | ublue-os-libvirt-workarounds 28 | 29 | STABLE_KUBE_VERSION="$(curl -L -s https://dl.k8s.io/release/stable.txt)" 30 | STABLE_KUBE_VERSION_MAJOR="${STABLE_KUBE_VERSION%.*}" 31 | GITHUB_LIKE_ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/aarch64/arm64/')" 32 | KIND_LATEST_VERSION="$(curl -L https://api.github.com/repos/kubernetes-sigs/kind/releases/latest | jq -r ".tag_name")" 33 | KZERO_LATEST_VERSION="$(curl -L https://api.github.com/repos/k0sproject/k0s/releases/latest | jq -r ".tag_name")" 34 | KZEROCTL_LATEST_VERSION="$(curl -L https://api.github.com/repos/k0sproject/k0sctl/releases/latest | jq -r ".tag_name")" 35 | KUBE_TMP="$(mktemp -d)" 36 | 37 | trap "rm -rf ${KUBE_TMP}" EXIT 38 | 39 | SHA_TYPE="256" 40 | KIND_BIN_NAME="kind-linux-${GITHUB_LIKE_ARCH}" 41 | DEFAULT_RETRY=3 42 | pushd "${KUBE_TMP}" 43 | curl --retry "${DEFAULT_RETRY}" -Lo "${KIND_BIN_NAME}" "https://github.com/kubernetes-sigs/kind/releases/download/${KIND_LATEST_VERSION}/kind-linux-${GITHUB_LIKE_ARCH}" 44 | curl --retry "${DEFAULT_RETRY}" -Lo "${KIND_BIN_NAME}.sha${SHA_TYPE}sum" "https://github.com/kubernetes-sigs/kind/releases/download/${KIND_LATEST_VERSION}/kind-linux-${GITHUB_LIKE_ARCH}.sha${SHA_TYPE}sum" 45 | curl --retry "${DEFAULT_RETRY}" -LO 
"https://dl.k8s.io/release/${STABLE_KUBE_VERSION}/bin/linux/${GITHUB_LIKE_ARCH}/kubectl" 46 | curl --retry "${DEFAULT_RETRY}" -LO "https://dl.k8s.io/release/${STABLE_KUBE_VERSION}/bin/linux/${GITHUB_LIKE_ARCH}/kubectl.sha${SHA_TYPE}" 47 | curl --retry "${DEFAULT_RETRY}" -LO "https://github.com/k0sproject/k0sctl/releases/download/${KZEROCTL_LATEST_VERSION}/k0sctl-linux-${GITHUB_LIKE_ARCH}" 48 | curl --retry "${DEFAULT_RETRY}" -Lo "kzeroctl-checksums.txt" "https://github.com/k0sproject/k0sctl/releases/download/${KZEROCTL_LATEST_VERSION}/checksums.txt" 49 | curl --retry "${DEFAULT_RETRY}" -LO "https://github.com/k0sproject/k0s/releases/download/${KZERO_LATEST_VERSION}/k0s-${KZERO_LATEST_VERSION}-${GITHUB_LIKE_ARCH}" 50 | curl --retry "${DEFAULT_RETRY}" -Lo "kzero-checksums.txt" "https://github.com/k0sproject/k0s/releases/download/${KZERO_LATEST_VERSION}/sha256sums.txt" 51 | 52 | grep "k0s-${KZERO_LATEST_VERSION}-${GITHUB_LIKE_ARCH}" kzero-checksums.txt | grep -v "sig\|exe" | sha256sum --strict --check 53 | grep "k0sctl-linux-${GITHUB_LIKE_ARCH}" kzeroctl-checksums.txt | sha256sum --strict --check 54 | "sha${SHA_TYPE}sum" --strict --check "${KUBE_TMP}/${KIND_BIN_NAME}.sha${SHA_TYPE}sum" 55 | echo "$(cat kubectl.sha256) kubectl" | sha256sum --strict --check 56 | 57 | install -Dpm0755 "${KIND_BIN_NAME}" "/usr/bin/kind" 58 | install -Dpm0755 "./kubectl" "/usr/bin/kubectl" 59 | install -Dpm0755 "./k0sctl-linux-${GITHUB_LIKE_ARCH}" "/usr/bin/k0sctl" 60 | install -Dpm0755 "./k0s-${KZERO_LATEST_VERSION}-${GITHUB_LIKE_ARCH}" "/usr/bin/k0s" 61 | popd 62 | 63 | -------------------------------------------------------------------------------- /build_scripts/overrides/dx/05-dconf.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -xeuo pipefail 4 | 5 | # FIXME: make this part prettier, i dont know how to do it right now 6 | cat >/etc/dconf/db/distro.d/05-dx-logomenu-extension 
</usr/share/glib-2.0/schemas/zz1-dx-modifications.gschema.override </tmp/fake-uname </usr/lib/modprobe.d/00-nouveau-blacklist.conf </usr/lib/bootc/kargs.d/00-nvidia.toml <"${ANNOYING_JQ_WORKAROUND}" <' 12 | if functions -q fish_is_root_user; and fish_is_root_user 13 | if set -q fish_color_cwd_root 14 | set color_cwd $fish_color_cwd_root 15 | end 16 | set suffix '#' 17 | end 18 | 19 | # Detect if we are in a container 20 | if test -n "$CONTAINER_ID" 21 | set -g prompt_host "[$CONTAINER_ID]" 22 | set -g prefix_icon "📦 " 23 | else 24 | set -g prompt_host "$hostname" 25 | set -g prefix_icon "" 26 | end 27 | 28 | # Write pipestatus 29 | # If the status was carried over (if no command is issued or if `set` leaves the status untouched), don't bold it. 30 | set -l bold_flag --bold 31 | set -q __fish_prompt_status_generation; or set -g __fish_prompt_status_generation $status_generation 32 | if test $__fish_prompt_status_generation = $status_generation 33 | set bold_flag 34 | end 35 | set __fish_prompt_status_generation $status_generation 36 | set -l status_color (set_color $fish_color_status) 37 | set -l statusb_color (set_color $bold_flag $fish_color_status) 38 | set -l prompt_status (__fish_print_pipestatus "[" "]" "|" "$status_color" "$statusb_color" $last_pipestatus) 39 | 40 | echo -n -s $prefix_icon (set_color $fish_color_user) "$USER" $normal "@" $prompt_host' ' (set_color $color_cwd) (prompt_pwd) $normal (fish_vcs_prompt) $normal " "$prompt_status $suffix " " 41 | end 42 | -------------------------------------------------------------------------------- /system_files/usr/share/ublue-os/firefox-config/01-bluefin-global.js: -------------------------------------------------------------------------------- 1 | // Bluefin Global 2 | pref("gfx.webrender.all", true); 3 | pref("media.ffmpeg.vaapi.enabled", true); 4 | -------------------------------------------------------------------------------- /system_files/usr/share/ublue-os/just/10-update.just: 
-------------------------------------------------------------------------------- 1 | # vim: set ft=make : 2 | 3 | alias upgrade := update 4 | 5 | update: 6 | #!/usr/bin/bash 7 | if systemctl cat -- uupd.timer &> /dev/null; then 8 | SERVICE="uupd.service" 9 | else 10 | SERVICE="rpm-ostreed-automatic.service" 11 | fi 12 | if systemctl is-active --quiet "$SERVICE"; then 13 | echo "automatic updates are currently running, use \`journalctl -fexu $SERVICE\` to see logs" 14 | exit 1 15 | fi 16 | # rpm-ostree used due to bootc upgrade not supporting local layered packages 17 | sudo bootc upgrade 18 | # Updates system Flatpaks 19 | if flatpak remotes | grep -q system; then 20 | flatpak update -y 21 | fi 22 | # Update user Flatpaks 23 | if flatpak remotes | grep -q user; then 24 | flatpak update --user -y 25 | fi 26 | # Guard Brew if the user does not own brew/doesn't exist 27 | if [[ -O /var/home/linuxbrew/.linuxbrew/bin/brew ]]; then 28 | # Upgrade will run brew update if needed 29 | /var/home/linuxbrew/.linuxbrew/bin/brew upgrade 30 | fi 31 | 32 | alias auto-update := toggle-updates 33 | 34 | # Turn automatic updates on or off 35 | toggle-updates ACTION="prompt": 36 | #!/usr/bin/bash 37 | source /usr/lib/ujust/ujust.sh 38 | CURRENT_STATE="Disabled" 39 | if systemctl is-enabled ublue-update.timer | grep -q enabled; then 40 | CURRENT_STATE="Enabled" 41 | fi 42 | OPTION={{ ACTION }} 43 | if [ "$OPTION" == "prompt" ]; then 44 | echo "Automatic updates are currently: ${bold}${CURRENT_STATE}${normal}" 45 | echo "Enable or Disable automatic updates?" 46 | OPTION=$(ugum choose Enable Disable) 47 | elif [ "$OPTION" == "help" ]; then 48 | echo "Usage: ujust toggle-updates