├── .github └── workflows │ ├── checks.yml │ └── update-versions.yml ├── .gitignore ├── LICENSE ├── README.md ├── antora.yml ├── ci ├── check.py └── update-versions.py ├── docsbuilder.sh ├── modules └── ROOT │ ├── assets │ └── images │ │ ├── fcos-vertical.svg │ │ ├── fedoracoreos-logo.svg │ │ ├── hyperv-actions.png │ │ ├── hyperv-disk.png │ │ ├── hyperv-new.png │ │ ├── hyperv-secure-boot.png │ │ ├── hyperv-select-server.png │ │ ├── hyperv-switch-create.png │ │ ├── notfound.svg │ │ ├── raspberry-pi-imager.png │ │ ├── vfkit.png │ │ └── welcomefedoracoreos.jpg │ ├── nav.adoc │ └── pages │ ├── access-recovery.adoc │ ├── alternatives.adoc │ ├── audit.adoc │ ├── authentication.adoc │ ├── auto-updates.adoc │ ├── bare-metal.adoc │ ├── bootloader-updates.adoc │ ├── composefs.adoc │ ├── counting.adoc │ ├── customize-nic.adoc │ ├── debugging-kernel-crashes.adoc │ ├── debugging-with-toolbox.adoc │ ├── docker-ce.adoc │ ├── emergency-shell.adoc │ ├── faq.adoc │ ├── fcct-config.adoc │ ├── fcos-projects.adoc │ ├── getting-started-aws.adoc │ ├── getting-started-libvirt.adoc │ ├── getting-started.adoc │ ├── grub-password.adoc │ ├── hostname.adoc │ ├── index.adoc │ ├── kernel-args.adoc │ ├── live-booting.adoc │ ├── live-reference.adoc │ ├── major-changes.adoc │ ├── managing-files.adoc │ ├── manual-rollbacks.adoc │ ├── migrate-ah.adoc │ ├── migrate-cl.adoc │ ├── os-extensions.adoc │ ├── platforms.adoc │ ├── producing-ign.adoc │ ├── provisioning-aliyun.adoc │ ├── provisioning-applehv.adoc │ ├── provisioning-aws.adoc │ ├── provisioning-azure.adoc │ ├── provisioning-digitalocean.adoc │ ├── provisioning-exoscale.adoc │ ├── provisioning-gcp.adoc │ ├── provisioning-hetzner.adoc │ ├── provisioning-hyperv.adoc │ ├── provisioning-ibmcloud.adoc │ ├── provisioning-kubevirt.adoc │ ├── provisioning-libvirt.adoc │ ├── provisioning-nutanix.adoc │ ├── provisioning-openstack.adoc │ ├── provisioning-qemu.adoc │ ├── provisioning-raspberry-pi4.adoc │ ├── provisioning-virtualbox.adoc │ ├── 
provisioning-vmware.adoc │ ├── provisioning-vultr.adoc │ ├── proxy.adoc │ ├── remote-ign.adoc │ ├── running-containers.adoc │ ├── storage.adoc │ ├── stream-metadata.adoc │ ├── sysconfig-configure-swaponzram.adoc │ ├── sysconfig-configure-wireguard.adoc │ ├── sysconfig-enabling-wifi.adoc │ ├── sysconfig-network-configuration.adoc │ ├── sysconfig-setting-keymap.adoc │ ├── sysctl.adoc │ ├── time-zone.adoc │ ├── tutorial-autologin.adoc │ ├── tutorial-conclusion.adoc │ ├── tutorial-containers.adoc │ ├── tutorial-services.adoc │ ├── tutorial-setup.adoc │ ├── tutorial-updates.adoc │ ├── tutorial-user-systemd-unit-on-boot.adoc │ ├── update-barrier-signing-keys.adoc │ └── update-streams.adoc ├── nginx.conf └── site.yml /.github/workflows/checks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Checks 3 | 4 | on: 5 | push: 6 | branches: [main] 7 | pull_request: 8 | branches: [main] 9 | 10 | permissions: 11 | contents: read 12 | 13 | jobs: 14 | butane: 15 | name: Validate Butane configs 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Check out repository 19 | uses: actions/checkout@v3 20 | - name: Run validator 21 | run: ci/check.py -v 22 | -------------------------------------------------------------------------------- /.github/workflows/update-versions.yml: -------------------------------------------------------------------------------- 1 | name: Update software versions 2 | 3 | on: 4 | schedule: 5 | - cron: '0 */6 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | contents: write 10 | 11 | jobs: 12 | update-versions: 13 | name: Update versions 14 | if: ${{ github.repository_owner == 'coreos' || github.event_name != 'schedule' }} 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout 18 | uses: actions/checkout@v4 19 | - name: Update versions 20 | env: 21 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 22 | run: ci/update-versions.py 23 | - name: Create commit 24 | run: | 25 | git config user.name 'CoreOS 
Bot' 26 | git config user.email coreosbot@fedoraproject.org 27 | if ! git diff --quiet --exit-code; then 28 | git commit -am "antora: update software versions ✨" \ 29 | -m "Triggered by update-versions GitHub Action." 30 | fi 31 | - name: Open pull request 32 | uses: peter-evans/create-pull-request@v6 33 | with: 34 | token: ${{ secrets.COREOSBOT_RELENG_TOKEN }} 35 | branch: update-versions 36 | push-to-fork: coreosbot-releng/fedora-coreos-docs 37 | title: "antora: update software versions ✨" 38 | body: "Created by update-versions [GitHub workflow](${{ github.server_url }}/${{ github.repository }}/actions/workflows/update-versions.yml) ([source](${{ github.server_url }}/${{ github.repository }}/blob/main/.github/workflows/update-versions.yml))." 39 | committer: "CoreOS Bot " 40 | author: "CoreOS Bot " 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | cache 3 | public 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Fedora Docs Template 2 | 3 | This repository contains the Fedora CoreOS documentation. The format is [AsciiDoc](https://asciidoctor.org/docs/asciidoc-syntax-quick-reference/) to enable integration into the official [Fedora documentation](https://docs.fedoraproject.org/en-US/docs/). 4 | 5 | ## Structure 6 | 7 | ``` 8 | |-- README.md 9 | |-- antora.yml ....................... 1. 10 | |-- docsbuilder.sh ................... 2. 11 | |-- nginx.conf ....................... 3. 12 | |-- site.yml ......................... 4. 13 | `-- modules 14 | `-- ROOT ......................... 5. 15 | |-- assets 16 | | `-- images ............... 6. 17 | | `-- * 18 | |-- nav.adoc ................. 7. 19 | `-- pages .................... 8. 20 | `-- *.adoc 21 | ``` 22 | 23 | 1. Metadata definition. 24 | 2. 
A script that does a local build. It shows a preview of the site in a web browser by running a local web server. Uses Podman or Docker. 25 | 3. A configuration file used by the local preview web server. 26 | 4. A definition file for the build script. 27 | 5. A "root module of this documentation component". Please read below for an explanation. 28 | 6. **Images** to be used on any page. 29 | 7. **Menu definition.** Also defines the hierarchy of all the pages. 30 | 8. **Pages with the actual content.** They can also be organised into subdirectories if desired. 31 | 32 | ## Components and Modules 33 | 34 | Antora introduces two new terms: 35 | 36 | * **Component** — Simply put, a component is a part of the documentation website with its own menu. Components can also be versioned. In the Fedora Docs, we use separate components for user documentation, the Fedora Project, Fedora council, Mindshare, FESCO, but also subprojects such as CommOps or Modularity. 37 | * **Module** — A component can be broken down into multiple modules. Modules still share a single menu on the site, but their sources can be stored in different git repositories, even owned by different groups. The default module is called "ROOT" (that's what is in this example). If you don't want to use multiple modules, only use "ROOT". But to define more modules, simply duplicate the "ROOT" directory and name it anything you want. You can store modules in one or more git repositories. 38 | 39 | ## Local preview 40 | 41 | This repo includes a script to build and preview the contents of this repository. 42 | 43 | **NOTE**: Please note that if you reference pages from other repositories, such links will be broken in this local preview as it only builds this repository. If you want to rebuild the whole Fedora Docs site, please see [the Fedora Docs build repository](https://pagure.io/fedora-docs/docs-fp-o/) for instructions. 44 | 45 | The script works on Fedora (using Podman or Docker) and macOS (using Docker). 
46 | 47 | To build and preview the site, run: 48 | 49 | ``` 50 | $ ./docsbuilder.sh -p 51 | ``` 52 | 53 | The result will be available at http://localhost:8080 54 | 55 | To stop the preview: 56 | 57 | ``` 58 | $ ./docsbuilder.sh -k 59 | 60 | ``` 61 | 62 | ### Installing Podman on Fedora 63 | 64 | Fedora Workstation doesn't come with Podman preinstalled by default — so you might need to install it using the following command: 65 | 66 | ``` 67 | $ sudo dnf install podman 68 | ``` 69 | 70 | ### Preview as a part of the whole Fedora Docs site 71 | 72 | You can also build the whole Fedora Docs site locally to see your changes in the whole context. 73 | This is especially useful for checking if your `xref` links work properly. 74 | 75 | To do this, you need to clone the main [Fedora Docs build repository](https://pagure.io/fedora-docs/docs-fp-o), modify the `site.yml` file to reference a repo with your changes, and build it. 76 | Steps: 77 | 78 | Clone the main repository and cd into it: 79 | 80 | ``` 81 | $ git clone https://pagure.io/fedora-docs/docs-fp-o.git 82 | $ cd docs-fp-o 83 | ``` 84 | 85 | Find a reference to the repository you're changing in the `site.yml` file, and change it so it points to your change. 86 | So for example, if I made a modification to the Modularity docs, I would find: 87 | 88 | ``` 89 | ... 90 | - url: https://pagure.io/fedora-docs/modularity.git 91 | branches: 92 | - master 93 | ... 94 | ``` 95 | 96 | And replace it with a pointer to my fork: 97 | ``` 98 | ... 99 | - url: https://pagure.io/forks/asamalik/fedora-docs/modularity.git 100 | branches: 101 | - master 102 | ... 103 | ``` 104 | 105 | I could also point to a local repository, using `HEAD` as a branch to preview what's changed without needing to make a commit. 106 | 107 | **Note:** I would need to move the repository under the `docs-fp-o` directory, because the builder won't see anything above. 
108 | So I would need to create a `repositories` directory in `docs-fp-o` and copy my repository into it. 109 | 110 | ``` 111 | ... 112 | - url: ./repositories/modularity 113 | branches: 114 | - HEAD 115 | ... 116 | ``` 117 | 118 | To build the whole site, I would run the following in the `docs-fp-o` directory. 119 | 120 | ``` 121 | $ ./docsbuilder.sh -p 122 | ``` 123 | # License 124 | 125 | SPDX-License-Identifier: CC-BY-SA-4.0 126 | -------------------------------------------------------------------------------- /antora.yml: -------------------------------------------------------------------------------- 1 | # Automatically modified by update-versions.py; comments will not be preserved 2 | 3 | name: fedora-coreos 4 | title: Fedora CoreOS 5 | version: master 6 | start_page: ROOT:index.adoc 7 | nav: 8 | - modules/ROOT/nav.adoc 9 | asciidoc: 10 | attributes: 11 | stable-version: 42.20250512.3.0 12 | ignition-version: 2.21.0 13 | butane-version: 0.24.0 14 | butane-latest-stable-spec: 1.6.0 15 | -------------------------------------------------------------------------------- /ci/check.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # 3 | # Find all Butane configs in the doc tree, use the podman Butane container 4 | # to run them through butane --strict, and fail on any errors. 5 | # 6 | # A Butane config looks like this: 7 | # 8 | # [source,yaml] 9 | # ---- 10 | # variant:[...] 11 | # ---- 12 | # 13 | # If variant: is missing, we print a warning but continue, since there 14 | # might be [source,yaml] documents that aren't Butane configs. 
15 | 16 | import argparse 17 | import os 18 | import re 19 | import subprocess 20 | import sys 21 | import tempfile 22 | import textwrap 23 | 24 | ERR = '\x1b[1;31m' 25 | WARN = '\x1b[1;33m' 26 | RESET = '\x1b[0m' 27 | 28 | container = os.getenv('BUTANE_CONTAINER', 'quay.io/coreos/butane:release') 29 | matcher = re.compile(r'^\[source,\s*yaml\]\n----\n(.+?\n)----$', 30 | re.MULTILINE | re.DOTALL) 31 | 32 | parser = argparse.ArgumentParser(description='Run validations on docs.') 33 | parser.add_argument('-v', '--verbose', action='store_true', 34 | help='log all detected Butane configs') 35 | args = parser.parse_args() 36 | 37 | def handle_error(e): 38 | raise e 39 | 40 | 41 | # List of files required during verification 42 | tmpfiles = { 43 | # tutorial-services.adoc 44 | os.path.join('public-ipv4.sh'): '#!/bin/bash\ntrue', 45 | os.path.join('issuegen-public-ipv4.service'): '[Unit]\nBefore=systemd-user-sessions.service\n[Install]\nWantedBy=multi-user.target', 46 | # authentication.adoc 47 | os.path.join('users', 'core', 'id_rsa.pub'): 'ssh-rsa AAAAB', 48 | os.path.join('users', 'jlebon', 'id_rsa.pub'): 'ssh-rsa AAAAB', 49 | os.path.join('users', 'jlebon', 'id_ed25519.pub'): 'ssh-ed25519 AAAAC', 50 | os.path.join('users', 'miabbott', 'id_rsa.pub'): 'ssh-rsa AAAAB', 51 | # tutorial-containers.adoc, tutorial-setup.adoc, tutorial-updates.adoc, tutorial-user-systemd-unit-on-boot.adoc 52 | os.path.join('ssh-key.pub'): 'ssh-rsa AAAAB', 53 | } 54 | 55 | ret = 0 56 | with tempfile.TemporaryDirectory() as tmpdocs: 57 | for path, contents in tmpfiles.items(): 58 | os.makedirs(os.path.join(tmpdocs, os.path.dirname(path)), exist_ok=True) 59 | with open(os.path.join(tmpdocs, path), 'w') as fh: 60 | fh.write(contents) 61 | for dirpath, dirnames, filenames in os.walk('.', onerror=handle_error): 62 | dirnames.sort() # walk in sorted order 63 | for filename in sorted(filenames): 64 | filepath = os.path.join(dirpath, filename) 65 | if not filename.endswith('.adoc'): 66 | continue 67 | 
with open(filepath) as fh: 68 | filedata = fh.read() 69 | # Iterate over YAML source blocks 70 | for match in matcher.finditer(filedata): 71 | bu = match.group(1) 72 | buline = filedata.count('\n', 0, match.start(1)) + 1 73 | if not bu.startswith('variant:'): 74 | print(f'{WARN}Ignoring non-Butane YAML at {filepath}:{buline}{RESET}') 75 | continue 76 | if args.verbose: 77 | print(f'Checking Butane config at {filepath}:{buline}') 78 | result = subprocess.run( 79 | ['podman', 'run', '--rm', '-i', '-v=' + tmpdocs + ':/files-dir', container, '--strict', '--files-dir=/files-dir'], 80 | universal_newlines=True, # can be spelled "text" on >= 3.7 81 | input=bu, 82 | stdout=subprocess.DEVNULL, 83 | stderr=subprocess.PIPE) 84 | if result.returncode != 0: 85 | formatted = textwrap.indent(result.stderr.strip(), ' ') 86 | # Not necessary for ANSI terminals, but required by GitHub's 87 | # log renderer 88 | formatted = ERR + formatted.replace('\n', '\n' + ERR) 89 | print(f'{ERR}Invalid Butane config at {filepath}:{buline}:\n{formatted}{RESET}') 90 | ret = 1 91 | sys.exit(ret) 92 | -------------------------------------------------------------------------------- /ci/update-versions.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # Update Antora attributes for OS and tool versions. 
3 | 4 | import os 5 | import requests 6 | import sys 7 | import yaml 8 | 9 | GITHUB_RELEASES = { 10 | 'butane-version': 'coreos/butane', 11 | 'ignition-version': 'coreos/ignition', 12 | } 13 | FCOS_STREAMS = { 14 | 'stable-version': 'stable', 15 | } 16 | 17 | basedir = os.path.normpath(os.path.join(os.path.dirname(sys.argv[0]), '..')) 18 | github_token = os.getenv('GITHUB_TOKEN') 19 | 20 | with open(os.path.join(basedir, 'antora.yml'), 'r+') as fh: 21 | config = yaml.safe_load(fh) 22 | attrs = config.setdefault('asciidoc', {}).setdefault('attributes', {}) 23 | orig_attrs = attrs.copy() 24 | 25 | for attr, repo in GITHUB_RELEASES.items(): 26 | headers = {'Authorization': f'Bearer {github_token}'} if github_token else {} 27 | resp = requests.get( 28 | f'https://api.github.com/repos/{repo}/releases/latest', 29 | headers=headers 30 | ) 31 | resp.raise_for_status() 32 | tag = resp.json()['tag_name'] 33 | attrs[attr] = tag.lstrip('v') 34 | 35 | for attr, stream in FCOS_STREAMS.items(): 36 | resp = requests.get(f'https://builds.coreos.fedoraproject.org/streams/{stream}.json') 37 | resp.raise_for_status() 38 | # to be rigorous, we should have a separate attribute for each 39 | # artifact type, but the website doesn't do that either 40 | attrs[attr] = resp.json()['architectures']['x86_64']['artifacts']['metal']['release'] 41 | 42 | if attrs != orig_attrs: 43 | fh.seek(0) 44 | fh.truncate() 45 | fh.write("# Automatically modified by update-versions.py; comments will not be preserved\n\n") 46 | yaml.safe_dump(config, fh, sort_keys=False) 47 | -------------------------------------------------------------------------------- /modules/ROOT/assets/images/hyperv-actions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coreos/fedora-coreos-docs/4c82227a22ee7e688196472ded0d1bc0758cdeb9/modules/ROOT/assets/images/hyperv-actions.png -------------------------------------------------------------------------------- 
/modules/ROOT/assets/images/hyperv-disk.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coreos/fedora-coreos-docs/4c82227a22ee7e688196472ded0d1bc0758cdeb9/modules/ROOT/assets/images/hyperv-disk.png -------------------------------------------------------------------------------- /modules/ROOT/assets/images/hyperv-new.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coreos/fedora-coreos-docs/4c82227a22ee7e688196472ded0d1bc0758cdeb9/modules/ROOT/assets/images/hyperv-new.png -------------------------------------------------------------------------------- /modules/ROOT/assets/images/hyperv-secure-boot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coreos/fedora-coreos-docs/4c82227a22ee7e688196472ded0d1bc0758cdeb9/modules/ROOT/assets/images/hyperv-secure-boot.png -------------------------------------------------------------------------------- /modules/ROOT/assets/images/hyperv-select-server.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coreos/fedora-coreos-docs/4c82227a22ee7e688196472ded0d1bc0758cdeb9/modules/ROOT/assets/images/hyperv-select-server.png -------------------------------------------------------------------------------- /modules/ROOT/assets/images/hyperv-switch-create.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coreos/fedora-coreos-docs/4c82227a22ee7e688196472ded0d1bc0758cdeb9/modules/ROOT/assets/images/hyperv-switch-create.png -------------------------------------------------------------------------------- /modules/ROOT/assets/images/raspberry-pi-imager.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/coreos/fedora-coreos-docs/4c82227a22ee7e688196472ded0d1bc0758cdeb9/modules/ROOT/assets/images/raspberry-pi-imager.png -------------------------------------------------------------------------------- /modules/ROOT/assets/images/vfkit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coreos/fedora-coreos-docs/4c82227a22ee7e688196472ded0d1bc0758cdeb9/modules/ROOT/assets/images/vfkit.png -------------------------------------------------------------------------------- /modules/ROOT/assets/images/welcomefedoracoreos.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coreos/fedora-coreos-docs/4c82227a22ee7e688196472ded0d1bc0758cdeb9/modules/ROOT/assets/images/welcomefedoracoreos.jpg -------------------------------------------------------------------------------- /modules/ROOT/nav.adoc: -------------------------------------------------------------------------------- 1 | * xref:getting-started.adoc[Getting Started] 2 | * Provisioning Machines 3 | ** xref:stream-metadata.adoc[Stream metadata] 4 | ** xref:bare-metal.adoc[Installing on Bare Metal] 5 | ** xref:live-booting.adoc[Running directly from RAM] 6 | ** xref:provisioning-aliyun.adoc[Booting on Alibaba Cloud] 7 | ** xref:provisioning-aws.adoc[Booting on AWS] 8 | ** xref:provisioning-azure.adoc[Booting on Azure] 9 | ** xref:provisioning-digitalocean.adoc[Booting on DigitalOcean] 10 | ** xref:provisioning-exoscale.adoc[Booting on Exoscale] 11 | ** xref:provisioning-gcp.adoc[Booting on GCP] 12 | ** xref:provisioning-hetzner.adoc[Booting on Hetzner] 13 | ** xref:provisioning-hyperv.adoc[Booting on Hyper-V] 14 | ** xref:provisioning-ibmcloud.adoc[Booting on IBM Cloud] 15 | ** xref:provisioning-applehv.adoc[Booting on macOS] 16 | ** xref:provisioning-kubevirt.adoc[Booting on KubeVirt] 17 | ** xref:provisioning-libvirt.adoc[Booting on libvirt] 18 | ** 
xref:provisioning-openstack.adoc[Booting on OpenStack] 19 | ** xref:provisioning-nutanix.adoc[Booting on Nutanix] 20 | ** xref:provisioning-qemu.adoc[Booting on QEMU] 21 | ** xref:provisioning-raspberry-pi4.adoc[Booting on the Raspberry Pi 4] 22 | ** xref:provisioning-virtualbox.adoc[Booting on VirtualBox] 23 | ** xref:provisioning-vmware.adoc[Booting on VMware] 24 | ** xref:provisioning-vultr.adoc[Booting on Vultr] 25 | * System Configuration 26 | ** xref:producing-ign.adoc[Producing an Ignition File] 27 | ** link:https://coreos.github.io/butane/specs/[Butane Specification] 28 | ** xref:remote-ign.adoc[Using a remote Ignition config] 29 | ** xref:storage.adoc[Configuring Storage] 30 | ** xref:managing-files.adoc[Managing Files] 31 | ** xref:sysconfig-network-configuration.adoc[Network Configuration] 32 | ** xref:sysconfig-enabling-wifi.adoc[Enabling Wi-Fi] 33 | ** xref:sysctl.adoc[Kernel Tuning] 34 | ** xref:running-containers.adoc[Running Containers] 35 | ** xref:authentication.adoc[Configuring Users and Groups] 36 | ** xref:hostname.adoc[Setting a Hostname] 37 | ** xref:proxy.adoc[Proxied Internet Access] 38 | ** xref:sysconfig-setting-keymap.adoc[Setting Keyboard Layout] 39 | ** xref:os-extensions.adoc[Adding OS extensions] 40 | ** xref:docker-ce.adoc[Installing Docker CE] 41 | ** xref:customize-nic.adoc[How to Customize a NIC Name] 42 | ** xref:sysconfig-configure-swaponzram.adoc[Configuring SwapOnZRAM] 43 | ** xref:sysconfig-configure-wireguard.adoc[Configuring WireGuard] 44 | ** xref:kernel-args.adoc[Modifying Kernel Arguments] 45 | ** xref:alternatives.adoc[Setting alternatives] 46 | ** xref:counting.adoc[Node counting] 47 | ** xref:time-zone.adoc[Configuring Time Zone] 48 | ** xref:grub-password.adoc[Setting a GRUB password] 49 | ** xref:audit.adoc[Managing the audit daemon] 50 | ** xref:composefs.adoc[ComposeFS] 51 | * OS updates 52 | ** xref:update-streams.adoc[Update Streams] 53 | ** xref:auto-updates.adoc[Auto-Updates] 54 | ** 
xref:bootloader-updates.adoc[Bootloader Updates] 55 | * xref:major-changes.adoc[Major Changes] 56 | * Troubleshooting 57 | ** xref:manual-rollbacks.adoc[Manual Rollbacks] 58 | ** xref:access-recovery.adoc[Access Recovery] 59 | ** xref:emergency-shell.adoc[Emergency Console Access] 60 | ** xref:debugging-with-toolbox.adoc[Debugging with Toolbx] 61 | ** xref:debugging-kernel-crashes.adoc[Debugging Kernel Crashes] 62 | * Tutorials 63 | ** xref:tutorial-setup.adoc[Prerequisites for the tutorials] 64 | ** xref:tutorial-autologin.adoc[Enabling autologin and custom hostname] 65 | ** xref:tutorial-services.adoc[Starting a service on first boot] 66 | ** xref:tutorial-containers.adoc[SSH access and starting containers] 67 | ** xref:tutorial-user-systemd-unit-on-boot.adoc[Launching a user-level systemd unit on boot] 68 | ** xref:tutorial-updates.adoc[Testing Fedora CoreOS updates] 69 | * Reference pages 70 | ** xref:live-reference.adoc[Live ISO/PXE reference] 71 | ** xref:platforms.adoc[Supported Platforms] 72 | ** xref:fcos-projects.adoc[Projects Using Fedora CoreOS] 73 | ** xref:update-barrier-signing-keys.adoc[Signing keys and updates] 74 | * Projects documentation 75 | ** https://coreos.github.io/afterburn/[Afterburn] 76 | ** https://coreos.github.io/butane/[Butane (Config Transpiler)] 77 | ** https://coreos.github.io/coreos-assembler/[CoreOS Assembler] 78 | ** https://coreos.github.io/coreos-installer/[CoreOS Installer] 79 | ** https://coreos.github.io/ignition/[Ignition] 80 | ** https://coreos.github.io/rpm-ostree/[rpm-ostree] 81 | ** https://coreos.github.io/zincati/[Zincati] 82 | ** https://ostreedev.github.io/ostree/[ostree] 83 | * Migration notes 84 | ** xref:migrate-ah.adoc[Migrating from Atomic Host] 85 | ** xref:migrate-cl.adoc[Migrating from Container Linux] 86 | * xref:faq.adoc[FAQ] 87 | -------------------------------------------------------------------------------- /modules/ROOT/pages/access-recovery.adoc: 
-------------------------------------------------------------------------------- 1 | = Access Recovery 2 | 3 | If you've lost the private key of an SSH key pair used to log into Fedora CoreOS, and do not have any password logins set up to use at the console, you can gain access back to the machine by booting into single user mode with the `single` kernel command-line argument: 4 | 5 | . When booting the system, intercept the GRUB menu and edit the entry to append `single` to the kernel argument list, then press Ctrl-X to resume booting. 6 | . Wait for the system to boot into a shell prompt 7 | . Set or reset the password for the target user using the `passwd` utility. 8 | . Finally, reboot the system with `/sbin/reboot -f`. 9 | 10 | You should now be able to log back into the system at the console. From there, you can e.g. fetch a new public SSH key to add to `~/.ssh/authorized_keys` and delete the old one. You may also want to lock the password you've set (using `passwd -l`). Note that Fedora CoreOS by default does not allow SSH login via password authentication. 11 | -------------------------------------------------------------------------------- /modules/ROOT/pages/alternatives.adoc: -------------------------------------------------------------------------------- 1 | = Setting alternatives 2 | 3 | Due to an https://github.com/fedora-sysv/chkconfig/issues/9[ongoing issue] in how alternatives configurations are stored on the system, Fedora CoreOS systems can not use the usual `alternatives` commands to configure them. 4 | 5 | Instead, until this issue is resolved, you can set the symlinks directly in `/etc/alternatives`. 
For example, to use the legacy-based variants of the `iptables` commands: 6 | 7 | [source,yaml,subs="attributes"] 8 | ---- 9 | variant: fcos 10 | version: {butane-latest-stable-spec} 11 | storage: 12 | links: 13 | - path: /etc/alternatives/iptables 14 | target: /usr/sbin/iptables-legacy 15 | overwrite: true 16 | hard: false 17 | - path: /etc/alternatives/iptables-restore 18 | target: /usr/sbin/iptables-legacy-restore 19 | overwrite: true 20 | hard: false 21 | - path: /etc/alternatives/iptables-save 22 | target: /usr/sbin/iptables-legacy-save 23 | overwrite: true 24 | hard: false 25 | - path: /etc/alternatives/ip6tables 26 | target: /usr/sbin/ip6tables-legacy 27 | overwrite: true 28 | hard: false 29 | - path: /etc/alternatives/ip6tables-restore 30 | target: /usr/sbin/ip6tables-legacy-restore 31 | overwrite: true 32 | hard: false 33 | - path: /etc/alternatives/ip6tables-save 34 | target: /usr/sbin/ip6tables-legacy-save 35 | overwrite: true 36 | hard: false 37 | ---- 38 | 39 | == Using alternatives commands 40 | 41 | Starting with Fedora CoreOS based on Fedora 41, you can use `alternatives` commands to configure the default command. 42 | 43 | .Example Butane config using a systemd unit to configure the default iptables backend 44 | [source,yaml,subs="attributes"] 45 | ---- 46 | variant: fcos 47 | version: {butane-latest-stable-spec} 48 | systemd: 49 | units: 50 | - name: custom-iptables-default.service 51 | enabled: true 52 | contents: | 53 | [Unit] 54 | Description=Set the default backend for iptables 55 | [Service] 56 | ExecStart=/usr/sbin/alternatives --set iptables /usr/sbin/iptables-legacy 57 | RemainAfterExit=yes 58 | [Install] 59 | WantedBy=multi-user.target 60 | ---- 61 | 62 | NOTE: We don't recommend configuring the default iptables backend to `iptables-legacy`. This is just an example. 63 | 64 | You can also manually run the `alternatives` commands to configure the default command runtime. 
65 | 66 | .Example to manually configure the default iptables backend 67 | [source,bash] 68 | ---- 69 | # Check the link info 70 | alternatives --display iptables 71 | iptables --version 72 | 73 | # Configure iptables to point to iptables-nft 74 | sudo alternatives --set iptables /usr/sbin/iptables-nft 75 | 76 | # Verify iptables version is iptables-nft 77 | alternatives --display iptables 78 | iptables --version 79 | ---- -------------------------------------------------------------------------------- /modules/ROOT/pages/audit.adoc: -------------------------------------------------------------------------------- 1 | = Managing the audit daemon (`auditd`) 2 | 3 | Starting with the first release based on Fedora 39, Fedora CoreOS includes the audit daemon (`auditd`) to load and manage audit rules. 4 | 5 | Like all system daemons on Fedora CoreOS, the audit daemon is managed by systemd but with an exception: it can not be stopped or restarted via `systemctl stop auditd` or `systemctl restart auditd` for compliance reasons. 6 | 7 | From https://access.redhat.com/solutions/2664811[Unable to restart/stop auditd service using systemctl command in RHEL]: 8 | 9 | [quote] 10 | ____ 11 | "The reason for this unusual handling of restart/stop requests is that auditd is treated specially by the kernel: the credentials of a process that sends a killing signal to auditd are saved to the audit log. The audit developers do not want to see the credentials of PID 1 logged there. They want to see the login UID of the user who initiated the action." 
12 | ____ 13 | 14 | To stop and restart the audit daemon, you should use the following commands: 15 | 16 | [source,bash] 17 | ---- 18 | $ sudo auditctl --signal stop 19 | $ sudo systemctl start auditd.service # Only if you want it started again 20 | ---- 21 | 22 | You may also use the following commands to reload the rules, rotate the logs, resume logging or dump the daemon state: 23 | 24 | [source,bash] 25 | ---- 26 | $ sudo auditctl --signal reload 27 | $ sudo auditctl --signal rotate 28 | $ sudo auditctl --signal resume 29 | $ sudo auditctl --signal state 30 | ---- 31 | 32 | See https://man7.org/linux/man-pages/man8/auditctl.8.html[auditctl(8)] and https://man7.org/linux/man-pages/man8/auditd.8.html[auditd(8)] for more details about those commands. 33 | -------------------------------------------------------------------------------- /modules/ROOT/pages/auto-updates.adoc: -------------------------------------------------------------------------------- 1 | = Auto-Updates and Manual Rollbacks 2 | 3 | Fedora CoreOS provides atomic updates and rollbacks via https://ostreedev.github.io/ostree/[OSTree] deployments. 4 | 5 | By default, the OS performs continual auto-updates via two components: 6 | 7 | * https://github.com/coreos/rpm-ostree[rpm-ostree] handles multiple on-disk OSTree deployments and can switch between them at boot-time. 8 | * https://github.com/coreos/zincati[Zincati] continually checks for OS updates and applies them via rpm-ostree. 9 | 10 | == Wariness to updates 11 | 12 | The local Zincati agent periodically checks with a remote service to see when updates are available. 13 | A custom "rollout wariness" value (see https://coreos.github.io/zincati/usage/auto-updates/#phased-rollouts-client-wariness-canaries[documentation]) can be provided to let the server know how eager, or how risk-averse, the node is to receiving updates. 
14 | 15 | The `rollout_wariness` parameter can be set to a floating point value between `0.0` (most eager) and `1.0` (most conservative). 16 | In order to receive updates very early in the phased rollout cycle, a node can be configured with a low value (e.g. `0.001`). 17 | This can be done during provisioning by using the xref:producing-ign.adoc[Butane] config snippet shown below: 18 | 19 | .Example: configuring Zincati rollout wariness 20 | [source,yaml,subs="attributes"] 21 | ---- 22 | variant: fcos 23 | version: {butane-latest-stable-spec} 24 | storage: 25 | files: 26 | - path: /etc/zincati/config.d/51-rollout-wariness.toml 27 | contents: 28 | inline: | 29 | [identity] 30 | rollout_wariness = 0.001 31 | ---- 32 | 33 | == OS update finalization 34 | 35 | To finalize an OS update, the machine must reboot. 36 | As this is an invasive action which may cause service disruption, Zincati allows the cluster administrator to control when nodes are allowed to reboot for update finalization. 37 | 38 | The following finalization strategies are available: 39 | 40 | * As soon as the update is downloaded and staged locally, immediately reboot to apply an update. 41 | * Use an external lock-manager to coordinate the reboot of a fleet of machines. 42 | * Allow reboots only within configured maintenance windows, defined on a weekly UTC schedule. 43 | 44 | A specific finalization strategy can be configured on each node. 
45 | 46 | The xref:producing-ign.adoc[Butane] snippet below shows how to define two maintenance windows during weekend days, starting at 22:30 UTC and lasting one hour each: 47 | 48 | .Example: configuring Zincati updates strategy 49 | [source,yaml,subs="attributes"] 50 | ---- 51 | variant: fcos 52 | version: {butane-latest-stable-spec} 53 | storage: 54 | files: 55 | - path: /etc/zincati/config.d/55-updates-strategy.toml 56 | contents: 57 | inline: | 58 | [updates] 59 | strategy = "periodic" 60 | [[updates.periodic.window]] 61 | days = [ "Sat", "Sun" ] 62 | start_time = "22:30" 63 | length_minutes = 60 64 | ---- 65 | For further details on updates finalization, check the https://coreos.github.io/zincati/usage/updates-strategy/[Zincati documentation]. 66 | 67 | include::manual-rollbacks.adoc[leveloffset=1] 68 | -------------------------------------------------------------------------------- /modules/ROOT/pages/bootloader-updates.adoc: -------------------------------------------------------------------------------- 1 | = Updating the bootloader 2 | 3 | == bootupd 4 | 5 | Updating the bootloader is not currently automatic. The https://github.com/coreos/bootupd/[bootupd] 6 | project is included in Fedora CoreOS and may be used for manual updates. 7 | 8 | This is usually only relevant on bare metal scenarios, or virtualized 9 | hypervisors that support Secure Boot. An example reason to update the 10 | bootloader is for https://eclypsium.com/2020/07/29/theres-a-hole-in-the-boot/[the BootHole vulnerability]. 11 | 12 | At the moment, only the EFI system partition (i.e. not the BIOS MBR) can be updated by bootupd. 13 | 14 | Inspect the system status: 15 | 16 | [source,bash] 17 | ---- 18 | # bootupctl status 19 | Component EFI 20 | Installed: grub2-efi-x64-1:2.04-31.fc33.x86_64,shim-x64-15-8.x86_64 21 | Update: At latest version 22 | # 23 | ---- 24 | 25 | If an update is available, use `bootupctl update` to apply it; the 26 | change will take effect for the next reboot. 
27 | 28 | [source,bash] 29 | ---- 30 | # bootupctl update 31 | ... 32 | Updated: grub2-efi-x64-1:2.04-31.fc33.x86_64,shim-x64-15-8.x86_64 33 | # 34 | ---- 35 | 36 | .Example systemd unit to automate bootupd updates 37 | [source,yaml,subs="attributes"] 38 | ---- 39 | variant: fcos 40 | version: {butane-latest-stable-spec} 41 | systemd: 42 | units: 43 | - name: custom-bootupd-auto.service 44 | enabled: true 45 | contents: | 46 | [Unit] 47 | Description=Bootupd automatic update 48 | 49 | [Service] 50 | ExecStart=/usr/bin/bootupctl update 51 | RemainAfterExit=yes 52 | 53 | [Install] 54 | WantedBy=multi-user.target 55 | ---- 56 | 57 | === Using images that predate bootupd 58 | 59 | Older CoreOS images that predate the existence of bootupd need 60 | an explicit "adoption" phase. If `bootupctl status` says the component 61 | is `Adoptable`, perform the adoption with `bootupctl adopt-and-update`. 62 | 63 | [source,bash] 64 | ---- 65 | # bootupctl adopt-and-update 66 | ... 67 | Updated: grub2-efi-x64-1:2.04-31.fc33.x86_64,shim-x64-15-8.x86_64 68 | # 69 | ---- 70 | 71 | === Future versions may default to automatic updates 72 | 73 | It is possible that future Fedora CoreOS versions may default 74 | to automating bootloader updates similar to the above. 75 | -------------------------------------------------------------------------------- /modules/ROOT/pages/composefs.adoc: -------------------------------------------------------------------------------- 1 | = Composefs 2 | 3 | Fedora CoreOS introduced composefs enabled by default starting in Fedora 41. 4 | Composefs is an overlay filesystem where the data comes from the usual ostree deployment, and the metadata is in the composefs file. 5 | The result is a truly read-only root (`/`) filesystem, increasing the system integrity and robustness. 6 | 7 | This is a first step towards a full verification of filesystem integrity, even at runtime. 8 | 9 | == What does it change?
10 | 11 | The main visible change will be that the root filesystem (`/`) is now small and full (a few MB, 100% used). 12 | 13 | == Known issues 14 | 15 | === Top-level directories 16 | 17 | Another consequence is that it is now impossible to create top-level directories in `/`. 18 | A common use case for those top-level directories is to use them as mount points. 19 | We recommend using subdirectories in `/var` instead. 20 | Currently, the only way around that is to disable composefs as shown below. 21 | 22 | == Disable composefs 23 | 24 | Composefs can be disabled through a kernel argument: `ostree.prepare-root.composefs=0`. 25 | 26 | .Disabling composefs at provisioning 27 | [source,yaml,subs="attributes"] 28 | ---- 29 | variant: fcos 30 | version: {butane-latest-stable-spec} 31 | kernel_arguments: 32 | should_exist: 33 | - ostree.prepare-root.composefs=0 34 | ---- 35 | 36 | .Disabling composefs on a running FCOS system 37 | [source,bash] 38 | ---- 39 | $ sudo rpm-ostree kargs --append='ostree.prepare-root.composefs=0' 40 | ---- 41 | Note that a reboot is required for the change to take effect. 42 | 43 | == Links 44 | 45 | https://fedoraproject.org/wiki/Changes/ComposefsAtomicCoreOSIoT[Enabling composefs by default for CoreOS and IoT] 46 | -------------------------------------------------------------------------------- /modules/ROOT/pages/counting.adoc: -------------------------------------------------------------------------------- 1 | = Node counting 2 | 3 | Fedora CoreOS nodes are counted by the Fedora infrastructure via the Count Me feature. This system is explicitly designed to make sure that no personally identifiable information is sent from counted systems. It also ensures that the Fedora infrastructure does not collect any personal data. The nickname for this counting mechanism is "Count Me", from the option name.
Implementation details of this feature are available in https://fedoraproject.org/wiki/Changes/DNF_Better_Counting[DNF Better Counting change request for Fedora 32]. In short, the Count Me mechanism works by telling Fedora servers how old your system is (with a very large approximation). 4 | 5 | On Fedora CoreOS nodes, this functionality is implemented in https://coreos.github.io/rpm-ostree/countme/[rpm-ostree as a stand-alone method]. The new implementation has the same privacy preserving properties as the original DNF implementation. 6 | 7 | == Opting out of counting 8 | 9 | You can use the following command to disable counting on existing nodes: 10 | 11 | [source,bash] 12 | ---- 13 | $ sudo systemctl mask --now rpm-ostree-countme.timer 14 | ---- 15 | 16 | You can use the following Butane config to disable counting during provisioning on first boot: 17 | 18 | [source,yaml,subs="attributes"] 19 | ---- 20 | variant: fcos 21 | version: {butane-latest-stable-spec} 22 | systemd: 23 | units: 24 | - name: rpm-ostree-countme.timer 25 | enabled: false 26 | mask: true 27 | ---- 28 | -------------------------------------------------------------------------------- /modules/ROOT/pages/customize-nic.adoc: -------------------------------------------------------------------------------- 1 | = How to Customize a NIC Name 2 | 3 | == Using a systemd Link File 4 | You can create a systemd https://www.freedesktop.org/software/systemd/man/systemd.link.html[link file] with Ignition configs. 
5 | 6 | For example, to name the NIC with the MAC address `12:34:56:78:9a:bc` to "infra", place a systemd link file at `/etc/systemd/network/25-infra.link` using the xref:producing-ign.adoc[Butane] config snippet shown below: 7 | 8 | .Example: Customize NIC via systemd Link File 9 | [source,yaml,subs="attributes"] 10 | ---- 11 | variant: fcos 12 | version: {butane-latest-stable-spec} 13 | storage: 14 | files: 15 | - path: /etc/systemd/network/25-infra.link 16 | mode: 0644 17 | contents: 18 | inline: | 19 | [Match] 20 | MACAddress=12:34:56:78:9a:bc 21 | [Link] 22 | Name=infra 23 | ---- 24 | 25 | == Using Udev Rules 26 | Similarly, also through Ignition configs, to name the NIC with the MAC address `12:34:56:78:9a:bc` to "infra", create a https://man7.org/linux/man-pages/man7/udev.7.html[udev rule] at `/etc/udev/rules.d/80-ifname.rules` using the xref:producing-ign.adoc[Butane] config snippet shown below: 27 | 28 | .Example: Customize NIC via Udev Rules 29 | [source,yaml,subs="attributes"] 30 | ---- 31 | variant: fcos 32 | version: {butane-latest-stable-spec} 33 | storage: 34 | files: 35 | - path: /etc/udev/rules.d/80-ifname.rules 36 | mode: 0644 37 | contents: 38 | inline: | 39 | SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="12:34:56:78:9a:bc", ATTR{type}=="1", NAME="infra" 40 | ---- 41 | 42 | == Networking in the Initramfs via Kernel Arguments 43 | If networking in the initramfs is required, the kernel argument `ifname=` will dynamically create a udev rule to change the name of a NIC. 44 | 45 | Currently, unlike other parts of the networking config from the initramfs (e.g. static IPs, hostnames, etc.), these udev rules are not persisted into the real root. If the custom name needs to be applied to the real root, either a link file or udev rule must be created, as shown above. See https://github.com/coreos/fedora-coreos-tracker/issues/553[this issue] for more details.
46 | 47 | For example, to give the NIC with the MAC address `12:34:56:78:9a:bc` a name of "infra", provide a `ifname=infra:12:34:56:78:9a:bc` kernel argument. A udev rule would be created in the initramfs like: 48 | [source] 49 | ---- 50 | # cat /etc/udev/rules.d/80-ifname.rules 51 | SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="12:34:56:78:9a:bc", ATTR{type}=="1", NAME="infra" 52 | ---- 53 | -------------------------------------------------------------------------------- /modules/ROOT/pages/debugging-kernel-crashes.adoc: -------------------------------------------------------------------------------- 1 | = Debugging kernel crashes using kdump 2 | 3 | == Introduction 4 | kdump is a service that creates crash dumps when there is a kernel crash. It uses https://www.mankier.com/8/kexec[`kexec(8)`] to boot into a secondary kernel (known as a capture kernel), then exports the contents of the kernel's memory (known as a crash dump or vmcore) to the filesystem. The contents of vmcore can then be analyzed to root cause the kernel crash. 5 | 6 | Configuring kdump requires setting the `crashkernel` kernel argument and enabling the kdump systemd service. Memory must be reserved for the crash kernel during booting of the first kernel. `crashkernel=auto` generally doesn't reserve enough memory on Fedora CoreOS, so it is recommended to specify `crashkernel=300M`. 7 | 8 | By default, the vmcore will be saved in `/var/crash`. It is also possible to write the dump to some other location on the local system or to send it over the network by editing `/etc/kdump.conf`. For additional information, see https://www.mankier.com/5/kdump.conf[`kdump.conf(5)`] and the comments in `/etc/kdump.conf` and `/etc/sysconfig/kdump`. 
9 | 10 | == Configuring kdump via Ignition 11 | .Example kdump configuration 12 | [source,yaml,subs="attributes"] 13 | ---- 14 | variant: fcos 15 | version: {butane-latest-stable-spec} 16 | kernel_arguments: 17 | should_exist: 18 | - 'crashkernel=300M' 19 | systemd: 20 | units: 21 | - name: kdump.service 22 | enabled: true 23 | ---- 24 | 25 | == Configuring kdump after initial provision 26 | . Set the crashkernel kernel argument 27 | + 28 | [source, bash] 29 | ---- 30 | sudo rpm-ostree kargs --append='crashkernel=300M' 31 | ---- 32 | xref:kernel-args.adoc[More information] on how to modify kargs via `rpm-ostree`. 33 | 34 | . Enable the kdump systemd service. 35 | + 36 | [source, bash] 37 | ---- 38 | sudo systemctl enable kdump.service 39 | ---- 40 | 41 | . Reboot your system. 42 | + 43 | [source, bash] 44 | ---- 45 | sudo systemctl reboot 46 | ---- 47 | 48 | NOTE: It is highly recommended to test the configuration after setting up the `kdump` service, with extra attention to the amount of memory reserved for the crash kernel. For information on how to test that kdump is properly armed and how to analyze the dump, refer to the https://fedoraproject.org/wiki/How_to_use_kdump_to_debug_kernel_crashes[kdump documentation for Fedora] and https://www.kernel.org/doc/html/latest/admin-guide/kdump/kdump.html[the Linux kernel documentation on kdump]. 49 | -------------------------------------------------------------------------------- /modules/ROOT/pages/debugging-with-toolbox.adoc: -------------------------------------------------------------------------------- 1 | = Debugging with Toolbx 2 | 3 | The FCOS image is kept minimal by design to reduce the image size and the attack surface. 4 | This means that it does not include every troubleshooting tool that a normal OS may include. 5 | Instead, the recommended approach is to leverage containers with the https://containertoolbx.org/[toolbox] utility included in the image. 6 | 7 | == What is Toolbx? 
8 | 9 | Toolbx is a utility that allows you to create privileged containers meant to debug and troubleshoot your instance. 10 | It is a wrapper around podman which starts long running containers with default mounts and namespaces to facilitate debugging the host system. 11 | 12 | These containers can then be used to install tools that you may need for troubleshooting. 13 | 14 | == Using Toolbx 15 | 16 | You can create a new toolbox by running the command below. On the first run it will ask you if you want to download an image. Answer yes with `y`. 17 | 18 | [source,sh] 19 | ---- 20 | toolbox create my_toolbox 21 | ---- 22 | 23 | You can then list all the toolboxes running on the host. 24 | This should show you your newly created toolbox. In this case, it is named `my_toolbox`. 25 | 26 | [source,sh] 27 | ---- 28 | toolbox list 29 | ---- 30 | 31 | As pointed out by the output of the `toolbox create my_toolbox` command, you can enter the following command to enter your toolbox. 32 | 33 | [source,sh] 34 | ---- 35 | toolbox enter my_toolbox 36 | ---- 37 | 38 | Now that you're in the container, you can use the included `dnf` package manager to install packages. 39 | For example, let's install `strace` to look at read syscalls done by the host's `toolbox` utility. 40 | 41 | [source,sh] 42 | ---- 43 | sudo dnf install strace 44 | # Some host directories are mounted at /run/host 45 | strace -eread /run/host/usr/bin/toolbox list 46 | ---- 47 | 48 | Once done with your container, you can exit the container and then remove it from the host with the following command. 49 | 50 | [source,sh] 51 | ---- 52 | toolbox rm --force my_toolbox 53 | ---- 54 | 55 | NOTE: Toolbx allows you to create toolboxes with your custom images. 56 | You can find more details in the https://github.com/containers/toolbox/tree/main/doc[toolbox manpages].
57 | -------------------------------------------------------------------------------- /modules/ROOT/pages/docker-ce.adoc: -------------------------------------------------------------------------------- 1 | = Installing Docker CE 2 | 3 | By default, Fedora CoreOS comes with out-of-the-box support for `docker` CLI (as provided via https://mobyproject.org/[Moby]). 4 | However, in some cases Docker Community Edition (CE) may be preferred for various reasons. 5 | This page explains how to replace the provided version with the latest from the upstream Docker sources. 6 | 7 | The recommended approach from the official https://docs.docker.com/engine/install/fedora/[Docker documentation] is to add the Docker repository to your system. 8 | You can then install and update Docker CE from this repository. 9 | 10 | 11 | == Installing Docker CE on first boot 12 | 13 | On provisioning, you can install Docker CE during the first boot of the system via ignition configuration. 14 | 15 | .Example Butane config for setting up Docker CE 16 | [source,yaml,subs="attributes"] 17 | ---- 18 | variant: fcos 19 | version: {butane-latest-stable-spec} 20 | systemd: 21 | units: 22 | # Install Docker CE 23 | - name: rpm-ostree-install-docker-ce.service 24 | enabled: true 25 | contents: | 26 | [Unit] 27 | Description=Install Docker CE 28 | Wants=network-online.target 29 | After=network-online.target 30 | Before=zincati.service 31 | ConditionPathExists=!/var/lib/%N.stamp 32 | 33 | [Service] 34 | Type=oneshot 35 | RemainAfterExit=yes 36 | ExecStart=/usr/bin/curl --output-dir "/etc/yum.repos.d" --remote-name https://download.docker.com/linux/fedora/docker-ce.repo 37 | ExecStart=/usr/bin/rpm-ostree override remove moby-engine containerd runc docker-cli --install docker-ce 38 | ExecStart=/usr/bin/touch /var/lib/%N.stamp 39 | ExecStart=/usr/bin/systemctl --no-block reboot 40 | 41 | [Install] 42 | WantedBy=multi-user.target 43 | ---- 44 | 45 | 46 | == Installing Docker CE on a running system 47 | 48 | 
First, download and setup the Docker repository. 49 | Then you need to remove `moby-engine` and several other conflicting packages that ship by default in the Fedora CoreOS image, install the necessary Docker CE packages, and reboot the system. 50 | 51 | [source, bash] 52 | ---- 53 | curl --remote-name https://download.docker.com/linux/fedora/docker-ce.repo 54 | sudo install --owner 0 --group 0 --mode 644 docker-ce.repo /etc/yum.repos.d/docker-ce.repo 55 | sudo rpm-ostree override remove moby-engine containerd runc docker-cli --install docker-ce --reboot 56 | ---- 57 | 58 | === Upgrading Docker CE 59 | 60 | Docker CE should be upgraded automatically with each new release of Fedora CoreOS. 61 | 62 | [NOTE] 63 | ==== 64 | If you have Docker CE installed and are still using Fedora CoreOS 40, upgrading to Fedora CoreOS 41 will likely fail. 65 | This is due to the new `docker-cli` package added in Fedora CoreOS 41 and later. 66 | To upgrade to Fedora CoreOS 41 you’ll need to reset the overrides and uninstall layered Docker CE packages with the following command. 67 | 68 | [source, bash] 69 | ---- 70 | sudo rpm-ostree override reset containerd moby-engine runc --uninstall docker-ce 71 | ---- 72 | 73 | After upgrading to Fedora CoreOS 41, you can follow the instructions for xref:#_installing_docker_ce_on_a_running_system[Installing Docker CE on a running system]. 74 | ==== 75 | -------------------------------------------------------------------------------- /modules/ROOT/pages/emergency-shell.adoc: -------------------------------------------------------------------------------- 1 | = Emergency console access 2 | 3 | Sometimes you may want to access the node console to perform troubleshooting steps or emergency maintenance. 4 | For instance, you may want to access the emergency shell on the console, in order to debug first boot provisioning issues. 
5 | 6 | == Default console configuration 7 | 8 | All Fedora CoreOS (FCOS) images come with a default configuration for the console which is meant to accommodate most virtualized and bare-metal setups. Older FCOS releases enabled both serial and graphical consoles by default. Newer releases use different defaults for each cloud and virtualization platform, and use the kernel's defaults (typically a graphical console) on bare metal. New installs of Fedora CoreOS will switch to these new defaults starting with releases on these dates: 9 | 10 | - `next` stream: October 3, 2022 11 | - `testing` stream: November 28, 2022 12 | - `stable` stream: December 12, 2022 13 | 14 | The default consoles may not always match your specific hardware configuration. In that case, you can tweak the console setup. Fedora CoreOS has special support for doing this during xref:bare-metal.adoc[bare-metal installation], and in other cases you can xref:kernel-args.adoc[adjust kernel parameters]. Both approaches use https://www.kernel.org/doc/html/latest/admin-guide/serial-console.html[kernel argument syntax] for specifying the desired consoles. You can specify multiple consoles; kernel messages will appear on all of them, but only the last-specified device will be used as the foreground interactive console (i.e. `/dev/console`) for the machine. 15 | 16 | == Configuring the console during bare-metal installation 17 | 18 | If you are installing FCOS via `coreos-installer`, you can configure the console at install time. 19 | 20 | .Example: Enabling primary serial and secondary graphical console 21 | [source, bash] 22 | ---- 23 | sudo podman run --pull=always --privileged --rm \ 24 | -v /dev:/dev -v /run/udev:/run/udev -v .:/data -w /data \ 25 | quay.io/coreos/coreos-installer:release \ 26 | install /dev/vdb -i config.ign \ 27 | --console tty0 --console ttyS0,115200n8 28 | ---- 29 | 30 | This will configure both the GRUB bootloader and the kernel to use the specified consoles. 
31 | 32 | == Configuring the console with Ignition 33 | 34 | If you are launching FCOS from an image (in a cloud or a virtual machine), you can use Ignition to configure the console at provisioning time. 35 | 36 | .Example: Enabling primary serial and secondary graphical console 37 | [source,yaml,subs="attributes"] 38 | ---- 39 | variant: fcos 40 | version: {butane-latest-stable-spec} 41 | kernel_arguments: 42 | should_exist: 43 | # Order is significant, so group both arguments into the same list entry. 44 | - console=tty0 console=ttyS0,115200n8 45 | should_not_exist: 46 | # Remove any existing defaults. Adjust as needed. 47 | - console=hvc0 48 | - console=tty0 49 | - console=ttyAMA0,115200n8 50 | - console=ttyS0,115200n8 51 | - console=ttyS1,115200n8 52 | ---- 53 | 54 | This will configure the kernel to use the specified consoles. The GRUB bootloader will continue to use its previous default. Ignition will configure the console, then reboot into the new configuration and continue provisioning the node. 55 | 56 | == Configuring the console after installation 57 | 58 | You can adjust the console configuration of an existing FCOS node via `rpm-ostree`. 59 | 60 | .Example: Enabling primary serial and secondary graphical console 61 | [source, bash] 62 | ---- 63 | sudo rpm-ostree kargs --append=console=tty0 --append=console=ttyS0,115200n8 --reboot 64 | ---- 65 | 66 | `rpm-ostree` will create a new deployment with the specified kernel arguments added and reboot into the new configuration. The GRUB bootloader will continue to use its previous default. 67 | -------------------------------------------------------------------------------- /modules/ROOT/pages/fcct-config.adoc: -------------------------------------------------------------------------------- 1 | = Content Moved 2 | 3 | The content on this page has been moved. For an overview of the Butane (formerly FCCT) spec, please see the https://coreos.github.io/butane/specs/[full specification page]. 
The code examples on this page have been moved to the individual pages for each topic. 4 | -------------------------------------------------------------------------------- /modules/ROOT/pages/fcos-projects.adoc: -------------------------------------------------------------------------------- 1 | = Projects Using Fedora CoreOS 2 | 3 | This is a list of projects that are actively using Fedora CoreOS: 4 | 5 | * https://docs.podman.io/en/latest/markdown/podman-machine.1.html[Podman Machine] uses Fedora CoreOS to run containers in a local environment (notably including Windows and macOS), and also has a https://docs.podman.io/en/latest/markdown/podman-machine-os.1.html[podman machine os] command that allows customization of Fedora CoreOS in a container-native way. 6 | * https://www.okd.io[OKD] is the Community Distribution of Kubernetes that powers https://www.openshift.com/products/container-platform[Red Hat OpenShift Container Platform]. By default, Fedora CoreOS is the underlying OS used by the control plane nodes and the worker nodes. 7 | * https://github.com/poseidon/typhoon[Typhoon] is a minimal and free Kubernetes distribution. Users of Typhoon have the option of using Fedora CoreOS as the underlying OS for their nodes. 8 | * https://wiki.openstack.org/wiki/Magnum[OpenStack Magnum] is an OpenStack API service developed by the OpenStack Containers Team making container orchestration engines such as Docker Swarm, Kubernetes, and Apache Mesos available as first class resources in OpenStack. Fedora CoreOS is used as the underlying OS for nodes that are provisioned via Magnum. 9 | * https://www.ovirt.org/develop/release-management/features/virt/coreos-ignition-support.html[oVirt] supports booting Fedora CoreOS nodes and has native support for https://github.com/coreos/ignition[Ignition] configurations. 10 | * https://quay.io/[Quay.io] is using Fedora CoreOS in production to handle the job of building containers for their users.
11 | -------------------------------------------------------------------------------- /modules/ROOT/pages/getting-started-aws.adoc: -------------------------------------------------------------------------------- 1 | :page-partial: 2 | 3 | New AWS instances can be directly created from the public FCOS images. You can find the latest AMI for each region from the https://fedoraproject.org/coreos/download/[download page]. 4 | 5 | If you are only interested in exploring FCOS without further customization, you can use a https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html[registered SSH key-pair] for the default `core` user. 6 | 7 | To test out FCOS this way you'll need to run the `aws ec2 run-instances` command and provide some information to get the instance up and running. The following is an example command you can use: 8 | 9 | .Launching a new instance 10 | [source, bash] 11 | ---- 12 | NAME='instance1' 13 | SSHKEY='my-key' # the name of your SSH key: `aws ec2 describe-key-pairs` 14 | IMAGE='ami-xxx' # the AMI ID found on the download page 15 | DISK='20' # the size of the hard disk 16 | REGION='us-east-1' # the target region 17 | TYPE='m5.large' # the instance type 18 | SUBNET='subnet-xxx' # the subnet: `aws ec2 describe-subnets` 19 | SECURITY_GROUPS='sg-xx' # the security group `aws ec2 describe-security-groups` 20 | aws ec2 run-instances \ 21 | --region $REGION \ 22 | --image-id $IMAGE \ 23 | --instance-type $TYPE \ 24 | --key-name $SSHKEY \ 25 | --subnet-id $SUBNET \ 26 | --security-group-ids $SECURITY_GROUPS \ 27 | --tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=${NAME}}]" \ 28 | --block-device-mappings "VirtualName=/dev/xvda,DeviceName=/dev/xvda,Ebs={VolumeSize=${DISK}}" 29 | ---- 30 | 31 | 32 | TIP: You can find out the instance's assigned IP by running `aws ec2 describe-instances` 33 | 34 | You now should be able to SSH into the instance using the associated IP address. 
35 | 36 | .Example connecting 37 | [source, bash] 38 | ---- 39 | ssh core@ 40 | ---- 41 | -------------------------------------------------------------------------------- /modules/ROOT/pages/getting-started-libvirt.adoc: -------------------------------------------------------------------------------- 1 | :page-partial: 2 | 3 | . Fetch the latest image suitable for the `qemu` platform using `coreos-installer` (or https://fedoraproject.org/coreos/download/[download and verify] it from the web). You can use `coreos-installer` https://quay.io/repository/coreos/coreos-installer[as a container], or on Fedora install it from the repos. 4 | + 5 | [source, bash] 6 | ---- 7 | STREAM="stable" 8 | # as an installed binary: 9 | coreos-installer download -s $STREAM -p qemu -f qcow2.xz --decompress -C ~/.local/share/libvirt/images/ 10 | # or as a container: 11 | podman run --pull=always --rm -v "${HOME}/.local/share/libvirt/images/:/data" -w /data \ 12 | quay.io/coreos/coreos-installer:release download -s $STREAM -p qemu -f qcow2.xz --decompress 13 | ---- 14 | + 15 | 16 | . Launch a new machine via `virt-install`, using the Ignition file with your customizations. 
17 | + 18 | [source, bash] 19 | ---- 20 | IGNITION_CONFIG="/path/to/example.ign" 21 | IMAGE="/path/to/image.qcow2" 22 | VM_NAME="fcos-test-01" 23 | VCPUS="2" 24 | RAM_MB="2048" 25 | STREAM="stable" 26 | DISK_GB="10" 27 | # For x86 / aarch64, 28 | IGNITION_DEVICE_ARG=(--qemu-commandline="-fw_cfg name=opt/com.coreos/config,file=${IGNITION_CONFIG}") 29 | 30 | # For s390x / ppc64le, 31 | IGNITION_DEVICE_ARG=(--disk path="${IGNITION_CONFIG}",format=raw,readonly=on,serial=ignition,startup_policy=optional) 32 | 33 | # Setup the correct SELinux label to allow access to the config 34 | chcon --verbose --type svirt_home_t ${IGNITION_CONFIG} 35 | 36 | virt-install --connect="qemu:///system" --name="${VM_NAME}" --vcpus="${VCPUS}" --memory="${RAM_MB}" \ 37 | --os-variant="fedora-coreos-$STREAM" --import --graphics=none \ 38 | --disk="size=${DISK_GB},backing_store=${IMAGE}" \ 39 | --network bridge=virbr0 "${IGNITION_DEVICE_ARG[@]}" 40 | ---- 41 | 42 | NOTE: `virt-install` requires both the OS image and Ignition file to be specified as absolute paths. 43 | 44 | NOTE: Depending on your version of `virt-install`, you may not be able to use `--os-variant=fedora-coreos-*` and will get an error. In this case, you should pick an older Fedora variant (`--os-variant=fedora31` for example). You can find the variants that are supported by your current version of `virt-install` with `osinfo-query os | grep '^\s*fedora'`. 45 | 46 | NOTE: `DISK_GB` should be at least as big as the default size of the image. For Fedora CoreOS, this is currently 10 GB. 47 | 48 | TIP: Make sure that your user has access to `/dev/kvm`. The default is to allow access for everyone, but on some distributions you may need to add yourself to the `kvm` group. 49 | 50 | TIP: You can escape out of the serial console by pressing `CTRL + ]`.
51 | 52 | If you set up an xref:authentication.adoc[SSH key] for the default `core` user, you can SSH into the VM and explore the OS: 53 | 54 | [source, bash] 55 | ---- 56 | ssh core@ 57 | ---- 58 | -------------------------------------------------------------------------------- /modules/ROOT/pages/getting-started.adoc: -------------------------------------------------------------------------------- 1 | = Getting Started with Fedora CoreOS 2 | 3 | == Introduction 4 | 5 | === Streams 6 | 7 | There are three Fedora CoreOS (FCOS) update streams available: `stable`, `testing`, and `next`. In general, you will want to use `stable`, but it is recommended to run some machines on `testing` and `next` as well and provide feedback. 8 | 9 | Each stream has a canonical URL representing its current state in JSON format, known as "stream metadata". For example, the stream metadata URL for `stable` is: https://builds.coreos.fedoraproject.org/streams/stable.json 10 | 11 | For automating Fedora CoreOS installations, it is expected that you will interact with stream metadata. While Fedora CoreOS does automatic in-place updates, it is generally recommended to start provisioning new machines from the latest images. 12 | 13 | For more information on using stream metadata, see xref:stream-metadata.adoc[Stream Metadata]. 14 | For more about the available streams, see xref:update-streams.adoc[Update Streams]. 15 | 16 | === Provisioning Philosophy 17 | 18 | Fedora CoreOS does not have a separate install disk. Instead, every instance starts from a generic disk image which is customized on first boot via https://github.com/coreos/ignition[Ignition]. 19 | 20 | Each platform has specific logic to retrieve and apply the first boot configuration. For cloud deployments, Ignition gathers the configuration via user-data mechanisms. In the case of bare metal, Ignition can fetch its configuration from the disk or from a remote source. 
21 | 22 | For more information on configuration, refer to the documentation for xref:producing-ign.adoc[Producing an Ignition File]. 23 | 24 | == Quickstart 25 | 26 | === Booting on a cloud VM (AWS example) 27 | 28 | include::getting-started-aws.adoc[] 29 | 30 | A more complete example that allows customization is described in xref:provisioning-aws.adoc[]. 31 | 32 | === Booting on a local hypervisor (libvirt example) 33 | 34 | include::getting-started-libvirt.adoc[] 35 | 36 | === Exploring the OS 37 | 38 | Once the VM has finished booting, its IP addresses will appear on the console. By design, there are no hard-coded default credentials. 39 | 40 | If you set up an xref:authentication.adoc[SSH key] for the default `core` user, you can SSH into the VM and explore the OS: 41 | 42 | [source, bash] 43 | ---- 44 | ssh core@ 45 | ---- 46 | 47 | == Getting in touch 48 | 49 | We recommend that all users subscribe to the low-volume https://lists.fedoraproject.org/archives/list/coreos-status@lists.fedoraproject.org/[coreos-status mailing list] for operational notices related to Fedora CoreOS. 50 | 51 | Bugs can be reported to the https://github.com/coreos/fedora-coreos-tracker[Fedora CoreOS Tracker]. 52 | 53 | For live questions, feel free to reach out in the https://chat.fedoraproject.org/#/room/#coreos:fedoraproject.org[#coreos:fedoraproject.org room on Matrix]. 54 | 55 | For doubts and longer discussions related to Fedora CoreOS, a https://discussion.fedoraproject.org/tag/coreos[forum] and a https://lists.fedoraproject.org/archives/list/coreos@lists.fedoraproject.org/[mailing list] are available. 
56 | -------------------------------------------------------------------------------- /modules/ROOT/pages/grub-password.adoc: -------------------------------------------------------------------------------- 1 | = Setting a GRUB password 2 | 3 | You can set up a password to prevent unauthorized users from accessing the GRUB command line, modifying kernel command-line arguments, or booting non-default OSTree deployments. 4 | 5 | == Creating the password hash 6 | 7 | You can use `grub2-mkpasswd-pbkdf2` to create a password hash for GRUB. 8 | 9 | [source, bash] 10 | ---- 11 | $ grub2-mkpasswd-pbkdf2 12 | Enter password: 13 | Reenter password: 14 | PBKDF2 hash of your password is grub.pbkdf2.sha512.10000.5AE6255... 15 | ---- 16 | 17 | NOTE: The `grub2-mkpasswd-pbkdf2` tool is a component of the `grub2-tools-minimal` package on Fedora. 18 | 19 | == Butane config 20 | 21 | With the password hash ready, you can now create the Butane config. 22 | 23 | [source,yaml,subs="attributes"] 24 | ---- 25 | variant: fcos 26 | version: {butane-latest-stable-spec} 27 | grub: 28 | users: 29 | - name: root 30 | password_hash: grub.pbkdf2.sha512.10000.5AE6255... 31 | ---- 32 | 33 | The Butane config defines a GRUB superuser `root` and sets the password for that user using a hash. 34 | 35 | You can now use this config to boot a Fedora CoreOS instance. 
36 | -------------------------------------------------------------------------------- /modules/ROOT/pages/hostname.adoc: -------------------------------------------------------------------------------- 1 | = Setting a Hostname 2 | 3 | To set a custom hostname for your system, use the following Butane config to write to `/etc/hostname`: 4 | 5 | [source,yaml,subs="attributes"] 6 | ---- 7 | variant: fcos 8 | version: {butane-latest-stable-spec} 9 | storage: 10 | files: 11 | - path: /etc/hostname 12 | mode: 0644 13 | contents: 14 | inline: myhostname 15 | ---- 16 | 17 | Once booted, you can also verify that the desired hostname has been set using `hostnamectl`. 18 | -------------------------------------------------------------------------------- /modules/ROOT/pages/index.adoc: -------------------------------------------------------------------------------- 1 | = Fedora CoreOS Documentation 2 | 3 | image::fedoracoreos-logo.svg[] 4 | 5 | Fedora CoreOS is an automatically updating, minimal, monolithic, container-focused operating system, designed for clusters but also operable standalone, optimized for Kubernetes but also great without it. 6 | Its goal is to provide the best container host to run containerized workloads securely and at scale. 7 | 8 | Fedora CoreOS is an open source project associated with the link:https://fedoraproject.org/[Fedora Project]. 9 | 10 | [NOTE] 11 | ==== 12 | Please refer to the xref:faq.adoc[FAQ] for more information. 13 | If you want to help or ask any questions, join the link:https://chat.fedoraproject.org/#/room/#coreos:fedoraproject.org[Matrix room], or join our link:https://discussion.fedoraproject.org/tag/coreos[discussion board]. 14 | ==== 15 | 16 | xref:getting-started.adoc[Get started using Fedora CoreOS!] 
17 | -------------------------------------------------------------------------------- /modules/ROOT/pages/kernel-args.adoc: -------------------------------------------------------------------------------- 1 | = Modifying Kernel Arguments 2 | 3 | == Modifying Kernel Arguments via Ignition 4 | 5 | You can specify kernel arguments in a Butane config using the `kernel_arguments` section. 6 | 7 | === Example: Disabling all CPU vulnerability mitigations 8 | 9 | Here's an example `kernel_arguments` section which switches `mitigations=auto,nosmt` to `mitigations=off` to disable all CPU vulnerability mitigations: 10 | 11 | [source,yaml,subs="attributes"] 12 | ---- 13 | variant: fcos 14 | version: {butane-latest-stable-spec} 15 | kernel_arguments: 16 | should_exist: 17 | - mitigations=off 18 | should_not_exist: 19 | - mitigations=auto,nosmt 20 | ---- 21 | 22 | == Modifying Console Configuration During Bare Metal Install 23 | 24 | `coreos-installer` has special support for changing the console configuration when performing a bare-metal installation. This functionality can be used to add `console` arguments to the kernel command line and equivalent parameters to the GRUB bootloader configuration. For more information, see xref:emergency-shell.adoc[Emergency Console Access]. For more information about bare metal installs, see xref:bare-metal.adoc[Installing CoreOS on Bare Metal]. 25 | 26 | == Modifying Kernel Arguments on Existing Systems 27 | 28 | Kernel argument changes are managed by `rpm-ostree` via the https://www.mankier.com/1/rpm-ostree[`rpm-ostree kargs`] subcommand. Changes are applied to a new deployment and a reboot is necessary for those to take effect. 29 | 30 | === Adding kernel arguments 31 | 32 | You can append kernel arguments. 
An empty value for an argument is allowed: 33 | 34 | [source,bash] 35 | ---- 36 | $ sudo rpm-ostree kargs --append=KEY=VALUE 37 | ---- 38 | 39 | .Example: Add reserved memory for Kdump support 40 | 41 | [source,bash] 42 | ---- 43 | $ sudo rpm-ostree kargs --append='crashkernel=256M' 44 | ---- 45 | 46 | See also xref:debugging-kernel-crashes.adoc[Debugging kernel crashes using kdump]. 47 | 48 | === Removing existing kernel arguments 49 | 50 | You can delete a specific kernel argument key/value pair or an entire argument with a single key/value pair: 51 | 52 | [source,bash] 53 | ---- 54 | $ sudo rpm-ostree kargs --delete=KEY=VALUE 55 | ---- 56 | 57 | .Example: Re-enable SMT on vulnerable CPUs 58 | 59 | [source,bash] 60 | ---- 61 | $ sudo rpm-ostree kargs --delete=mitigations=auto,nosmt 62 | ---- 63 | 64 | .Example: Update an existing system from cgroupsv1 to cgroupsv2 and immediately reboot 65 | 66 | [source,bash] 67 | ---- 68 | $ sudo rpm-ostree kargs --delete=systemd.unified_cgroup_hierarchy --reboot 69 | ---- 70 | 71 | === Replacing existing kernel arguments 72 | 73 | You can replace an existing kernel argument with a new value. You can directly use `KEY=VALUE` if only one value exists for that argument. Otherwise, you can specify the new value using the following format: 74 | 75 | [source,bash] 76 | ---- 77 | $ sudo rpm-ostree kargs --replace=KEY=VALUE=NEWVALUE 78 | ---- 79 | 80 | .Example: Disable all CPU vulnerability mitigations 81 | 82 | [source,bash] 83 | ---- 84 | $ sudo rpm-ostree kargs --replace=mitigations=auto,nosmt=off 85 | ---- 86 | 87 | This switches `mitigations=auto,nosmt` to `mitigations=off` to disable all CPU vulnerability mitigations. 
88 | 89 | === Interactive editing 90 | 91 | To use an editor to modify the kernel arguments: 92 | 93 | [source,bash] 94 | ---- 95 | $ sudo rpm-ostree kargs --editor 96 | ---- 97 | -------------------------------------------------------------------------------- /modules/ROOT/pages/live-booting.adoc: -------------------------------------------------------------------------------- 1 | = Running Fedora CoreOS directly from RAM 2 | :page-aliases: live-booting-ipxe.adoc 3 | 4 | The Fedora CoreOS live environment is a fully functional copy of Fedora CoreOS. It can provision itself via Ignition, execute containers, and so on. The live environment can be used to xref:bare-metal.adoc[install Fedora CoreOS to disk], and can also be used to run Fedora CoreOS directly from RAM. 5 | 6 | This guide shows how to boot a transient Fedora CoreOS (FCOS) system via ISO, PXE, or iPXE. For more about the live ISO and PXE images, see the xref:live-reference.adoc[live image reference]. 7 | 8 | == Prerequisites 9 | 10 | Before booting FCOS, you must have an Ignition configuration file. If you do not have one, see xref:producing-ign.adoc[Producing an Ignition File]. 11 | 12 | If you're booting from PXE or iPXE, you must host the Ignition config on a reachable HTTP(S) or TFTP server. Booting the live PXE image requires at least 2 GiB of RAM with the `coreos.live.rootfs_url` kernel argument, and 4 GiB otherwise. 13 | 14 | == Booting via ISO 15 | 16 | The live ISO image can be booted from a physical DVD disc, from a USB stick, or from a virtual CD drive via lights-out management (LOM) firmware. 17 | 18 | To boot from the ISO image, follow these steps: 19 | 20 | - Download the ISO image: 21 | + 22 | [source,bash] 23 | ---- 24 | podman run --security-opt label=disable --pull=always --rm -v .:/data -w /data \ 25 | quay.io/coreos/coreos-installer:release download -f iso 26 | ---- 27 | 28 | - Use `coreos-installer iso customize` to customize the ISO for your needs. 
In this example we assume an Ignition config exists in a file `config.ign`. We also add the optional `coreos.liveiso.fromram` kernel argument to the live boot. 29 | + 30 | NOTE: The `coreos.liveiso.fromram` argument is optional and is used in cases where you want to have no references to the booted media (ISO) once the system is up and running. This enables use cases like removing the media after boot or rewriting the disk the booted media is on, but does require more memory. 31 | + 32 | [source,bash,subs="attributes"] 33 | ---- 34 | KERNEL_ARG='--live-karg-append=coreos.liveiso.fromram' 35 | IGNITION_ARG='--live-ignition=./config.ign' 36 | podman run --security-opt label=disable --pull=always --rm -v .:/data -w /data \ 37 | quay.io/coreos/coreos-installer:release iso customize $KERNEL_ARG $IGNITION_ARG \ 38 | -o customized.iso fedora-coreos-{stable-version}-live.x86_64.iso 39 | ---- 40 | 41 | - Burn the ISO to disk. On Linux and macOS, you can use `dd`. On Windows, you can use https://rufus.ie/[Rufus] in "DD Image" mode. 42 | - Boot it on the target system. 
43 | 44 | == Booting via PXE 45 | 46 | To boot from PXE, follow these steps: 47 | 48 | - Download an FCOS PXE kernel, initramfs, and rootfs image: 49 | [source, bash] 50 | ---- 51 | podman run --security-opt label=disable --pull=always --rm -v .:/data -w /data \ 52 | quay.io/coreos/coreos-installer:release download -f pxe 53 | ---- 54 | 55 | - Follow this example `pxelinux.cfg` for booting the installer images with PXELINUX and running Ignition: 56 | 57 | [source,subs="attributes"] 58 | ---- 59 | DEFAULT pxeboot 60 | TIMEOUT 20 61 | PROMPT 0 62 | LABEL pxeboot 63 | KERNEL fedora-coreos-{stable-version}-live-kernel-x86_64 64 | APPEND initrd=fedora-coreos-{stable-version}-live-initramfs.x86_64.img,fedora-coreos-{stable-version}-live-rootfs.x86_64.img ignition.firstboot ignition.platform.id=metal ignition.config.url=http://192.168.1.101/config.ign 65 | IPAPPEND 2 66 | ---- 67 | 68 | == Booting via iPXE 69 | 70 | An iPXE-capable machine needs to be provided with a relevant Boot Script to fetch and load FCOS artifacts. 71 | 72 | The example below shows how to load those directly from Fedora infrastructure. For performance and reliability reasons it is recommended to mirror them on the local infrastructure, and then tweak the `BASEURL` as needed. 
73 | 74 | [source,subs="attributes"] 75 | ---- 76 | #!ipxe 77 | 78 | set STREAM stable 79 | set VERSION {stable-version} 80 | set CONFIGURL https://example.com/config.ign 81 | 82 | set BASEURL https://builds.coreos.fedoraproject.org/prod/streams/$\{STREAM}/builds/$\{VERSION}/x86_64 83 | 84 | kernel $\{BASEURL}/fedora-coreos-$\{VERSION}-live-kernel-x86_64 initrd=main coreos.live.rootfs_url=$\{BASEURL}/fedora-coreos-$\{VERSION}-live-rootfs.x86_64.img ignition.firstboot ignition.platform.id=metal ignition.config.url=$\{CONFIGURL} 85 | initrd --name main $\{BASEURL}/fedora-coreos-$\{VERSION}-live-initramfs.x86_64.img 86 | 87 | boot 88 | ---- 89 | 90 | == Using persistent state 91 | 92 | By default, the Fedora CoreOS live environment does not store any state on disk, and is reprovisioned from scratch on every boot. However, you may choose to store some persistent state (such as container images) in a filesystem that persists across reboots. For example, here's a Butane config that configures a persistent `/var`: 93 | 94 | [source,yaml,subs="attributes"] 95 | ---- 96 | variant: fcos 97 | version: 1.1.0 98 | storage: 99 | disks: 100 | - device: /dev/sda 101 | wipe_table: true 102 | partitions: 103 | - label: var 104 | filesystems: 105 | - device: /dev/disk/by-partlabel/var 106 | label: var 107 | format: xfs 108 | wipe_filesystem: false 109 | path: /var 110 | with_mount_unit: true 111 | ---- 112 | 113 | When booting a live environment, the Ignition config runs on every boot, so it should avoid wiping LUKS volumes and filesystems that you want to reuse. Instead, configure such structures so that they're created on the first boot and preserved on subsequent boots. 114 | 115 | In particular, note the following guidelines: 116 | 117 | - Avoid setting `wipe_filesystem` or `wipe_volume` for https://coreos.github.io/ignition/operator-notes/#filesystem-reuse-semantics[filesystems] or LUKS volumes that you intend to reuse. 
(You can safely set `wipe_table` or `wipe_partition_entry` when reusing a disk, since those don't modify the contents of a https://coreos.github.io/ignition/operator-notes/#partition-reuse-semantics[partition].) 118 | - When reusing LUKS volumes, you must configure a `key_file`. Ignition cannot reuse Clevis-backed LUKS volumes. 119 | - Avoid writing persistent data to RAID volumes, since Ignition cannot reuse those. 120 | - When writing files in persistent storage, set `overwrite` to `true` to avoid Ignition failures when reusing a filesystem that already has the file. Avoid using the `append` directive for persistent files, since the append operation will occur on every boot. 121 | 122 | == Update process 123 | 124 | Since the traditional FCOS upgrade process requires a disk, the live environment is not able to auto-update in place. For this reason, Zincati is not running there. 125 | 126 | For PXE-booted systems, it is recommended that images referenced in the PXE configuration are regularly refreshed. Once infrastructure and configurations are updated, the live PXE instance only needs to be rebooted in order to start running the new FCOS version. 127 | 128 | For ISO-booted systems, the ISO image used to boot the live environment should be periodically refreshed, and the instance rebooted to update the running OS. 129 | -------------------------------------------------------------------------------- /modules/ROOT/pages/live-reference.adoc: -------------------------------------------------------------------------------- 1 | = Live ISO and PXE image reference 2 | 3 | For an introduction to running Fedora CoreOS directly from RAM, see the xref:live-booting.adoc[provisioning guide]. 4 | 5 | == Passing the PXE rootfs to a machine 6 | 7 | The Fedora CoreOS PXE image includes three components: a `kernel`, an `initramfs`, and a `rootfs`. All three are mandatory and the live PXE environment will not boot without them. 
8 | 9 | There are multiple ways to pass the `rootfs` to a machine: 10 | 11 | - Specify only the `initramfs` file as the initrd in your PXE configuration, and pass an HTTP(S) or TFTP URL for the `rootfs` using the `coreos.live.rootfs_url=` kernel argument. This method requires 2 GiB of RAM, and is the recommended option unless you have special requirements. 12 | - Specify both `initramfs` and `rootfs` files as initrds in your PXE configuration. This can be done via multiple `initrd` directives, or using additional `initrd=` parameters as kernel arguments. This method is slower than the first method and requires 4 GiB of RAM. 13 | - Concatenate the `initramfs` and `rootfs` files together, and specify the combined file as the initrd. This method is slower and requires 4 GiB of RAM. 14 | 15 | == Passing an Ignition config to a live PXE system 16 | 17 | When booting Fedora CoreOS via live PXE, the kernel command line must include the arguments `ignition.firstboot ignition.platform.id=metal` to run Ignition. If running in a virtual machine, replace `metal` with the https://coreos.github.io/ignition/supported-platforms/[platform ID] for your platform, such as `qemu` or `vmware`. 18 | 19 | There are several ways to pass an Ignition config when booting Fedora CoreOS via PXE: 20 | 21 | - Add `ignition.config.url=` to the kernel command line. Supported URL schemes include `http`, `https`, `tftp`, `s3`, and `gs`. 22 | 23 | - If running virtualized, pass the Ignition config via the hypervisor, exactly as you would when booting from a disk image. Ensure the `ignition.platform.id` kernel argument is set to the https://coreos.github.io/ignition/supported-platforms/[platform ID] for your platform. 24 | 25 | - Generate a customized version of the `initramfs` containing your Ignition config using `coreos-installer pxe customize`. 
For example, run: 26 | + 27 | [source,bash,subs="attributes"] 28 | ---- 29 | coreos-installer pxe customize --live-ignition config.ign -o custom-initramfs.img \ 30 | fedora-coreos-{stable-version}-live-initramfs.x86_64.img 31 | ---- 32 | 33 | - If you prefer to keep the Ignition config separate from the Fedora CoreOS `initramfs` image, generate a separate initrd with the low-level `coreos-installer pxe ignition wrap` command and pass it as an additional initrd. For example, run: 34 | + 35 | [source,bash] 36 | ---- 37 | coreos-installer pxe ignition wrap -i config.ign -o ignition.img 38 | ---- 39 | + 40 | and then use a PXELINUX `APPEND` line similar to: 41 | + 42 | [source,subs="attributes"] 43 | ---- 44 | APPEND initrd=fedora-coreos-{stable-version}-live-initramfs.x86_64.img,fedora-coreos-{stable-version}-live-rootfs.x86_64.img,ignition.img ignition.firstboot ignition.platform.id=metal 45 | ---- 46 | 47 | == Passing network configuration to a live ISO or PXE system 48 | 49 | On Fedora CoreOS, networking is typically configured via https://developer.gnome.org/NetworkManager/stable/nm-settings-keyfile.html[NetworkManager keyfiles]. If your network requires special configuration such as static IP addresses, and your Ignition config fetches resources from the network, you cannot simply include those keyfiles in your Ignition config, since that would create a circular dependency. 50 | 51 | Instead, you can use `coreos-installer iso customize` or `coreos-installer pxe customize` with the `--network-keyfile` option to create a customized ISO image or PXE `initramfs` image which applies your network settings before running Ignition. 
For example: 52 | 53 | [source,bash,subs="attributes"] 54 | ---- 55 | coreos-installer iso customize --network-keyfile custom.nmconnection -o custom.iso \ 56 | fedora-coreos-{stable-version}-live.x86_64.iso 57 | ---- 58 | 59 | If you're PXE booting and want to keep your network settings separate from the Fedora CoreOS `initramfs` image, you can also use the lower-level `coreos-installer pxe network wrap` command to create a separate initrd image, and pass that as an additional initrd. For example, run: 60 | 61 | [source,bash] 62 | ---- 63 | coreos-installer pxe network wrap -k custom.nmconnection -o network.img 64 | ---- 65 | 66 | and then use a PXELINUX `APPEND` line similar to: 67 | 68 | [source,subs="attributes"] 69 | ---- 70 | APPEND initrd=fedora-coreos-{stable-version}-live-initramfs.x86_64.img,fedora-coreos-{stable-version}-live-rootfs.x86_64.img,network.img ignition.firstboot ignition.platform.id=metal 71 | ---- 72 | 73 | == Passing kernel arguments to a live ISO system 74 | 75 | If you want to modify the default kernel arguments of a live ISO system, you can use the `--live-karg-{append,replace,delete}` options to `coreos-installer iso customize`. 
For example, if you want to enable simultaneous multithreading (SMT) even on CPUs where that is insecure, you can run: 76 | 77 | [source,bash,subs="attributes"] 78 | ---- 79 | coreos-installer iso customize --live-karg-delete mitigations=auto,nosmt -o custom.iso \ 80 | fedora-coreos-{stable-version}-live.x86_64.iso 81 | ---- 82 | 83 | == Extracting PXE artifacts from a live ISO image 84 | 85 | If you want the Fedora CoreOS PXE artifacts and already have an ISO image, you can extract the PXE artifacts from it: 86 | 87 | [source,bash,subs="attributes"] 88 | ---- 89 | podman run --security-opt label=disable --pull=always --rm -v .:/data -w /data \ 90 | quay.io/coreos/coreos-installer:release iso extract pxe \ 91 | fedora-coreos-{stable-version}-live.x86_64.iso 92 | ---- 93 | 94 | The command will print the paths to the artifacts it extracted. 95 | 96 | == Using the minimal ISO image 97 | 98 | In some cases, you may want to boot the Fedora CoreOS ISO image on a machine equipped with Lights-Out Management (LOM) hardware. You can upload the ISO to the LOM controller as a virtual CD image, but the ISO may be larger than the LOM controller supports. 99 | 100 | To avoid this problem, you can convert the ISO image to a smaller _minimal ISO image_ without the `rootfs`. Similar to the PXE image, the minimal ISO must fetch the `rootfs` from the network during boot. 101 | 102 | Suppose you plan to host the `rootfs` image at `https://example.com/fedora-coreos-{stable-version}-live-rootfs.x86_64.img`. 
This command will extract a minimal ISO image and a `rootfs` from an ISO image, embedding a `coreos.live.rootfs_url` kernel argument with the correct URL: 103 | 104 | [source,bash,subs="attributes"] 105 | ---- 106 | podman run --security-opt label=disable --pull=always --rm -v .:/data -w /data \ 107 | quay.io/coreos/coreos-installer:release iso extract minimal-iso \ 108 | --output-rootfs fedora-coreos-{stable-version}-live-rootfs.x86_64.img \ 109 | --rootfs-url https://example.com/fedora-coreos-{stable-version}-live-rootfs.x86_64.img \ 110 | fedora-coreos-{stable-version}-live.x86_64.iso \ 111 | fedora-coreos-{stable-version}-live-minimal.x86_64.iso 112 | ---- 113 | -------------------------------------------------------------------------------- /modules/ROOT/pages/managing-files.adoc: -------------------------------------------------------------------------------- 1 | = Managing Files, Directories and Links 2 | 3 | You can use Ignition to create, replace or update files, directories or links. 4 | 5 | This example creates a directory with the default mode (set to `0755`: readable 6 | and recurseable by all), and writable only by the owner (by default `root`). 7 | 8 | .Example to create a directory with default ownership and permissions 9 | [source,yaml,subs="attributes"] 10 | ---- 11 | variant: fcos 12 | version: {butane-latest-stable-spec} 13 | storage: 14 | directories: 15 | - path: /opt/tools 16 | overwrite: true 17 | ---- 18 | 19 | This example creates a file named `/var/helloworld` with some content defined 20 | in-line. It also sets the file mode to `0644` (readable by all, writable by the 21 | owner) and sets ownership to `dnsmasq:dnsmasq`. 22 | 23 | .Example to create a file with in-line content 24 | [source,yaml,subs="attributes"] 25 | ---- 26 | variant: fcos 27 | version: {butane-latest-stable-spec} 28 | storage: 29 | files: 30 | - path: /var/helloworld 31 | overwrite: true 32 | contents: 33 | inline: Hello, world! 
34 | mode: 0644 35 | user: 36 | name: dnsmasq 37 | group: 38 | name: dnsmasq 39 | ---- 40 | 41 | This example creates a file with its content fetched from a remote location. In 42 | this case, it fetches an HTTPS URL and expects the file to be compressed with 43 | gzip and will decompress it before writing it on the disk. The decompressed 44 | content is checked against the hash value specified in the config. The format 45 | is `sha512-` followed by the 128 hex characters given by the sha512sum command. 46 | The resulting file is made readable and executable by all. 47 | 48 | .Example to create a files from a remote source 49 | [source,yaml,subs="attributes"] 50 | ---- 51 | variant: fcos 52 | version: {butane-latest-stable-spec} 53 | storage: 54 | files: 55 | - path: /opt/tools/transmogrifier 56 | overwrite: true 57 | contents: 58 | source: https://mytools.example.com/path/to/archive.gz 59 | compression: gzip 60 | verification: 61 | hash: sha512-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 62 | mode: 0555 63 | ---- 64 | 65 | This example creates a symbolic link in `/usr/local/bin` to a path in `/opt`. 66 | This is useful to let local processes invoke a program without altering their 67 | PATH environment variable. 68 | 69 | .Example to create a symbolic link 70 | [source,yaml,subs="attributes"] 71 | ---- 72 | variant: fcos 73 | version: {butane-latest-stable-spec} 74 | storage: 75 | links: 76 | - path: /usr/local/bin/transmogrifier 77 | overwrite: true 78 | target: /opt/tools/transmogrifier 79 | hard: false 80 | ---- 81 | 82 | If you need a directory and some of its parents to be owned by a specific user, 83 | you currently have to explicitly list them in your Butane config. See 84 | https://github.com/coreos/butane/issues/380[butane#380] for the tracking issue 85 | in Butane for a future better syntax for this case. 
86 | 87 | .Example to create directories with specific ownership 88 | [source,yaml,subs="attributes"] 89 | ---- 90 | variant: fcos 91 | version: {butane-latest-stable-spec} 92 | storage: 93 | directories: 94 | - path: /home/builder/.config 95 | user: 96 | name: builder 97 | group: 98 | name: builder 99 | - path: /home/builder/.config/systemd 100 | user: 101 | name: builder 102 | group: 103 | name: builder 104 | - path: /home/builder/.config/systemd/user 105 | user: 106 | name: builder 107 | group: 108 | name: builder 109 | - path: /home/builder/.config/systemd/user/default.target.wants 110 | user: 111 | name: builder 112 | group: 113 | name: builder 114 | - path: /home/builder/.config/systemd/user/timers.target.wants 115 | user: 116 | name: builder 117 | group: 118 | name: builder 119 | - path: /home/builder/.config/systemd/user/sockets.target.wants 120 | user: 121 | name: builder 122 | group: 123 | name: builder 124 | ---- 125 | -------------------------------------------------------------------------------- /modules/ROOT/pages/manual-rollbacks.adoc: -------------------------------------------------------------------------------- 1 | = Manual Rollbacks 2 | 3 | When an update is complete, the previous OS deployment remains on disk. If an update causes issues, you can use it as a fallback. This is a manual operation that requires human intervention and console access. 4 | 5 | == Temporary rollback 6 | 7 | To temporarily boot the previous OS deployment, hold down `Shift` during the OS boot process. When the bootloader menu appears, select the relevant OS entry in the menu. 
8 | 9 | == Permanent rollback 10 | 11 | To permanently revert to the previous OS deployment, log into the target node and run the following commands: 12 | 13 | [source,bash] 14 | ---- 15 | # Stop the service that performs automatic updates 16 | sudo systemctl stop zincati.service 17 | 18 | # Mark the previous OS deployment as the default, and immediately reboot into it 19 | sudo rpm-ostree rollback -r 20 | ---- 21 | 22 | Please note that Zincati will keep looking for updates and upgrade to any new available OS deployment, other than the one you just reverted. 23 | 24 | If you prefer, you can temporarily turn off auto-updates. Later on, you can re-enable them in order to let the machine catch up with the usual flow of updates: 25 | 26 | [source,bash] 27 | ---- 28 | # Disable Zincati in order to opt-out from future auto-updates 29 | sudo systemctl disable --now zincati.service 30 | 31 | [...] 32 | 33 | # At a later point, re-enable it to re-align with the tip of stream 34 | sudo systemctl enable --now zincati.service 35 | ---- 
Refer to link:https://coreos.github.io/butane/specs/[Butane Specification] for an explanation of the available configuration options. 8 | 9 | == Converting `cloud-init` and `cloud-config` userdata 10 | 11 | The following examples show the difference between FAH userdata and user configuration with Butane. 12 | 13 | .Example of FAH userdata file: 14 | ---- 15 | #cloud-config 16 | password: atomic 17 | ssh_pwauth: True 18 | chpasswd: { expire: False } 19 | 20 | ssh_authorized_keys: 21 | - ssh-rsa ... 22 | ---- 23 | 24 | This can be manually translated into a `passwd` node within the Butane config: 25 | 26 | .Example of users: 27 | [source,yaml,subs="attributes"] 28 | ---- 29 | variant: fcos 30 | version: {butane-latest-stable-spec} 31 | passwd: 32 | users: 33 | - name: core 34 | password_hash: "$6$5s2u6/jR$un0AvWnqilcgaNB3Mkxd5yYv6mTlWfOoCYHZmfi3LDKVltj.E8XNKEcwWm..." 35 | ssh_authorized_keys: 36 | - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGdByTgSVHq......." 37 | groups: [ sudo, docker ] 38 | ---- 39 | 40 | NOTE: Fedora CoreOS disables password login over SSH by default. It is strongly recommended to only use key authentication. Setting passwords can be useful however for logging into the console directly. 41 | 42 | == Converting storage definitions 43 | 44 | With FAH, you could configure additional storage for the system with either `cloud-init` or `docker-storage-setup` via the `/etc/sysconfig/docker-storage-setup` file. With FCOS, you should configure additional storage at provisioning time via Ignition in the `storage` node of the Butane config. 
45 | -------------------------------------------------------------------------------- /modules/ROOT/pages/migrate-cl.adoc: -------------------------------------------------------------------------------- 1 | = Migrating from CoreOS Container Linux (CL) to Fedora CoreOS (FCOS) 2 | 3 | Fedora CoreOS is the official successor of CoreOS Container Linux, which https://coreos.com/os/eol/[reached its end of life] on May 26, 2020. This page attempts to document the differences between CL and FCOS to ease the transition to FCOS. 4 | 5 | :toc: 6 | 7 | == Introduction 8 | 9 | To migrate from CL to FCOS, you must convert your old Container Linux Configs, Ignition configs, or `cloud-config` files to a xref:producing-ign.adoc[Butane config] and adapt the contents for FCOS. Since many of the configuration details have changed, you should reference this page and the https://github.com/coreos/fedora-coreos-tracker/issues/159[CL migration issue] on GitHub. 10 | 11 | == Installation changes 12 | 13 | The following changes have been made to the installation process: 14 | 15 | * The `coreos-install` script has been replaced with https://github.com/coreos/coreos-installer[`coreos-installer`]. It offers similar functionality. 16 | * The `coreos.autologin` kernel command-line parameter is not currently supported in FCOS. For access recovery purposes, there are instructions available xref:access-recovery.adoc[here]. 17 | * Certain CL platforms, such as Vagrant, are not yet supported in FCOS. Refer to the https://fedoraproject.org/coreos/download/[Download page] to see the available image types. 18 | 19 | == Software package changes 20 | 21 | * `etcd` is not included in FCOS. Refer to xref:running-containers.adoc#running-etcd[Running etcd] for instructions to run it as a container on FCOS. 22 | * `flannel` is not included in FCOS. 23 | * The Podman container runtime is included in FCOS and is the recommended container runtime. The rkt container runtime is not included. 
24 | * FCOS does not have a recommended mechanism to select the version of `docker`. 25 | * Network configuration is now handled by NetworkManager instead of `systemd-networkd`. 26 | * For time synchronization, use https://docs.fedoraproject.org/en-US/fedora/rawhide/system-administrators-guide/servers/Configuring_NTP_Using_the_chrony_Suite/[`chronyd`] rather than `ntpd` or `systemd-timesyncd`. 27 | * Automatic updates are now coordinated by Zincati, as described in the https://coreos.github.io/zincati/usage/auto-updates/[Zincati documentation]. The rollback mechanism (via grub) is now provided by https://coreos.github.io/rpm-ostree/[`rpm-ostree`]. 28 | * The functionality of the reboot manager (`locksmith`) is rolled into https://coreos.github.io/zincati/[Zincati]. 29 | * The `update-ssh-keys` tool is not provided on FCOS. sshd uses a xref:authentication.adoc#ssh-key-locations[helper program] to read key files directly out of `~/.ssh/authorized_keys.d`. 30 | 31 | == Configuration changes 32 | 33 | When writing Butane configs, note the following changes: 34 | 35 | * `coreos-metadata` is now https://coreos.github.io/afterburn/[Afterburn]. The prefix of the metadata variable names has changed from `COREOS_` to `AFTERBURN_`, and the following platform names have changed: 36 | ** `EC2` is now `AWS` 37 | ** `GCE` is now `GCP` 38 | + 39 | For more info, see the https://coreos.github.io/afterburn/usage/attributes/[Afterburn documentation]. 40 | 41 | * By default, FCOS does not allow password logins via SSH. We recommend xref:authentication.adoc#using-an-ssh-key[configuring SSH keys] instead. If needed, you can xref:authentication.adoc#enabling-ssh-password-authentication[enable SSH password authentication]. 42 | * Because `usermod` is not yet fully-functional on FCOS, there is a `docker` group in the `/etc/group` file. This is a stop-gap measure to facilitate a smooth transition to FCOS. 
The team is working on a more functional `usermod`, at which time the `docker` group will no longer be included by default. See the https://github.com/coreos/fedora-coreos-tracker/issues/2[docker group issue]. 43 | * There is no way to create directories below the `/` directory. Changes are restricted to `/etc` and `/var`. Refer to the documentation for the `storage` node of the Butane config for details about writing directories and files to FCOS. 44 | * Butane configs no longer have a separate section for network configuration. Use the Butane `files` section to write a https://developer.gnome.org/NetworkManager/stable/nm-settings-keyfile.html[NetworkManager key file] instead. 45 | 46 | == Operator notes 47 | 48 | * FCOS provides https://fedoramagazine.org/fedora-coreos-out-of-preview/[best-effort stability], and may occasionally include regressions or breaking changes for some use cases or workloads. 49 | * CL had three release channels: `alpha`, `beta`, and `stable`. The FCOS production https://github.com/coreos/fedora-coreos-tracker/blob/main/Design.md#release-streams[release streams] are `next`, `testing`, and `stable`, with somewhat different semantics. 50 | * In general, SELinux confinement should work the same as in Fedora. 51 | * To deploy an Ignition config as part of a PXE image (a "custom OEM" in CL terminology), follow the https://coreos.com/os/docs/latest/booting-with-pxe.html#adding-a-custom-oem[same process] as in CL, but place the `config.ign` file in the root of the archive. 52 | * In CL, metrics/telemetry data was collected by the update mechanism. In FCOS, nodes are counted (without unique identifiers) via the xref:counting.adoc[Count Me] mechanism. 53 | * Cloud CLI clients are not included in FCOS. There is an initiative to create a "tools" container to run on FCOS. 54 | * When opening an existing file in a sticky directory, the behavior differs from CL. 
See https://github.com/systemd/systemd/commit/2732587540035227fe59e4b64b60127352611b35[the relevant systemd commit]. 55 | * CL left Simultaneous Multi-Threading (SMT) enabled but advised users to turn it off if their systems were vulnerable to certain issues such as https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html[L1TF] or https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html[MDS]. By default, FCOS https://github.com/coreos/fedora-coreos-tracker/blob/main/Design.md#automatically-disable-smt-when-needed-to-address-vulnerabilities[automatically disables SMT] for vulnerable systems. 56 | * In general, `docker` uses the default configuration from Fedora, which is different under many aspects. Notably the logging driver is set to `journald` and live-restore is enabled. 57 | 58 | == Implementation notes 59 | //* Partition layout differences. CL is at https://coreos.com/os/docs/latest/sdk-disk-partitions.html. I can't make heads or tails of the results of the discussions in https://github.com/coreos/fedora-coreos-tracker/issues/94. 60 | * The default filesystem on CL was `ext4`. On FCOS, the default is `xfs`. 61 | * While CL used systemd socket activation for `sshd`, FCOS starts `sshd` at startup by default. 62 | * CL had an "OEM partition" at `/usr/share/oem` with a user-customizable GRUB config and some additional tools, but FCOS does not have this. 63 | //* Filesystem resizing differences. Need more info on FCOS side. 64 | -------------------------------------------------------------------------------- /modules/ROOT/pages/os-extensions.adoc: -------------------------------------------------------------------------------- 1 | = Adding OS extensions to the host system 2 | 3 | Fedora CoreOS keeps the base image as simple and small as possible for security and maintainability reasons. That is why you should in general prefer the usage of `podman` containers over layering software. 
However, in some cases it is necessary to add software to the base OS itself. For example, drivers or VPN software are potential candidates because they are harder to containerize and may be brought in as extensions to the OS. 4 | 5 | NOTE: If you're making nontrivial changes to the base operating system, you may instead consider using https://docs.fedoraproject.org/en-US/bootc/[Fedora Bootc], which is oriented around custom OS builds derived from a starting base image. There is more information on the relationship between Fedora CoreOS and Fedora Bootc in xref:faq.adoc#_how_does_fedora_coreos_relate_to_fedora_bootc[our FAQ]. 6 | 7 | To add in additional software to a Fedora CoreOS system, you can use https://coreos.github.io/rpm-ostree/[`rpm-ostree install`]. Consider these packages as "extensions": they extend the functionality of the base OS rather than e.g. providing runtimes for user applications. That said, there are no restrictions on which packages one can actually install. By default, packages are downloaded from the https://docs.fedoraproject.org/en-US/quick-docs/repositories/[Fedora repositories]. 8 | 9 | To start the layering of a package, you need to write a systemd unit that executes the `rpm-ostree` command to install the wanted package(s). 10 | Changes are applied to a new deployment and a reboot is necessary for those to take effect. 11 | 12 | == Example: Layering vim and setting it as the default editor 13 | 14 | Fedora CoreOS includes both `nano` and `vi` as text editors, with the former set as default (see the corresponding https://fedoraproject.org/wiki/Changes/UseNanoByDefault[Fedora change]). 15 | 16 | This example shows how to install the fully fledged `vim` text editor and how to set it up as default for all users by setting up the required configuration in `/etc/profile.d/`. 17 | 18 | NOTE: In the future, we will have a more Ignition-friendly method of doing this with stronger guarantees. 
See upstream issues https://github.com/coreos/butane/issues/81[butane#81] and https://github.com/coreos/fedora-coreos-tracker/issues/681[fedora-coreos-tracker#681] for more information. 19 | 20 | [source,yaml,subs="attributes"] 21 | ---- 22 | variant: fcos 23 | version: {butane-latest-stable-spec} 24 | systemd: 25 | units: 26 | # Installing vim as a layered package with rpm-ostree 27 | - name: rpm-ostree-install-vim.service 28 | enabled: true 29 | contents: | 30 | [Unit] 31 | Description=Layer vim with rpm-ostree 32 | Wants=network-online.target 33 | After=network-online.target 34 | # We run before `zincati.service` to avoid conflicting rpm-ostree 35 | # transactions. 36 | Before=zincati.service 37 | ConditionPathExists=!/var/lib/%N.stamp 38 | 39 | [Service] 40 | Type=oneshot 41 | RemainAfterExit=yes 42 | # `--allow-inactive` ensures that rpm-ostree does not return an error 43 | # if the package is already installed. This is useful if the package is 44 | # added to the root image in a future Fedora CoreOS release as it will 45 | # prevent the service from failing. 46 | ExecStart=/usr/bin/rpm-ostree install -y --allow-inactive vim 47 | ExecStart=/bin/touch /var/lib/%N.stamp 48 | ExecStart=/bin/systemctl --no-block reboot 49 | 50 | [Install] 51 | WantedBy=multi-user.target 52 | storage: 53 | files: 54 | # Set vim as default editor 55 | # We use `zz-` as prefix to make sure this is processed last in order to 56 | # override any previously set defaults. 57 | - path: /etc/profile.d/zz-default-editor.sh 58 | overwrite: true 59 | contents: 60 | inline: | 61 | export EDITOR=vim 62 | ---- 63 | -------------------------------------------------------------------------------- /modules/ROOT/pages/platforms.adoc: -------------------------------------------------------------------------------- 1 | = Supported Platforms 2 | 3 | Fedora CoreOS is provisioned via prebuilt disk images, and configured on first-boot via https://github.com/coreos/ignition[Ignition]. 
Each platform may require specific logic and components, thus dedicated images are provided for each supported environment. Additionally, a unique platform ID is available in the host environment for runtime introspection. 4 | 5 | == Supported platforms 6 | 7 | The currently supported platforms and their identifiers are listed below. 8 | 9 | === x86_64 10 | 11 | * Aliyun/Alibaba Cloud (`aliyun`): Cloud platform. See xref:provisioning-aliyun.adoc[Booting on Alibaba Cloud]. 12 | * Amazon Web Services (`aws`): Cloud platform. See xref:provisioning-aws.adoc[Booting on AWS]. 13 | * Microsoft Azure (`azure`): Cloud platform. See xref:provisioning-azure.adoc[Booting on Azure]. 14 | * Microsoft Azure Stack (`azurestack`): Cloud platform. 15 | * DigitalOcean (`digitalocean`): Cloud platform. See xref:provisioning-digitalocean.adoc[Booting on DigitalOcean]. 16 | * Exoscale (`exoscale`): Cloud platform. See xref:provisioning-exoscale.adoc[Booting on Exoscale]. 17 | * Google Cloud Platform (`gcp`): Cloud platform. See xref:provisioning-gcp.adoc[Booting on GCP]. 18 | * IBM Cloud (`ibmcloud`): Cloud platform. See xref:provisioning-ibmcloud.adoc[Booting on IBM Cloud]. 19 | * KubeVirt (`kubevirt`): Cloud platform. See xref:provisioning-kubevirt.adoc[Booting on KubeVirt]. 20 | * Bare metal (`metal`): With BIOS, UEFI or network boot, with standard or 4k Native disks. See xref:bare-metal.adoc[Installing on Bare Metal] or xref:live-booting-ipxe.adoc[Live-booting via iPXE]. 21 | * Nutanix (`nutanix`): Hypervisor. 22 | * OpenStack (`openstack`): Cloud platform. See xref:provisioning-openstack.adoc[Booting on OpenStack]. 23 | * QEMU (`qemu`): Hypervisor. See xref:provisioning-libvirt.adoc[Booting on libvirt] 24 | * VirtualBox (`virtualbox`): Hypervisor. See xref:provisioning-virtualbox.adoc[Booting on VirtualBox]. 25 | * VMware ESXi, Fusion, and Workstation (`vmware`): Hypervisor. See xref:provisioning-vmware.adoc[Booting on VMware]. 
Fedora CoreOS images currently use https://kb.vmware.com/s/article/1003746[hardware version] 17, supporting VMware ESXi 7.0 or later, Fusion 12.0 or later, and Workstation 16.0 or later. 26 | * Vultr (`vultr`): Cloud platform. See xref:provisioning-vultr.adoc[Booting on Vultr]. 27 | 28 | === AArch64 29 | 30 | * Amazon Web Services (`aws`): Cloud platform. See xref:provisioning-aws.adoc[Booting on AWS]. 31 | * Bare metal (`metal`): With UEFI or network boot, with standard or 4k Native disks. See xref:bare-metal.adoc[Installing on Bare Metal] or xref:live-booting-ipxe.adoc[Live-booting via iPXE] and xref:provisioning-raspberry-pi4.adoc[Booting on the Raspberry Pi 4]. 32 | * QEMU (`qemu`): Hypervisor. See xref:provisioning-libvirt.adoc[Booting on libvirt] 33 | * OpenStack (cloud platform): `openstack`): Cloud platform. See xref:provisioning-openstack.adoc[Booting on OpenStack]. 34 | 35 | === s390x 36 | 37 | * IBM Cloud (`ibmcloud`): Cloud platform. See xref:provisioning-ibmcloud.adoc[Booting on IBM Cloud]. 38 | * Bare metal (`metal`): From disk or network boot. See xref:bare-metal.adoc[Installing on Bare Metal] or xref:live-booting-ipxe.adoc[Live-booting via iPXE]. 39 | * QEMU (`qemu`): Hypervisor. See xref:provisioning-libvirt.adoc[Booting on libvirt] 40 | * OpenStack (cloud platform): `openstack`): Cloud platform. See xref:provisioning-openstack.adoc[Booting on OpenStack]. 41 | 42 | == Runtime introspection of platform IDs 43 | 44 | Each Fedora CoreOS image boots with a platform-specific identifier, available on the kernel command-line. The name of the parameter is `ignition.platform.id`. The platform ID is consumed by OS components such as https://github.com/coreos/ignition[Ignition] and https://github.com/coreos/afterburn[Afterburn]. Additionally, it can be used in systemd units via https://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConditionKernelCommandLine=[`ConditionKernelCommandLine=`]. 
45 | 46 | See https://coreos.github.io/ignition/supported-platforms/[Ignition's Supported Platforms] and https://coreos.github.io/afterburn/platforms/[Afterburn's Supported Platforms] documentation pages for more details about which features are supported for each platform. Note that some platforms are currently supported by Ignition and Afterburn but are not yet supported by Fedora CoreOS. 47 | 48 | The Platform ID can be introspected at runtime, as follows: 49 | 50 | .CLI example of platform introspection 51 | [source, bash] 52 | ---- 53 | $ grep -o ignition.platform.id='[[:alnum:]]*' /proc/cmdline 54 | 55 | ignition.platform.id=aws 56 | ---- 57 | -------------------------------------------------------------------------------- /modules/ROOT/pages/provisioning-aliyun.adoc: -------------------------------------------------------------------------------- 1 | = Provisioning Fedora CoreOS on Alibaba Cloud (Aliyun) 2 | 3 | This guide shows how to provision new Fedora CoreOS (FCOS) nodes on Alibaba Cloud. Fedora currently does not publish Fedora CoreOS images within Alibaba Cloud, so you must download an Alibaba Cloud image from Fedora and upload it to one of your Object Storage Service (OSS) buckets. 4 | 5 | == Prerequisites 6 | 7 | Before provisioning an FCOS machine, you must have an Ignition configuration file containing your customizations. If you do not have one, see xref:producing-ign.adoc[Producing an Ignition File]. 8 | 9 | NOTE: Fedora CoreOS has a default `core` user that can be used to explore the OS. If you want to use it, finalize its xref:authentication.adoc[configuration] by providing e.g. an SSH key. 10 | 11 | If you do not want to use Ignition to get started, you can make use of the https://coreos.github.io/afterburn/platforms/[Afterburn support]. 
12 | 13 | You also need to have access to an Alibaba Cloud account and https://www.alibabacloud.com/help/doc-detail/31884.htm?spm=a2c63.p38356.879954.10.3d1264baRYHfmB#task-njz-hf4-tdb[activated Object Storage Service (OSS)]. 14 | The examples below use the https://www.alibabacloud.com/help/product/29991.htm[Alibaba Cloud CLI] and https://stedolan.github.io/jq/[jq] as a command-line JSON processor. 15 | 16 | == Downloading an Alibaba Cloud image 17 | 18 | Fedora CoreOS is designed to be updated automatically, with different schedules per stream. 19 | Once you have picked the relevant stream, download, verify, and decompress the latest Alibaba Cloud image: 20 | 21 | [source, bash] 22 | ---- 23 | STREAM="stable" 24 | coreos-installer download --decompress -s $STREAM -p aliyun -f qcow2.xz 25 | ---- 26 | 27 | Alternatively, you can manually download an Alibaba Cloud image from the https://fedoraproject.org/coreos/download/?stream=stable#cloud_images[download page]. Verify the download, following the instructions on that page, and decompress it. 28 | 29 | == Uploading the image to Alibaba Cloud 30 | 31 | . Create any bucket that doesn't already exist in your Alibaba Cloud account with a globally unique name or reuse an existing bucket: 32 | + 33 | .Example creating Alibaba Cloud OSS (Object Storage Service) bucket 34 | [source, bash] 35 | ---- 36 | REGION="ap-southeast-1" 37 | BUCKET_NAME="my-bucket" 38 | BUCKET_URL="oss://${BUCKET_NAME}" 39 | aliyun oss mb "${BUCKET_URL}" --region="${REGION}" --acl=private 40 | ---- 41 | 42 | . Upload an FCOS image: 43 | + 44 | .Example uploading FCOS to an Alibaba Cloud OSS bucket 45 | [source, bash] 46 | ---- 47 | DOWNLOADED_IMAGE="./image.qcow2" 48 | IMAGE_NAME="my-fcos-image" 49 | IMAGE_BLOB="${IMAGE_NAME}.qcow2" 50 | aliyun oss cp "${DOWNLOADED_IMAGE}" "${BUCKET_URL}/${IMAGE_BLOB}" \ 51 | --region="${REGION}" --acl=private 52 | ---- 53 | 54 | . 
Import uploaded FCOS image: 55 | + 56 | .Example importing FCOS to Alibaba Cloud ECS 57 | [source, bash] 58 | ---- 59 | TASK_ID=$(aliyun ecs ImportImage \ 60 | --region="${REGION}" \ 61 | --DiskDeviceMapping.1.OSSBucket="${BUCKET_NAME}" \ 62 | --DiskDeviceMapping.1.OSSObject="${IMAGE_BLOB}" \ 63 | --ImageName="${IMAGE_NAME}" \ 64 | | jq --raw-output .TaskId) 65 | ---- 66 | 67 | . Wait until the image was successfully imported 68 | + 69 | .Example waiting with a timeout equal to one hour 70 | [source, bash] 71 | ---- 72 | aliyun ecs DescribeTasks --region="${REGION}" --TaskIds="${TASK_ID}" \ 73 | --waiter expr='TaskSet.Task[0].TaskStatus' to=Finished timeout=3600 74 | ---- 75 | 76 | . Determine id of imported FCOS image: 77 | + 78 | .Example determining id of the imported FCOS image 79 | [source, bash] 80 | ---- 81 | IMAGE_ID=$(aliyun ecs DescribeImages --region="${REGION}" --ImageName="${IMAGE_NAME}" \ 82 | | jq --raw-output .Images.Image[0].ImageId) 83 | ---- 84 | 85 | . Delete uploaded blob 86 | + 87 | .Example deleting uploaded blob 88 | [source, bash] 89 | ---- 90 | aliyun oss rm "${BUCKET_URL}/${IMAGE_BLOB}" --region "${REGION}" 91 | ---- 92 | 93 | == Creating a VSwitch 94 | 95 | There exists no default VPCs or VSwitches in Alibaba Cloud. Hence, for creating any instances a VSwitch must exist. Pick some existing or create one with the following steps. 96 | 97 | . Create a new VPC: 98 | + 99 | .Example creating a new VPC 100 | [source, bash] 101 | ---- 102 | VPC_CIDR="172.16.0.0/12" 103 | VPC_NAME="fcos-test" 104 | VPC_ID=$(aliyun vpc CreateVpc --region="${REGION}" \ 105 | --CidrBlock="${VPC_CIDR}" --VpcName="${VPC_NAME}" \ 106 | | jq --raw-output .VpcId) 107 | ---- 108 | 109 | . Pick some availability zone for creating a VSwitch: 110 | + 111 | .Example pick some availability zone 112 | [source,bash] 113 | ---- 114 | ZONE_ID=$(aliyun ecs DescribeZones --region="${REGION}" \ 115 | | jq --raw-output .Zones.Zone[0].ZoneId) 116 | ---- 117 | 118 | . 
Create a new VSwitch: 119 | + 120 | .Example creating a new VSwitch 121 | [source, bash] 122 | ---- 123 | VSWITCH_CIDR="172.16.0.0/16" 124 | VSWITCH_NAME="${VPC_NAME}" 125 | VSWITCH_ID=$(aliyun vpc CreateVSwitch \ 126 | --region="${REGION}" \ 127 | --CidrBlock="${VSWITCH_CIDR}" \ 128 | --VpcId="${VPC_ID}" \ 129 | --VSwitchName="${VSWITCH_NAME}" \ 130 | --ZoneId="${ZONE_ID}" \ 131 | | jq --raw-output .VSwitchId) 132 | ---- 133 | 134 | == Launching an ECS instance 135 | 136 | . Upload an SSH public key to Alibaba Cloud ECS 137 | + 138 | .Example uploading an SSH public key 139 | [source, bash] 140 | ---- 141 | KEY_PAIR_NAME="fcos-key" 142 | PUBLIC_KEY_PATH="" 143 | PUBLIC_KEY_BODY=$(cat "${PUBLIC_KEY_PATH}") 144 | aliyun ecs ImportKeyPair --region="${REGION}" \ 145 | --KeyPairName="${KEY_PAIR_NAME}" --PublicKeyBody="${PUBLIC_KEY_BODY}" 146 | ---- 147 | 148 | . Creating an ECS instance 149 | + 150 | .Example creating ECS instance 151 | [source, bash] 152 | ---- 153 | INSTANCE_NAME="my-fcos-vm" 154 | INSTANCE_TYPE="ecs.t6-c1m1.large" 155 | INSTANCE_ID=$(aliyun ecs CreateInstance \ 156 | --region="${REGION}" \ 157 | --KeyPairName="${KEY_PAIR_NAME}" \ 158 | --ImageId="${IMAGE_ID}" \ 159 | --InstanceName="${INSTANCE_NAME}" \ 160 | --InstanceType="${INSTANCE_TYPE}" \ 161 | --InternetChargeType=PayByTraffic \ 162 | --InternetMaxBandwidthIn=5 \ 163 | --InternetMaxBandwidthOut=5 \ 164 | --VSwitchId="${VSWITCH_ID}" \ 165 | | jq --raw-output .InstanceId) 166 | ---- 167 | 168 | . Allocate a public IPv4 address for the previously created instance 169 | + 170 | .Example allocating a public IP address 171 | [source, bash] 172 | ---- 173 | PUBLIC_IP=$(aliyun ecs AllocatePublicIpAddress \ 174 | --region="${REGION}" --InstanceId="${INSTANCE_ID}" \ 175 | | jq --raw-output .IpAddress) 176 | ---- 177 | 178 | . 
Start the instance 179 | + 180 | .Example starting an instance 181 | [source, bash] 182 | ---- 183 | aliyun ecs StartInstance --region="${REGION}" --InstanceId="${INSTANCE_ID}" 184 | ---- 185 | 186 | . Wait until the instance is running 187 | + 188 | .Example waiting and determining the public IP address 189 | [source, bash] 190 | ---- 191 | aliyun ecs DescribeInstanceStatus --InstanceId.1="$INSTANCE_ID" --region="${REGION}" \ 192 | --waiter expr='InstanceStatuses.InstanceStatus[0].Status' to=Running timeout=600 193 | ---- 194 | 195 | . Connect to the new instance via SSH 196 | + 197 | .Example connecting 198 | [source, bash] 199 | ---- 200 | ssh core@"${PUBLIC_IP}" 201 | ---- 202 | 203 | You can start a customized instance with your Ignition file by adding the parameter `--UserData=$(cat | base64 -w0)` to the `aliyun ecs CreateInstance` command that creates a new instance. 204 | -------------------------------------------------------------------------------- /modules/ROOT/pages/provisioning-applehv.adoc: -------------------------------------------------------------------------------- 1 | = Provisioning Fedora CoreOS on AppleHV 2 | 3 | This guide shows how to provision new Fedora CoreOS (FCOS) instances on macOS using https://github.com/crc-org/vfkit[vfkit]. 4 | 5 | 6 | == Prerequisites 7 | 8 | Before provisioning an FCOS machine, you must have an Ignition configuration file containing your customizations. If you do not have one, see xref:producing-ign.adoc[Producing an Ignition File]. 9 | 10 | You will also need to build or acquire https://github.com/crc-org/vfkit[vfkit]. Prebuilt binaries are also available from its https://github.com/crc-org/vfkit/releases/tag/v0.6.0[releases] page. Vfkit, like QEMU, has many options which are outside the scope of this provisioning example. Please consider reading their https://github.com/crc-org/vfkit/tree/main/doc[documentation]. 
11 | 12 | == Booting a new VM on macOS 13 | 14 | This section shows how to boot a new VM with vfkit. Vfkit is known to work with both Intel and Apple Silicon based Macs. 15 | 16 | === Fetching the AppleHV image 17 | 18 | Fetch the latest image suitable for your target stream (or https://fedoraproject.org/coreos/download/[download and verify] it from the web). Remember to download the appropriate image based on the architecture of your Mac. Once downloaded, you will also need to decompress the image. 19 | 20 | === Setting up a new VM 21 | 22 | Vfkit is not a stateful virtual machine framework. You simply need to run the vfkit binary to start a virtual machine. The following command line will launch a VM with: 23 | 24 | * 2 virtual CPUs 25 | * 2 GB of memory 26 | * a network device that will receive a IP address from Vfit 27 | * a GUI console with keyboard and mouse support 28 | 29 | .Launching FCOS with Vfkit 30 | [source, bash] 31 | ---- 32 | IGNITION_CONFIG="/path/to/example.ign" 33 | IMAGE="/path/to/image.qcow2" 34 | 35 | ./vfkit --cpus 2 --memory 2048 \ 36 | --bootloader efi,variable-store=efi-variable-store,create \ 37 | --device virtio-blk,path=${IMAGE} \ 38 | --device virtio-net,nat \ 39 | --ignition ${IGNITION_CONFIG} \ 40 | --device virtio-input,keyboard \ 41 | --device virtio-input,pointing \ 42 | --device virtio-gpu,width=800,height=600 \ 43 | --gui 44 | ---- 45 | 46 | Note: The AppleHV hypervisor does not allow you to see early boot and kernel messages. While you will see a GRUB boot menu, you will not see anything until later in the boot. 47 | 48 | === Exploring the OS 49 | 50 | image::vfkit.png[Vfkit GUI] 51 | 52 | When FCOS is completed booting, you will see the IP address of the VM displayed in the GUI window. Vfkit will lease an address in the `192.168.64.0/24` network. At this point, you can either choose to login or SSH to the VM. Unlike some other virtualization providers, you can SSH to the virtual machine from the host. 
53 | 54 | [source, bash] 55 | ---- 56 | ssh core@192.168.64.5 57 | ---- 58 | -------------------------------------------------------------------------------- /modules/ROOT/pages/provisioning-azure.adoc: -------------------------------------------------------------------------------- 1 | = Provisioning Fedora CoreOS on Azure 2 | 3 | This guide shows how to provision new Fedora CoreOS (FCOS) nodes on Azure. Fedora currently does not publish Fedora CoreOS images within Azure, so you must download an Azure image from Fedora and upload it to your Azure subscription. 4 | 5 | NOTE: FCOS does not support legacy https://learn.microsoft.com/en-us/azure/virtual-machines/classic-vm-deprecation[Azure Service Manager] virtual machines. 6 | 7 | == Prerequisites 8 | 9 | Before provisioning an FCOS machine, you must have an Ignition configuration file containing your customizations. If you do not have one, see xref:producing-ign.adoc[Producing an Ignition File]. 10 | 11 | NOTE: Fedora CoreOS has a default `core` user that can be used to explore the OS. If you want to use it, finalize its xref:authentication.adoc[configuration] by providing e.g. an SSH key. 12 | 13 | If you do not want to use Ignition to get started, you can make use of the https://coreos.github.io/afterburn/platforms/[Afterburn support]. 14 | 15 | You also need to have access to an Azure subscription. The examples below use the https://docs.microsoft.com/en-us/cli/azure/?view=azure-cli-latest[Azure CLI]. 16 | 17 | == Downloading an Azure image 18 | 19 | Fedora CoreOS is designed to be updated automatically, with different schedules per stream. 
20 | Once you have picked the relevant stream, download, verify, and decompress the latest Azure image: 21 | 22 | [source, bash] 23 | ---- 24 | STREAM="stable" 25 | coreos-installer download --decompress -s $STREAM -p azure -f vhd.xz 26 | ---- 27 | 28 | Alternatively, you can manually download an Azure image from the https://fedoraproject.org/coreos/download/?stream=stable#cloud_images[download page]. Verify the download, following the instructions on that page, and decompress it. 29 | 30 | == Uploading the image to Azure 31 | 32 | . Create any resources that don't already exist in your Azure account: 33 | + 34 | .Example creating Azure resources 35 | [source, bash] 36 | ---- 37 | az_region="westus2" 38 | az_resource_group="my-group" 39 | az_storage_account="mystorageacct" 40 | az_container="my-container" 41 | # Create resource group 42 | az group create -l "${az_region}" -n "${az_resource_group}" 43 | # Create storage account for uploading FCOS image 44 | az storage account create -g "${az_resource_group}" -n "${az_storage_account}" 45 | # Retrieve connection string for storage account 46 | cs=$(az storage account show-connection-string -n "${az_storage_account}" -g "${az_resource_group}" | jq -r .connectionString) 47 | # Create storage container for uploading FCOS image 48 | az storage container create --connection-string "${cs}" -n "${az_container}" 49 | ---- 50 | 51 | . 
Create an FCOS image: 52 | + 53 | .Example creating Azure image 54 | [source, bash] 55 | ---- 56 | downloaded_image_file="./image.vhd" 57 | az_image_name="my-fcos-image" 58 | az_image_blob="${az_image_name}.vhd" 59 | # Upload image blob 60 | az storage blob upload --connection-string "${cs}" -c "${az_container}" -f "${downloaded_image_file}" -n "${az_image_blob}" 61 | # Create the image 62 | az image create -n "${az_image_name}" -g "${az_resource_group}" --source "https://${az_storage_account}.blob.core.windows.net/${az_container}/${az_image_blob}" --location "${az_region}" --os-type Linux 63 | # Delete the uploaded blob 64 | az storage blob delete --connection-string "$cs" -c "${az_container}" -n "${az_image_blob}" 65 | ---- 66 | 67 | == Launching a VM instance using custom-data 68 | 69 | . Launch a VM. Your Ignition configuration can be passed to the VM as custom data, or you can skip passing custom data if you just want SSH access. Your SSH public key from `~/.ssh` will automatically be added to the VM. This provides an easy way to test out FCOS without first creating an Ignition config. 70 | + 71 | .Example launching Azure image 72 | [source, bash] 73 | ---- 74 | az_vm_name="my-fcos-vm" 75 | ignition_path="./config.ign" 76 | az vm create -n "${az_vm_name}" -g "${az_resource_group}" --image "${az_image_name}" --admin-username core --custom-data "$(cat ${ignition_path})" 77 | ---- 78 | 79 | . You now should be able to SSH into the instance using the associated IP address. 80 | + 81 | .Example connecting 82 | [source, bash] 83 | ---- 84 | ssh core@ 85 | ---- 86 | 87 | == Launching a VM instance using custom-data and a private Azure blob 88 | 89 | . Define your variables. 90 | 91 | [source, bash] 92 | ---- 93 | az_vm_name=my-fcos-vm 94 | ignition_path="./config.ign" 95 | az_blob_ignition_path=./privateConfig.ign 96 | az_blob_ignition_file_name=privateConfig.ign 97 | ---- 98 | 99 | . Upload your ign file to Azure blob storage. 
100 | 101 | [source, bash] 102 | ---- 103 | az storage blob upload --connection-string "${cs}" -c "${az_blob_ignition_file_name}" -f "${az_blob_ignition_path}" -n "${ignition_file_name}" 104 | ---- 105 | 106 | . Create your remote ignition config to reference this new blob. Read about that here xref:remote-ign.adoc[Using a remote Ignition config] 107 | . Note: The source field should have a value similar to "https://${az_storage_account}.blob.core.windows.net/${az_image_blob}/${az_blob_ignition_file_name} 108 | 109 | . Create an identity and give it proper access to your storage account. 110 | 111 | [source, bash] 112 | ---- 113 | az identity create --name "${az_vm_name}-identity" --resource-group "${az_resource_group}" 114 | identity_principal_id=$(az identity show --name "${az_vm_name}-identity" --resource-group "${az_resource_group}" --query principalId -o tsv) 115 | identity_id=$(az identity show --name "${az_vm_name}-identity" --resource-group "${az_resource_group}" --query id -o tsv) 116 | az role assignment create --assignee "${identity_principal_id}" --role "Storage Blob Data Contributor" --scope /subscriptions/${subscription_id}/resourceGroups/${az_resource_group}/providers/Microsoft.Storage/storageAccounts/${az_storage_account} 117 | ---- 118 | 119 | . Create the VM passing the new identity. 120 | 121 | [source, bash] 122 | ---- 123 | az vm create -n "${az_vm_name}" -g "${az_resource_group}" --image "${az_image_name}" --admin-username core --custom-data "$(cat ${ignition_path})" --assign-identity "${identity_id}" 124 | ---- -------------------------------------------------------------------------------- /modules/ROOT/pages/provisioning-digitalocean.adoc: -------------------------------------------------------------------------------- 1 | = Provisioning Fedora CoreOS on DigitalOcean 2 | 3 | This guide shows how to provision new Fedora CoreOS (FCOS) nodes on DigitalOcean. 
Fedora CoreOS images are currently not published directly on DigitalOcean, so you must download a Fedora CoreOS DigitalOcean image and upload it to your DigitalOcean account as a https://www.digitalocean.com/docs/images/custom-images/[custom image]. 4 | 5 | == Prerequisites 6 | 7 | Before provisioning an FCOS machine, you must have an Ignition configuration file containing your customizations. If you do not have one, see xref:producing-ign.adoc[Producing an Ignition File]. 8 | 9 | NOTE: Fedora CoreOS has a default `core` user that can be used to explore the OS. If you want to use it, finalize its xref:authentication.adoc[configuration] by providing e.g. an SSH key. 10 | 11 | If you do not want to use Ignition to get started, you can make use of the https://coreos.github.io/afterburn/platforms/[Afterburn support]. 12 | 13 | You also need to have access to a DigitalOcean account. The examples below use the https://github.com/digitalocean/doctl[doctl] command-line tool and https://stedolan.github.io/jq/[jq] as a command-line JSON processor. 14 | 15 | == Creating a DigitalOcean custom image 16 | 17 | Fedora CoreOS is designed to be updated automatically, with different schedules per stream. 18 | 19 | . Once you have picked the relevant stream, find the corresponding DigitalOcean image on the https://fedoraproject.org/coreos/download/?stream=stable#cloud_images[download page] and copy the URL of the Download link. 20 | 21 | . Create the custom image: 22 | + 23 | .Example uploading FCOS to a DigitalOcean custom image 24 | [source, bash] 25 | ---- 26 | doctl compute image create my-fcos-image --region sfo2 --image-url 27 | # Wait for image creation to finish 28 | while ! doctl compute image list-user --output json | jq -c '.[] | select(.name=="my-fcos-image")' | grep available; do sleep 5; done 29 | ---- 30 | 31 | The above command uploads the image and waits until it is ready to be used. 
This process can take a long time, in our testing we have seen it take up to 15 minutes. Wait time is dependent on upload speeds and platform load. 32 | 33 | === Launching a droplet 34 | 35 | . If you don't already have an SSH key uploaded to DigitalOcean, upload one: 36 | + 37 | .Example uploading an SSH key to DigitalOcean 38 | [source, bash] 39 | ---- 40 | doctl compute ssh-key create my-key --public-key "$(cat ~/.ssh/id_rsa.pub)" 41 | ---- 42 | 43 | . Launch a droplet. Your Ignition configuration can be passed to the VM as its https://docs.digitalocean.com/products/droplets/how-to/provide-user-data/#about-user-data[user data], or you can skip passing user data if you just want SSH access. This provides an easy way to test out FCOS without first creating an Ignition config. 44 | + 45 | When creating a FCOS DigitalOcean droplet, you must specify an SSH key for the droplet, even if you plan to inject SSH keys via Ignition. 46 | + 47 | .Example launching FCOS on DigitalOcean using an Ignition configuration file 48 | [source, bash] 49 | ---- 50 | image_id=$(doctl compute image list-user | grep my-fcos-image | cut -f1 -d ' ') 51 | key_id=$(doctl compute ssh-key list | grep my-key | cut -f1 -d ' ') 52 | doctl compute droplet create my-fcos-droplet --image "${image_id}" --region sfo2 --size s-2vcpu-2gb --user-data-file --ssh-keys "${key_id}" --wait 53 | ---- 54 | + 55 | NOTE: While the DigitalOcean documentation mentions `cloud-init` and scripts, FCOS does not support cloud-init or the ability to run scripts from user-data. It accepts only Ignition configuration files. 56 | 57 | . You now should be able to SSH into the instance using the associated IP address. 
58 | + 59 | .Example connecting 60 | [source, bash] 61 | ---- 62 | ssh core@ 63 | ---- 64 | -------------------------------------------------------------------------------- /modules/ROOT/pages/provisioning-exoscale.adoc: -------------------------------------------------------------------------------- 1 | = Provisioning Fedora CoreOS on Exoscale 2 | 3 | This guide shows how to provision new Fedora CoreOS (FCOS) instances on https://exoscale.com[Exoscale] Cloud Hosting. 4 | 5 | == Prerequisites 6 | 7 | Before provisioning an FCOS machine, it is recommended to have an Ignition configuration file containing your customizations. If you do not have one, see xref:producing-ign.adoc[Producing an Ignition File]. 8 | 9 | NOTE: Fedora CoreOS has a default `core` user that can be used to explore the OS. If you want to use it, finalize its xref:authentication.adoc[configuration] by providing e.g. an SSH key. 10 | 11 | If you do not want to use Ignition to get started, you can make use of the https://coreos.github.io/afterburn/platforms/[Afterburn support].. 12 | 13 | You also need to have access to an Exoscale account. https://portal.exoscale.com/register[Register] if you don't have one. 14 | 15 | == Uploading an FCOS image as a custom Template 16 | 17 | NOTE: Exoscale offers official FCOS templates, but they are currently out of date. For now, we recommend creating your own template. Track progress on fixing this in https://github.com/coreos/fedora-coreos-tracker/issues/1166[#1166]. 18 | 19 | Exoscale provides https://community.exoscale.com/documentation/compute/custom-templates[Custom Templates] to be able to upload any cloud image. To create a Custom Template you first need to download and decompress the image. 
20 | 21 | .Download and decompress the QCOW2 image with https://github.com/coreos/coreos-installer[coreos-installer] 22 | [source, bash] 23 | ---- 24 | STREAM="stable" 25 | coreos-installer download -d -s $STREAM -p exoscale -f qcow2.xz 26 | ---- 27 | 28 | Alternatively, QCOW2 images can be downloaded from the https://fedoraproject.org/coreos/download/?stream=stable#cloud_images[download page] and manually decompressed. 29 | 30 | Next you can https://community.exoscale.com/documentation/compute/custom-templates/#register-a-custom-template[Register a Custom Template]. This can be done from the https://portal.exoscale.com/compute/templates/add[Web Portal] or the https://community.exoscale.com/documentation/tools/exoscale-command-line-interface/[Exoscale CLI]. Either option requires the uncompressed image to be uploaded somewhere public and for the URL and an MD5 checksum to be provided during registration. One option is to use the object storage provided by Exoscale to host the image. 31 | 32 | .Upload to Object Storage and create Custom Template 33 | [source, bash] 34 | ---- 35 | # Set the version and calculate the checksum 36 | FCOS_VERSION='...' 37 | FILE="fedora-coreos-${FCOS_VERSION}-exoscale.x86_64.qcow2" 38 | CHECKSUM=$(md5sum "${FILE}" | cut -d " " -f 1) 39 | 40 | # Upload to object storage 41 | BUCKET='newbucket' 42 | exo storage mb "sos://${BUCKET}" 43 | exo storage upload --acl public-read "${FILE}" "sos://${BUCKET}/image-import/" 44 | 45 | # Create the template using given URL and CHECKSUM 46 | URL=$(exo storage show "sos://${BUCKET}/image-import/${FILE}" --output-template "{{.URL}}") 47 | TEMPLATE="fedora-coreos-${FCOS_VERSION}" 48 | exo compute instance-template register --boot-mode=uefi $TEMPLATE $URL $CHECKSUM 49 | ---- 50 | 51 | You can then view the template using `exo compute instance-template show --visibility=private $TEMPLATE`.
52 | 53 | == Launching a VM instance 54 | 55 | You can provision a FCOS instance using the Exoscale https://portal.exoscale.com/compute/instances/add[Web Portal] or via the https://community.exoscale.com/documentation/tools/exoscale-command-line-interface/[Exoscale CLI]. 56 | 57 | NOTE: You will need to use at least version https://github.com/exoscale/cli/releases/tag/v1.54.0[v1.54.0] of the Exoscale CLI. 58 | 59 | WARNING: Do not use the `--cloud-init-compress` argument to the CLI. It causes the Ignition config to be passed compressed to the instance and https://github.com/coreos/fedora-coreos-tracker/issues/1160[Ignition doesn't tolerate that]. 60 | 61 | .Add your ssh-key 62 | [source, bash] 63 | ---- 64 | exo compute ssh-key register key-name /path/to/key 65 | ---- 66 | 67 | .Launching a new instance with Exoscale CLI 68 | [source, bash] 69 | ---- 70 | NAME='worker' 71 | TYPE='standard.medium' 72 | DISK='10' # in GiB 73 | SSHKEY='key-name' 74 | TEMPLATE=$TEMPLATE # template name set above 75 | exo compute instance create "${NAME}" \ 76 | --disk-size $DISK \ 77 | --ssh-key "${SSHKEY}" \ 78 | --template $TEMPLATE \ 79 | --template-visibility private \ 80 | --cloud-init "path/to/ignition-file.ign" 81 | ---- 82 | 83 | NOTE: If just SSH access is desired and no further customization is required, you don't need to pass any Ignition file and can omit the `--cloud-init` argument. 84 | 85 | TIP: You can find out the instance's assigned IP by running `exo compute instance show "${NAME}"` 86 | 87 | You now should be able to SSH into the instance using the associated IP address. 
88 | 89 | .Example connecting 90 | [source, bash] 91 | ---- 92 | ssh core@ 93 | ---- 94 | -------------------------------------------------------------------------------- /modules/ROOT/pages/provisioning-gcp.adoc: -------------------------------------------------------------------------------- 1 | = Provisioning Fedora CoreOS on Google Cloud Platform 2 | 3 | This guide shows how to provision new Fedora CoreOS (FCOS) instances on Google Cloud Platform (GCP). 4 | 5 | == Prerequisites 6 | 7 | Before provisioning an FCOS machine, you must have an Ignition configuration file containing your customizations. If you do not have one, see xref:producing-ign.adoc[Producing an Ignition File]. 8 | 9 | NOTE: Fedora CoreOS has a default `core` user that can be used to explore the OS. If you want to use it, finalize its xref:authentication.adoc[configuration] by providing e.g. an SSH key. 10 | 11 | If you do not want to use Ignition to get started, you can make use of the https://coreos.github.io/afterburn/platforms/[Afterburn support]. 12 | 13 | You also need to have access to a GCP account. The examples below use the https://cloud.google.com/sdk/gcloud[gcloud] command-line tool, which must be separately installed and configured beforehand. 14 | 15 | == Selecting an image family 16 | 17 | Fedora CoreOS is designed to be updated automatically, with different schedules per stream. 18 | 19 | FCOS images are published under the `fedora-coreos-cloud` project and further organized into image families, tracking the corresponding stream: 20 | 21 | * `fedora-coreos-stable` 22 | * `fedora-coreos-testing` 23 | * `fedora-coreos-next` 24 | 25 | Before proceeding, check the details of each xref:update-streams.adoc[update stream] and pick the one most suited for your use case. 
26 | 27 | You can inspect the current state of an image family as follows: 28 | 29 | .Inspecting an image family 30 | [source, bash] 31 | ---- 32 | STREAM='stable' 33 | gcloud compute images describe-from-family \ 34 | --project "fedora-coreos-cloud" "fedora-coreos-${STREAM}" 35 | ---- 36 | 37 | == Launching a VM instance 38 | 39 | New GCP instances can be directly created and booted from public FCOS images. 40 | 41 | If you just want SSH access and no further customization, you don't need to pass any custom instance metadata. Depending on your GCP project configuration, relevant SSH public keys will be automatically added to the VM. This provides an easy way to test out FCOS without first creating an Ignition config. 42 | 43 | NOTE: Currently, we don't support logging in using SSH through the GCP web console, using the `gcloud compute ssh` CLI method or OS Login. See https://github.com/coreos/fedora-coreos-tracker/issues/648[fedora-coreos-tracker#648] for more information. 44 | 45 | .Launching a new instance 46 | [source, bash] 47 | ---- 48 | STREAM='stable' 49 | NAME='fcos-node01' 50 | ZONE='us-central1-a' 51 | gcloud compute instances create \ 52 | --image-project "fedora-coreos-cloud" \ 53 | --image-family "fedora-coreos-${STREAM}" \ 54 | --zone "${ZONE}" "${NAME}" 55 | ---- 56 | 57 | TIP: You can find out the instance's assigned IP by running `gcloud compute instances list` 58 | 59 | You now should be able to SSH into the instance using the associated IP address. 60 | 61 | .Example connecting 62 | [source, bash] 63 | ---- 64 | ssh core@ 65 | ---- 66 | 67 | 68 | In order to launch a customized FCOS instance, a valid Ignition configuration must be passed as metadata under the 69 | `user-data` key at creation time. In the web console, this is available under the Management section. 
70 | From the command-line, use `--metadata-from-file`: 71 | 72 | .Launching and customizing a new instance 73 | [source, bash] 74 | ---- 75 | STREAM='stable' 76 | NAME='fcos-node01' 77 | ZONE='us-central1-a' 78 | CONFIG='example.ign' 79 | gcloud compute instances create \ 80 | --image-project "fedora-coreos-cloud" \ 81 | --image-family "fedora-coreos-${STREAM}" \ 82 | --metadata-from-file "user-data=${CONFIG}" \ 83 | --zone "${ZONE}" "${NAME}" 84 | ---- 85 | 86 | NOTE: By design, https://cloud.google.com/compute/docs/startupscript[startup scripts] are not supported on FCOS. Instead, it is recommended to encode any startup logic as systemd service units in the Ignition configuration. 87 | Again, note you need to use the `user-data` key for Ignition; it will also not work to paste Ignition into this field in the web console. 88 | -------------------------------------------------------------------------------- /modules/ROOT/pages/provisioning-hetzner.adoc: -------------------------------------------------------------------------------- 1 | = Provisioning Fedora CoreOS on Hetzner 2 | 3 | This guide shows how to provision new Fedora CoreOS (FCOS) nodes on Hetzner. 4 | Fedora CoreOS is currently not available as an option in the operating system selection on Hetzner. 5 | Thus you must first download the Fedora CoreOS disk image for Hetzner, then create a snapshot from it in your Hetzner account using the https://github.com/apricote/hcloud-upload-image[hcloud-upload-image] tool, and finally create your servers from this snapshot. 6 | 7 | IMPORTANT: Support for Fedora CoreOS on Hetzner is considered emerging, in that it does not yet offer an optimized user experience and relies on tools not officially supported by Hetzner. 8 | See https://github.com/coreos/fedora-coreos-tracker/issues/1324[issue #1324] for more details.
9 | 10 | IMPORTANT: The https://github.com/apricote/hcloud-upload-image[hcloud-upload-image] tool is not an official Hetzner Cloud product and Hetzner Cloud does not provide support for it. 11 | Alternatively, you can also use the official https://github.com/hetznercloud/packer-plugin-hcloud[packer-plugin-hcloud] to install the image via `coreos-installer`. 12 | 13 | IMPORTANT: In order to create a snapshot, the https://github.com/apricote/hcloud-upload-image[hcloud-upload-image] tool will provision a small server and boot it in rescue mode. 14 | As this server is short lived, the cost should be very limited. 15 | The resulting snapshots are charged per GB per month. 16 | See https://docs.hetzner.com/cloud/servers/backups-snapshots/overview/[Backups/Snapshots] in the Hetzner Cloud documentation. 17 | You may delete this snapshot once the server has been provisioned. 18 | 19 | == Prerequisites 20 | 21 | Before provisioning an FCOS machine, you must have an Ignition configuration file containing your customizations. 22 | If you do not have one, see xref:producing-ign.adoc[Producing an Ignition File]. 23 | 24 | NOTE: Fedora CoreOS has a default `core` user that can be used to explore the OS. 25 | If you want to use it, finalize its xref:authentication.adoc[configuration] by providing e.g. an SSH key. 26 | 27 | If you do not want to use Ignition to get started, you can make use of the https://coreos.github.io/afterburn/platforms/[Afterburn support] and only configure SSH keys. 28 | 29 | You also need to have access to a Hetzner account. 30 | The examples below use the https://github.com/hetznercloud/cli[hcloud] command-line tool, the https://github.com/apricote/hcloud-upload-image[hcloud-upload-image] tool and https://stedolan.github.io/jq/[jq] as a command-line JSON processor. 31 | 32 | == Downloading an Hetzner image 33 | 34 | Fedora CoreOS is designed to be updated automatically, with different schedules per stream. 
Once you have picked the relevant stream, download and verify the latest Hetzner image: 35 | 36 | [source, bash] 37 | ---- 38 | ARCH="x86_64" # or "aarch64" 39 | STREAM="stable" # or "testing", "next" 40 | coreos-installer download -s "$STREAM" -p hetzner -a "$ARCH" -f raw.xz 41 | ---- 42 | 43 | NOTE: Both x86_64 and aarch64 architectures are supported on Hetzner. 44 | 45 | Alternatively, you can manually download an Hetzner image from the https://fedoraproject.org/coreos/download/?stream=stable[download page]. 46 | Verify the download, following the instructions on that page. 47 | 48 | == Creating a snapshot 49 | 50 | . Use the `hcloud-upload-image` to create a snapshot from this image: 51 | + 52 | [source, bash] 53 | ---- 54 | IMAGE_NAME="fedora-coreos-41.20250213.0-hetzner.x86_64.raw.xz" 55 | export HCLOUD_TOKEN="" 56 | STREAM="stable" # or "testing", "next" 57 | HETZNER_ARCH="x86" # or "arm" 58 | 59 | hcloud-upload-image upload \ 60 | --architecture "$HETZNER_ARCH" \ 61 | --compression xz \ 62 | --image-path "$IMAGE_NAME" \ 63 | --labels os=fedora-coreos,channel="$STREAM" \ 64 | --description "Fedora CoreOS ($STREAM, $ARCH)" 65 | ---- 66 | + 67 | NOTE: The `hcloud-upload-image` tool uses different names for architectures (`x86_64` -> `x86`, `aarch64` -> `arm`). 68 | + 69 | . Wait for the process to complete and validate that you have a snapshot: 70 | + 71 | [source, bash] 72 | ---- 73 | hcloud image list --type=snapshot --selector=os=fedora-coreos 74 | ---- 75 | 76 | == Launching a server 77 | 78 | . If you don't already have an SSH key uploaded to Hetzner, you may upload one: 79 | + 80 | .Example uploading an SSH key to Hetzner 81 | [source, bash] 82 | ---- 83 | SSH_PUBKEY="ssh-ed25519 ..." 84 | SSH_KEY_NAME="fedora-coreos-hetzner" 85 | hcloud ssh-key create --name "$SSH_KEY_NAME" --public-key "$SSH_PUBKEY" 86 | ---- 87 | + 88 | . Launch a server. 
Your Ignition configuration can be passed to the VM as its user data, or you can skip passing user data if you just want SSH access. 89 | This provides an easy way to test out FCOS without first creating an Ignition config. 90 | + 91 | .Example launching FCOS on Hetzner using an Ignition configuration file and SSH key 92 | [source, bash] 93 | ---- 94 | IMAGE_ID="$(hcloud image list \ 95 | --type=snapshot \ 96 | --selector=os=fedora-coreos \ 97 | --output json \ 98 | | jq -r '.[0].id')" 99 | SSH_KEY_NAME="fedora-coreos-hetzner" # See: hcloud ssh-key list 100 | DATACENTER="fsn1-dc14" # See: hcloud datacenter list 101 | TYPE="cx22" # See: hcloud server-type list 102 | NAME="fedora-coreos-test" 103 | IGNITION_CONFIG="./config.ign" 104 | hcloud server create \ 105 | --name "$NAME" \ 106 | --type "$TYPE" \ 107 | --datacenter "$DATACENTER" \ 108 | --image "$IMAGE_ID" \ 109 | --ssh-key "$SSH_KEY_NAME" \ 110 | --user-data-from-file "$IGNITION_CONFIG" 111 | ---- 112 | + 113 | NOTE: While the Hetzner documentation and website mentions `cloud-init` and "cloud config", FCOS does not support cloud-init. 114 | It accepts only Ignition configuration files. 115 | 116 | . You now should be able to SSH into the instance using the associated IP address. 117 | + 118 | .Example connecting 119 | [source, bash] 120 | ---- 121 | ssh core@"$(hcloud server ip "$NAME")" 122 | ---- 123 | -------------------------------------------------------------------------------- /modules/ROOT/pages/provisioning-hyperv.adoc: -------------------------------------------------------------------------------- 1 | = Provisioning Fedora CoreOS on Microsoft Hyper-V 2 | 3 | This guide shows how to provision new Fedora CoreOS (FCOS) nodes on Microsoft Hyper-V. 4 | 5 | == Prerequisites 6 | 7 | You must have an Ignition configuration file containing your customizations. If you do not have one, see xref:producing-ign.adoc[Producing an Ignition File]. 
8 | 9 | You will also need a small utility from https://github.com/containers/libhvee[libhvee] called `kvpctl`. It attaches your Ignition config to your virtual machine. Precompiled binaries can be found on the project's https://github.com/containers/libhvee/releases[releases page]. 10 | 11 | === Downloading the disk image 12 | 13 | Hyper-V disk images can be manually downloaded from the https://fedoraproject.org/coreos/download/[download page]. Be sure to decompress the image after downloading. 14 | 15 | == Booting a new VM on Microsoft Hyper-V 16 | 17 | === Creating a virtual switch 18 | 19 | You must first create a virtual switch so your virtual machine has a network to connect to. To do this, launch Hyper-V Manager and select your server from the list: 20 | 21 | image::hyperv-select-server.png[Hyper-V server list] 22 | 23 | Then click _Virtual Switch Manager..._ in the _Actions_ panel: 24 | 25 | image::hyperv-actions.png[Hyper-V Manager Actions panel] 26 | 27 | Follow the prompts under _New virtual network switch_ to create the virtual switch of the type you want: 28 | 29 | image::hyperv-switch-create.png[New Virtual Network Switch tab] 30 | 31 | === Creating a virtual machine 32 | 33 | In the Actions panel of Hyper-V Manager, click _New_, then _Virtual Machine..._: 34 | 35 | image::hyperv-new.png[Hyper-V Manager] 36 | 37 | This will launch the _New Virtual Machine Wizard_. When completing the wizard, note the following settings: 38 | 39 | . If you select a Generation 2 virtual machine, see <<_configuring_secure_boot,Configuring Secure Boot>>. 40 | . When prompted to configure networking, select the virtual switch you created earlier. 41 | .
When prompted to connect a virtual hard disk, select _Use an existing virtual disk_ and specify the disk image you downloaded earlier: 42 | 43 | image::hyperv-disk.png[Hyper-V Virtual Machine Disk Wizard] 44 | 45 | === Setting the Ignition config 46 | 47 | Before starting your virtual machine for the first time, you must attach your Ignition config containing the customizations you want to apply to Fedora CoreOS. 48 | 49 | On Hyper-V, the Ignition config is presented to the hypervisor in parts. Ignition reads the parts and reassembles them into a single config. You can use the `kvpctl add-ign` subcommand to create these parts and attach them to the virtual machine. The syntax for the command is as follows: 50 | 51 | [source, powershell] 52 | ---- 53 | .\kvpctl.exe <vm-name> add-ign <path-to-ignition-file> 54 | ---- 55 | 56 | For example: 57 | 58 | [source, console] 59 | ---- 60 | > .\kvpctl.exe myvm add-ign C:\Users\joe\myvm.ign 61 | added key: ignition.config.0 62 | added key: ignition.config.1 63 | added key: ignition.config.2 64 | added key: ignition.config.3 65 | added key: ignition.config.4 66 | added key: ignition.config.5 67 | added key: ignition.config.6 68 | ---- 69 | 70 | === Starting the VM 71 | 72 | Once you've attached the Ignition config to the virtual machine, right-click the virtual machine in Hyper-V Manager and select _Start_. 73 | 74 | === Viewing the key-value pairs assigned to your virtual machine 75 | 76 | You can view the key-value pairs assigned to your machine with the `kvpctl get` subcommand. You can only get key-value pairs when the virtual machine is running. 77 | 78 | [source, powershell] 79 | ---- 80 | .\kvpctl.exe <vm-name> get 81 | ---- 82 | 83 | For example: 84 | 85 | [source, console] 86 | ---- 87 | > .\kvpctl.exe myvm get 88 | ignition.config.3 = th":"/etc/containers/registries.conf..." 89 | ignition.config.4 = ,"contents":{"source":"data:,makeste..." 90 | ignition.config.5 = nabled":false,"mask":true,"name":"do..."
91 | ignition.config.6 = service\n\n[Service]\nExecStart=/usr..." 92 | ignition.config.0 = {"ignition":{"config":{"replace":{"v..." 93 | ignition.config.1 = default.target.wants","user":{"name"..." 94 | ignition.config.2 = "user":{"name":"root"},"contents":{"..." 95 | ---- 96 | 97 | === Configuring Secure Boot 98 | 99 | If you configure a Generation 2 virtual machine, Fedora CoreOS will not successfully boot until you change the Secure Boot template to _Microsoft UEFI Certificate Authority_. You can do this in the _Security_ tab of the virtual machine's Settings dialog: 100 | 101 | image::hyperv-secure-boot.png[Virtual machine Secure Boot settings] 102 | -------------------------------------------------------------------------------- /modules/ROOT/pages/provisioning-ibmcloud.adoc: -------------------------------------------------------------------------------- 1 | = Provisioning Fedora CoreOS on IBM Cloud 2 | 3 | This guide shows how to provision new Fedora CoreOS (FCOS) instances in IBM Cloud for either the `x86_64` or `s390x` architectures. 4 | 5 | NOTE: FCOS does not support https://cloud.ibm.com/docs/cloud-infrastructure?topic=cloud-infrastructure-compare-infrastructure[IBM Cloud Classic Infrastructure]. 6 | 7 | == Prerequisites 8 | 9 | Before provisioning an FCOS machine, you must have an Ignition configuration file containing your customizations. If you do not have one, see xref:producing-ign.adoc[Producing an Ignition File]. 10 | 11 | NOTE: Fedora CoreOS has a default `core` user that can be used to explore the OS. If you want to use it, finalize its xref:authentication.adoc[configuration] by providing e.g. an SSH key. 12 | 13 | If you do not want to use Ignition to get started, you can make use of the https://coreos.github.io/afterburn/platforms/[Afterburn support]. 14 | 15 | You also need to have access to an https://cloud.ibm.com/login[IBM Cloud account]. 
The examples below use the https://cloud.ibm.com/docs/cli?topic=cli-getting-started[`ibmcloud`] command-line tool, which must be separately installed and configured beforehand. 16 | Follow the directions at https://cloud.ibm.com/docs/cli?topic=cli-install-ibmcloud-cli to install the ibmcloud CLI. You'll need both the `cloud-object-storage` and `infrastructure-service` plugins installed. This can be done with: 17 | 18 | * `ibmcloud plugin install cloud-object-storage` 19 | * `ibmcloud plugin install infrastructure-service` 20 | 21 | After you've logged in using `ibmcloud login` you can set a target region: 22 | 23 | .Target a specific region 24 | [source, bash] 25 | ---- 26 | REGION='us-east' # run `ibmcloud regions` to view options 27 | ibmcloud target -r $REGION 28 | ---- 29 | 30 | .Target a specific resource group 31 | [source, bash] 32 | ---- 33 | RESOURCE_GROUP='my-resource-group' 34 | ibmcloud resource group-create $RESOURCE_GROUP # Create the resource group if it doesn't exist 35 | ibmcloud target -g $RESOURCE_GROUP 36 | ---- 37 | 38 | There are also several other pieces that need to be in place, like a VPC, SSH keys, networks, permissions, etc. Unfortunately, this guide is not a comprehensive IBM Cloud guide. If you are new to IBM Cloud please familiarize yourself using https://cloud.ibm.com/docs/vpc?topic=vpc-getting-started[the documentation for IBM Cloud VPC networks] first. 39 | 40 | === Creating an Image 41 | 42 | The following sets of commands will show you how to download the most recent image for a stream, upload it to cloud storage, and then create the cloud image in IBM Cloud. It is worth noting that Fedora CoreOS comes in three streams, with different update schedules per stream. These steps show the `stable` stream as an example, but can be used for other streams too. 43 | 44 | 45 | .Fetch the latest image suitable for your target stream (or https://fedoraproject.org/coreos/download/[download and verify] it from the web). 
46 | [source, bash] 47 | ---- 48 | STREAM='stable' 49 | ARCH='x86_64' # or 's390x' 50 | coreos-installer download -s $STREAM -a $ARCH -p ibmcloud -f qcow2.xz --decompress 51 | ---- 52 | 53 | .Create a Service Account for uploading and an Authorization Policy to allow creating images from the uploaded objects. 54 | [source, bash] 55 | ---- 56 | BUCKET='my-unique-bucket' 57 | ibmcloud resource service-instance-create "${BUCKET}-service-instance" cloud-object-storage standard global 58 | 59 | SERVICE_INSTANCE_ID='25df0db0-89a4-4cb8-900f-ed8b44259f80' # from just created service account 60 | ibmcloud iam authorization-policy-create is --source-resource-type image cloud-object-storage Reader --target-service-instance-id $SERVICE_INSTANCE_ID 61 | ---- 62 | 63 | .Upload the fetched image file to IBM Cloud Object Storage. 64 | [source, bash] 65 | ---- 66 | FCOS_VERSION='...' 67 | FILE="fedora-coreos-${FCOS_VERSION}-ibmcloud.${ARCH}.qcow2" 68 | ibmcloud cos create-bucket --bucket $BUCKET --ibm-service-instance-id $SERVICE_INSTANCE_ID 69 | ibmcloud cos upload --bucket=$BUCKET --key="${FILE}" --file="${FILE}" 70 | ---- 71 | 72 | .Create the image from the storage object. 73 | [source, bash] 74 | ---- 75 | IMAGE=${FILE:0:-6} # pull off .qcow2 76 | IMAGE=${IMAGE//[._]/-} # replace . and _ with - 77 | [ $ARCH == 'x86_64' ] && OSNAME='fedora-coreos-stable-amd64' 78 | [ $ARCH == 's390x' ] && OSNAME='red-8-s390x-byol' 79 | ibmcloud is image-create "${IMAGE}" --file "cos://${REGION}/${BUCKET}/${FILE}" --os-name $OSNAME 80 | ---- 81 | 82 | NOTE: For `s390x` we use `--os-name=red-8-s390x-byol` (a RHEL 8 profile) here because there is not currently a `fedora-coreos-stable-s390x` profile to use. 83 | 84 | You'll have to wait for the image creation process to finish and go from `pending` to `available` before you can use the image. 
Monitor with the following command: 85 | 86 | .Monitor image creation progress by viewing the images in your account 87 | [source, bash] 88 | ---- 89 | ibmcloud is images --visibility private --status pending,available 90 | ---- 91 | 92 | == Launching a VM instance 93 | 94 | Now that you have an image created in your account you can launch a VM instance. You'll have to specify several pieces of information in the command. Embedded in the example below are tips for how to grab that information before launching an instance. 95 | 96 | You'll also need the Ignition config you created earlier. Here it is represented in the example command as `@example.ign`, which indicates a file in the current directory named `example.ign`. The @ is required before the path to the Ignition file. 97 | 98 | .Launching a VM instance 99 | [source, bash] 100 | ---- 101 | NAME='instance1' 102 | ZONE="${REGION}-1" # view more with `ibmcloud is zones` 103 | PROFILE='bx2-2x8' # view more with `ibmcloud is instance-profiles` 104 | VPC='r014-c9c65cc4-cfd3-44de-ad54-865aac182ea1' # `ibmcloud is vpcs` 105 | IMAGE='r014-1823b4cf-9c63-499e-8a27-b771be714ad8' # `ibmcloud is images --visibility private` 106 | SUBNET='0777-bf99cbf4-bc82-4c46-895a-5b7304201182' # `ibmcloud is subnets` 107 | SSHKEY='r014-b44c37d0-5c21-4c2b-aba2-438a5b0a228d' # `ibmcloud is keys` 108 | ibmcloud is instance-create "${NAME}" $VPC $ZONE $PROFILE $SUBNET \ 109 | --allow-ip-spoofing=true --image $IMAGE --keys $SSHKEY --user-data @example.ign 110 | ---- 111 | 112 | TIP: If needed you may have to first create a subnet with a command like `ibmcloud is subnet-create my-subnet $VPC --ipv4-address-count 256 --zone $ZONE`. 113 | 114 | WARNING: Make sure you choose an appropriate instance type based on your architecture. For example, you may want to use `bz2-2x8` instead of `bx2-2x8` above if you are targeting `s390x`. 
115 | 116 | Next, if you'd like to SSH into the instance from outside IBM Cloud, you can assign a public IP to the instance: 117 | 118 | .Create and Assign a Floating IP 119 | [source, bash] 120 | ---- 121 | ibmcloud is floating-ip-reserve floating-ip-1 --zone=$ZONE 122 | FIP='72251a2e-d6c5-42b4-97b0-b5f8e8d1f479' 123 | NIC='0777-dd174c80-dbd9-41b1-b221-39bbcef8a481' # find from `ibmcloud is instance` output 124 | ibmcloud is floating-ip-update $FIP --nic $NIC 125 | ---- 126 | 127 | And you now should be able to SSH into the instance using the IP address associated with the floating IP. 128 | 129 | .Example connecting 130 | [source, bash] 131 | ---- 132 | ssh core@ 133 | ---- 134 | -------------------------------------------------------------------------------- /modules/ROOT/pages/provisioning-kubevirt.adoc: -------------------------------------------------------------------------------- 1 | = Provisioning Fedora CoreOS on KubeVirt 2 | 3 | This guide shows how to provision new Fedora CoreOS (FCOS) nodes on any KubeVirt-enabled Kubernetes cluster. 4 | 5 | == Prerequisites 6 | 7 | Before provisioning an FCOS machine, you must have an Ignition configuration file containing your customizations. If you do not have one, see xref:producing-ign.adoc[Producing an Ignition File]. 8 | 9 | You also need to have access to a Kubernetes environment with https://kubevirt.io/user-guide/operations/installation/[KubeVirt] installed. 10 | 11 | == Referencing the KubeVirt Image 12 | 13 | Fedora CoreOS is designed to be updated automatically, with different schedules per stream. 
14 | 15 | The image for each stream can directly be referenced from the official registry: 16 | 17 | - `quay.io/fedora/fedora-coreos-kubevirt:stable` 18 | - `quay.io/fedora/fedora-coreos-kubevirt:testing` 19 | - `quay.io/fedora/fedora-coreos-kubevirt:next` 20 | 21 | == Creating an Ignition config secret 22 | 23 | There are various ways to expose userdata to Kubevirt VMs that are covered in the https://kubevirt.io/user-guide/virtual_machines/startup_scripts/#startup-scripts[KubeVirt user guide]. In this example we'll use the Ignition config stored in local file `example.ign` to create a secret named `ignition-payload`. We'll then use this secret when defining our virtual machine in the examples below. 24 | 25 | .Creating the secret 26 | [source, bash] 27 | ---- 28 | kubectl create secret generic ignition-payload --from-file=userdata=example.ign 29 | ---- 30 | 31 | NOTE: If the user prefers, they can use `oc` instead of `kubectl` in the commands throughout this guide. 32 | 33 | 34 | == Launching a virtual machine 35 | 36 | Given the `quay.io/fedora/fedora-coreos-kubevirt` images you can create a VM definition and combine that with the Ignition secret reference to launch a virtual machine. 
37 | 38 | .Launching a VM instance referencing the secret 39 | [source, bash] 40 | ---- 41 | STREAM="stable" # or "testing" or "next" 42 | cat <<END > vm.yaml 43 | --- 44 | apiVersion: kubevirt.io/v1 45 | kind: VirtualMachine 46 | metadata: 47 | name: my-fcos 48 | spec: 49 | runStrategy: Always 50 | template: 51 | spec: 52 | domain: 53 | devices: 54 | disks: 55 | - name: containerdisk 56 | disk: 57 | bus: virtio 58 | - name: cloudinitdisk 59 | disk: 60 | bus: virtio 61 | rng: {} 62 | resources: 63 | requests: 64 | memory: 2048M 65 | volumes: 66 | - name: containerdisk 67 | containerDisk: 68 | image: quay.io/fedora/fedora-coreos-kubevirt:${STREAM} 69 | imagePullPolicy: Always 70 | - name: cloudinitdisk 71 | cloudInitConfigDrive: 72 | secretRef: 73 | name: ignition-payload 74 | END 75 | kubectl create -f vm.yaml 76 | ---- 77 | 78 | Now you should be able to SSH into the instance. If you didn't change the defaults, the 79 | username is `core`. 80 | 81 | .Accessing the VM instance using https://kubevirt.io/user-guide/operations/virtctl_client_tool/[`virtctl`] via ssh 82 | [source, bash] 83 | ---- 84 | virtctl ssh core@my-fcos 85 | ---- 86 | 87 | == Launching a virtual machine with persistent storage 88 | 89 | The above example will give you a VM that will lose any changes made to it if it is stopped and started again. You can instruct the cluster to import a containerdisk into a Physical Volume when provisioning so that the virtual machine will have persistence of the OS disk across stop/start operations. 90 | 91 | The positive to this approach is that the machine behaves much more like a traditional virtual machine. The drawback is that the cluster needs to offer Block PV storage and not all clusters may do that. 92 | 93 | NOTE: You may have to specify a `storageClassName` parameter in the `spec.dataVolumeTemplates.spec.storage` section of the config if your cluster doesn't offer a default.
See the https://kubevirt.io/api-reference/v1.0.0/definitions.html#_v1beta1_storagespec[API docs]. 94 | 95 | .Launching a VM with persistent storage 96 | [source, bash] 97 | ---- 98 | STREAM="stable" # or "testing" or "next" 99 | DISK=10 100 | cat <<END > vm.yaml 101 | --- 102 | apiVersion: kubevirt.io/v1 103 | kind: VirtualMachine 104 | metadata: 105 | name: my-fcos 106 | spec: 107 | runStrategy: Always 108 | dataVolumeTemplates: 109 | - metadata: 110 | name: fcos-os-disk-volume 111 | spec: 112 | source: 113 | registry: 114 | url: 115 | docker://quay.io/fedora/fedora-coreos-kubevirt:${STREAM} 116 | storage: 117 | volumeMode: Block 118 | resources: 119 | requests: 120 | storage: ${DISK}Gi 121 | accessModes: 122 | - ReadWriteOnce 123 | template: 124 | spec: 125 | domain: 126 | devices: 127 | disks: 128 | - name: fcos-os-disk 129 | disk: 130 | bus: virtio 131 | - name: cloudinitdisk 132 | disk: 133 | bus: virtio 135 | rng: {} 136 | resources: 137 | requests: 138 | memory: 2048M 139 | volumes: 140 | - name: fcos-os-disk 141 | dataVolume: 142 | name: fcos-os-disk-volume 143 | - name: cloudinitdisk 144 | cloudInitConfigDrive: 145 | secretRef: 146 | name: ignition-payload 147 | END 148 | kubectl create -f vm.yaml 149 | ---- 150 | 151 | NOTE: The data volume import into the PVC from the container registry may take some time. You can monitor the import by watching the logs of the `importer-fcos-os-disk-volume` pod. 152 | 153 | After the machine is up you can connect to it using `virtctl` as shown in the previous example. 154 | 155 | == Mirroring the image for use in private registries 156 | 157 | If a private registry is used in air-gapped installations, the image can be mirrored to that registry using https://github.com/containers/skopeo[`skopeo`]. 
158 | 159 | .Mirroring a stable stream FCOS image 160 | [source, bash] 161 | ---- 162 | skopeo copy docker://quay.io/fedora/fedora-coreos-kubevirt:stable docker://myregistry.io/myorg/fedora-coreos-kubevirt:stable 163 | ---- 164 | -------------------------------------------------------------------------------- /modules/ROOT/pages/provisioning-libvirt.adoc: -------------------------------------------------------------------------------- 1 | = Provisioning Fedora CoreOS on libvirt 2 | 3 | This guide shows how to provision new Fedora CoreOS (FCOS) instances on a https://libvirt.org/[libvirt] platform, using the https://www.qemu.org/[QEMU] hypervisor. 4 | 5 | == Prerequisites 6 | 7 | Before provisioning an FCOS machine, you must have an Ignition configuration file containing your customizations. If you do not have one, see xref:producing-ign.adoc[Producing an Ignition File]. 8 | 9 | NOTE: Fedora CoreOS has a default `core` user that can be used to explore the OS. If you want to use it, finalize its xref:authentication.adoc[configuration] by providing e.g. an SSH key. 10 | 11 | You also need to have access to a host machine with `libvirt`. The examples below use the `virt-install` command-line tool, which must be separately installed beforehand. 12 | 13 | TIP: If running on a host with SELinux enabled (use the `sestatus` command to check SELinux status), make sure your OS image and Ignition file are labeled as `svirt_home_t`. You can do this by placing them under `~/.local/share/libvirt/images/` or running `chcon -t svirt_home_t /path/to/file`. 
14 | 15 | == Launching a VM instance 16 | 17 | include::getting-started-libvirt.adoc[] 18 | -------------------------------------------------------------------------------- /modules/ROOT/pages/provisioning-nutanix.adoc: -------------------------------------------------------------------------------- 1 | = Provisioning Fedora CoreOS on Nutanix AHV 2 | 3 | This guide shows how to provision new Fedora CoreOS (FCOS) nodes on Nutanix AHV. Fedora currently does not publish Fedora CoreOS images within Nutanix, so you need to upload a Nutanix image to your Nutanix Prism Central subscription. 4 | 5 | == Prerequisites 6 | 7 | Before provisioning an FCOS machine, you must have an Ignition configuration file containing your customizations. If you do not have one, see xref:producing-ign.adoc[Producing an Ignition File]. 8 | 9 | NOTE: Fedora CoreOS has a default `core` user that can be used to explore the OS. If you want to use it, finalize its xref:authentication.adoc[configuration] by providing e.g. an SSH key. 10 | 11 | You also need to have access to a Nutanix Prism Central subscription. The examples below use the `curl` command to access Nutanix Prism Central APIs. 12 | 13 | == Uploading an image to Nutanix AHV 14 | 15 | Fedora CoreOS is designed to be updated automatically, with different schedules per stream. 
Once you have picked the relevant stream, use the Nutanix Prism Central API to upload the latest image to Nutanix: 16 | 17 | [source, bash] 18 | ---- 19 | STREAM=stable 20 | IMAGE_NAME= 21 | API_HOST= 22 | API_USERNAME= 23 | API_PASSWORD= 24 | 25 | URL=$(curl https://builds.coreos.fedoraproject.org/streams/${STREAM}.json | \ 26 | jq -r .architectures.x86_64.artifacts.nutanix.formats.qcow2.disk.location) 27 | ENCODED_CREDS="$(echo -n "${API_USERNAME}:${API_PASSWORD}" | base64)" 28 | 29 | curl -X POST --header "Content-Type: application/json" \ 30 | --header "Accept: application/json" \ 31 | --header "Authorization: Basic ${ENCODED_CREDS}" \ 32 | "https://${API_HOST}:9440/api/nutanix/v3/images" \ 33 | -d @- << EOF 34 | { 35 | "spec": { 36 | "name": "${IMAGE_NAME}", 37 | "resources": { 38 | "image_type": "ISO_IMAGE", 39 | "source_uri": "${URL}", 40 | "architecture": "X86_64", 41 | "source_options": { 42 | "allow_insecure_connection": false 43 | } 44 | }, 45 | "description": "string" 46 | }, 47 | "api_version": "3.1.0", 48 | "metadata": { 49 | "use_categories_mapping": false, 50 | "kind": "image", 51 | "spec_version": 0, 52 | "categories_mapping": {}, 53 | "should_force_translate": true, 54 | "entity_version": "string", 55 | "categories": {}, 56 | "name": "string" 57 | } 58 | } 59 | EOF 60 | ---- 61 | 62 | == Launching a VM instance 63 | 64 | You can provision an FCOS instance using the Nutanix Prism Central web portal or via the Prism Central API with `curl`. Ignition configuration can be passed to the VM as a "cloud-init custom script". 
For example, to launch a VM using the API: 65 | 66 | [source, bash] 67 | ---- 68 | API_HOST= 69 | API_USERNAME= 70 | API_PASSWORD= 71 | CLUSTER_REFERENCE_NAME= 72 | CLUSTER_REFERENCE_UUID= 73 | SUBNET_REFERENCE_NAME= 74 | SUBNET_REFERENCE_UUID= 75 | VM_NAME= 76 | IGNITION_CONFIG=config.ign 77 | IMAGE_NAME= 78 | 79 | ENCODED_CONFIG="$(cat ${IGNITION_CONFIG} | base64 -w 0)" 80 | ENCODED_CREDS="$(echo -n "${API_USERNAME}:${API_PASSWORD}" | base64)" 81 | IMAGE_ID=$(curl -X POST --header "Content-Type: application/json" \ 82 | --header "Accept: application/json" \ 83 | --header "Authorization: Basic ${ENCODED_CREDS}" \ 84 | "https://${API_HOST}:9440/api/nutanix/v3/images/list" \ 85 | -d '{ "kind": "image","filter": "", "length": 30, "offset": 0}' | \ 86 | jq -r --arg name "${IMAGE_NAME}" '.entities[] | select(.spec.name == $name) | .metadata.uuid') 87 | 88 | 89 | curl -X POST --header "Content-Type: application/json" \ 90 | --header "Accept: application/json" \ 91 | --header "Authorization: Basic ${ENCODED_CREDS}" \ 92 | "https://${API_HOST}:9440/api/nutanix/v3/vms" \ 93 | -d @- << EOF 94 | { 95 | "spec": { 96 | "name": "${VM_NAME}", 97 | "resources": { 98 | "power_state": "ON", 99 | "num_vcpus_per_socket": 1, 100 | "num_sockets": 1, 101 | "memory_size_mib": 16384, 102 | "disk_list": [ 103 | { 104 | "disk_size_mib": 32768, 105 | "device_properties": { 106 | "device_type": "DISK", 107 | "disk_address": { 108 | "device_index": 0, 109 | "adapter_type": "SCSI" 110 | } 111 | }, 112 | "data_source_reference": { 113 | "kind": "image", 114 | "uuid": "${IMAGE_ID}" 115 | } 116 | } 117 | ], 118 | "nic_list": [ 119 | { 120 | "nic_type": "NORMAL_NIC", 121 | "is_connected": true, 122 | "ip_endpoint_list": [ 123 | { 124 | "ip_type": "DHCP" 125 | } 126 | ], 127 | "subnet_reference": { 128 | "kind": "subnet", 129 | "name": "${SUBNET_REFERENCE_NAME}", 130 | "uuid": "${SUBNET_REFERENCE_UUID}" 131 | } 132 | } 133 | ], 134 | "guest_tools": { 135 | "nutanix_guest_tools": { 136 | "state": "ENABLED", 137 | 
"iso_mount_state": "MOUNTED" 138 | } 139 | }, 140 | "guest_customization": { 141 | "cloud_init": { 142 | "user_data": "${ENCODED_CONFIG}" 143 | }, 144 | "is_overridable": false 145 | } 146 | }, 147 | "cluster_reference": { 148 | "kind": "cluster", 149 | "name": "${CLUSTER_REFERENCE_NAME}", 150 | "uuid": "${CLUSTER_REFERENCE_UUID}" 151 | } 152 | }, 153 | "api_version": "3.1.0", 154 | "metadata": { 155 | "kind": "vm" 156 | } 157 | } 158 | EOF 159 | ---- 160 | 161 | You now should be able to SSH into the instance using the associated IP address. 162 | 163 | .Example connecting 164 | [source, bash] 165 | ---- 166 | ssh core@ 167 | ---- 168 | -------------------------------------------------------------------------------- /modules/ROOT/pages/provisioning-openstack.adoc: -------------------------------------------------------------------------------- 1 | = Provisioning Fedora CoreOS on OpenStack 2 | 3 | This guide shows how to provision new Fedora CoreOS (FCOS) nodes on an 4 | OpenStack cloud environment, either private, or public (like https://vexxhost.com/[VEXXHOST]). 5 | 6 | The steps below were tested against the OpenStack Victoria release. 7 | 8 | == Prerequisites 9 | 10 | Before provisioning an FCOS machine, you must have an Ignition configuration file containing your customizations. If you do not have one, see xref:producing-ign.adoc[Producing an Ignition File]. 11 | 12 | NOTE: Fedora CoreOS has a default `core` user that can be used to explore the OS. If you want to use it, finalize its xref:authentication.adoc[configuration] by providing e.g. an SSH key. 13 | 14 | If you do not want to use Ignition to get started, you can make use of the https://coreos.github.io/afterburn/platforms/[Afterburn support]. 15 | 16 | You also need to have access to an OpenStack environment and a functioning 17 | https://docs.openstack.org/python-designateclient/latest/user/shell-v2.html[`openstack` CLI]. 
18 | Typically, you'll https://docs.openstack.org/python-openstackclient/latest/configuration/index.html[configure the client] 19 | by using a `clouds.yaml` file or via environment variables. If you're starting from scratch, this 20 | environment may need networks, SSH key pairs, security groups, etc.. set up. Please consult the 21 | https://docs.openstack.org/[OpenStack Documentation] to learn more. 22 | 23 | == Downloading an OpenStack Image 24 | 25 | Fedora CoreOS is designed to be updated automatically, with different schedules per stream. 26 | Once you have picked the relevant stream, download, verify, and decompress the latest 27 | OpenStack image: 28 | 29 | NOTE: For more information on FCOS stream offerings see xref:update-streams.adoc[Update Streams]. 30 | 31 | [source, bash] 32 | ---- 33 | STREAM="stable" 34 | coreos-installer download --decompress -s $STREAM -p openstack -f qcow2.xz 35 | ---- 36 | 37 | Alternatively, you can manually download an OpenStack image from the 38 | https://fedoraproject.org/coreos/download/?stream=stable#cloud_images[download page]. 39 | Verify the download, following the instructions on that page, and decompress it. 40 | 41 | == Uploading the Image to OpenStack 42 | 43 | .Create the FCOS image in OpenStack 44 | [source, bash] 45 | ---- 46 | FILE=fedora-coreos-XX.XXXXXXXX.X.X-openstack.x86_64.qcow2 47 | IMAGE=${FILE:0:-6} # pull off .qcow2 48 | openstack image create --disk-format=qcow2 --min-disk=10 --min-ram=2 --progress --file="${FILE}" "${IMAGE}" 49 | ---- 50 | 51 | NOTE: If you're uploading an `aarch64` disk image then add `--property architecture=aarch64`. 52 | 53 | .Monitor image creation progress by listing the image 54 | [source, bash] 55 | ---- 56 | openstack image list --name="${IMAGE}" 57 | ---- 58 | 59 | Once the image is listed as `active`, it's ready to be used. 60 | 61 | == Launching a VM instance 62 | 63 | Now that you have an image created in your account you can launch a VM 64 | instance. 
You’ll have to specify several pieces of information in the 65 | command, such as instance flavor, network information, SSH key, etc... 66 | 67 | You'll also need the Ignition config you created earlier. Here it is 68 | represented in the example command as `./example.ign`, which indicates 69 | a file in the current directory named `example.ign`. 70 | 71 | .Launching a VM instance 72 | [source, bash] 73 | ---- 74 | OPENSTACK_NETWORK="private" 75 | OPENSTACK_KEYPAIR="mykeypair" # optional 76 | OPENSTACK_FLAVOR="v1-standard-2" 77 | INSTANCE_NAME="myinstance" # choose a name 78 | openstack server create \ 79 | --key-name="${OPENSTACK_KEYPAIR}" \ 80 | --network=$OPENSTACK_NETWORK \ 81 | --flavor=$OPENSTACK_FLAVOR \ 82 | --image="${IMAGE}" \ 83 | --user-data ./example.ign \ 84 | "${INSTANCE_NAME}" 85 | ---- 86 | 87 | NOTE: Specifying `--key-name` is optional if you provide an SSH key in your Ignition config. 88 | 89 | TIP: Monitor progress of the instance creation with `openstack server show "${INSTANCE_NAME}"`. 90 | You can also use the `--wait` parameter when calling `openstack server create` to block 91 | until the instance is active. 92 | 93 | Next, if the instance's network isn't externally facing and you'd like to SSH 94 | into it from outside the OpenStack environment, you will have to assign a public 95 | IP to the instance: 96 | 97 | .Create and Assign a Floating IP 98 | [source, bash] 99 | ---- 100 | OPENSTACK_NETWORK=public 101 | openstack floating ip create $OPENSTACK_NETWORK 102 | 103 | FLOATING_IP=1.1.1.1 # from just created floating IP 104 | openstack server add floating ip "${INSTANCE_NAME}" $FLOATING_IP 105 | ---- 106 | 107 | You now should be able to SSH into the instance using the floating IP address. 
108 | 109 | .Example connecting 110 | [source, bash] 111 | ---- 112 | ssh core@ 113 | ---- 114 | -------------------------------------------------------------------------------- /modules/ROOT/pages/provisioning-qemu.adoc: -------------------------------------------------------------------------------- 1 | = Provisioning Fedora CoreOS on QEMU 2 | 3 | This guide shows how to provision new Fedora CoreOS (FCOS) instances on a bare https://www.qemu.org/[QEMU] hypervisor. 4 | 5 | == Prerequisites 6 | 7 | Before provisioning an FCOS machine, you must have an Ignition configuration file containing your customizations. If you do not have one, see xref:producing-ign.adoc[Producing an Ignition File]. 8 | 9 | NOTE: Fedora CoreOS has a default `core` user that can be used to explore the OS. If you want to use it, finalize its xref:authentication.adoc[configuration] by providing e.g. an SSH key. 10 | 11 | You also need to have access to a host machine with https://www.linux-kvm.org/page/Main_Page[KVM] support. The examples below use the `qemu-kvm` command-line tool, which must be separately installed beforehand. 12 | 13 | TIP: If running with SELinux enabled, make sure your OS image and Ignition file are labeled as `svirt_home_t`, for example by placing them under `~/.local/share/libvirt/images/`. 14 | 15 | == Booting a new VM on QEMU 16 | 17 | This section shows how to boot a new VM on QEMU. The Ignition file is passed to the VM by setting the `opt/com.coreos/config` key in the QEMU firmware configuration device. 18 | 19 | You can use `-snapshot` to make `qemu-kvm` allocate temporary storage for the VM, or `qemu-img create` to first create a layered qcow2. 20 | 21 | === Fetching the QCOW2 image 22 | 23 | Fetch the latest image suitable for your target stream (or https://fedoraproject.org/coreos/download/[download and verify] it from the web). 
24 | 25 | [source, bash] 26 | ---- 27 | STREAM="stable" 28 | coreos-installer download -s $STREAM -p qemu -f qcow2.xz --decompress -C ~/.local/share/libvirt/images/ 29 | ---- 30 | 31 | === Setting up a new VM 32 | 33 | Launch the new VM using `qemu-kvm`. 34 | 35 | In snapshot mode, all changes that are performed live after boot are discarded once the machine is powered off. 36 | If you need to persist your changes, it is recommended to set up a dedicated persistent disk first. 37 | 38 | .Launching FCOS with QEMU (temporary storage) 39 | [source, bash] 40 | ---- 41 | IGNITION_CONFIG="/path/to/example.ign" 42 | IMAGE="/path/to/image.qcow2" 43 | # for x86/aarch64: 44 | IGNITION_DEVICE_ARG="-fw_cfg name=opt/com.coreos/config,file=${IGNITION_CONFIG}" 45 | 46 | # for s390x/ppc64le: 47 | IGNITION_DEVICE_ARG="-drive file=${IGNITION_CONFIG},if=none,format=raw,readonly=on,id=ignition -device virtio-blk,serial=ignition,drive=ignition" 48 | 49 | qemu-kvm -m 2048 -cpu host -nographic -snapshot \ 50 | -drive "if=virtio,file=${IMAGE}" ${IGNITION_DEVICE_ARG} \ 51 | -nic user,model=virtio,hostfwd=tcp::2222-:22 52 | ---- 53 | 54 | .Launching FCOS with QEMU (persistent storage) 55 | [source, bash] 56 | ---- 57 | qemu-img create -f qcow2 -F qcow2 -b "${IMAGE}" my-fcos-vm.qcow2 58 | qemu-kvm -m 2048 -cpu host -nographic \ 59 | -drive if=virtio,file=my-fcos-vm.qcow2 ${IGNITION_DEVICE_ARG} \ 60 | -nic user,model=virtio,hostfwd=tcp::2222-:22 61 | ---- 62 | 63 | === Exploring the OS 64 | 65 | With QEMU usermode networking, the assigned IP address is not reachable from the host. 66 | 67 | The examples above use `hostfwd` to selectively forward the SSH port on the guest machine to the local host (port 2222). 
68 | 69 | If you set up an xref:authentication.adoc[SSH key] for the default `core` user, you can SSH into the VM via the forwarded port: 70 | 71 | [source, bash] 72 | ---- 73 | ssh -p 2222 core@localhost 74 | ---- 75 | -------------------------------------------------------------------------------- /modules/ROOT/pages/provisioning-virtualbox.adoc: -------------------------------------------------------------------------------- 1 | = Provisioning Fedora CoreOS on VirtualBox 2 | 3 | This guide shows how to provision new Fedora CoreOS (FCOS) nodes on the VirtualBox hypervisor. 4 | 5 | == Prerequisites 6 | 7 | Before importing an FCOS machine, you must have an Ignition configuration file containing your customizations. If you do not have one, see xref:producing-ign.adoc[Producing an Ignition File]. 8 | 9 | NOTE: Fedora CoreOS has a default `core` user that can be used to explore the OS. If you want to use it, finalize its xref:authentication.adoc[configuration] by providing e.g. an SSH key. 10 | 11 | === Downloading the OVA 12 | 13 | Fedora CoreOS is designed to be updated automatically, with different schedules per stream. 14 | Once you have picked the relevant stream, you can download the latest OVA: 15 | 16 | [source, bash] 17 | ---- 18 | STREAM="stable" 19 | coreos-installer download -s $STREAM -p virtualbox -f ova 20 | ---- 21 | 22 | Alternatively, OVA images can be manually downloaded from the https://fedoraproject.org/coreos/download/?stream=stable#baremetal[download page]. 23 | 24 | == Booting a new VM on VirtualBox 25 | 26 | You can set up a VirtualBox virtual machine through the GUI or via the https://www.virtualbox.org/manual/UserManual.html#vboxmanage[`VBoxManage` CLI]. This guide will use the CLI for setting up the VM. 
27 | 28 | === Importing the OVA 29 | 30 | To import the OVA, use `VBoxManage import`: 31 | 32 | [source, bash, subs="attributes"] 33 | ---- 34 | VM_NAME=my-instance 35 | VBoxManage import --vsys 0 --vmname "$VM_NAME" fedora-coreos-{stable-version}-virtualbox.x86_64.ova 36 | ---- 37 | 38 | === Setting the Ignition config 39 | 40 | Ignition reads its configuration from the `/Ignition/Config` https://docs.oracle.com/en/virtualization/virtualbox/6.0/user/guestadd-guestprops.html[guest property] of the virtual machine. At present, guest properties can only be set from the host command line, and not via the GUI. To set the Ignition config for a VM: 41 | 42 | [source, bash] 43 | ---- 44 | IGN_PATH="/path/to/config.ign" 45 | VM_NAME=my-instance 46 | VBoxManage guestproperty set "$VM_NAME" /Ignition/Config "$(cat $IGN_PATH)" 47 | ---- 48 | 49 | ==== Ignition config size limitations 50 | 51 | The length of the `/Ignition/Config` guestinfo property is constrained by the maximum length of a command line on your host operating system. The OS-specific limits are approximately: 52 | 53 | [cols="1,1"] 54 | |=== 55 | |OS 56 | |Limit 57 | 58 | |Linux 59 | |128 KiB 60 | |macOS 61 | |256 KiB 62 | |Windows shells 63 | |8 KiB 64 | |=== 65 | 66 | If your Ignition config is larger than this limit, you can host the config on an HTTPS server and refer to it from a small _pointer config_, as follows: 67 | 68 | . Upload your Ignition config to an HTTPS server. 69 | . xref:remote-ign.adoc[Create a Butane pointer config] that specifies the URL of your full Ignition config: 70 | + 71 | [source,yaml,subs="attributes"] 72 | ---- 73 | variant: fcos 74 | version: {butane-latest-stable-spec} 75 | ignition: 76 | config: 77 | replace: 78 | source: https://example.com/config.ign 79 | ---- 80 | . Use xref:producing-ign.adoc[Butane] to convert the Butane config to an Ignition config. 81 | . 
Set the `/Ignition/Config` guest property to the contents of the pointer Ignition config, following the instructions in <<_setting_the_ignition_config>>. 82 | 83 | === Configuring networking 84 | 85 | By default, the VM will use https://www.virtualbox.org/manual/UserManual.html#networkingmodes[NAT networking]. This will share the IP address of your host. Alternatively, if you want the VM to use a different IP address than your host, you can set the VM's network adapter to "Bridged networking". 86 | 87 | ==== NAT networking 88 | 89 | By default, NAT networking does not allow inbound connections to the VM. To allow inbound SSH connections, you can forward connections to e.g. port 2222 on the host to the SSH server in the VM: 90 | 91 | [source, bash] 92 | ---- 93 | VM_NAME=my-instance 94 | VBoxManage modifyvm "$VM_NAME" --natpf1 "guestssh,tcp,,2222,,22" 95 | ---- 96 | 97 | After booting the VM, you can SSH to the VM from your host: 98 | 99 | [source, bash] 100 | ---- 101 | ssh core@localhost -p 2222 102 | ---- 103 | 104 | ==== Bridged networking 105 | 106 | If you want the VM to use a different IP address than your host, you can set the VM's network adapter to "Bridged networking". 107 | 108 | . Determine the network adapter that should be bridged to the VM. To get the name of your host's default network adapter, you can run: 109 | + 110 | [source, bash] 111 | ---- 112 | ip route ls default | grep -Po '(?<= dev )(\S+)' 113 | ---- 114 | 115 | . 
Modify the VM's network adapter settings: 116 | + 117 | [source, bash] 118 | ---- 119 | VM_NAME=my-instance 120 | ADAPTER=adapter-name 121 | VBoxManage modifyvm "$VM_NAME" --nic1 bridged --bridgeadapter1 "$ADAPTER" 122 | ---- 123 | 124 | === Starting the VM 125 | 126 | You can now boot the VM you have configured: 127 | 128 | [source, bash] 129 | ---- 130 | VM_NAME=my-instance 131 | VBoxManage startvm "$VM_NAME" 132 | ---- 133 | 134 | == Troubleshooting first-boot problems 135 | 136 | You may encounter problems with your Ignition config that require access to the console log messages which appear during the first boot. To obtain a copy of the console log you can attach a https://www.virtualbox.org/manual/UserManual.html#serialports[serial device] to the VM before booting. 137 | 138 | To attach a serial device to a powered-off VM: 139 | 140 | [source, bash] 141 | ---- 142 | VM_NAME=my-instance 143 | VM_LOG=$(realpath .)/$VM_NAME.log 144 | VBoxManage modifyvm "$VM_NAME" --uart1 0x3F8 4 145 | VBoxManage modifyvm "$VM_NAME" --uartmode1 file "$VM_LOG" 146 | ---- 147 | 148 | When you power on the VM, console output will be logged to the file you specified. 149 | -------------------------------------------------------------------------------- /modules/ROOT/pages/provisioning-vultr.adoc: -------------------------------------------------------------------------------- 1 | = Provisioning Fedora CoreOS on Vultr 2 | 3 | This guide shows how to provision new Fedora CoreOS (FCOS) nodes on Vultr. Vultr publishes FCOS images, but they are out of date, so **we do not recommend using the standard Vultr images**. Instead, a current FCOS release can be uploaded as a https://www.vultr.com/docs/requirements-for-uploading-an-os-iso-to-vultr[custom image]. 4 | 5 | == Prerequisites 6 | 7 | Before provisioning an FCOS machine, you must have an Ignition configuration file containing your customizations. If you do not have one, see xref:producing-ign.adoc[Producing an Ignition File]. 
8 | 9 | NOTE: Fedora CoreOS has a default `core` user that can be used to explore the OS. If you want to use it, finalize its xref:authentication.adoc[configuration] by providing e.g. an SSH key. 10 | 11 | If you do not want to use Ignition to get started, you can make use of the https://coreos.github.io/afterburn/platforms/[Afterburn support]. 12 | 13 | You also need to have access to a Vultr account. The examples below use the https://github.com/vultr/vultr-cli[vultr-cli] and https://s3tools.org/s3cmd[s3cmd] command-line tools. Both of these tools are available in Fedora and can be installed via `sudo dnf install vultr-cli s3cmd`. 14 | 15 | == Using a custom snapshot 16 | 17 | Vultr supports creating custom snapshots from public raw images. 18 | 19 | These steps show how to download a FCOS image and upload it to an existing storage bucket, in order to create a snapshot from that. 20 | 21 | See https://www.vultr.com/docs/vultr-object-storage[Vultr documentation] for further details on how to create a bucket and configure `s3cmd` to use it. 22 | 23 | === Creating a snapshot 24 | 25 | Fedora CoreOS comes in three streams, with different update schedules per stream. These steps show the `stable` stream as an example, but can be used for other streams too. 26 | 27 | . Fetch the latest image suitable for your target stream (or https://fedoraproject.org/coreos/download/[download and verify] it from the web). 28 | + 29 | [source, bash] 30 | ---- 31 | STREAM='stable' 32 | coreos-installer download -s $STREAM -p vultr -f raw.xz --decompress 33 | ---- 34 | 35 | . https://www.vultr.com/docs/how-to-use-s3cmd-with-vultr-object-storage[Use s3cmd to upload] the raw image to your bucket, and note its public URL. 36 | + 37 | [source, bash] 38 | ---- 39 | BUCKET='my-bucket' 40 | FCOS_VERSION='...' 41 | s3cmd put --acl-public "fedora-coreos-${FCOS_VERSION}-vultr.x86_64.raw" "s3://${BUCKET}/" 42 | ---- 43 | 44 | . Create the snapshot from your object URL, and note its ID. 
45 | + 46 | [source, bash] 47 | ---- 48 | IMAGE_URL='https://...' 49 | VULTR_API_KEY='' 50 | vultr-cli snapshot create-url -u "${IMAGE_URL}" 51 | ---- 52 | 53 | NOTE: You'll need to wait for the snapshot to finish processing before using it. Monitor with `*vultr-cli snapshot list*`. 54 | 55 | === Launching an instance from a snapshot 56 | 57 | You can now create a FCOS Vultr instance using the snapshot ID above. 58 | 59 | This example creates a 2 vCPU, 4GB RAM instance named `instance1` in the New Jersey region. Use `vultr-cli regions list` and `vultr-cli plans list` for other options. 60 | 61 | [source, bash] 62 | ---- 63 | NAME='instance1' 64 | SNAPSHOT_ID='...' 65 | REGION='ewr' 66 | PLAN='vc2-2c-4gb' 67 | vultr-cli instance create --region "${REGION}" --plan "${PLAN}" \ 68 | --snapshot "${SNAPSHOT_ID}" --label "${NAME}" --host "${NAME}" \ 69 | --userdata "$(cat example.ign)" 70 | ---- 71 | 72 | NOTE: While the Vultr documentation mentions `cloud-init` and scripts, FCOS does not support `cloud-init` or the ability to run scripts from user-data. It accepts only Ignition configuration files. 73 | 74 | TIP: You can find out the instance's assigned IP by running `vultr-cli instance list`. 75 | 76 | You now should be able to SSH into the instance using the associated IP address. 77 | 78 | .Example connecting 79 | [source, bash] 80 | ---- 81 | ssh core@ 82 | ---- 83 | -------------------------------------------------------------------------------- /modules/ROOT/pages/proxy.adoc: -------------------------------------------------------------------------------- 1 | = Proxied Internet Access 2 | 3 | If you are deploying to an environment requiring internet access via a proxy, you will want to configure services so that they can access resources as intended. 4 | 5 | This is best done by defining a single file with required environment variables in your Butane configuration, and to reference this via systemd drop-in unit files for all such services. 
6 | 7 | == Defining common proxy environment variables 8 | 9 | This common file has to be subsequently referenced explicitly by each service that requires internet access. 10 | 11 | [source,yaml,subs="attributes"] 12 | ---- 13 | variant: fcos 14 | version: {butane-latest-stable-spec} 15 | storage: 16 | files: 17 | - path: /etc/example-proxy.env 18 | mode: 0644 19 | contents: 20 | inline: | 21 | https_proxy="http://example.com:8080" 22 | all_proxy="http://example.com:8080" 23 | http_proxy="http://example.com:8080" 24 | HTTP_PROXY="http://example.com:8080" 25 | HTTPS_PROXY="http://example.com:8080" 26 | no_proxy="*.example.com,127.0.0.1,0.0.0.0,localhost" 27 | ---- 28 | 29 | == Defining drop-in units for core services 30 | 31 | https://github.com/coreos/zincati[Zincati] polls for OS updates, and https://github.com/coreos/rpm-ostree[rpm-ostree] is used to apply OS and layered package updates both therefore requiring internet access. The optional anonymized https://docs.fedoraproject.org/en-US/fedora-coreos/counting/[countme] service also requires access if enabled. 32 | 33 | TIP: You may be able to use local file references to systemd units instead of inlining them. See xref:tutorial-services.adoc#_using_butanes__files_dir_parameter_to_embed_files[Using butane's `--files-dir` Parameter to Embed Files] for more information. 
34 | 35 | [source,yaml,subs="attributes"] 36 | ---- 37 | variant: fcos 38 | version: {butane-latest-stable-spec} 39 | systemd: 40 | units: 41 | - name: rpm-ostreed.service 42 | dropins: 43 | - name: 99-proxy.conf 44 | contents: | 45 | [Service] 46 | EnvironmentFile=/etc/example-proxy.env 47 | - name: zincati.service 48 | dropins: 49 | - name: 99-proxy.conf 50 | contents: | 51 | [Service] 52 | EnvironmentFile=/etc/example-proxy.env 53 | - name: rpm-ostree-countme.service 54 | dropins: 55 | - name: 99-proxy.conf 56 | contents: | 57 | [Service] 58 | EnvironmentFile=/etc/example-proxy.env 59 | ---- 60 | 61 | == Defining drop-in units for container daemons 62 | 63 | If using docker then the `docker.service` drop-in is sufficient. If running Kubernetes with containerd (and no docker) then the `containerd.service` drop-in may be necessary. 64 | 65 | [source,yaml,subs="attributes"] 66 | ---- 67 | variant: fcos 68 | version: {butane-latest-stable-spec} 69 | systemd: 70 | units: 71 | - name: docker.service 72 | enabled: true 73 | dropins: 74 | - name: 99-proxy.conf 75 | contents: | 76 | [Service] 77 | EnvironmentFile=/etc/example-proxy.env 78 | - name: containerd.service 79 | enabled: true 80 | dropins: 81 | - name: 99-proxy.conf 82 | contents: | 83 | [Service] 84 | EnvironmentFile=/etc/example-proxy.env 85 | ---- 86 | 87 | == Defining proxy use for podman systemd units 88 | 89 | Podman has no daemon and so configuration is for each individual service scheduled, and can be done as part of the full systemd unit definition. 
90 | 91 | [source,yaml,subs="attributes"] 92 | ---- 93 | variant: fcos 94 | version: {butane-latest-stable-spec} 95 | systemd: 96 | units: 97 | - name: example-svc.service 98 | enabled: true 99 | contents: | 100 | [Unit] 101 | After=network-online.target 102 | Wants=network-online.target 103 | 104 | [Service] 105 | EnvironmentFile=/etc/example-proxy.env 106 | ExecStartPre=-/bin/podman kill example-svc 107 | ExecStartPre=-/bin/podman rm example-svc 108 | ExecStartPre=-/bin/podman pull example-image:latest 109 | ExecStart=/bin/podman run --name example-svc example-image:latest 110 | ExecStop=/bin/podman stop example-svc 111 | 112 | [Install] 113 | WantedBy=multi-user.target 114 | ---- 115 | -------------------------------------------------------------------------------- /modules/ROOT/pages/remote-ign.adoc: -------------------------------------------------------------------------------- 1 | = Using a remote Ignition config 2 | 3 | With Ignition, you are not limited to the configuration provided locally to a system and can retrieve other Ignition configs from a remote source. Those configs will then either replace or be merged into the existing config. 4 | 5 | The complete list of supported protocols and related options for remote Ignition files is described in the https://coreos.github.io/ignition/specs/[Ignition specification]. 6 | 7 | The following examples show how to retrieve an Ignition file from a remote source. They are both set to replace the current configuration with a remote Ignition file. 
8 | 9 | .Retrieving a remote Ignition file via HTTPS 10 | [source,yaml,subs="attributes"] 11 | ---- 12 | variant: fcos 13 | version: {butane-latest-stable-spec} 14 | ignition: 15 | config: 16 | replace: 17 | source: https://example.com/sample.ign 18 | ---- 19 | 20 | .Retrieving a remote Ignition file via HTTPS with a custom certificate authority 21 | [source,yaml,subs="attributes"] 22 | ---- 23 | variant: fcos 24 | version: {butane-latest-stable-spec} 25 | ignition: 26 | config: 27 | replace: 28 | source: https://example.com/sample.ign 29 | security: 30 | tls: 31 | certificate_authorities: 32 | - source: https://example.com/source1 33 | ---- 34 | 35 | NOTE: The certificate authorities listed here are not automatically added to the host filesystem. They are solely used by Ignition itself when fetching over `https`. If you'd like to also install them on the host filesystem, include them as usual under the `storage.files` array. 36 | 37 | In some cases, if you need to merge a local configuration and one or several remote ones, you can use the `merge` rather than `replace` in a Butane config. 38 | 39 | .Retrieving a remote Ignition file via HTTPS and merging it with the current config 40 | [source,yaml,subs="attributes"] 41 | ---- 42 | variant: fcos 43 | version: {butane-latest-stable-spec} 44 | ignition: 45 | config: 46 | merge: 47 | - source: https://example.com/sample.ign 48 | passwd: 49 | users: 50 | - name: core 51 | ssh_authorized_keys: 52 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHn2eh... 53 | ---- 54 | 55 | Retrieving remote Ignition files via plain HTTP is also possible as shown below. 56 | 57 | WARNING: Retrieving a remote Ignition config via HTTP exposes the contents of the config to anyone monitoring network traffic. When using HTTP, it is advisable to use the verification option to ensure the contents haven't been tampered with. 
58 | 59 | .Retrieving a remote Ignition file via HTTP 60 | [source,yaml,subs="attributes"] 61 | ---- 62 | variant: fcos 63 | version: {butane-latest-stable-spec} 64 | ignition: 65 | config: 66 | replace: 67 | source: http://example.com/sample.ign 68 | verification: 69 | hash: sha512-e2bb19fdbc3604f511b13d66f4c675f011a63dd967b97e2fe4f5d50bf6cb224e902182221ba0f9dd87c0bb4abcbd2ab428eb7965aa7f177eb5630e7a1793e2e6 70 | ---- 71 | 72 | If you need to retrieve a remote Ignition file but have no direct access to the remote host, you can specify a proxy for plain HTTP and/or HTTPS. You can also specify hosts that should be excluded from proxying. 73 | 74 | .Retrieving a remote Ignition file via a proxy 75 | [source,yaml,subs="attributes"] 76 | ---- 77 | variant: fcos 78 | version: {butane-latest-stable-spec} 79 | ignition: 80 | config: 81 | merge: 82 | - source: https://example.com/sample.ign 83 | - source: https://example.org/example.ign 84 | proxy: 85 | https_proxy: https://example.net 86 | no_proxy: 87 | - example.org 88 | ---- 89 | -------------------------------------------------------------------------------- /modules/ROOT/pages/running-containers.adoc: -------------------------------------------------------------------------------- 1 | = Running Containers 2 | 3 | == Introduction 4 | Fedora CoreOS ships with both the `docker` CLI tool (as provided via https://mobyproject.org/[Moby]) and https://podman.io[podman] installed. This page explains how to use systemd units to start and stop containers with podman. 5 | 6 | == Example configuration 7 | The following Butane config snippet configures the systemd `hello.service` to run https://www.busybox.net[busybox]. 8 | 9 | TIP: You may be able to use local file references to systemd units instead of inlining them. See xref:tutorial-services.adoc#_using_butanes__files_dir_parameter_to_embed_files[Using butane's `--files-dir` Parameter to Embed Files] for more information. 
10 | 11 | .Example for running busybox using systemd and podman 12 | [source,yaml,subs="attributes"] 13 | ---- 14 | variant: fcos 15 | version: {butane-latest-stable-spec} 16 | systemd: 17 | units: 18 | - name: hello.service 19 | enabled: true 20 | contents: | 21 | [Unit] 22 | Description=MyApp 23 | After=network-online.target 24 | Wants=network-online.target 25 | 26 | [Service] 27 | TimeoutStartSec=0 28 | ExecStartPre=-/bin/podman kill busybox1 29 | ExecStartPre=-/bin/podman rm busybox1 30 | ExecStartPre=/bin/podman pull busybox 31 | ExecStart=/bin/podman run --name busybox1 busybox /bin/sh -c "trap 'exit 0' INT TERM; while true; do echo Hello World; sleep 1; done" 32 | 33 | [Install] 34 | WantedBy=multi-user.target 35 | ---- 36 | 37 | .Example for running busybox using Podman Quadlet 38 | 39 | https://docs.podman.io/en/latest/markdown/podman-systemd.unit.5.html[Podman Quadlet] is functionality included in podman that allows starting containers via systemd using a systemd generator. The example below is the same `hello.service` that was previously shown but deployed via the Podman Quadlet functionality. 40 | 41 | [source,yaml,subs="attributes"] 42 | ---- 43 | variant: fcos 44 | version: {butane-latest-stable-spec} 45 | storage: 46 | files: 47 | - path: /etc/containers/systemd/hello.container 48 | contents: 49 | inline: | 50 | [Unit] 51 | Description=Hello Service 52 | Wants=network-online.target 53 | After=network-online.target 54 | 55 | [Container] 56 | ContainerName=busybox1 57 | Image=docker.io/busybox 58 | Exec=/bin/sh -c "trap 'exit 0' INT TERM; while true; do echo Hello World; sleep 1; done" 59 | 60 | [Install] 61 | WantedBy=multi-user.target 62 | ---- 63 | 64 | === Running etcd 65 | 66 | https://etcd.io[etcd] is not shipped as part of Fedora CoreOS. To use it, run it as a container, as shown below. 
67 | 68 | .Butane config for setting up single node etcd 69 | [source,yaml,subs="attributes"] 70 | ---- 71 | variant: fcos 72 | version: {butane-latest-stable-spec} 73 | systemd: 74 | units: 75 | - name: etcd-member.service 76 | enabled: true 77 | contents: | 78 | [Unit] 79 | Description=Run single node etcd 80 | After=network-online.target 81 | Wants=network-online.target 82 | 83 | [Service] 84 | ExecStartPre=mkdir -p /var/lib/etcd 85 | ExecStartPre=-/bin/podman kill etcd 86 | ExecStartPre=-/bin/podman rm etcd 87 | ExecStartPre=-/bin/podman pull quay.io/coreos/etcd 88 | ExecStart=/bin/podman run --name etcd --volume /var/lib/etcd:/etcd-data:z --net=host quay.io/coreos/etcd:latest /usr/local/bin/etcd --data-dir /etcd-data --name node1 \ 89 | --initial-advertise-peer-urls http://127.0.0.1:2380 --listen-peer-urls http://127.0.0.1:2380 \ 90 | --advertise-client-urls http://127.0.0.1:2379 \ 91 | --listen-client-urls http://127.0.0.1:2379 \ 92 | --initial-cluster node1=http://127.0.0.1:2380 93 | 94 | ExecStop=/bin/podman stop etcd 95 | 96 | [Install] 97 | WantedBy=multi-user.target 98 | ---- 99 | 100 | === For more information 101 | See the https://etcd.io/docs/latest/op-guide/container/#docker[etcd documentation] for more information on running etcd in containers and how to set up multi-node etcd. 102 | -------------------------------------------------------------------------------- /modules/ROOT/pages/stream-metadata.adoc: -------------------------------------------------------------------------------- 1 | = Stream metadata 2 | 3 | Metadata about Fedora CoreOS is available in a custom JSON format, called "stream metadata". For maintaining automation, it is expected that you will interact with this stream metadata. 4 | 5 | The format is stable, and intended to be relatively self-documenting. There is not yet a JSON schema. 6 | However, in most web browsers, navigating to the URL will render the JSON in an easy-to-read form. 
7 | 8 | == Canonical URL 9 | 10 | The URL for the `stable` stream is: https://builds.coreos.fedoraproject.org/streams/stable.json 11 | You can similarly replace `stable` here with other available xref:update-streams.adoc[Update Streams]. 12 | 13 | == Using coreos-installer to download 14 | 15 | The `coreos-installer` tool has built-in support for fetching artifacts: 16 | 17 | [source, bash] 18 | ---- 19 | STREAM="stable" 20 | coreos-installer download --decompress -s $STREAM -p openstack -f qcow2.xz 21 | ---- 22 | 23 | == Using coreos/stream-metadata-go 24 | 25 | There is an official https://github.com/coreos/stream-metadata-go[coreos/stream-metadata-go] library for 26 | software written in the Go programming language. The `README.md` file in that repository contains a link to example code. 27 | 28 | == Example: Script ec2 CLI 29 | 30 | Fetch the latest `x86_64` AMI in `us-west-1` and use it to launch an instance: 31 | 32 | [source, bash] 33 | ---- 34 | $ AMI=$(curl -sSL https://builds.coreos.fedoraproject.org/streams/stable.json | jq -r '.architectures.x86_64.images.aws.regions["us-west-1"].image') 35 | $ echo "${AMI}" 36 | ami-021238084bf8c95ff 37 | $ aws ec2 run-instances --region us-west-1 --image-id "${AMI}" ... 38 | ---- 39 | -------------------------------------------------------------------------------- /modules/ROOT/pages/sysconfig-configure-swaponzram.adoc: -------------------------------------------------------------------------------- 1 | = Configuring Swap on ZRAM 2 | 3 | In Fedora 33 some editions https://www.fedoraproject.org/wiki/Releases/33/ChangeSet#swap_on_zram[enabled swap on ZRAM by default]. Fedora CoreOS currently has the `zram-generator` included but no configuration in place to enable swap on ZRAM by default. In order to configure swap on ZRAM you can lay down a configuration file via Ignition that will tell the zram generator to set up swap on top of a zram device. 
4 | 5 | The documentation for the config file format lives in the https://github.com/systemd/zram-generator/blob/main/man/zram-generator.conf.md[upstream documentation] along with a comprehensive https://github.com/systemd/zram-generator/blob/main/zram-generator.conf.example[example]. The most basic form of a configuration file that will set up a `zram0` device for swap is: 6 | 7 | [source,yaml,subs="attributes"] 8 | ---- 9 | variant: fcos 10 | version: {butane-latest-stable-spec} 11 | storage: 12 | files: 13 | - path: /etc/systemd/zram-generator.conf 14 | mode: 0644 15 | contents: 16 | inline: | 17 | # This config file enables a /dev/zram0 device with the default settings 18 | [zram0] 19 | ---- 20 | 21 | Once booted, you can verify the swap device is set up by viewing the `swapon --show` output. You can also view the true compression ratio of the currently configured zram devices by running `zramctl`. 22 | -------------------------------------------------------------------------------- /modules/ROOT/pages/sysconfig-enabling-wifi.adoc: -------------------------------------------------------------------------------- 1 | = Enabling Wi-Fi 2 | 3 | The primary use for Fedora CoreOS has been driving server hardware in individual datacenters or cloud environments, which have high speed wired networking without the need for Wi-Fi enablement. Since there are many different types of wireless cards, link:https://github.com/coreos/fedora-coreos-tracker/issues/862[adding Wi-Fi enablement to Fedora CoreOS by default] would require many large firmware binaries to be installed for a non-standard use, which isn't ideal. 4 | 5 | On the other hand, Fedora CoreOS is versatile enough to run on smaller devices in IoT applications or in home labs where Wi-Fi may be required. In these cases it is easy enough to add a layer with the needed tools and firmware. 
6 | 7 | == Adding Wi-Fi tools and firmware 8 | 9 | Typically enabling Wi-Fi on Fedora CoreOS involves adding the `NetworkManager-wifi` package along with the firmware package that corresponds to the wireless card in your system. Here is a list of some of the wireless firmware packages in Fedora: 10 | 11 | .Wi-Fi firmware packages in Fedora 12 | [source, text] 13 | ---- 14 | atheros-firmware - Firmware for Qualcomm Atheros WiFi/Bluetooth adapters 15 | b43-fwcutter - Firmware extraction tool for Broadcom wireless driver 16 | b43-openfwwf - Open firmware for some Broadcom 43xx series WLAN chips 17 | brcmfmac-firmware - Firmware for Broadcom/Cypress brcmfmac WiFi/Bluetooth adapters 18 | iwlegacy-firmware - Firmware for Intel(R) Wireless WiFi Link 3945(A)BG and 4965AGN adapters 19 | iwlwifi-dvm-firmware - DVM Firmware for Intel(R) Wireless WiFi adapters 20 | iwlwifi-mvm-firmware - MVM Firmware for Intel(R) Wireless WiFi adapters 21 | libertas-firmware - Firmware for Marvell Libertas SD/USB WiFi Network Adapters 22 | mt7xxx-firmware - Firmware for Mediatek 7600/7900 series WiFi/Bluetooth adapters 23 | nxpwireless-firmware - Firmware for NXP WiFi/Bluetooth/UWB adapters 24 | realtek-firmware - Firmware for Realtek WiFi/Bluetooth adapters 25 | tiwilink-firmware - Firmware for Texas Instruments WiFi/Bluetooth adapters 26 | atmel-firmware - Firmware for Atmel at76c50x wireless network chips 27 | zd1211-firmware - Firmware for wireless devices based on zd1211 chipset 28 | ---- 29 | 30 | For example, if a system has a Qualcomm wireless card then adding the `NetworkManager-wifi` and `atheros-firmware` packages would sufficiently enable the system for connecting to Wi-Fi. You can try to inspect your wireless card to determine what driver you need by running `lspci` (provided by the `pciutils` package) xref:debugging-with-toolbox.adoc[inside a Toolbx container]. 
31 | 32 | 33 | == When installing Fedora CoreOS 34 | 35 | For new systems the packages can be added using the xref:os-extensions.adoc[Adding OS Extensions] workflow. A NetworkManager configuration for the Wi-Fi connection will also need to be added so the system knows which wireless network to connect to. For more information on network configuration in Fedora CoreOS see xref:sysconfig-network-configuration.adoc[Network Configuration]. 36 | 37 | An example Butane config that combines the extension and network configuration is shown below. 38 | 39 | .Butane config for Wi-Fi enablement 40 | [source,yaml,subs="attributes"] 41 | ---- 42 | variant: fcos 43 | version: {butane-latest-stable-spec} 44 | systemd: 45 | units: 46 | # Enable Wi-Fi in NetworkManager for an Intel wireless card 47 | - name: rpm-ostree-install-wifi.service 48 | enabled: true 49 | contents: | 50 | [Unit] 51 | Description=Enable Wi-Fi 52 | Wants=network-online.target 53 | After=network-online.target 54 | Before=zincati.service 55 | ConditionPathExists=!/var/lib/%N.stamp 56 | [Service] 57 | Type=oneshot 58 | RemainAfterExit=yes 59 | ExecStart=/usr/bin/rpm-ostree install -y --allow-inactive NetworkManager-wifi iwlwifi-dvm-firmware 60 | ExecStart=/bin/touch /var/lib/%N.stamp 61 | ExecStart=/bin/systemctl --no-block reboot 62 | [Install] 63 | WantedBy=multi-user.target 64 | storage: 65 | files: 66 | - path: /etc/NetworkManager/system-connections/wifi-guest.nmconnection 67 | mode: 0600 68 | contents: 69 | inline: | 70 | [connection] 71 | id=wifi-guest 72 | type=wifi 73 | autoconnect=true 74 | [wifi] 75 | cloned-mac-address=permanent 76 | mode=infrastructure 77 | ssid=guest 78 | mac-address=ab:cd:01:02:03:04 79 | [wifi-security] 80 | auth-alg=open 81 | key-mgmt=wpa-psk 82 | psk=PASSWORD 83 | [ipv4] 84 | method=auto 85 | ---- 86 | 87 | NOTE: When installing a system and adding Wi-Fi enablement in this way the system will need to be on a wired network for the initial install since it will need to use the 
network to retrieve the Wi-Fi enabling packages. 88 | 89 | 90 | == On an existing Fedora CoreOS system 91 | 92 | If you have a system up already and want to add Wi-Fi capabilities (i.e. if you want to move it to a location without wired access) you can request the required packages. 93 | 94 | .Request NetworkManager-wifi and a specific Wi-Fi firmware 95 | [source, text] 96 | ---- 97 | $ sudo rpm-ostree install -y --allow-inactive \ 98 | NetworkManager-wifi iwlwifi-dvm-firmware 99 | ---- 100 | 101 | If you don't know what firmware to request you can request all the wireless firmware available in Fedora. Please note this approach is sub-optimal as it will add many unneeded packages on your system. 102 | 103 | .Request NetworkManager-wifi and all available Wi-Fi firmware 104 | ---- 105 | $ sudo rpm-ostree install -y --allow-inactive \ 106 | NetworkManager-wifi \ 107 | atheros-firmware \ 108 | b43-fwcutter \ 109 | b43-openfwwf \ 110 | brcmfmac-firmware \ 111 | iwlegacy-firmware \ 112 | iwlwifi-dvm-firmware \ 113 | iwlwifi-mvm-firmware \ 114 | libertas-firmware \ 115 | mt7xxx-firmware \ 116 | nxpwireless-firmware \ 117 | realtek-firmware \ 118 | tiwilink-firmware \ 119 | atmel-firmware \ 120 | zd1211-firmware 121 | ---- 122 | 123 | Then reboot the system. 124 | -------------------------------------------------------------------------------- /modules/ROOT/pages/sysconfig-setting-keymap.adoc: -------------------------------------------------------------------------------- 1 | = Setting Keyboard Layout 2 | 3 | To set your system keyboard layout (keymap), use the following Butane config to write to `/etc/vconsole.conf`: 4 | 5 | [source,yaml,subs="attributes"] 6 | ---- 7 | variant: fcos 8 | version: {butane-latest-stable-spec} 9 | storage: 10 | files: 11 | - path: /etc/vconsole.conf 12 | mode: 0644 13 | contents: 14 | inline: KEYMAP=de 15 | ---- 16 | 17 | Once booted, you can also verify that the desired keymap has been set using `localectl`. 
18 | -------------------------------------------------------------------------------- /modules/ROOT/pages/sysctl.adoc: -------------------------------------------------------------------------------- 1 | = Kernel tunables (sysctl) 2 | 3 | The Linux kernel offers a plethora of knobs under `/proc/sys` to control the availability of different features and tune performance parameters. 4 | 5 | Values under `/proc/sys` can be changed directly at runtime, but such changes will not be persisted across reboots. 6 | Persistent settings should be written under `/etc/sysctl.d/` during provisioning, in order to be applied on each boot. 7 | 8 | As an example, the xref:producing-ign.adoc[Butane] snippet below shows how to disable _SysRq_ keys: 9 | 10 | .Example: configuring kernel tunable to disable SysRq keys 11 | [source,yaml,subs="attributes"] 12 | ---- 13 | variant: fcos 14 | version: {butane-latest-stable-spec} 15 | storage: 16 | files: 17 | - path: /etc/sysctl.d/90-sysrq.conf 18 | contents: 19 | inline: | 20 | kernel.sysrq = 0 21 | ---- 22 | 23 | Further details can be found in the systemd man pages https://www.freedesktop.org/software/systemd/man/sysctl.d.html[sysctl.d(5)] and https://www.freedesktop.org/software/systemd/man/systemd-sysctl.service.html[systemd-sysctl.service(8)]. 24 | -------------------------------------------------------------------------------- /modules/ROOT/pages/time-zone.adoc: -------------------------------------------------------------------------------- 1 | = Configuring time zone 2 | 3 | By default, Fedora CoreOS machines keep time in the Coordinated Universal Time (UTC) zone and synchronize their clocks with the Network Time Protocol (NTP). This page contains information about customizing the time zone. 4 | 5 | == Viewing and changing time zone 6 | 7 | The `timedatectl` command displays and sets the date, time, and time zone. 
8 | 9 | [source,bash] 10 | ---- 11 | $ timedatectl status 12 | Local time: Mon 2021-05-17 20:10:20 UTC 13 | Universal time: Mon 2021-05-17 20:10:20 UTC 14 | RTC time: Mon 2021-05-17 20:10:20 15 | Time zone: UTC (UTC, +0000) 16 | System clock synchronized: yes 17 | NTP service: active 18 | RTC in local TZ: no 19 | ---- 20 | 21 | You can use the `list-timezones` subcommand to list the available time zones. Available time zones are represented by https://man7.org/linux/man-pages/man5/tzfile.5.html[`tzfile`] entries in the system's time zone database under `/usr/share/zoneinfo`. 22 | 23 | [source,bash] 24 | ---- 25 | $ timedatectl list-timezones 26 | Africa/Abidjan 27 | Africa/Accra 28 | Africa/Addis_Ababa 29 | … 30 | ---- 31 | 32 | See the https://www.freedesktop.org/software/systemd/man/timedatectl.html[manual page] for more information about how `timedatectl` can be used; however, we do not recommend changing the time zone per-machine imperatively via SSH. 33 | 34 | === Recommended time zone: Coordinated Universal Time (UTC) 35 | 36 | We recommend that all machines in Fedora CoreOS clusters use the default UTC time zone. It is strongly discouraged to set a non-UTC time zone for reasons including, but not limited to, time zone confusions, complexities of adjusting clocks for daylight savings time depending on regional customs, difficulty in correlating log files across systems, possibility of a stale time zone database, and unpredictability, as local time zones are subject to arbitrary local policies and laws. 37 | 38 | If your applications require a different time zone, in most cases, it is possible to set a different time zone than the system one for individual applications by setting the `TZ` environment variable. 
39 | 40 | === Setting the time zone via Ignition 41 | 42 | If you are aware of the downsides to setting a system time zone that is different from the default UTC time zone, you can set a different system time zone by setting the local time zone configuration file, https://www.freedesktop.org/software/systemd/man/localtime.html[`/etc/localtime`], to be an absolute or relative symlink to a `tzfile` entry under `/usr/share/zoneinfo/`. 43 | It is recommended that you set the same time zone across all your machines in the cluster. 44 | 45 | For example, you can set the time zone to `America/New_York` by using a Butane config like the following: 46 | 47 | [source,yaml,subs="attributes"] 48 | ---- 49 | variant: fcos 50 | version: {butane-latest-stable-spec} 51 | storage: 52 | links: 53 | - path: /etc/localtime 54 | target: ../usr/share/zoneinfo/America/New_York 55 | ---- 56 | 57 | == Time synchronization 58 | 59 | Fedora CoreOS uses the https://chrony.tuxfamily.org/[`chrony`] implementation of NTP, with some additional custom logic for specific clouds. For details, see the https://github.com/coreos/fedora-coreos-tracker/blob/main/internals/README-internals.md#time-synchronization[Fedora CoreOS internals documentation]. 60 | -------------------------------------------------------------------------------- /modules/ROOT/pages/tutorial-conclusion.adoc: -------------------------------------------------------------------------------- 1 | = Conclusion 2 | 3 | In these tutorials we have learned a little bit about Fedora CoreOS. We have learned how it is delivered as a pre-created disk image, how it is provisioned in an automated fashion via Ignition, and also how automated updates are configured and achieved via Zincati and rpm-ostree. The next step is to try out Fedora CoreOS for your own use cases and https://github.com/coreos/fedora-coreos-tracker/blob/main/README.md#communication-channels-for-fedora-coreos[join the community]! 
4 | -------------------------------------------------------------------------------- /modules/ROOT/pages/tutorial-services.adoc: -------------------------------------------------------------------------------- 1 | = Starting a script on first boot via a systemd service 2 | 3 | NOTE: Make sure that you have completed the steps described in the xref:tutorial-setup.adoc[initial setup page] before starting this tutorial. 4 | 5 | In this tutorial, we will run a script on the first boot via a systemd service. We will add the following to the Butane config from the previous scenario: 6 | 7 | * Add a script at `/usr/local/bin/public-ipv4.sh`. 8 | * Configure a systemd service to run the script on first boot. 9 | 10 | == Writing the script 11 | 12 | Let's write a small script that uses https://icanhazip.com/[icanhazip.com] to create an issue file to display as a prelogin message on the console and store it in `public-ipv4.sh`. 13 | 14 | NOTE: This is only an example to show how to run a service on boot. Do not use this if you don't trust the owners of https://icanhazip.com/[icanhazip.com]. 15 | 16 | [source,bash] 17 | ---- 18 | cat <<'EOF' > public-ipv4.sh 19 | #!/bin/bash 20 | echo "Detected Public IPv4: is $(curl https://ipv4.icanhazip.com)" > \ 21 | /etc/issue.d/50_public-ipv4.issue 22 | EOF 23 | ---- 24 | 25 | This could be useful in cloud environments where you might have different public and private addresses. 26 | 27 | We will store this script into `/usr/local/bin/public-ipv4.sh` when we provision the machine. 28 | 29 | == Writing the systemd service 30 | 31 | We need to call the script from the previous section by using a systemd unit. 
Let's write a systemd unit into the `issuegen-public-ipv4.service` file that does what we want, which is to execute on first boot and not again: 32 | 33 | [source,bash] 34 | ---- 35 | cat <<'EOF' > issuegen-public-ipv4.service 36 | [Unit] 37 | Before=systemd-user-sessions.service 38 | Wants=network-online.target 39 | After=network-online.target 40 | ConditionPathExists=!/var/lib/issuegen-public-ipv4 41 | 42 | [Service] 43 | Type=oneshot 44 | ExecStart=/usr/local/bin/public-ipv4.sh 45 | ExecStartPost=/usr/bin/touch /var/lib/issuegen-public-ipv4 46 | RemainAfterExit=yes 47 | 48 | [Install] 49 | WantedBy=multi-user.target 50 | EOF 51 | ---- 52 | 53 | == Writing the Butane config and converting to Ignition 54 | 55 | We can now create a Butane config that will include the script and systemd unit file contents by picking up the local `public-ipv4.sh` and `issuegen-public-ipv4.service` files using local file references. The final Butane config, stored in `services.bu`, will be: 56 | 57 | [source,yaml,subs="attributes"] 58 | ---- 59 | variant: fcos 60 | version: {butane-latest-stable-spec} 61 | systemd: 62 | units: 63 | - name: serial-getty@ttyS0.service 64 | dropins: 65 | - name: autologin-core.conf 66 | contents: | 67 | [Service] 68 | # Override Execstart in main unit 69 | ExecStart= 70 | # Add new Execstart with `-` prefix to ignore failure` 71 | ExecStart=-/usr/sbin/agetty --autologin core --noclear %I $TERM 72 | - name: issuegen-public-ipv4.service 73 | enabled: true 74 | contents_local: issuegen-public-ipv4.service 75 | storage: 76 | files: 77 | - path: /etc/hostname 78 | mode: 0644 79 | contents: 80 | inline: | 81 | tutorial 82 | - path: /etc/profile.d/systemd-pager.sh 83 | mode: 0644 84 | contents: 85 | inline: | 86 | # Tell systemd to not use a pager when printing information 87 | export SYSTEMD_PAGER=cat 88 | - path: /usr/local/bin/public-ipv4.sh 89 | mode: 0755 90 | contents: 91 | local: public-ipv4.sh 92 | ---- 93 | 94 | NOTE: Check the Butane 
https://coreos.github.io/butane/examples/[Examples] and https://coreos.github.io/butane/specs/[Configuration specifications] for more details about local file includes. 95 | 96 | With the files `public-ipv4.sh`, `issuegen-public-ipv4.service`, and `services.bu` in the current working directory we can now convert to Ignition: 97 | 98 | [source,bash] 99 | ---- 100 | butane --pretty --strict --files-dir=./ services.bu --output services.ign 101 | ---- 102 | 103 | == Testing 104 | 105 | Just as before we will use the following to boot the instance: 106 | 107 | [source,bash] 108 | ---- 109 | # Setup the correct SELinux label to allow access to the config 110 | chcon --verbose --type svirt_home_t services.ign 111 | 112 | # Start a Fedora CoreOS virtual machine 113 | virt-install --name=fcos --vcpus=2 --ram=2048 --os-variant=fedora-coreos-stable \ 114 | --import --network=bridge=virbr0 --graphics=none \ 115 | --qemu-commandline="-fw_cfg name=opt/com.coreos/config,file=${PWD}/services.ign" \ 116 | --disk="size=20,backing_store=${PWD}/fedora-coreos.qcow2" 117 | ---- 118 | 119 | And view on the console that the `Detected Public IPv4` is shown in the console output right before you are dropped to a login prompt: 120 | 121 | ---- 122 | Fedora CoreOS 38.20230709.3.0 123 | Kernel 6.3.11-200.fc38.x86_64 on an x86_64 (ttyS0) 124 | 125 | SSH host key: SHA256:tYHKk26+NZ/+ZytWLXClGz813PQJDGP/2+AiuZ8fiqk (ECDSA) 126 | SSH host key: SHA256:jJASZec/91zXd4or0uiFsvsfaLC6viLronfxIwQlNCs (ED25519) 127 | SSH host key: SHA256:2XlSZAehEu666fmXeM8d47lpIJd92MBOqgMazT4GsVw (RSA) 128 | enp1s0: 192.168.124.150 fe80::475a:7a10:2302:b670 129 | Ignition: ran on 2023/08/03 16:40:45 UTC (this boot) 130 | Ignition: user-provided config was applied 131 | No SSH authorized keys provided by Ignition or Afterburn 132 | Detected Public IPv4: is 3.252.102.80 133 | tutorial login: core (automatic login) 134 | 135 | Fedora CoreOS 38.20230709.3.0 136 | [core@tutorial ~]$ 137 | ---- 138 | 139 | And the service 
shows it was launched successfully: 140 | 141 | ---- 142 | [core@tutorial ~]$ systemctl status --full issuegen-public-ipv4.service 143 | ● issuegen-public-ipv4.service 144 | Loaded: loaded (/etc/systemd/system/issuegen-public-ipv4.service; enabled; preset: enabled) 145 | Drop-In: /usr/lib/systemd/system/service.d 146 | └─10-timeout-abort.conf 147 | Active: active (exited) since Thu 2023-08-03 16:40:55 UTC; 1min 7s ago 148 | Process: 1423 ExecStart=/usr/local/bin/public-ipv4.sh (code=exited, status=0/SUCCESS) 149 | Process: 1460 ExecStartPost=/usr/bin/touch /var/lib/issuegen-public-ipv4 (code=exited, status=0/SUCCESS) 150 | Main PID: 1423 (code=exited, status=0/SUCCESS) 151 | CPU: 84ms 152 | 153 | Aug 03 16:40:55 tutorial systemd[1]: Starting issuegen-public-ipv4.service... 154 | Aug 03 16:40:55 tutorial public-ipv4.sh[1424]: % Total % Received % Xferd Average Speed Time Time Time Current 155 | Aug 03 16:40:55 tutorial public-ipv4.sh[1424]: Dload Upload Total Spent Left Speed 156 | Aug 03 16:40:55 tutorial public-ipv4.sh[1424]: [158B blob data] 157 | Aug 03 16:40:55 tutorial systemd[1]: Finished issuegen-public-ipv4.service. 158 | ---- 159 | 160 | == Cleanup 161 | 162 | Now let's take down the instance for the next test. First, disconnect from the serial console by pressing `CTRL` + `]` and then destroy the machine: 163 | 164 | ---- 165 | virsh destroy fcos 166 | virsh undefine --remove-all-storage fcos 167 | ---- 168 | 169 | You may now proceed with the xref:tutorial-containers.adoc[next tutorial]. 170 | -------------------------------------------------------------------------------- /modules/ROOT/pages/update-barrier-signing-keys.adoc: -------------------------------------------------------------------------------- 1 | = Signing keys and updates 2 | 3 | All binary artifacts, ostree commits, and OS images belonging to Fedora and Fedora CoreOS (FCOS) are signed via GPG. The current set of trusted signing keys is available at https://fedoraproject.org/security/. 
4 | 5 | == Keys rotation and update barriers 6 | 7 | At the beginning of every new Fedora major release cycle, a new signing key is generated and its public portion published on the Fedora website. The new key will be later used to sign new artifacts, replacing the currently used one. This is done in order to establish an automatic chain of trust from an older release to a more recent one, which can be possibly signed by a different newer key. 8 | 9 | In order to make automatic updates of Fedora CoreOS work across major Fedora releases, the above set of embedded signing key is refreshed at least once per Fedora release cycle. When that happens, an update barrier is put in place in the FCOS update graph. 10 | 11 | The primary reason for such update barrier is to make sure that older (and possibly stale) instances automatically receive and trust newly generated keys. This is achieved by forcing such machines to progressively catch up on intermediate updates (signed by an already trusted key) before jumping to the latest published release. 12 | 13 | == Example 14 | 15 | Taking the Fedora 32 release cycle as an example, in the beginning FCOS images only know about signing keys for 32 and 33 majors: 16 | 17 | ---- 18 | $ grep OSTREE /etc/os-release 19 | OSTREE_VERSION='32.20200615.3.0' 20 | 21 | $ ls -v /usr/etc/pki/rpm-gpg/*primary | tail -3 22 | /usr/etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-31-primary 23 | /usr/etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-32-primary 24 | /usr/etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-33-primary 25 | ---- 26 | 27 | Later in that release cycle, the signing key is generated for future Fedora 34 releases and added to FCOS. A barrier is put in place in FCOS update graph, for example on release `32.20200907.3.0`. 
Inspecting that image shows the following: 28 | 29 | ---- 30 | $ grep OSTREE /etc/os-release 31 | OSTREE_VERSION='32.20200907.3.0' 32 | 33 | $ ls -v /usr/etc/pki/rpm-gpg/*primary | tail -3 34 | /usr/etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-32-primary 35 | /usr/etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-33-primary 36 | /usr/etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-34-primary 37 | ---- 38 | 39 | With this barrier in place, older instances must first upgrade to `32.20200907.3.0` in order to make sure they know (and trust) the signing key for Fedora 34, before being able to upgrade to new releases based on that. 40 | -------------------------------------------------------------------------------- /modules/ROOT/pages/update-streams.adoc: -------------------------------------------------------------------------------- 1 | = Update Streams 2 | 3 | == Individual Update Streams 4 | 5 | Fedora CoreOS (FCOS) has several individual update streams that are available to end users. They are: 6 | 7 | * `stable` 8 | 9 | ** The `stable` stream is the most reliable stream offered with changes 10 | only reaching that stream after spending a period of time in the `testing` 11 | stream. 12 | 13 | * `testing` 14 | 15 | ** The `testing` stream represents what is coming in the next `stable` 16 | release. Content in this stream is updated regularly and offers our 17 | community an opportunity to catch breaking changes before they hit 18 | the `stable` stream. 19 | 20 | * `next` 21 | 22 | ** The `next` stream represents the future. It will often be 23 | used to experiment with new features and also test out rebases of our 24 | platform on top of the next major version of Fedora. The content in 25 | the `next` stream will also eventually filter down into `testing` 26 | and on to `stable`. 27 | 28 | When following a stream, a system is updated automatically when a new release is rolled out on that stream. 
While all streams of FCOS are automatically tested, it is strongly encouraged for users to devote a percentage of their FCOS deployment to running the `testing` and `next` streams. This ensures possible breaking changes can be caught early enough that `stable` deployments experience fewer regressions. 29 | 30 | == Switching to a Different Stream 31 | 32 | In order to switch between the different streams of Fedora CoreOS (FCOS) a user can leverage the `rpm-ostree rebase` command. 33 | 34 | [TIP] 35 | ==== 36 | It may be a good idea to backup data under `/var` before switching streams. 37 | ==== 38 | 39 | [NOTE] 40 | ==== 41 | Software updates generally follow the `next` -> `testing` -> `stable` flow, meaning `next` has the newest software and `stable` has the oldest software. Upstream software components are generally tested for upgrading, not downgrading, which means that upstream software typically can handle a data/configuration migration forward (upgrade), but not backwards (downgrade). For this reason it is typically safer to rebase from `stable` -> `testing` or `testing` -> `next`, but less safe to go the other direction. 42 | ==== 43 | 44 | 45 | [NOTE] 46 | ==== 47 | Switching between streams may introduce regressions or bugs due to skipping update barriers. If you experience a regression please attempt a xref:manual-rollbacks.adoc[rollback]. 48 | ==== 49 | 50 | [source,bash] 51 | ---- 52 | # Stop the service that performs automatic updates 53 | sudo systemctl stop zincati.service 54 | 55 | # Perform the rebase to a different stream 56 | # Supported architectures: aarch64, ppc64le, s390x, x86_64 57 | # Available streams: "stable", "testing", and "next" 58 | STREAM="testing" 59 | sudo rpm-ostree rebase "ostree-remote-registry:fedora:quay.io/fedora/fedora-coreos:${STREAM}" 60 | ---- 61 | 62 | After inspecting the package difference the user can reboot. 
After boot the system will be loaded into the latest release on the new stream and will follow that stream for future updates. 63 | -------------------------------------------------------------------------------- /nginx.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name localhost; 4 | 5 | location / { 6 | root /antora/public; 7 | index index.html index.htm; 8 | } 9 | 10 | error_page 500 502 503 504 /50x.html; 11 | location = /50x.html { 12 | root /usr/share/nginx/html; 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /site.yml: -------------------------------------------------------------------------------- 1 | site: 2 | title: Local Preview 3 | start_page: fedora-coreos::index.adoc 4 | content: 5 | sources: 6 | - url: . 7 | branches: HEAD 8 | ui: 9 | bundle: 10 | url: https://gitlab.com/fedora/docs/docs-website/ui-bundle/-/jobs/artifacts/HEAD/raw/build/ui-bundle.zip?job=bundle-stable 11 | snapshot: true 12 | default_layout: with_menu 13 | output: 14 | clean: true 15 | dir: ./public 16 | runtime: 17 | fetch: true 18 | cache_dir: ./cache 19 | urls: 20 | html_extension_style: indexify 21 | --------------------------------------------------------------------------------