├── .dockerignore
├── .github
│   ├── release-drafter.yml
│   └── workflows
│       ├── docker.yaml
│       └── release-drafter.yaml
├── .gitignore
├── .golangci.yaml
├── CODEOWNERS
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE
├── Makefile
├── README.md
├── cmd
│   └── metal-api
│       ├── internal
│       │   ├── datastore
│       │   │   ├── event.go
│       │   │   ├── filesystem.go
│       │   │   ├── health.go
│       │   │   ├── health_integration_test.go
│       │   │   ├── image.go
│       │   │   ├── image_integration_test.go
│       │   │   ├── image_test.go
│       │   │   ├── integer.go
│       │   │   ├── integer_integration_test.go
│       │   │   ├── integer_test.go
│       │   │   ├── ip.go
│       │   │   ├── ip_test.go
│       │   │   ├── machine.go
│       │   │   ├── machine_integration_test.go
│       │   │   ├── machine_test.go
│       │   │   ├── migrate.go
│       │   │   ├── migrate_test.go
│       │   │   ├── migrations
│       │   │   │   ├── 01_remove_wait_table.go
│       │   │   │   ├── 02_ip_uuids.go
│       │   │   │   ├── 03_machine_role.go
│       │   │   │   ├── 04_provisioning_event_validation.go
│       │   │   │   ├── 05_allocation_uuids.go
│       │   │   │   ├── 06_additional_announcable_cidrs.go
│       │   │   │   ├── 07_size_reservations_table.go
│       │   │   │   ├── 08_childprefixlength.go
│       │   │   │   └── doc.go
│       │   │   ├── migrations_integration
│       │   │   │   └── migrate_integration_test.go
│       │   │   ├── network.go
│       │   │   ├── network_integration_test.go
│       │   │   ├── network_test.go
│       │   │   ├── partition.go
│       │   │   ├── partition_test.go
│       │   │   ├── rethinkdb.go
│       │   │   ├── rethinkdb_integration_test.go
│       │   │   ├── rethinkdb_test.go
│       │   │   ├── shared_mutex.go
│       │   │   ├── shared_mutex_test.go
│       │   │   ├── size.go
│       │   │   ├── size_integration_test.go
│       │   │   ├── size_reservation.go
│       │   │   ├── size_reservation_integration_test.go
│       │   │   ├── size_test.go
│       │   │   ├── sizeimageconstraint.go
│       │   │   ├── switch.go
│       │   │   ├── switch_integration_test.go
│       │   │   └── testing.go
│       │   ├── eventbus
│       │   │   ├── nsq.go
│       │   │   ├── nsq_test.go
│       │   │   └── testing.go
│       │   ├── fsm
│       │   │   ├── events.go
│       │   │   ├── events_test.go
│       │   │   ├── fsm.go
│       │   │   ├── fsm_test.go
│       │   │   └── states
│       │   │       ├── alive.go
│       │   │       ├── booting-new-kernel.go
│       │   │       ├── crashed.go
│       │   │       ├── initial.go
│       │   │       ├── installing.go
│       │   │       ├── machine-reclaim.go
│       │   │       ├── phoned-home.go
│       │   │       ├── planned-reboot.go
│       │   │       ├── preparing.go
│       │   │       ├── pxe-booting.go
│       │   │       ├── registering.go
│       │   │       ├── states.go
│       │   │       └── waiting.go
│       │   ├── grpc
│       │   │   ├── boot-service-wait.go
│       │   │   ├── boot-service-wait_integration_test.go
│       │   │   ├── boot-service.go
│       │   │   ├── boot-service_test.go
│       │   │   ├── event-service.go
│       │   │   ├── event-service_test.go
│       │   │   └── grpc-server.go
│       │   ├── headscale
│       │   │   ├── auth.go
│       │   │   └── client.go
│       │   ├── ipam
│       │   │   ├── ipam.go
│       │   │   └── testing.go
│       │   ├── issues
│       │   │   ├── asn-uniqueness.go
│       │   │   ├── bmc-info-outdated.go
│       │   │   ├── bmc-without-ip.go
│       │   │   ├── bmc-without-mac.go
│       │   │   ├── crash-loop.go
│       │   │   ├── failed-machine-reclaim.go
│       │   │   ├── issues.go
│       │   │   ├── issues_test.go
│       │   │   ├── last-event-error.go
│       │   │   ├── liveliness-dead.go
│       │   │   ├── liveliness-not-available.go
│       │   │   ├── liveliness-unknown.go
│       │   │   ├── no-event-container.go
│       │   │   ├── no-partition.go
│       │   │   ├── non-distinct-bmc-ip.go
│       │   │   ├── severeties.go
│       │   │   └── types.go
│       │   ├── masterdata
│       │   │   └── masterdata.go
│       │   ├── metal
│       │   │   ├── errors.go
│       │   │   ├── errors_test.go
│       │   │   ├── filesystem.go
│       │   │   ├── filesystem_test.go
│       │   │   ├── firmware.go
│       │   │   ├── image.go
│       │   │   ├── image_test.go
│       │   │   ├── ip.go
│       │   │   ├── ip_test.go
│       │   │   ├── machine.go
│       │   │   ├── machine_test.go
│       │   │   ├── metal.go
│       │   │   ├── network.go
│       │   │   ├── network_test.go
│       │   │   ├── partition.go
│       │   │   ├── partition_test.go
│       │   │   ├── provisioning.go
│       │   │   ├── provisioning_test.go
│       │   │   ├── size.go
│       │   │   ├── size_reservation.go
│       │   │   ├── size_reservation_test.go
│       │   │   ├── size_test.go
│       │   │   ├── sizeimageconstraint.go
│       │   │   ├── sizeimageconstraint_test.go
│       │   │   ├── switch.go
│       │   │   └── switch_test.go
│       │   ├── metrics
│       │   │   └── metrics.go
│       │   ├── service
│       │   │   ├── asn.go
│       │   │   ├── async-actor.go
│       │   │   ├── audit-service.go
│       │   │   ├── common_test.go
│       │   │   ├── filesystem-service.go
│       │   │   ├── firewall-service.go
│       │   │   ├── firmware-service.go
│       │   │   ├── firmware-service_test.go
│       │   │   ├── image-service.go
│       │   │   ├── image-service_integration_test.go
│       │   │   ├── image-service_test.go
│       │   │   ├── integration_test.go
│       │   │   ├── ip-service.go
│       │   │   ├── ip-service_test.go
│       │   │   ├── machine-service.go
│       │   │   ├── machine-service_allocation_test.go
│       │   │   ├── machine-service_integration_test.go
│       │   │   ├── machine-service_test.go
│       │   │   ├── network-service.go
│       │   │   ├── network-service_test.go
│       │   │   ├── partition-service.go
│       │   │   ├── partition-service_test.go
│       │   │   ├── project-service.go
│       │   │   ├── project-service_test.go
│       │   │   ├── s3client
│       │   │   │   └── s3client.go
│       │   │   ├── service.go
│       │   │   ├── service_test.go
│       │   │   ├── size-service.go
│       │   │   ├── size-service_test.go
│       │   │   ├── sizeimageconstraint-service.go
│       │   │   ├── switch-service.go
│       │   │   ├── switch-service_integration_test.go
│       │   │   ├── switch-service_test.go
│       │   │   ├── tenant-service.go
│       │   │   ├── tenant-service_test.go
│       │   │   ├── user-service.go
│       │   │   ├── v1
│       │   │   │   ├── audit.go
│       │   │   │   ├── common.go
│       │   │   │   ├── filesystem.go
│       │   │   │   ├── firewall.go
│       │   │   │   ├── firmware.go
│       │   │   │   ├── image.go
│       │   │   │   ├── ip.go
│       │   │   │   ├── machine.go
│       │   │   │   ├── network.go
│       │   │   │   ├── partition.go
│       │   │   │   ├── size.go
│       │   │   │   ├── sizeimageconstraint.go
│       │   │   │   ├── switch.go
│       │   │   │   ├── user.go
│       │   │   │   └── vpn.go
│       │   │   ├── vpn-service.go
│       │   │   ├── vpn-service_test.go
│       │   │   └── vrf.go
│       │   ├── tags
│       │   │   ├── tags.go
│       │   │   └── tags_test.go
│       │   ├── testdata
│       │   │   ├── ipam.go
│       │   │   └── testdata.go
│       │   └── tools
│       │       └── visualize_fsm
│       │           ├── fsm.dot
│       │           ├── fsm.svg
│       │           └── main.go
│       └── main.go
├── go.mod
├── go.sum
├── pkg
│   ├── api
│   │   └── v1
│   │       ├── boot.pb.go
│   │       ├── boot_grpc.pb.go
│   │       ├── event.pb.go
│   │       └── event_grpc.pb.go
│   └── grpc
│       └── wait.go
├── proto
│   ├── Makefile
│   ├── api
│   │   └── v1
│   │       ├── boot.proto
│   │       └── event.proto
│   ├── buf.gen.yaml
│   └── buf.yaml
├── spec
│   └── metal-api.json
└── test
    ├── integration.go
    └── rest
        ├── allocate-network-and-machine.rest
        ├── firewall.rest
        ├── images.rest
        ├── ips.rest
        ├── machine-groups.rest
        ├── machines.rest
        ├── metadata.rest
        ├── networks.rest
        ├── partitions.rest
        ├── projects.rest
        ├── readme.md
        ├── sizes.rest
        └── switch.rest

--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
vendor
docker-compose.yml
docker-make.yml

--------------------------------------------------------------------------------
/.github/release-drafter.yml:
--------------------------------------------------------------------------------
name-template: 'v$RESOLVED_VERSION'
tag-template: 'v$RESOLVED_VERSION'

template: |
  ## General Changes

  $CHANGES

categories:
  - title: '🚀 Features'
    labels:
      - 'feature'
      - 'enhancement'
  - title: '🐛 Bug Fixes'
    labels:
      - 'fix'
      - 'bugfix'
      - 'bug'

version-resolver:
  major:
    labels:
      - 'major'
  minor:
    labels:
      - 'minor'
  patch:
    labels:
      - 'patch'
  default: patch

--------------------------------------------------------------------------------
/.github/workflows/docker.yaml:
--------------------------------------------------------------------------------
---
name: Docker Build Action
on:
  pull_request:
    branches:
      - master
  release:
    types:
      - published
  push:
    branches:
      - master

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  build:
    name: Docker Build
    runs-on: ubuntu-latest

    steps:
      - name: Log in to the container registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ secrets.DOCKER_REGISTRY_USER }}
          password: ${{ secrets.DOCKER_REGISTRY_TOKEN }}

      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
          cache: false

      - name: Lint
        uses: golangci/golangci-lint-action@v7
        with:
          args: --build-tags integration -D protogetter --timeout=5m

      - name: Make tag
        run: |
          [ "${GITHUB_EVENT_NAME}" == 'pull_request' ] && echo "tag=${GITHUB_HEAD_REF##*/}" >> $GITHUB_ENV || true
          [ "${GITHUB_EVENT_NAME}" == 'release' ] && echo "tag=${GITHUB_REF##*/}" >> $GITHUB_ENV || true
          [ "${GITHUB_EVENT_NAME}" == 'push' ] && echo "tag=latest" >> $GITHUB_ENV || true

      - name: Build
        run: |
          make release

      - name: Push image
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.tag }}

  integration:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'

      - name: Run integration tests
        run: |
          make test-integration

--------------------------------------------------------------------------------
/.github/workflows/release-drafter.yaml:
--------------------------------------------------------------------------------
---
name: Release Drafter Action

on:
  push:
    branches:
      - master

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: release-drafter/release-drafter@v6
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
bin
vendor
.idea
.vscode
generate
coverage.out
__debug_bin
.mirrord

--------------------------------------------------------------------------------
/.golangci.yaml:
--------------------------------------------------------------------------------
version: "2"
run:
  concurrency: 4
linters:
  enable:
    - asciicheck
    - bidichk
    - gocheckcompilerdirectives
    - testifylint
  disable:
    - errcheck
    - govet
    - musttag
    - protogetter
    - staticcheck
  exclusions:
    generated: lax
    presets:
      - comments
      - common-false-positives
      - legacy
      - std-error-handling
    paths:
      - third_party$
      - builtin$
      - examples$
formatters:
  exclusions:
    generated: lax
    paths:
      - third_party$
      - builtin$
      - examples$

--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
* @metal-stack/metal-api-maintainers

--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
# Contributing

Please check out the [contributing section](https://docs.metal-stack.io/stable/development/contributing/) in our [docs](https://docs.metal-stack.io/).
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM gcr.io/distroless/static-debian12:nonroot
COPY bin/metal-api /metal-api
CMD ["/metal-api"]

--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
CGO_ENABLED := 1

SHA := $(shell git rev-parse --short=8 HEAD)
GITVERSION := $(shell git describe --long --all)
# gnu date format iso-8601 is parsable with Go RFC3339
BUILDDATE := $(shell date --iso-8601=seconds)
VERSION := $(or ${VERSION},$(shell git describe --tags --exact-match 2> /dev/null || git symbolic-ref -q --short HEAD || git rev-parse --short HEAD))

MINI_LAB_KUBECONFIG := $(shell pwd)/../mini-lab/.kubeconfig

LINKMODE := -linkmode external -extldflags '-static -s -w' \
	-X 'github.com/metal-stack/v.Version=$(VERSION)' \
	-X 'github.com/metal-stack/v.Revision=$(GITVERSION)' \
	-X 'github.com/metal-stack/v.GitSHA1=$(SHA)' \
	-X 'github.com/metal-stack/v.BuildDate=$(BUILDDATE)'

.PHONY: release
release: protoc test build

.PHONY: build
build:
	go build \
		-tags 'osusergo netgo static_build' \
		-ldflags \
		"$(LINKMODE)" \
		-o bin/metal-api \
		github.com/metal-stack/metal-api/cmd/metal-api

	md5sum bin/metal-api > bin/metal-api.md5

	bin/metal-api dump-swagger | jq -r -S 'walk(if type == "array" then sort_by(strings) else . end)' > spec/metal-api.json || { echo "jq >=1.6 required"; exit 1; }

.PHONY: test
test: test-unit check-diff

.PHONY: test-unit
test-unit:
	go test -race -cover ./...

.PHONY: test-integration
test-integration:
	go test -v -count=1 -tags=integration -timeout 600s -p 1 ./...

.PHONY: check-diff
check-diff: spec
	git diff --exit-code spec pkg

.PHONY: protoc
protoc:
	rm -rf pkg/api/v1
	make -C proto protoc

.PHONY: mini-lab-push
mini-lab-push:
	make
	docker build -f Dockerfile -t metalstack/metal-api:latest .
	kind --name metal-control-plane load docker-image metalstack/metal-api:latest
	kubectl --kubeconfig=$(MINI_LAB_KUBECONFIG) patch deployments.apps -n metal-control-plane metal-api --patch='{"spec":{"template":{"spec":{"containers":[{"name": "metal-api","imagePullPolicy":"IfNotPresent","image":"metalstack/metal-api:latest"}]}}}}'
	kubectl --kubeconfig=$(MINI_LAB_KUBECONFIG) delete pod -n metal-control-plane -l app=metal-api

.PHONY: visualize-fsm
visualize-fsm:
	cd cmd/metal-api/internal/tools/visualize_fsm \
		&& go run main.go \
		&& dot -Tsvg fsm.dot > fsm.svg

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# metal-api

[![Build](https://github.com/metal-stack/metal-api/actions/workflows/docker.yaml/badge.svg?branch=master)](https://github.com/metal-stack/metal-api/actions)
[![Slack](https://img.shields.io/badge/slack-metal--stack-brightgreen.svg?logo=slack)](https://metal-stack.slack.com/)
[![Go Report Card](https://goreportcard.com/badge/github.com/metal-stack/metal-api)](https://goreportcard.com/report/github.com/metal-stack/metal-api)
[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/metal-stack/metal-api)
[![Docker Pulls](https://img.shields.io/docker/pulls/metalstack/metal-api.svg)](https://hub.docker.com/r/metalstack/metal-api/)

The metal-api is one of the major components of the metal-stack control plane. It is both the public interface for users to manage machines, networks, IPs, and so forth, and the interface for the metal-stack components running inside a partition.

The CLI tool for using the API is called `metalctl`. You can find this project [here](https://github.com/metal-stack/metalctl).

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/event.go:
--------------------------------------------------------------------------------
package datastore

import (
	"context"
	"log/slog"

	"github.com/metal-stack/metal-api/cmd/metal-api/internal/fsm"
	"github.com/metal-stack/metal-api/cmd/metal-api/internal/metal"
)

// ListProvisioningEventContainers returns all machine provisioning event containers.
func (rs *RethinkStore) ListProvisioningEventContainers() (metal.ProvisioningEventContainers, error) {
	es := make(metal.ProvisioningEventContainers, 0)
	err := rs.listEntities(rs.eventTable(), &es)
	return es, err
}

// FindProvisioningEventContainer finds a provisioning event container for a given machine id.
func (rs *RethinkStore) FindProvisioningEventContainer(id string) (*metal.ProvisioningEventContainer, error) {
	var e metal.ProvisioningEventContainer
	err := rs.findEntityByID(rs.eventTable(), &e, id)
	if err != nil {
		return nil, err
	}
	return &e, nil
}

// UpdateProvisioningEventContainer updates a provisioning event container.
func (rs *RethinkStore) UpdateProvisioningEventContainer(old *metal.ProvisioningEventContainer, new *metal.ProvisioningEventContainer) error {
	return rs.updateEntity(rs.eventTable(), new, old)
}

// CreateProvisioningEventContainer creates a new provisioning event container.
func (rs *RethinkStore) CreateProvisioningEventContainer(ec *metal.ProvisioningEventContainer) error {
	return rs.createEntity(rs.eventTable(), ec)
}

// UpsertProvisioningEventContainer inserts a machine's event container.
func (rs *RethinkStore) UpsertProvisioningEventContainer(ec *metal.ProvisioningEventContainer) error {
	return rs.upsertEntity(rs.eventTable(), ec)
}

func (rs *RethinkStore) ProvisioningEventForMachine(ctx context.Context, log *slog.Logger, event *metal.ProvisioningEvent, machineID string) (*metal.ProvisioningEventContainer, error) {
	ec, err := rs.FindProvisioningEventContainer(machineID)
	if err != nil && !metal.IsNotFound(err) {
		return nil, err
	}

	if ec == nil {
		ec = &metal.ProvisioningEventContainer{
			Base: metal.Base{
				ID: machineID,
			},
			Liveliness: metal.MachineLivelinessAlive,
		}
	}

	newEC, err := fsm.HandleProvisioningEvent(ctx, log, ec, event)
	if err != nil {
		return nil, err
	}

	if err = newEC.Validate(); err != nil {
		return nil, err
	}

	newEC.TrimEvents(100)

	err = rs.UpsertProvisioningEventContainer(newEC)
	return newEC, err
}
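ProvisioningEventForMachine is the single entry point that folds an incoming event into a machine's event container: it creates the container on first contact, runs the FSM, validates, trims history, and upserts. A minimal, hypothetical caller sketch follows; the `ProvisioningEvent` field names (`Time`, `Event`, `Message`) and the `ProvisioningEventPXEBooting` constant are assumptions based on the fsm/states package listed in the tree above, not shown in this dump:

```go
package example // hypothetical usage sketch, not part of the repository

import (
	"context"
	"log/slog"
	"time"

	"github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore"
	"github.com/metal-stack/metal-api/cmd/metal-api/internal/metal"
)

// reportPXEBoot folds a single PXE-boot event into a machine's event container.
// Field names on metal.ProvisioningEvent are assumptions for illustration.
func reportPXEBoot(ctx context.Context, log *slog.Logger, rs *datastore.RethinkStore, machineID string) error {
	event := &metal.ProvisioningEvent{
		Time:    time.Now(),
		Event:   metal.ProvisioningEventPXEBooting,
		Message: "machine requested kernel via PXE",
	}

	ec, err := rs.ProvisioningEventForMachine(ctx, log, event, machineID)
	if err != nil {
		return err
	}

	log.Info("provisioning event processed", "machine", ec.ID, "liveliness", ec.Liveliness)
	return nil
}
```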
--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/filesystem.go:
--------------------------------------------------------------------------------
package datastore

import "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal"

// FindFilesystemLayout returns a filesystemlayout for a given id.
func (rs *RethinkStore) FindFilesystemLayout(id string) (*metal.FilesystemLayout, error) {
	var fl metal.FilesystemLayout
	err := rs.findEntityByID(rs.filesystemLayoutTable(), &fl, id)
	if err != nil {
		return nil, err
	}
	return &fl, nil
}

// ListFilesystemLayouts returns all filesystemlayouts.
func (rs *RethinkStore) ListFilesystemLayouts() (metal.FilesystemLayouts, error) {
	fls := make(metal.FilesystemLayouts, 0)
	err := rs.listEntities(rs.filesystemLayoutTable(), &fls)
	return fls, err
}

// CreateFilesystemLayout creates a new filesystemlayout.
func (rs *RethinkStore) CreateFilesystemLayout(fl *metal.FilesystemLayout) error {
	return rs.createEntity(rs.filesystemLayoutTable(), fl)
}

// DeleteFilesystemLayout deletes a filesystemlayout.
func (rs *RethinkStore) DeleteFilesystemLayout(fl *metal.FilesystemLayout) error {
	return rs.deleteEntity(rs.filesystemLayoutTable(), fl)
}

// UpdateFilesystemLayout updates a filesystemlayout.
func (rs *RethinkStore) UpdateFilesystemLayout(oldFilesystemLayout *metal.FilesystemLayout, newFilesystemLayout *metal.FilesystemLayout) error {
	return rs.updateEntity(rs.filesystemLayoutTable(), newFilesystemLayout, oldFilesystemLayout)
}

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/health.go:
--------------------------------------------------------------------------------
package datastore

import (
	"context"
	"fmt"

	"github.com/metal-stack/metal-lib/pkg/healthstatus"
	r "gopkg.in/rethinkdb/rethinkdb-go.v6"
)

func (rs *RethinkStore) ServiceName() string {
	return "rethinkdb"
}

// Check implements the health interface and tests if the database is healthy.
func (rs *RethinkStore) Check(ctx context.Context) (healthstatus.HealthResult, error) {
	var version string

	returnStatus := func(err error) (healthstatus.HealthResult, error) {
		if err != nil {
			return healthstatus.HealthResult{
				Status: healthstatus.HealthStatusUnhealthy,
			}, err
		}

		return healthstatus.HealthResult{
			Status:  healthstatus.HealthStatusHealthy,
			Message: fmt.Sprintf("connected to rethinkdb version: %s", version),
		}, nil
	}

	t := r.Branch(
		rs.db().TableList().SetIntersection(r.Expr(tables)).Count().Eq(len(tables)),
		r.Expr(true),
		r.Error("required tables are missing"),
	)

	err := t.Exec(rs.session, r.ExecOpts{Context: ctx})
	if err != nil {
		return returnStatus(err)
	}

	cursor, err := r.DB("rethinkdb").Table("server_status").Field("process").Field("version").Run(rs.session, r.RunOpts{Context: ctx})
	if err != nil {
		return returnStatus(err)
	}

	err = cursor.One(&version)
	if err != nil {
		return returnStatus(err)
	}

	return returnStatus(err)
}

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/health_integration_test.go:
--------------------------------------------------------------------------------
//go:build integration
// +build integration

package datastore

import (
	"context"
	"testing"

	"github.com/metal-stack/metal-lib/pkg/healthstatus"
	"github.com/stretchr/testify/require"
)

func TestRethinkStore_Health(t *testing.T) {
	result, err := sharedDS.Check(context.Background())
	require.NoError(t, err)
	require.Equal(t, healthstatus.HealthStatusHealthy, result.Status)
	require.Contains(t, result.Message, "connected to rethinkdb version: rethinkdb")
}

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/integer_integration_test.go:
--------------------------------------------------------------------------------
//go:build integration
// +build integration

package datastore

import (
	"context"
	"log/slog"
	"os"
	"sync"

	"github.com/metal-stack/metal-api/cmd/metal-api/internal/metal"
	"github.com/metal-stack/metal-api/test"

	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestRethinkStore_AcquireRandomUniqueIntegerIntegration(t *testing.T) {
	container, c, err := test.StartRethink(t)
	require.NoError(t, err)
	defer func() {
		_ = container.Terminate(context.Background())
	}()

	log := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelError}))

	rs := New(log, c.IP+":"+c.Port, c.DB, c.User, c.Password)
	rs.VRFPoolRangeMin = 10000
	rs.VRFPoolRangeMax = 10010
	rs.ASNPoolRangeMin = 10000
	rs.ASNPoolRangeMax = 10010

	err = rs.Connect()
	require.NoError(t, err)
	err = rs.Initialize()
	require.NoError(t, err)

	pool := rs.GetVRFPool()
	got, err := pool.AcquireRandomUniqueInteger()
	require.NoError(t, err)
	assert.GreaterOrEqual(t, got, uint(rs.VRFPoolRangeMin))
	assert.LessOrEqual(t, got, uint(rs.VRFPoolRangeMax))
}

func TestRethinkStore_AcquireUniqueIntegerTwiceIntegration(t *testing.T) {
	container, c, err := test.StartRethink(t)
	require.NoError(t, err)
	defer func() {
		_ = container.Terminate(context.Background())
	}()
	log := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelError}))

	rs := New(log, c.IP+":"+c.Port, c.DB, c.User, c.Password)
	rs.VRFPoolRangeMin = 10000
	rs.VRFPoolRangeMax = 10010
	rs.ASNPoolRangeMin = 10000
	rs.ASNPoolRangeMax = 10010

	err = rs.Connect()
	require.NoError(t, err)
	err = rs.Initialize()
	require.NoError(t, err)

	pool := rs.GetVRFPool()
	got, err := pool.AcquireUniqueInteger(10000)
	require.NoError(t, err)
	assert.Equal(t, uint(10000), got)

	_, err = pool.AcquireUniqueInteger(10000)
	assert.True(t, metal.IsConflict(err))
}

func TestRethinkStore_AcquireUniqueIntegerPoolExhaustionIntegration(t *testing.T) {
	container, c, err := test.StartRethink(t)
	require.NoError(t, err)
	defer func() {
		_ = container.Terminate(context.Background())
	}()

	log := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelError}))

	rs := New(log, c.IP+":"+c.Port, c.DB, c.User, c.Password)
	rs.VRFPoolRangeMin = 10000
	rs.VRFPoolRangeMax = 10010
	rs.ASNPoolRangeMin = 10000
	rs.ASNPoolRangeMax = 10010

	err = rs.Connect()
	require.NoError(t, err)
	err = rs.Initialize()
	require.NoError(t, err)

	pool := rs.GetVRFPool()
	var wg sync.WaitGroup

	for i := rs.VRFPoolRangeMin; i <= rs.VRFPoolRangeMax; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			got, err := pool.AcquireRandomUniqueInteger()
			if err != nil {
				t.Fail()
			}
			assert.GreaterOrEqual(t, got, uint(rs.VRFPoolRangeMin))
			assert.LessOrEqual(t, got, uint(rs.VRFPoolRangeMax))
		}()
	}

	wg.Wait()

	_, err = pool.AcquireRandomUniqueInteger()
	assert.True(t, metal.IsInternal(err))
}

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/ip_test.go:
--------------------------------------------------------------------------------
package datastore

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/metal-stack/metal-api/cmd/metal-api/internal/metal"
	"github.com/metal-stack/metal-api/cmd/metal-api/internal/testdata"
)

func TestRethinkStore_FindIPByID(t *testing.T) {
	ds, mock := InitMockDB(t)
	testdata.InitMockDBData(mock)

	tests := []struct {
		name    string
		rs      *RethinkStore
		id      string
		want    *metal.IP
		wantErr bool
	}{
		{
			name:    "TestRethinkStore_FindIP Test 1",
			rs:      ds,
			id:      "1.2.3.4",
			want:    &testdata.IP1,
			wantErr: false,
		},
		{
			name:    "TestRethinkStore_FindIP Test 2",
			rs:      ds,
			id:      "2.3.4.5",
			want:    &testdata.IP2,
			wantErr: false,
		},
	}
	for i := range tests {
		tt := tests[i]
		t.Run(tt.name, func(t *testing.T) {
			got, err := tt.rs.FindIPByID(tt.id)
			if (err != nil) != tt.wantErr {
				t.Errorf("RethinkStore.FindIP() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if diff := cmp.Diff(got, tt.want); diff != "" {
				t.Errorf("RethinkStore.FindIP() mismatch (-want +got):\n%s", diff)
			}
		})
	}
}

func TestRethinkStore_ListIPs(t *testing.T) {
	ds, mock := InitMockDB(t)
	testdata.InitMockDBData(mock)

	tests := []struct {
		name    string
		rs      *RethinkStore
		want    metal.IPs
		wantErr bool
	}{
		{
			name:    "TestRethinkStore_ListIPs Test 1",
			rs:      ds,
			want:    testdata.TestIPs,
			wantErr: false,
		},
	}
	for i := range tests {
		tt := tests[i]
		t.Run(tt.name, func(t *testing.T) {
			got, err := tt.rs.ListIPs()
			if (err != nil) != tt.wantErr {
				t.Errorf("RethinkStore.ListIPs() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if diff := cmp.Diff(got, tt.want); diff != "" {
				t.Errorf("RethinkStore.ListIPs() mismatch (-want +got):\n%s", diff)
			}
		})
	}
}

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/migrate_test.go:
--------------------------------------------------------------------------------
package datastore

import (
	"reflect"
	"testing"
)

func TestMigrations_Between(t *testing.T) {
	type args struct {
		current int
		target  *int
	}
	tests := []struct {
		name    string
		ms      Migrations
		args    args
		want    Migrations
		wantErr bool
	}{
		{
			name: "no migrations is fine",
			ms:   []Migration{},
			args: args{
				current: 0,
			},
			want:    nil,
			wantErr: false,
		},
		{
			name: "get all migrations from 0, sorted",
			ms: []Migration{
				{
					Name:    "migration 4",
					Version: 4,
				},
				{
					Name:    "migration 2",
					Version: 2,
				},
				{
					Name:    "migration 1",
					Version: 1,
				},
			},
			args: args{
				current: 0,
			},
			want: []Migration{
				{
					Name:    "migration 1",
					Version: 1,
				},
				{
					Name:    "migration 2",
					Version: 2,
				},
				{
					Name:    "migration 4",
					Version: 4,
				},
			},
			wantErr: false,
		},
		{
			name: "get all migrations from 1, sorted",
			ms: []Migration{
				{
					Name:    "migration 4",
					Version: 4,
				},
				{
					Name:    "migration 2",
					Version: 2,
				},
				{
					Name:    "migration 1",
					Version: 1,
				},
			},
			args: args{
				current: 1,
			},
			want: []Migration{
				{
					Name:    "migration 2",
					Version: 2,
				},
				{
					Name:    "migration 4",
					Version: 4,
				},
			},
			wantErr: false,
		},
		{
			name: "get migrations up to target version, sorted",
			ms: []Migration{
				{
					Name:    "migration 4",
					Version: 4,
				},
				{
					Name:    "migration 2",
					Version: 2,
				},
				{
					Name:    "migration 1",
					Version: 1,
				},
			},
			args: args{
				current: 0,
				target:  intPtr(2),
			},
			want: []Migration{
				{
					Name:    "migration 1",
					Version: 1,
				},
				{
					Name:    "migration 2",
					Version: 2,
				},
			},
			wantErr: false,
		},
		{
			name: "error on unknown target version",
			ms: []Migration{
				{
					Name:    "migration 4",
					Version: 4,
				},
				{
					Name:    "migration 2",
					Version: 2,
				},
				{
					Name:    "migration 1",
					Version: 1,
				},
			},
			args: args{
				current: 0,
				target:  intPtr(3),
			},
			want:    nil,
			wantErr: true,
		},
	}
	for i := range tests {
		tt := tests[i]
		t.Run(tt.name, func(t *testing.T) {
			got, err := tt.ms.Between(tt.args.current, tt.args.target)
			if (err != nil) != tt.wantErr {
				t.Errorf("Migrations.Between() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Migrations.Between() = %v, want %v", got, tt.want)
			}
		})
	}
}

func intPtr(i int) *int {
	return &i
}

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/migrations/01_remove_wait_table.go:
--------------------------------------------------------------------------------
package migrations

import (
	r "gopkg.in/rethinkdb/rethinkdb-go.v6"

	"github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore"
)

func init() {
	datastore.MustRegisterMigration(datastore.Migration{
		Name:    "remove wait table (not used anymore since grpc wait server was introduced)",
		Version: 1,
		Up: func(db *r.Term, session r.QueryExecutor, rs *datastore.RethinkStore) error {
			res, err := db.TableList().Contains("wait").Run(session)
			if err != nil {
				return err
			}
			defer res.Close()

			var exists bool
			err = res.One(&exists)
			if err != nil {
				return err
			}

			if exists {
				_, err = db.TableDrop("wait").RunWrite(session)
			}
			return err
		},
	})
}

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/migrations/02_ip_uuids.go:
--------------------------------------------------------------------------------
package migrations

import (
	r "gopkg.in/rethinkdb/rethinkdb-go.v6"

	"github.com/google/uuid"
	"github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore"
)

func init() {
	datastore.MustRegisterMigration(datastore.Migration{
		Name:    "generate allocation uuids for new ip address field (#70)",
		Version: 2,
		Up: func(db *r.Term, session r.QueryExecutor, rs *datastore.RethinkStore) error {
			ips, err := rs.ListIPs()
			if err != nil {
				return err
			}

			for i := range ips {
				old := ips[i] // avoids implicit memory aliasing
				if old.AllocationUUID != "" {
					continue
				}

				u, err := uuid.NewRandom()
				if err != nil {
					return err
				}

				n := old
				n.AllocationUUID = u.String()
				err = rs.UpdateIP(&old, &n)
				if err != nil {
					return err
				}
			}
			return nil
		},
	})
}

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/migrations/03_machine_role.go:
--------------------------------------------------------------------------------
package migrations

import (
	r "gopkg.in/rethinkdb/rethinkdb-go.v6"

	"github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore"
	"github.com/metal-stack/metal-api/cmd/metal-api/internal/metal"
)

func init() {
	datastore.MustRegisterMigration(datastore.Migration{
		Name:    "introduction of machine roles (#24)",
		Version: 3,
		Up: func(db *r.Term, session r.QueryExecutor, rs *datastore.RethinkStore) error {
			ms, err := rs.ListMachines()
			if err != nil {
				return err
			}

			for i := range ms {
				old := ms[i]
				if old.Allocation == nil {
					continue
				}

				n := old

				if isFirewall(n.Allocation.MachineNetworks) {
					n.Allocation.Role = metal.RoleFirewall
				} else {
					n.Allocation.Role = metal.RoleMachine
				}

				err = rs.UpdateMachine(&old, &n)
				if err != nil {
					return err
				}
			}
			return nil
		},
	})
}

func isFirewall(nws []*metal.MachineNetwork) bool {
	// only firewalls are part of the underlay network, so that is a unique and sufficient indicator
	for _, n := range nws {
		if n.Underlay {
			return true
		}
	}
	return false
}

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/migrations/04_provisioning_event_validation.go:
--------------------------------------------------------------------------------
package migrations

import (
	"fmt"
	"sort"

	r "gopkg.in/rethinkdb/rethinkdb-go.v6"

	"github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore"
)

func init() {
	datastore.MustRegisterMigration(datastore.Migration{
		Name:    "introduction of provisioning event validation (#265)",
		Version: 4,
		Up: func(db *r.Term, session r.QueryExecutor, rs *datastore.RethinkStore) error {
			ecs, err := rs.ListProvisioningEventContainers()
			if err != nil {
				return err
			}

			for i := range ecs {
				old := ecs[i]
				if old.Validate() == nil {
					continue
				}

				n := old

				sort.Slice(n.Events, func(i, j int) bool {
					return n.Events[i].Time.After(n.Events[j].Time)
				})

				n.LastEventTime = &n.Events[0].Time

				if n.Validate() != nil {
					return fmt.Errorf("unable to fix invalid event container: %s", n.ID)
				}

				if err := rs.UpsertProvisioningEventContainer(&n); err != nil {
					return fmt.Errorf("unable to upsert event container: %w", err)
				}
			}
			return nil
		},
	})
}

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/migrations/05_allocation_uuids.go:
--------------------------------------------------------------------------------
package migrations

import (
	r "gopkg.in/rethinkdb/rethinkdb-go.v6"

	"github.com/google/uuid"
	"github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore"
)

func init() {
	datastore.MustRegisterMigration(datastore.Migration{
		Name:    "generate allocation uuids for already allocated machines",
		Version: 5,
		Up: func(db *r.Term, session r.QueryExecutor, rs *datastore.RethinkStore) error {
			machines, err := rs.ListMachines()
			if err != nil {
				return err
			}

			for _, m := range machines {
				m := m

				if m.Allocation == nil {
					continue
				}

				if m.Allocation.UUID != "" {
					continue
				}

				newMachine := m
				newMachine.Allocation.UUID = uuid.New().String()

				err = rs.UpdateMachine(&m, &newMachine)
				if err != nil {
					return err
				}
			}
			return nil
		},
	})
}

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/migrations/06_additional_announcable_cidrs.go:
--------------------------------------------------------------------------------
package migrations

import (
	r "gopkg.in/rethinkdb/rethinkdb-go.v6"

	"github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore"
)

func init() {
	datastore.MustRegisterMigration(datastore.Migration{
		Name:    "migrate super tenant networks to contain additionannouncablecidrs",
		Version: 6,
		Up: func(db *r.Term, session r.QueryExecutor, rs *datastore.RethinkStore) error {
			nws, err := rs.ListNetworks()
			if err != nil {
				return err
			}

			for _, old := range nws {
				if !old.PrivateSuper {
					continue
				}
				new := old

				if len(old.AdditionalAnnouncableCIDRs) == 0 {
					new.AdditionalAnnouncableCIDRs = []string{
						// This was the previous hard coded default in metal-core
						"10.240.0.0/12",
					}
				}

				err = rs.UpdateNetwork(&old, &new)
				if err != nil {
					return err
				}
			}
			return nil
		},
	})
}

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/migrations/07_size_reservations_table.go:
--------------------------------------------------------------------------------
package migrations

import (
	"fmt"

	r "gopkg.in/rethinkdb/rethinkdb-go.v6"

	"github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore"
	"github.com/metal-stack/metal-api/cmd/metal-api/internal/metal"
)

type OldReservation_Mig07 struct {
	Amount       int               `rethinkdb:"amount" json:"amount"`
	Description  string            `rethinkdb:"description" json:"description"`
	ProjectID    string            `rethinkdb:"projectid" json:"projectid"`
	PartitionIDs []string          `rethinkdb:"partitionids" json:"partitionids"`
	Labels       map[string]string `rethinkdb:"labels" json:"labels"`
}

type OldReservations_Mig07 []OldReservation_Mig07

type OldSize_Mig07 struct {
	metal.Base
	Reservations OldReservations_Mig07 `rethinkdb:"reservations" json:"reservations"`
}

func init() {
	getOldSizes := func(db *r.Term, session r.QueryExecutor) ([]OldSize_Mig07, error) {
		res, err := db.Table("size").Run(session)
		if err != nil {
			return nil, err
		}
		defer res.Close()

		var entities []OldSize_Mig07
		err = res.All(&entities)
		if err != nil {
			return nil, fmt.Errorf("cannot fetch all entities: %w", err)
		}

		return entities, nil
	}

	datastore.MustRegisterMigration(datastore.Migration{
		Name:    "migrate size reservations to dedicated table",
		Version: 7,
		Up: func(db *r.Term, session r.QueryExecutor, rs *datastore.RethinkStore) error {
			oldSizes, err := getOldSizes(db, session)
			if err != nil {
				return err
			}

			for _, old := range oldSizes {
				for _, rv := range old.Reservations {
					err = rs.CreateSizeReservation(&metal.SizeReservation{
						Base: metal.Base{
							ID:          "",
							Name:        "",
							Description: rv.Description,
						},
						SizeID:       old.ID,
						Amount:       rv.Amount,
						ProjectID:    rv.ProjectID,
						PartitionIDs: rv.PartitionIDs,
						Labels:       rv.Labels,
					})
					if err != nil {
						return err
					}
				}
			}

			// now remove the old field

			_, err = db.Table("size").Replace(func(row r.Term) r.Term {
				return row.Without("reservations")
			}).RunWrite(session)
			if err != nil {
				return err
			}

			return nil
		},
	})
}

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/migrations/08_childprefixlength.go:
--------------------------------------------------------------------------------
package migrations

import (
	"net/netip"

	r "gopkg.in/rethinkdb/rethinkdb-go.v6"

	"github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore"
	"github.com/metal-stack/metal-api/cmd/metal-api/internal/metal"
)

func init() {
	type tmpPartition struct {
		PrivateNetworkPrefixLength uint8 `rethinkdb:"privatenetworkprefixlength"`
	}
	datastore.MustRegisterMigration(datastore.Migration{
		Name:    "migrate partition.childprefixlength to tenant super network",
		Version: 8,
		Up: func(db *r.Term, session r.QueryExecutor, rs *datastore.RethinkStore) error {
			nws, err := rs.ListNetworks()
			if err != nil {
				return err
			}

			for _, old := range nws {
				cursor, err := db.Table("partition").Get(old.PartitionID).Run(session)
				if err != nil {
					return err
				}
				if cursor.IsNil() {
					_ = cursor.Close()
					continue
				}
				var partition tmpPartition
				err = cursor.One(&partition)
				if err != nil {
					_ = cursor.Close()
					return err
				}
				err = cursor.Close()
				if err != nil {
					return err
				}
				new := old

				var (
					defaultChildPrefixLength = metal.ChildPrefixLength{}
				)
				for _, prefix := range new.Prefixes {
					parsed, err := netip.ParsePrefix(prefix.String())
					if err != nil {
						return err
					}
					if parsed.Addr().Is4() {
						defaultChildPrefixLength[metal.IPv4AddressFamily] = 22
					}
					if parsed.Addr().Is6() {
						defaultChildPrefixLength[metal.IPv6AddressFamily] = 64
					}
				}

				if new.PrivateSuper {
					if new.DefaultChildPrefixLength == nil {
						new.DefaultChildPrefixLength = metal.ChildPrefixLength{}
					}
					if partition.PrivateNetworkPrefixLength > 0 {
						defaultChildPrefixLength[metal.IPv4AddressFamily] = partition.PrivateNetworkPrefixLength
					}
					new.DefaultChildPrefixLength = defaultChildPrefixLength
				}
				err = rs.UpdateNetwork(&old, &new)
				if err != nil {
					return err
				}
			}

			_, err = db.Table("partition").Replace(r.Row.Without("privatenetworkprefixlength")).RunWrite(session)
			return err
		},
	})
}

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/migrations/doc.go:
--------------------------------------------------------------------------------
// Package migrations contains migration functions for migrating the RethinkDB.
//
// Migrating RethinkDB is a bit different from migrating regular SQL databases because
// the clients define the schema, not the server.
//
// Currently, migrations are only intended to be run *after* the rollout of the new clients.
// This prevents older clients from writing their old schema into the database after the
// migration was applied. This approach allows us to apply zero-downtime migrations for
// most of the use-cases we have seen in the past.
//
// There are probably scenarios where it makes sense to migrate *before* instance
// rollout and stop the instances before the migration (downtime migration), but for now
// this use-case has not been implemented, and it would likely require more complex
// deployment orchestration to apply a migration.
//
// We also do not support down migrations for the time being because they also make
// things more complicated than they need to be.
//
// Please ensure that your migrations are idempotent (they need to work for existing and
// for fresh deployments). Check the state before modifying it.
package migrations
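The migration files above all follow the same registration pattern. For reference, here is a minimal, hypothetical skeleton (name, version number, and field name are invented for illustration) that follows the idempotency rule described in the package documentation:

```go
package migrations

import (
	r "gopkg.in/rethinkdb/rethinkdb-go.v6"

	"github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore"
)

func init() {
	datastore.MustRegisterMigration(datastore.Migration{
		// Name and Version are invented for illustration; a real migration
		// would use the next free version number in this package.
		Name:    "example: drop an obsolete field from the machine table",
		Version: 99,
		Up: func(db *r.Term, session r.QueryExecutor, rs *datastore.RethinkStore) error {
			// Idempotent: Without() on documents that lack the field is a
			// no-op, so re-running this migration (or running it against a
			// fresh database) does not fail.
			_, err := db.Table("machine").Replace(func(row r.Term) r.Term {
				return row.Without("obsoletefield")
			}).RunWrite(session)
			return err
		},
	})
}
```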
--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/network_test.go:
--------------------------------------------------------------------------------
package datastore

import (
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/metal-stack/metal-api/cmd/metal-api/internal/metal"
	"github.com/metal-stack/metal-api/cmd/metal-api/internal/testdata"
)

func TestRethinkStore_FindNetworkByID(t *testing.T) {
	ds, mock := InitMockDB(t)
	testdata.InitMockDBData(mock)

	tests := []struct {
		name    string
		rs      *RethinkStore
		id      string
		want    *metal.Network
		wantErr bool
	}{
		{
			name:    "TestRethinkStore_FindNetworkByID Test 1",
			rs:      ds,
			id:      "1",
			want:    &testdata.Nw1,
			wantErr: false,
		},
		{
			name:    "TestRethinkStore_FindNetworkByID Test 2",
			rs:      ds,
			id:      "2",
			want:    &testdata.Nw2,
			wantErr: false,
		},
	}
	for i := range tests {
		tt := tests[i]
		t.Run(tt.name, func(t *testing.T) {
			got, err := tt.rs.FindNetworkByID(tt.id)
			if (err != nil) != tt.wantErr {
				t.Errorf("RethinkStore.FindNetworkByID() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if diff := cmp.Diff(got, tt.want); diff != "" {
				t.Errorf("RethinkStore.FindNetworkByID() mismatch (-want +got):\n%s", diff)
			}
		})
	}
}

func TestRethinkStore_Mock_ListNetworks(t *testing.T) {
	ds, mock := InitMockDB(t)
	testdata.InitMockDBData(mock)

	tests := []struct {
		name    string
		rs      *RethinkStore
		want    metal.Networks
		wantErr bool
	}{
		{
			name:    "TestRethinkStore_ListNetworks Test 1",
			rs:      ds,
			want:    testdata.TestNetworks,
			wantErr: false,
		},
	}
	for i := range tests {
		tt := tests[i]
		t.Run(tt.name, func(t *testing.T) {
			got, err := tt.rs.ListNetworks()
			if (err != nil) != tt.wantErr {
				t.Errorf("RethinkStore.ListNetworks() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if diff := cmp.Diff(got, tt.want); diff != "" {
				t.Errorf("RethinkStore.ListNetworks() mismatch (-want +got):\n%s", diff)
			}
		})
	}
}

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/partition.go:
--------------------------------------------------------------------------------
package datastore

import (
	"github.com/metal-stack/metal-api/cmd/metal-api/internal/metal"
)

// FindPartition returns a partition for the given id.
func (rs *RethinkStore) FindPartition(id string) (*metal.Partition, error) {
	var p metal.Partition
	err := rs.findEntityByID(rs.partitionTable(), &p, id)
	if err != nil {
		return nil, err
	}
	return &p, nil
}

// ListPartitions returns all partitions.
func (rs *RethinkStore) ListPartitions() (metal.Partitions, error) {
	ps := make(metal.Partitions, 0)
	err := rs.listEntities(rs.partitionTable(), &ps)
	return ps, err
}

// CreatePartition creates a new partition.
func (rs *RethinkStore) CreatePartition(p *metal.Partition) error {
	return rs.createEntity(rs.partitionTable(), p)
}

// DeletePartition deletes a partition.
func (rs *RethinkStore) DeletePartition(p *metal.Partition) error {
	return rs.deleteEntity(rs.partitionTable(), p)
}

// UpdatePartition updates a partition.
func (rs *RethinkStore) UpdatePartition(oldPartition *metal.Partition, newPartition *metal.Partition) error {
	return rs.updateEntity(rs.partitionTable(), newPartition, oldPartition)
}

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/rethinkdb_test.go:
--------------------------------------------------------------------------------
package datastore

import (
	"log/slog"
	"os"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/metal-stack/metal-api/cmd/metal-api/internal/testdata"
	"github.com/metal-stack/metal-lib/pkg/testcommon"
)

func TestNew(t *testing.T) {
	logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelError}))
	type args struct {
		log    *slog.Logger
		dbhost string
		dbname string
		dbuser string
		dbpass string
	}
	tests := []struct {
		name string
		args args
		want *RethinkStore
	}{
		{
			name: "TestNew Test 1",
			args: args{
				log:    logger,
				dbhost: "dbhost",
				dbname: "dbname",
				dbuser: "dbuser",
				dbpass: "password",
			},
			want: &RethinkStore{
				log: logger,

				dbhost: "dbhost",
				dbname: "dbname",
				dbuser: "dbuser",
				dbpass: "password",

				VRFPoolRangeMin: DefaultVRFPoolRangeMin,
				VRFPoolRangeMax: DefaultVRFPoolRangeMax,
				ASNPoolRangeMin: DefaultASNPoolRangeMin,
				ASNPoolRangeMax: DefaultASNPoolRangeMax,

				sharedMutexCheckInterval: defaultSharedMutexCheckInterval,
			},
		},
	}
	for i := range tests {
		tt := tests[i]
		t.Run(tt.name, func(t *testing.T) {
			got := New(tt.args.log, tt.args.dbhost, tt.args.dbname, tt.args.dbuser, tt.args.dbpass)
			if diff := cmp.Diff(got, tt.want, testcommon.IgnoreUnexported(), cmpopts.IgnoreTypes(slog.Logger{})); diff != "" {
				t.Errorf("New() mismatch (-want +got):\n%s", diff)
			}
		})
	}
}

func TestRethinkStore_Close(t *testing.T) {
	ds, mock := InitMockDB(t)
	testdata.InitMockDBData(mock)

	tests := []struct {
		name    string
		rs      *RethinkStore
		wantErr bool
	}{
		{
			name:    "TestRethinkStore_Close Test 1",
			rs:      ds,
			wantErr: false,
		},
	}
	for i := range tests {
		tt := tests[i]
		t.Run(tt.name, func(t *testing.T) {
			if err := tt.rs.Close(); (err != nil) != tt.wantErr {
				t.Errorf("RethinkStore.Close() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}

--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/shared_mutex_test.go:
--------------------------------------------------------------------------------
//go:build integration
// +build integration

package datastore

import (
	"context"
	"log/slog"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	r "gopkg.in/rethinkdb/rethinkdb-go.v6"
)

func newLockOptAcquireTimeout(t time.Duration) *lockOptAcquireTimeout {
	return &lockOptAcquireTimeout{timeout: t}
}

func Test_sharedMutex_reallyLocking(t *testing.T) {
	defer mutexCleanup(t)
	ctx := context.Background()
	expiration := 10 * time.Second

	err := sharedDS.sharedMutex.lock(ctx, "test", expiration, newLockOptAcquireTimeout(10*time.Millisecond))
	require.NoError(t, err)

	err = sharedDS.sharedMutex.lock(ctx, "test", expiration, newLockOptAcquireTimeout(5*time.Millisecond))
	require.Error(t, err)
	require.ErrorContains(t, err, "unable to acquire mutex")

	err = sharedDS.sharedMutex.lock(ctx, "test2", expiration, newLockOptAcquireTimeout(10*time.Millisecond))
	require.NoError(t, err)

	err = sharedDS.sharedMutex.lock(ctx, "test", expiration, newLockOptAcquireTimeout(10*time.Millisecond))
	require.Error(t, err)
	require.ErrorContains(t, err, "unable to acquire mutex")

	sharedDS.sharedMutex.unlock(ctx, "test")

	err = sharedDS.sharedMutex.lock(ctx, "test2", expiration, newLockOptAcquireTimeout(10*time.Millisecond))
	require.Error(t, err)
	require.ErrorContains(t, err, "unable to acquire mutex")

	err = sharedDS.sharedMutex.lock(ctx, "test", expiration, newLockOptAcquireTimeout(10*time.Millisecond))
	require.NoError(t, err)
}

func Test_sharedMutex_acquireAfterRelease(t *testing.T) {
	defer mutexCleanup(t)
	ctx := context.Background()

	err := sharedDS.sharedMutex.lock(ctx, "test", 3*time.Second, newLockOptAcquireTimeout(10*time.Millisecond))
	require.NoError(t, err)

	var wg sync.WaitGroup
	wg.Add(1)

	go func() {
		defer wg.Done()

		err = sharedDS.sharedMutex.lock(ctx, "test", 1*time.Second, newLockOptAcquireTimeout(3*time.Second))
		assert.NoError(t, err)
	}()

	time.Sleep(1 * time.Second)

	sharedDS.sharedMutex.unlock(ctx, "test")

	wg.Wait()
}

func Test_sharedMutex_expires(t *testing.T) {
	defer mutexCleanup(t)
	ctx := context.Background()

	err := sharedDS.sharedMutex.lock(ctx, "test", 2*time.Second, newLockOptAcquireTimeout(10*time.Millisecond))
	require.NoError(t, err)

	err = sharedDS.sharedMutex.lock(ctx, "test", 2*time.Second, newLockOptAcquireTimeout(10*time.Millisecond))
	require.Error(t, err)
	require.ErrorContains(t, err, "unable to acquire mutex")

	done := make(chan bool)
	go func() {
		err = sharedDS.sharedMutex.lock(ctx, "test", 2*time.Second, newLockOptAcquireTimeout(2*sharedDS.sharedMutex.checkinterval))
		if err != nil {
			t.Errorf("mutex was not acquired: %s", err)
		}
		done <- true
	}()

	timeoutCtx, cancel := context.WithTimeout(context.Background(), 2*sharedDS.sharedMutex.checkinterval)
	defer cancel()

	select {
	case <-done:
	case <-timeoutCtx.Done():
		t.Errorf("shared mutex has not expired")
	}
}

func Test_sharedMutex_stop(t *testing.T) {
	defer mutexCleanup(t)
	ctx, cancel := context.WithCancel(context.Background())

	mutex, err := newSharedMutex(context.Background(), slog.Default(), sharedDS.dbsession)
	require.NoError(t, err)

	done := make(chan bool)

	go func() {
		mutex.expireloop(ctx)
		done <- true
	}()

	cancel()

	timeoutCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	select {
	case <-done:
	case <-timeoutCtx.Done():
		t.Errorf("shared mutex expiration did not stop")
	}
}

func mutexCleanup(t *testing.T) {
	_, err := r.Table("sharedmutex").Delete().RunWrite(sharedDS.dbsession)
	require.NoError(t, err)
}
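The tests above spell out the shared mutex contract: a named lock with an expiration, a configurable acquire timeout, and automatic release when the holder's expiration elapses. A condensed, hypothetical usage sketch inside the datastore package (the `newLockOptAcquireTimeout` helper is taken from the test file above; the wrapper function itself is invented):

```go
// withLock is an illustrative sketch, not code from the repository: it
// acquires the named lock with an expiration, runs the critical section,
// and releases the lock afterwards.
func withLock(ctx context.Context, rs *RethinkStore, key string, critical func() error) error {
	err := rs.sharedMutex.lock(ctx, key, 10*time.Second, newLockOptAcquireTimeout(3*time.Second))
	if err != nil {
		// e.g. "unable to acquire mutex" when another instance holds the key
		return err
	}
	defer rs.sharedMutex.unlock(ctx, key)

	return critical()
}
```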
--------------------------------------------------------------------------------
/cmd/metal-api/internal/datastore/size.go:
--------------------------------------------------------------------------------
package datastore

import (
	"errors"

	"github.com/metal-stack/metal-api/cmd/metal-api/internal/metal"
	r "gopkg.in/rethinkdb/rethinkdb-go.v6"
)

// SizeSearchQuery can be used to search sizes.
type SizeSearchQuery struct {
	ID          *string           `json:"id" optional:"true"`
	Name        *string           `json:"name" optional:"true"`
	Labels      map[string]string `json:"labels" optional:"true"`
	Reservation Reservation       `json:"reservation" optional:"true"`
}

type Reservation struct {
	Partition *string `json:"partition" optional:"true"`
	Project   *string `json:"project" optional:"true"`
}

// generateTerm generates the size search query term.
func (s *SizeSearchQuery) generateTerm(rs *RethinkStore) *r.Term {
	q := *rs.sizeTable()

	if s.ID != nil {
		q = q.Filter(func(row r.Term) r.Term {
			return row.Field("id").Eq(*s.ID)
		})
	}

	if s.Name != nil {
		q = q.Filter(func(row r.Term) r.Term {
			return row.Field("name").Eq(*s.Name)
		})
	}

	for k, v := range s.Labels {
		k := k
		v := v
		q = q.Filter(func(row r.Term) r.Term {
			return row.Field("labels").Field(k).Eq(v)
		})
	}

	if s.Reservation.Project != nil {
		q = q.Filter(func(row r.Term) r.Term {
			return row.Field("reservations").Contains(func(p r.Term) r.Term {
				return p.Field("projectid").Eq(r.Expr(*s.Reservation.Project))
			})
		})
	}

	if s.Reservation.Partition != nil {
		q = q.Filter(func(row r.Term) r.Term {
			return row.Field("reservations").Contains(func(p r.Term) r.Term {
				return p.Field("partitionids").Contains(r.Expr(*s.Reservation.Partition))
			})
		})
	}

	return &q
}

// FindSize returns a size for a given id.
func (rs *RethinkStore) FindSize(id string) (*metal.Size, error) {
	var s metal.Size
	err := rs.findEntityByID(rs.sizeTable(), &s, id)
	if err != nil {
		return nil, err
	}
	return &s, nil
}

// SearchSizes returns the result of the sizes search request query.
func (rs *RethinkStore) SearchSizes(q *SizeSearchQuery, sizes *metal.Sizes) error {
	return rs.searchEntities(q.generateTerm(rs), sizes)
}

// ListSizes returns all sizes.
func (rs *RethinkStore) ListSizes() (metal.Sizes, error) {
	szs := make(metal.Sizes, 0)
	err := rs.listEntities(rs.sizeTable(), &szs)
	return szs, err
}

// CreateSize creates a new size.
func (rs *RethinkStore) CreateSize(size *metal.Size) error {
	return rs.createEntity(rs.sizeTable(), size)
}

// DeleteSize deletes a size.
func (rs *RethinkStore) DeleteSize(size *metal.Size) error {
	return rs.deleteEntity(rs.sizeTable(), size)
}

// UpdateSize updates a size.
func (rs *RethinkStore) UpdateSize(oldSize *metal.Size, newSize *metal.Size) error {
	return rs.updateEntity(rs.sizeTable(), newSize, oldSize)
}

// FromHardware tries to find a size which matches the given hardware specs.
func (rs *RethinkStore) FromHardware(hw metal.MachineHardware) (*metal.Size, error) {
	sz, err := rs.ListSizes()
	if err != nil {
		return nil, err
	}
	if len(sz) < 1 {
		// this should not happen, so we do not return a notfound
		return nil, errors.New("no sizes found in database")
	}
	var sizes metal.Sizes
	for _, s := range sz {
		if len(s.Constraints) < 1 {
			rs.log.Error("missing constraints", "size", s)
			continue
		}
		sizes = append(sizes, s)
	}
	return sizes.FromHardware(hw)
}
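SizeSearchQuery composes its filters conjunctively: every set field adds another Filter term to the ReQL query. A hypothetical caller sketch (the label key and helper function are invented for illustration; the query and search API are as defined above):

```go
package example // illustrative sketch, not part of the repository

import (
	"github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore"
	"github.com/metal-stack/metal-api/cmd/metal-api/internal/metal"
)

// gpuSizesReservedFor finds all sizes carrying an invented example label and
// a reservation for the given project. All concrete values are made up.
func gpuSizesReservedFor(rs *datastore.RethinkStore, projectID string) (metal.Sizes, error) {
	var sizes metal.Sizes
	q := &datastore.SizeSearchQuery{
		Labels: map[string]string{"size.metal-stack.io/gpu": "true"},
		Reservation: datastore.Reservation{
			Project: &projectID,
		},
	}
	if err := rs.SearchSizes(q, &sizes); err != nil {
		return nil, err
	}
	return sizes, nil
}
```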
104 | func (rs *RethinkStore) FromHardware(hw metal.MachineHardware) (*metal.Size, error) { 105 | sz, err := rs.ListSizes() 106 | if err != nil { 107 | return nil, err 108 | } 109 | if len(sz) < 1 { 110 | // this should not happen, so we do not return a notfound 111 | return nil, errors.New("no sizes found in database") 112 | } 113 | var sizes metal.Sizes 114 | for _, s := range sz { 115 | if len(s.Constraints) < 1 { 116 | rs.log.Error("missing constraints", "size", s) 117 | continue 118 | } 119 | sizes = append(sizes, s) 120 | } 121 | return sizes.FromHardware(hw) 122 | } 123 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/datastore/size_reservation.go: -------------------------------------------------------------------------------- 1 | package datastore 2 | 3 | import ( 4 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 5 | r "gopkg.in/rethinkdb/rethinkdb-go.v6" 6 | ) 7 | 8 | // SizeReservationSearchQuery can be used to search size reservations. 9 | type SizeReservationSearchQuery struct { 10 | ID *string `json:"id" optional:"true"` 11 | SizeID *string `json:"sizeid" optional:"true"` 12 | Name *string `json:"name" optional:"true"` 13 | Labels map[string]string `json:"labels" optional:"true"` 14 | Partition *string `json:"partition" optional:"true"` 15 | Project *string `json:"project" optional:"true"` 16 | } 17 | 18 | func (s *SizeReservationSearchQuery) generateTerm(rs *RethinkStore) *r.Term { 19 | q := *rs.sizeReservationTable() 20 | 21 | if s.ID != nil { 22 | q = q.Filter(func(row r.Term) r.Term { 23 | return row.Field("id").Eq(*s.ID) 24 | }) 25 | } 26 | 27 | if s.SizeID != nil { 28 | q = q.Filter(func(row r.Term) r.Term { 29 | return row.Field("sizeid").Eq(*s.SizeID) 30 | }) 31 | } 32 | 33 | if s.Name != nil { 34 | q = q.Filter(func(row r.Term) r.Term { 35 | return row.Field("name").Eq(*s.Name) 36 | }) 37 | } 38 | 39 | for k, v := range s.Labels { 40 | k := k 41 | v := v 42 | q = q.Filter(func(row r.Term) r.Term { 43 | return row.Field("labels").Field(k).Eq(v) 44 | }) 45 | } 46 | 47 | if s.Project != nil { 48 | q = q.Filter(func(row r.Term) r.Term { 49 | return row.Field("projectid").Eq(*s.Project) 50 | }) 51 | } 52 | 53 | if s.Partition != nil { 54 | q = q.Filter(func(row r.Term) r.Term { 55 | return row.Field("partitionids").Contains(r.Expr(*s.Partition)) 56 | }) 57 | } 58 | 59 | return &q 60 | } 61 | 62 | func (rs *RethinkStore) FindSizeReservation(id string) (*metal.SizeReservation, error) { 63 | var s metal.SizeReservation 64 | err := rs.findEntityByID(rs.sizeReservationTable(), &s, id) 65 | if err != nil { 66 | return nil, err 67 | } 68 | return &s, nil 69 | } 70 | 71 | func (rs *RethinkStore) SearchSizeReservations(q *SizeReservationSearchQuery, rvs *metal.SizeReservations) error { 72 | return rs.searchEntities(q.generateTerm(rs), rvs) 73 | } 74 | 75 | func (rs *RethinkStore) ListSizeReservations() (metal.SizeReservations, error) { 76 | szs := make(metal.SizeReservations, 0) 77 | err := rs.listEntities(rs.sizeReservationTable(), &szs) 78 | return szs, err 79 | } 80 | 81 | func (rs *RethinkStore) CreateSizeReservation(rv *metal.SizeReservation) error { 82 | return rs.createEntity(rs.sizeReservationTable(), rv) 83 | } 84 | 85 | func (rs *RethinkStore) DeleteSizeReservation(rv *metal.SizeReservation) error { 86 | return rs.deleteEntity(rs.sizeReservationTable(), rv) 87 | } 88 | 89 | func (rs *RethinkStore) UpdateSizeReservation(oldRv *metal.SizeReservation, newRv *metal.SizeReservation) error { 90 | return
rs.updateEntity(rs.sizeReservationTable(), newRv, oldRv) 91 | } 92 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/datastore/size_test.go: -------------------------------------------------------------------------------- 1 | package datastore 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 8 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/testdata" 9 | ) 10 | 11 | func TestRethinkStore_FromHardware(t *testing.T) { 12 | ds, mock := InitMockDB(t) 13 | testdata.InitMockDBData(mock) 14 | 15 | tests := []struct { 16 | name string 17 | rs *RethinkStore 18 | hw metal.MachineHardware 19 | want string 20 | wantErr bool 21 | }{ 22 | { 23 | name: "determine size from machine hardware", 24 | rs: ds, 25 | hw: testdata.MachineHardware1, 26 | want: testdata.Sz1.ID, 27 | wantErr: false, 28 | }, 29 | } 30 | for i := range tests { 31 | tt := tests[i] 32 | t.Run(tt.name, func(t *testing.T) { 33 | got, err := tt.rs.FromHardware(tt.hw) 34 | if (err != nil) != tt.wantErr { 35 | t.Errorf("RethinkStore.FromHardware() error = %v, wantErr %v", err, tt.wantErr) 36 | return 37 | } 38 | if !reflect.DeepEqual(got.ID, tt.want) { 39 | t.Errorf("RethinkStore.FromHardware() = %v, want %v", got.ID, tt.want) 40 | } 41 | }) 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/datastore/sizeimageconstraint.go: -------------------------------------------------------------------------------- 1 | package datastore 2 | 3 | import "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 4 | 5 | // FindSizeImageConstraint returns a SizeImageConstraint for a given size. 6 | func (rs *RethinkStore) FindSizeImageConstraint(sizeID string) (*metal.SizeImageConstraint, error) { 7 | var ic metal.SizeImageConstraint 8 | err := rs.findEntityByID(rs.sizeImageConstraintTable(), &ic, sizeID) 9 | if err != nil { 10 | return nil, err 11 | } 12 | return &ic, nil 13 | } 14 | 15 | // ListSizeImageConstraints returns all SizeImageConstraints. 16 | func (rs *RethinkStore) ListSizeImageConstraints() (metal.SizeImageConstraints, error) { 17 | fls := make(metal.SizeImageConstraints, 0) 18 | err := rs.listEntities(rs.sizeImageConstraintTable(), &fls) 19 | return fls, err 20 | } 21 | 22 | // CreateSizeImageConstraint creates a new SizeImageConstraint. 23 | func (rs *RethinkStore) CreateSizeImageConstraint(ic *metal.SizeImageConstraint) error { 24 | return rs.createEntity(rs.sizeImageConstraintTable(), ic) 25 | } 26 | 27 | // DeleteSizeImageConstraint deletes a SizeImageConstraint. 28 | func (rs *RethinkStore) DeleteSizeImageConstraint(ic *metal.SizeImageConstraint) error { 29 | return rs.deleteEntity(rs.sizeImageConstraintTable(), ic) 30 | } 31 | 32 | // UpdateSizeImageConstraint updates a SizeImageConstraint.
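// An editorial usage sketch (not part of the source) for the reservation
// search above: SearchSizeReservations narrows reservations to a project and
// a partition. The store rs and both ids are assumptions for illustration only.
//
//	func reservationsInPartition(rs *datastore.RethinkStore) (metal.SizeReservations, error) {
//		project, partition := "project-1", "partition-a"
//		var rvs metal.SizeReservations
//		err := rs.SearchSizeReservations(&datastore.SizeReservationSearchQuery{
//			Project:   &project,
//			Partition: &partition,
//		}, &rvs)
//		return rvs, err
//	}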
33 | func (rs *RethinkStore) UpdateSizeImageConstraint(oldSizeImageConstraint *metal.SizeImageConstraint, newSizeImageConstraint *metal.SizeImageConstraint) error { 34 | return rs.updateEntity(rs.sizeImageConstraintTable(), newSizeImageConstraint, oldSizeImageConstraint) 35 | } 36 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/datastore/testing.go: -------------------------------------------------------------------------------- 1 | package datastore 2 | 3 | import ( 4 | "log/slog" 5 | "os" 6 | "testing" 7 | 8 | r "gopkg.in/rethinkdb/rethinkdb-go.v6" 9 | ) 10 | 11 | /* 12 | InitMockDB ... 13 | 14 | Description: 15 | This function initializes the mocked RethinkDB. 16 | It is recommended to execute testdata.InitMockDBData() to fill it with mocks. 17 | 18 | Return Values: 19 | - RethinkStore // The Database 20 | - Mock // The Mock endpoint (Used for mocks) 21 | */ 22 | func InitMockDB(t *testing.T) (*RethinkStore, *r.Mock) { 23 | rs := New( 24 | slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelError})), 25 | "db-addr", 26 | "mockdb", 27 | "db-user", 28 | "db-password", 29 | ) 30 | mock := rs.Mock() 31 | return rs, mock 32 | } 33 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/eventbus/nsq.go: -------------------------------------------------------------------------------- 1 | package eventbus 2 | 3 | import ( 4 | "fmt" 5 | "log/slog" 6 | "time" 7 | 8 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 9 | "github.com/metal-stack/metal-lib/bus" 10 | ) 11 | 12 | // nsqdRetryDelay represents the delay that is used for retries in blocking calls. 13 | const nsqdRetryDelay = 3 * time.Second 14 | 15 | type PublisherProvider func(*slog.Logger, *bus.PublisherConfig) (bus.Publisher, error) 16 | 17 | // NSQClient is a type to request NSQ related tasks such as creation of topics. 18 | type NSQClient struct { 19 | logger *slog.Logger 20 | config *bus.PublisherConfig 21 | publisherProvider PublisherProvider 22 | Publisher bus.Publisher 23 | Endpoints *bus.Endpoints 24 | } 25 | 26 | // NewNSQ creates a new NSQClient. 27 | func NewNSQ(publisherConfig *bus.PublisherConfig, logger *slog.Logger, publisherProvider PublisherProvider) NSQClient { 28 | return NSQClient{ 29 | config: publisherConfig, 30 | logger: logger, 31 | publisherProvider: publisherProvider, 32 | } 33 | } 34 | 35 | // WaitForPublisher blocks until the given provider is able to provide a non-nil publisher. 36 | func (n *NSQClient) WaitForPublisher() { 37 | for { 38 | publisher, err := n.publisherProvider(n.logger, n.config) 39 | if err != nil { 40 | n.logger.Error("cannot create nsq publisher", "error", err) 41 | n.delay() 42 | continue 43 | } 44 | n.logger.Info("nsq connected", "nsqd", fmt.Sprintf("%+v", n.config)) 45 | n.Publisher = publisher 46 | break 47 | } 48 | } 49 | 50 | func (n *NSQClient) CreateEndpoints(lookupds ...string) error { 51 | c, err := bus.NewConsumer(n.logger, n.config.TLS, lookupds...) 52 | if err != nil { 53 | return fmt.Errorf("cannot create consumer for endpoints: %w", err) 54 | } 55 | // change loglevel to warning, because nsq is very noisy 56 | c.With(bus.LogLevel(bus.Warning)) 57 | n.Endpoints = bus.NewEndpoints(c, n.Publisher) 58 | return nil 59 | } 60 | 61 | // WaitForTopicsCreated blocks until the topics are created within the given partitions.
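// An editorial sketch (not part of the source) of the startup sequence these
// blocking helpers are built for: construct the client, wait for a publisher,
// ensure all topics exist, then wire the endpoints. The address, logger and
// the partitions/topics values are assumptions for illustration only.
//
//	nsq := eventbus.NewNSQ(&bus.PublisherConfig{TCPAddress: "nsqd:4150"}, log, bus.NewPublisher)
//	nsq.WaitForPublisher()
//	nsq.WaitForTopicsCreated(partitions, topics)
//	if err := nsq.CreateEndpoints("nsqlookupd:4161"); err != nil {
//		return err
//	}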
62 | func (n *NSQClient) WaitForTopicsCreated(partitions metal.Partitions, topics []metal.NSQTopic) { 63 | for { 64 | if err := n.createTopics(partitions, topics); err != nil { 65 | n.logger.Error("cannot create topics", "error", err) 66 | n.delay() 67 | continue 68 | } 69 | break 70 | } 71 | } 72 | 73 | // CreateTopic creates a topic with given name. 74 | func (n *NSQClient) CreateTopic(name string) error { 75 | if err := n.Publisher.CreateTopic(name); err != nil { 76 | n.logger.Error("cannot create topic", "topic", name) 77 | return err 78 | } 79 | n.logger.Info("topic created", "topic", name) 80 | return nil 81 | } 82 | 83 | func (n *NSQClient) createTopics(partitions metal.Partitions, topics []metal.NSQTopic) error { 84 | for _, topic := range topics { 85 | if topic.PartitionAgnostic { 86 | continue 87 | } 88 | if err := n.CreateTopic(topic.Name); err != nil { 89 | n.logger.Error("cannot create topic", "topic", topic.Name) 90 | return err 91 | } 92 | } 93 | 94 | for _, partition := range partitions { 95 | for _, topic := range topics { 96 | if !topic.PartitionAgnostic { 97 | continue 98 | } 99 | topicFQN := topic.GetFQN(partition.GetID()) 100 | if err := n.CreateTopic(topicFQN); err != nil { 101 | n.logger.Error("cannot create topic", "topic", topicFQN, "partition", partition.GetID()) 102 | return err 103 | } 104 | } 105 | } 106 | return nil 107 | } 108 | 109 | func (n *NSQClient) delay() { 110 | time.Sleep(nsqdRetryDelay) 111 | } 112 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/eventbus/nsq_test.go: -------------------------------------------------------------------------------- 1 | package eventbus 2 | 3 | import ( 4 | "log/slog" 5 | "os" 6 | "testing" 7 | 8 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 9 | "github.com/metal-stack/metal-lib/bus" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestNewNSQ(t *testing.T) { 14 | cfg := &bus.PublisherConfig{ 15 | TCPAddress: "addr", 16 | HTTPEndpoint: "rest", 17 | } 18 | publisher := bus.NewPublisher 19 | logger := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelError})) 20 | actual := NewNSQ(cfg, logger, publisher) 21 | 22 | assert.NotNil(t, actual) 23 | assert.Equal(t, cfg.TCPAddress, actual.config.TCPAddress) 24 | assert.Equal(t, cfg.HTTPEndpoint, actual.config.HTTPEndpoint) 25 | assert.Nil(t, actual.Publisher) 26 | } 27 | 28 | func TestNSQ_WaitForPublisher(t *testing.T) { 29 | cfg := &bus.PublisherConfig{ 30 | TCPAddress: "addr", 31 | HTTPEndpoint: "rest", 32 | } 33 | publisher := NopPublisher{} 34 | 35 | nsq := NewNSQ(cfg, slog.Default(), func(logger *slog.Logger, config *bus.PublisherConfig) (bus.Publisher, error) { 36 | assert.Equal(t, cfg.TCPAddress, config.TCPAddress) 37 | assert.Equal(t, cfg.HTTPEndpoint, config.HTTPEndpoint) 38 | return publisher, nil 39 | }) 40 | assert.NotNil(t, nsq) 41 | assert.Nil(t, nsq.Publisher) 42 | 43 | nsq.WaitForPublisher() 44 | assert.NotNil(t, nsq.Publisher) 45 | assert.Equal(t, publisher, nsq.Publisher) 46 | } 47 | 48 | func TestNSQ_WaitForTopicsCreated(t *testing.T) { 49 | topic := metal.NSQTopic{Name: "gopher"} 50 | partition := metal.Partition{ 51 | Base: metal.Base{ID: "partition-id"}, 52 | } 53 | publisher := NopPublisher{ 54 | T: t, 55 | topic: topic.GetFQN(partition.GetID()), 56 | } 57 | nsq := NewNSQ(nil, slog.Default(), func(*slog.Logger, *bus.PublisherConfig) (bus.Publisher, error) { 58 | return nil, nil 59 | }) 60 | assert.NotNil(t, nsq) 61 | nsq.Publisher = 
publisher 62 | 63 | nsq.WaitForTopicsCreated([]metal.Partition{partition}, []metal.NSQTopic{metal.NSQTopic(topic)}) 64 | 65 | // assertions are checked within the NopPublisher stub 66 | } 67 | 68 | type NopPublisher struct { 69 | T assert.TestingT 70 | topic string 71 | } 72 | 73 | func (p NopPublisher) Publish(topic string, data interface{}) error { 74 | return nil 75 | } 76 | 77 | func (p NopPublisher) CreateTopic(topic string) error { 78 | assert.Equal(p.T, p.topic, topic) 79 | return nil 80 | } 81 | 82 | func (p NopPublisher) Stop() {} 83 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/eventbus/testing.go: -------------------------------------------------------------------------------- 1 | package eventbus 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | type noopPublisher struct{} 8 | 9 | func (n *noopPublisher) Publish(topic string, data interface{}) error { 10 | return nil 11 | } 12 | 13 | func (n *noopPublisher) CreateTopic(topic string) error { 14 | return nil 15 | } 16 | 17 | func (n *noopPublisher) Stop() { 18 | } 19 | 20 | func InitTestPublisher(t *testing.T) *NSQClient { 21 | pub := &noopPublisher{} 22 | nsq := &NSQClient{ 23 | Publisher: pub, 24 | } 25 | return nsq 26 | } 27 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/fsm/events_test.go: -------------------------------------------------------------------------------- 1 | package fsm 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/fsm/states" 7 | "github.com/stretchr/testify/assert" 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func TestEventsProperlyDefined(t *testing.T) { 12 | events := Events() 13 | allStates := states.AllStates(&states.StateConfig{}) 14 | allStates[SelfTransitionState] = nil 15 | 16 | for _, e := range events { 17 | require.NotEmpty(t, e.Dst) 18 | assert.Contains(t, allStates, e.Dst) 19 | assert.NotEmpty(t, e.Src) 20 | assert.NotEmpty(t, e.Name) 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/fsm/fsm.go: -------------------------------------------------------------------------------- 1 | package fsm 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "log/slog" 8 | "strings" 9 | 10 | "github.com/looplab/fsm" 11 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/fsm/states" 12 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 13 | ) 14 | 15 | // HandleProvisioningEvent can be called to determine whether the given incoming event follows an expected lifecycle of a machine considering the event history of the given provisioning event container. 16 | // 17 | // The function returns a new provisioning event container that can then be safely persisted in the database. If an error is returned, the incoming event is not supposed to be persisted in the database. 18 | // 19 | // Among other things, this function can detect crash loops or other irregularities within a machine lifecycle and enriches the returned provisioning event container with this information. 
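// An editorial sketch (not part of the source) of a typical call: run an
// incoming event through HandleProvisioningEvent and persist the returned
// container only when no error occurred. ctx, log and ec are assumptions;
// the persistence call itself is left to the datastore.
//
//	newEC, err := fsm.HandleProvisioningEvent(ctx, log, ec, &metal.ProvisioningEvent{
//		Time:  time.Now(),
//		Event: metal.ProvisioningEventPXEBooting,
//	})
//	if err != nil {
//		return err // the incoming event must not be persisted
//	}
//	// ... persist newEC through the datastore here ...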
20 | func HandleProvisioningEvent(ctx context.Context, log *slog.Logger, ec *metal.ProvisioningEventContainer, event *metal.ProvisioningEvent) (*metal.ProvisioningEventContainer, error) { 21 | if ec == nil { 22 | return nil, fmt.Errorf("provisioning event container must not be nil") 23 | } 24 | 25 | if event == nil { 26 | return nil, fmt.Errorf("provisioning event must not be nil") 27 | } 28 | 29 | var ( 30 | clone = *ec 31 | container = &clone 32 | f = fsm.NewFSM( 33 | initialStateFromEventContainer(container), 34 | Events(), 35 | eventCallbacks(&states.StateConfig{Log: log, Event: event, Container: container}), 36 | ) 37 | ) 38 | 39 | err := f.Event(ctx, event.Event.String()) 40 | if err == nil { 41 | return container, nil 42 | } 43 | 44 | if errors.As(err, &fsm.InvalidEventError{}) { 45 | if event.Message == "" { 46 | event.Message = fmt.Sprintf("[unexpectedly received in %s]", strings.ToLower(f.Current())) 47 | } else { 48 | event.Message = fmt.Sprintf("[unexpectedly received in %s]: %s", strings.ToLower(f.Current()), event.Message) 49 | } 50 | 51 | container.LastEventTime = &event.Time 52 | container.Liveliness = metal.MachineLivelinessAlive 53 | container.LastErrorEvent = event 54 | 55 | switch e := event.Event; e { //nolint:exhaustive 56 | case metal.ProvisioningEventPXEBooting, metal.ProvisioningEventPreparing: 57 | container.CrashLoop = true 58 | container.Events = append([]metal.ProvisioningEvent{*event}, container.Events...) 59 | case metal.ProvisioningEventAlive: 60 | // under no circumstances do we want to persist alive events in the event container. 61 | // if they were persisted, the FSM would get stuck in invalid transitions 62 | // (e.g. all following transitions are invalid and all subsequent alive events will be stored, cramming the history). 63 | default: 64 | container.Events = append([]metal.ProvisioningEvent{*event}, container.Events...)
65 | } 66 | 67 | return container, nil 68 | } 69 | 70 | return nil, fmt.Errorf("internal error while calculating provisioning event container for machine %s: %w", container.ID, err) 71 | } 72 | 73 | func initialStateFromEventContainer(container *metal.ProvisioningEventContainer) string { 74 | lastEvent := "" 75 | if len(container.Events) != 0 { 76 | lastEvent = container.Events[0].Event.String() 77 | } 78 | 79 | return getEventDestination(lastEvent) 80 | } 81 | 82 | func getEventDestination(event string) string { 83 | for _, e := range Events() { 84 | if e.Name == event && e.Dst != SelfTransitionState { 85 | return e.Dst 86 | } 87 | } 88 | 89 | return states.Initial.String() 90 | } 91 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/fsm/states/alive.go: -------------------------------------------------------------------------------- 1 | package states 2 | 3 | import ( 4 | "context" 5 | "log/slog" 6 | 7 | "github.com/looplab/fsm" 8 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 9 | ) 10 | 11 | type AliveState struct { 12 | log *slog.Logger 13 | container *metal.ProvisioningEventContainer 14 | event *metal.ProvisioningEvent 15 | } 16 | 17 | func newAlive(c *StateConfig) *AliveState { 18 | return &AliveState{ 19 | log: c.Log, 20 | container: c.Container, 21 | event: c.Event, 22 | } 23 | } 24 | 25 | func (p *AliveState) OnTransition(ctx context.Context, e *fsm.Event) { 26 | updateTimeAndLiveliness(p.event, p.container) 27 | p.log.Debug("received provisioning alive event", "id", p.container.ID) 28 | } 29 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/fsm/states/booting-new-kernel.go: -------------------------------------------------------------------------------- 1 | package states 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/looplab/fsm" 7 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 8 | ) 9 | 10 | type BootingNewKernelState struct { 11 | container *metal.ProvisioningEventContainer 12 | event *metal.ProvisioningEvent 13 | } 14 | 15 | func newBootingNewKernel(c *StateConfig) *BootingNewKernelState { 16 | return &BootingNewKernelState{ 17 | container: c.Container, 18 | event: c.Event, 19 | } 20 | } 21 | 22 | func (p *BootingNewKernelState) OnTransition(ctx context.Context, e *fsm.Event) { 23 | appendEventToContainer(p.event, p.container) 24 | } 25 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/fsm/states/crashed.go: -------------------------------------------------------------------------------- 1 | package states 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/looplab/fsm" 7 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 8 | ) 9 | 10 | type CrashState struct { 11 | container *metal.ProvisioningEventContainer 12 | event *metal.ProvisioningEvent 13 | } 14 | 15 | func newCrash(c *StateConfig) *CrashState { 16 | return &CrashState{ 17 | container: c.Container, 18 | event: c.Event, 19 | } 20 | } 21 | 22 | func (p *CrashState) OnTransition(ctx context.Context, e *fsm.Event) { 23 | p.container.CrashLoop = true 24 | p.container.LastErrorEvent = p.event 25 | appendEventToContainer(p.event, p.container) 26 | } 27 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/fsm/states/initial.go: -------------------------------------------------------------------------------- 1 | package states 2 | 3 | import ( 4 | 
"context" 5 | "fmt" 6 | 7 | "github.com/looplab/fsm" 8 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 9 | ) 10 | 11 | type InitialState struct { 12 | container *metal.ProvisioningEventContainer 13 | event *metal.ProvisioningEvent 14 | } 15 | 16 | func newInitial(c *StateConfig) *InitialState { 17 | return &InitialState{ 18 | container: c.Container, 19 | event: c.Event, 20 | } 21 | } 22 | 23 | func (p *InitialState) OnTransition(ctx context.Context, e *fsm.Event) { 24 | e.Err = fmt.Errorf("unexpected transition back to initial state") 25 | } 26 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/fsm/states/installing.go: -------------------------------------------------------------------------------- 1 | package states 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/looplab/fsm" 7 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 8 | ) 9 | 10 | type InstallingState struct { 11 | container *metal.ProvisioningEventContainer 12 | event *metal.ProvisioningEvent 13 | } 14 | 15 | func newInstalling(c *StateConfig) *InstallingState { 16 | return &InstallingState{ 17 | container: c.Container, 18 | event: c.Event, 19 | } 20 | } 21 | 22 | func (p *InstallingState) OnTransition(ctx context.Context, e *fsm.Event) { 23 | appendEventToContainer(p.event, p.container) 24 | } 25 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/fsm/states/machine-reclaim.go: -------------------------------------------------------------------------------- 1 | package states 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/looplab/fsm" 7 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 8 | ) 9 | 10 | type MachineReclaimState struct { 11 | container *metal.ProvisioningEventContainer 12 | event *metal.ProvisioningEvent 13 | } 14 | 15 | func newMachineReclaim(c *StateConfig) *MachineReclaimState { 16 | return &MachineReclaimState{ 17 | container: c.Container, 18 | event: c.Event, 19 | } 20 | } 21 | 22 | func (p *MachineReclaimState) OnTransition(ctx context.Context, e *fsm.Event) { 23 | p.container.CrashLoop = false 24 | appendEventToContainer(p.event, p.container) 25 | } 26 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/fsm/states/phoned-home.go: -------------------------------------------------------------------------------- 1 | package states 2 | 3 | import ( 4 | "context" 5 | "log/slog" 6 | "time" 7 | 8 | "github.com/looplab/fsm" 9 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 10 | ) 11 | 12 | // failedMachineReclaimThreshold is the duration after which the machine reclaim is assumed to have failed. 
13 | const failedMachineReclaimThreshold = 5 * time.Minute 14 | 15 | type PhonedHomeState struct { 16 | log *slog.Logger 17 | container *metal.ProvisioningEventContainer 18 | event *metal.ProvisioningEvent 19 | } 20 | 21 | func newPhonedHome(c *StateConfig) *PhonedHomeState { 22 | return &PhonedHomeState{ 23 | log: c.Log, 24 | container: c.Container, 25 | event: c.Event, 26 | } 27 | } 28 | 29 | func (p *PhonedHomeState) OnTransition(ctx context.Context, e *fsm.Event) { 30 | switch e.Src { 31 | case PhonedHome.String(): 32 | updateTimeAndLiveliness(p.event, p.container) 33 | p.log.Debug("swallowing repeated phoned home event", "id", p.container.ID) 34 | case MachineReclaim.String(): 35 | // swallow on machine reclaim 36 | if p.container.LastEventTime != nil && p.event.Time.Sub(*p.container.LastEventTime) > failedMachineReclaimThreshold { 37 | updateTimeAndLiveliness(p.event, p.container) 38 | p.container.FailedMachineReclaim = true 39 | } 40 | default: 41 | p.container.CrashLoop = false 42 | appendEventToContainer(p.event, p.container) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/fsm/states/planned-reboot.go: -------------------------------------------------------------------------------- 1 | package states 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/looplab/fsm" 7 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 8 | ) 9 | 10 | type PlannedRebootState struct { 11 | container *metal.ProvisioningEventContainer 12 | event *metal.ProvisioningEvent 13 | } 14 | 15 | func newPlannedReboot(c *StateConfig) *PlannedRebootState { 16 | return &PlannedRebootState{ 17 | container: c.Container, 18 | event: c.Event, 19 | } 20 | } 21 | 22 | func (p *PlannedRebootState) OnTransition(ctx context.Context, e *fsm.Event) { 23 | p.container.CrashLoop = false 24 | appendEventToContainer(p.event, p.container) 25 | } 26 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/fsm/states/preparing.go: -------------------------------------------------------------------------------- 1 | package states 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/looplab/fsm" 7 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 8 | ) 9 | 10 | type PreparingState struct { 11 | container *metal.ProvisioningEventContainer 12 | event *metal.ProvisioningEvent 13 | } 14 | 15 | func newPreparing(c *StateConfig) *PreparingState { 16 | return &PreparingState{ 17 | container: c.Container, 18 | event: c.Event, 19 | } 20 | } 21 | 22 | func (p *PreparingState) OnTransition(ctx context.Context, e *fsm.Event) { 23 | p.container.FailedMachineReclaim = false 24 | 25 | appendEventToContainer(p.event, p.container) 26 | } 27 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/fsm/states/pxe-booting.go: -------------------------------------------------------------------------------- 1 | package states 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/looplab/fsm" 7 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 8 | ) 9 | 10 | type PXEBootingState struct { 11 | container *metal.ProvisioningEventContainer 12 | event *metal.ProvisioningEvent 13 | } 14 | 15 | func newPXEBooting(c *StateConfig) *PXEBootingState { 16 | return &PXEBootingState{ 17 | container: c.Container, 18 | event: c.Event, 19 | } 20 | } 21 | 22 | func (p *PXEBootingState) OnTransition(ctx context.Context, e *fsm.Event) { 23 | 
p.container.FailedMachineReclaim = false 24 | 25 | if e.Src == PXEBooting.String() { 26 | // swallow repeated pxe booting events, which happens regularly 27 | updateTimeAndLiveliness(p.event, p.container) 28 | return 29 | } 30 | 31 | appendEventToContainer(p.event, p.container) 32 | } 33 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/fsm/states/registering.go: -------------------------------------------------------------------------------- 1 | package states 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/looplab/fsm" 7 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 8 | ) 9 | 10 | type RegisteringState struct { 11 | container *metal.ProvisioningEventContainer 12 | event *metal.ProvisioningEvent 13 | } 14 | 15 | func newRegistering(c *StateConfig) *RegisteringState { 16 | return &RegisteringState{ 17 | container: c.Container, 18 | event: c.Event, 19 | } 20 | } 21 | 22 | func (p *RegisteringState) OnTransition(ctx context.Context, e *fsm.Event) { 23 | appendEventToContainer(p.event, p.container) 24 | } 25 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/fsm/states/states.go: -------------------------------------------------------------------------------- 1 | package states 2 | 3 | import ( 4 | "context" 5 | "log/slog" 6 | 7 | "github.com/looplab/fsm" 8 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 9 | ) 10 | 11 | const ( 12 | Initial stateType = "State Initial" 13 | Alive stateType = "State Alive" 14 | Crashing stateType = "State Crashing" 15 | PXEBooting stateType = "State PXE Booting" 16 | Preparing stateType = "State Preparing" 17 | Registering stateType = "State Registering" 18 | Waiting stateType = "State Waiting" 19 | Installing stateType = "State Installing" 20 | BootingNewKernel stateType = "State Booting New Kernel" 21 | PhonedHome stateType = "State Phoned Home" 22 | PlannedReboot stateType = "State Planned Reboot" 23 | MachineReclaim stateType = "State Machine Reclaim" 24 | ) 25 | 26 | type FSMState interface { 27 | OnTransition(ctx context.Context, e *fsm.Event) 28 | } 29 | 30 | type stateType string 31 | 32 | func (t stateType) String() string { 33 | return string(t) 34 | } 35 | 36 | type StateConfig struct { 37 | Log *slog.Logger 38 | Container *metal.ProvisioningEventContainer 39 | Event *metal.ProvisioningEvent 40 | } 41 | 42 | func AllStates(c *StateConfig) map[string]FSMState { 43 | return map[string]FSMState{ 44 | Alive.String(): newAlive(c), 45 | Crashing.String(): newCrash(c), 46 | Initial.String(): newInitial(c), 47 | PXEBooting.String(): newPXEBooting(c), 48 | Preparing.String(): newPreparing(c), 49 | Registering.String(): newRegistering(c), 50 | Waiting.String(): newWaiting(c), 51 | Installing.String(): newInstalling(c), 52 | BootingNewKernel.String(): newBootingNewKernel(c), 53 | PhonedHome.String(): newPhonedHome(c), 54 | PlannedReboot.String(): newPlannedReboot(c), 55 | MachineReclaim.String(): newMachineReclaim(c), 56 | } 57 | } 58 | 59 | func AllStateNames() []string { 60 | var result []string 61 | 62 | for name := range AllStates(&StateConfig{}) { 63 | result = append(result, name) 64 | } 65 | 66 | return result 67 | } 68 | 69 | func appendEventToContainer(event *metal.ProvisioningEvent, container *metal.ProvisioningEventContainer) { 70 | updateTimeAndLiveliness(event, container) 71 | container.Events = append([]metal.ProvisioningEvent{*event}, container.Events...) 
72 | } 73 | 74 | func updateTimeAndLiveliness(event *metal.ProvisioningEvent, container *metal.ProvisioningEventContainer) { 75 | container.LastEventTime = &event.Time 76 | container.Liveliness = metal.MachineLivelinessAlive 77 | } 78 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/fsm/states/waiting.go: -------------------------------------------------------------------------------- 1 | package states 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/looplab/fsm" 7 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 8 | ) 9 | 10 | type WaitingState struct { 11 | container *metal.ProvisioningEventContainer 12 | event *metal.ProvisioningEvent 13 | } 14 | 15 | func newWaiting(c *StateConfig) *WaitingState { 16 | return &WaitingState{ 17 | container: c.Container, 18 | event: c.Event, 19 | } 20 | } 21 | 22 | func (p *WaitingState) OnTransition(ctx context.Context, e *fsm.Event) { 23 | appendEventToContainer(p.event, p.container) 24 | } 25 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/grpc/boot-service-wait.go: -------------------------------------------------------------------------------- 1 | package grpc 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/google/uuid" 8 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 9 | v1 "github.com/metal-stack/metal-api/pkg/api/v1" 10 | "github.com/metal-stack/metal-lib/bus" 11 | ) 12 | 13 | const ( 14 | receiverHandlerTimeout = 15 * time.Second 15 | allocationTopicTTL = time.Duration(30) * time.Second 16 | ) 17 | 18 | func (b *BootService) Wait(req *v1.BootServiceWaitRequest, srv v1.BootService_WaitServer) error { 19 | machineID := req.MachineId 20 | b.log.Info("wait for allocation called by", "machineID", machineID) 21 | 22 | m, err := b.ds.FindMachineByID(machineID) 23 | if err != nil { 24 | return err 25 | } 26 | allocated := m.Allocation != nil 27 | if allocated { 28 | return nil 29 | } 30 | 31 | // machine is not yet allocated, so we set the waiting flag 32 | err = b.updateWaitingFlag(machineID, true) 33 | if err != nil { 34 | return err 35 | } 36 | defer func() { 37 | if err != nil { 38 | return 39 | } 40 | err := b.updateWaitingFlag(machineID, false) 41 | if err != nil { 42 | b.log.Error("unable to remove waiting flag from machine", "machineID", machineID, "error", err) 43 | } 44 | }() 45 | 46 | // we also create and listen to a channel that will be used as soon as the machine is allocated 47 | value, ok := b.queue.Load(machineID) 48 | 49 | var can chan bool 50 | if !ok { 51 | can = make(chan bool) 52 | b.queue.Store(machineID, can) 53 | } else { 54 | can, ok = value.(chan bool) 55 | if !ok { 56 | return fmt.Errorf("unable to cast queue entry to a chan bool") 57 | } 58 | } 59 | 60 | defer func() { 61 | b.queue.Delete(machineID) 62 | close(can) 63 | }() 64 | 65 | nextCheck := time.Now() 66 | ctx := srv.Context() 67 | for { 68 | select { 69 | case <-ctx.Done(): 70 | err = ctx.Err() 71 | return err 72 | case allocated := <-can: 73 | if allocated { 74 | return nil 75 | } 76 | case now := <-time.After(b.responseInterval): 77 | if now.After(nextCheck) { 78 | m, err = b.ds.FindMachineByID(machineID) 79 | if err != nil { 80 | return err 81 | } 82 | allocated := m.Allocation != nil 83 | if allocated { 84 | return nil 85 | } 86 | nextCheck = now.Add(b.checkInterval) 87 | } 88 | err = sendKeepPatientResponse(srv) 89 | if err != nil { 90 | return err 91 | } 92 | } 93 | } 94 | } 95 | 96 | func (b 
*BootService) initWaitEndpoint() error { 97 | channel := fmt.Sprintf("alloc-%s#ephemeral", uuid.NewString()) 98 | return b.consumer.With(bus.LogLevel(bus.Warning)). 99 | MustRegister(metal.TopicAllocation.Name, channel). 100 | Consume(metal.AllocationEvent{}, func(message interface{}) error { 101 | evt := message.(*metal.AllocationEvent) 102 | b.log.Debug("got message", "topic", metal.TopicAllocation.Name, "channel", channel, "machineID", evt.MachineID) 103 | b.handleAllocation(evt.MachineID) 104 | return nil 105 | }, 5, bus.Timeout(receiverHandlerTimeout, b.timeoutHandler), bus.TTL(allocationTopicTTL)) 106 | } 107 | 108 | func (b *BootService) timeoutHandler(err bus.TimeoutError) error { 109 | b.log.Error("timeout processing event", "event", err.Event()) 110 | return nil 111 | } 112 | 113 | // https://github.com/grpc/grpc-go/issues/1229#issuecomment-302755717 114 | func sendKeepPatientResponse(srv v1.BootService_WaitServer) error { 115 | errChan := make(chan error, 1) 116 | ctx := srv.Context() 117 | go func() { 118 | errChan <- srv.Send(&v1.BootServiceWaitResponse{}) 119 | close(errChan) 120 | }() 121 | select { 122 | case <-ctx.Done(): 123 | return ctx.Err() 124 | case err := <-errChan: 125 | return err 126 | } 127 | } 128 | 129 | func (b *BootService) handleAllocation(machineID string) { 130 | value, ok := b.queue.Load(machineID) 131 | if !ok { 132 | return 133 | } 134 | can, ok := value.(chan bool) 135 | if !ok { 136 | b.log.Error("handleAllocation: unable to cast queue entry to chan bool") 137 | return 138 | } 139 | can <- true 140 | } 141 | 142 | func (b *BootService) updateWaitingFlag(machineID string, flag bool) error { 143 | m, err := b.ds.FindMachineByID(machineID) 144 | if err != nil { 145 | return err 146 | } 147 | old := *m 148 | m.Waiting = flag 149 | return b.ds.UpdateMachine(&old, m) 150 | } 151 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/grpc/event-service.go: -------------------------------------------------------------------------------- 1 | package grpc 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "log/slog" 8 | "time" 9 | 10 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore" 11 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 12 | v1 "github.com/metal-stack/metal-api/pkg/api/v1" 13 | ) 14 | 15 | type EventService struct { 16 | log *slog.Logger 17 | ds *datastore.RethinkStore 18 | } 19 | 20 | func NewEventService(cfg *ServerConfig) *EventService { 21 | return &EventService{ 22 | ds: cfg.Store, 23 | log: cfg.Logger.WithGroup("event-service"), 24 | } 25 | } 26 | func (e *EventService) Send(ctx context.Context, req *v1.EventServiceSendRequest) (*v1.EventServiceSendResponse, error) { 27 | e.log.Debug("send", "event", req) 28 | if req == nil { 29 | return nil, fmt.Errorf("no events sent") 30 | } 31 | 32 | failed := []string{} 33 | processed := uint64(0) 34 | var processErrs []error 35 | for machineID, event := range req.Events { 36 | 37 | m, err := e.ds.FindMachineByID(machineID) 38 | if err != nil && !metal.IsNotFound(err) { 39 | processErrs = append(processErrs, fmt.Errorf("machine with ID:%s not found: %w", machineID, err)) 40 | failed = append(failed, machineID) 41 | continue 42 | } 43 | 44 | // an event can actually create an empty machine.
This enables us to also catch the very first PXE Booting event 45 | // in a machine lifecycle 46 | if m == nil { 47 | m = &metal.Machine{ 48 | Base: metal.Base{ 49 | ID: machineID, 50 | }, 51 | } 52 | err = e.ds.CreateMachine(m) 53 | if err != nil { 54 | processErrs = append(processErrs, err) 55 | failed = append(failed, machineID) 56 | continue 57 | } 58 | } 59 | 60 | ok := metal.AllProvisioningEventTypes[metal.ProvisioningEventType(event.Event)] 61 | if !ok { 62 | processErrs = append(processErrs, fmt.Errorf("unknown provisioning event type: %q", event.Event)) 63 | failed = append(failed, machineID) 64 | continue 65 | } 66 | 67 | ev := metal.ProvisioningEvent{ 68 | Time: time.Now(), 69 | Event: metal.ProvisioningEventType(event.Event), 70 | Message: event.Message, 71 | } 72 | 73 | _, err = e.ds.ProvisioningEventForMachine(ctx, e.log, &ev, machineID) 74 | if err != nil { 75 | processErrs = append(processErrs, err) 76 | failed = append(failed, machineID) 77 | continue 78 | } 79 | processed++ 80 | } 81 | 82 | return &v1.EventServiceSendResponse{ 83 | Events: processed, 84 | Failed: failed, 85 | }, errors.Join(processErrs...) 86 | } 87 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/grpc/event-service_test.go: -------------------------------------------------------------------------------- 1 | package grpc 2 | 3 | import ( 4 | "context" 5 | "log/slog" 6 | "os" 7 | "reflect" 8 | "testing" 9 | 10 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore" 11 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 12 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/testdata" 13 | 14 | v1 "github.com/metal-stack/metal-api/pkg/api/v1" 15 | ) 16 | 17 | func TestEventService_Send(t *testing.T) { 18 | ds, mock := datastore.InitMockDB(t) 19 | testdata.InitMockDBData(mock) 20 | log := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelError})) 21 | 22 | tests := []struct { 23 | name string 24 | req *v1.EventServiceSendRequest 25 | ds *datastore.RethinkStore 26 | log *slog.Logger 27 | want *v1.EventServiceSendResponse 28 | wantErr bool 29 | }{ 30 | { 31 | name: "simple", 32 | req: &v1.EventServiceSendRequest{ 33 | Events: map[string]*v1.MachineProvisioningEvent{ 34 | "m1": { 35 | Event: string(metal.ProvisioningEventPreparing), 36 | Message: "starting metal-hammer", 37 | }, 38 | }, 39 | }, 40 | ds: ds, 41 | log: log, 42 | want: &v1.EventServiceSendResponse{ 43 | Events: uint64(1), 44 | Failed: []string{}, 45 | }, 46 | wantErr: false, 47 | }, 48 | } 49 | for _, tt := range tests { 50 | tt := tt 51 | t.Run(tt.name, func(t *testing.T) { 52 | e := &EventService{ 53 | log: tt.log, 54 | ds: tt.ds, 55 | } 56 | 57 | got, err := e.Send(context.Background(), tt.req) 58 | if (err != nil) != tt.wantErr { 59 | t.Errorf("EventService.Send() error = %v, wantErr %v", err, tt.wantErr) 60 | return 61 | } 62 | if !reflect.DeepEqual(got, tt.want) { 63 | t.Errorf("EventService.Send() = %v, want %v", got, tt.want) 64 | } 65 | }) 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/headscale/auth.go: -------------------------------------------------------------------------------- 1 | package headscale 2 | 3 | import ( 4 | "context" 5 | ) 6 | 7 | // Implements google.golang.org/grpc/credentials.PerRPCCredentials interface 8 | type tokenAuth struct { 9 | token string 10 | } 11 | 12 | func (t tokenAuth) GetRequestMetadata( 13 | ctx context.Context, 14 | _ ...string, 15 | ) (map[string]string, error) {
16 | return map[string]string{ 17 | "authorization": "Bearer " + t.token, 18 | }, nil 19 | } 20 | 21 | func (tokenAuth) RequireTransportSecurity() bool { 22 | return false 23 | } 24 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/ipam/testing.go: -------------------------------------------------------------------------------- 1 | package ipam 2 | 3 | import ( 4 | "context" 5 | "log/slog" 6 | "net/http" 7 | "net/http/httptest" 8 | "os" 9 | "testing" 10 | 11 | goipam "github.com/metal-stack/go-ipam" 12 | "github.com/metal-stack/go-ipam/api/v1/apiv1connect" 13 | "github.com/metal-stack/go-ipam/pkg/service" 14 | ) 15 | 16 | func InitTestIpam(t *testing.T) IPAMer { 17 | 18 | ctx := context.Background() 19 | mux := http.NewServeMux() 20 | log := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelError})) 21 | 22 | mux.Handle(apiv1connect.NewIpamServiceHandler( 23 | service.New(log, goipam.New(ctx)), 24 | )) 25 | server := httptest.NewUnstartedServer(mux) 26 | server.EnableHTTP2 = true 27 | server.StartTLS() 28 | 29 | ipamclient := apiv1connect.NewIpamServiceClient( 30 | server.Client(), 31 | server.URL, 32 | ) 33 | 34 | ipamer := New(ipamclient) 35 | return ipamer 36 | } 37 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/issues/asn-uniqueness.go: -------------------------------------------------------------------------------- 1 | package issues 2 | 3 | import ( 4 | "fmt" 5 | "sort" 6 | "strings" 7 | 8 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 9 | ) 10 | 11 | const ( 12 | TypeASNUniqueness Type = "asn-not-unique" 13 | ) 14 | 15 | type ( 16 | issueASNUniqueness struct { 17 | details string 18 | } 19 | ) 20 | 21 | func (i *issueASNUniqueness) Spec() *spec { 22 | return &spec{ 23 | Type: TypeASNUniqueness, 24 | Severity: SeverityMinor, 25 | Description: "The ASN is not unique (only impact on firewalls)", 26 | RefURL: "https://docs.metal-stack.io/stable/installation/troubleshoot/#asn-not-unique", 27 | } 28 | } 29 | 30 | func (i *issueASNUniqueness) Evaluate(m metal.Machine, ec metal.ProvisioningEventContainer, c *Config) bool { 31 | var ( 32 | machineASNs = map[uint32]metal.Machines{} 33 | overlaps []string 34 | isNoFirewall = func(m metal.Machine) bool { 35 | return m.Allocation == nil || m.Allocation.Role != metal.RoleFirewall 36 | } 37 | ) 38 | 39 | if isNoFirewall(m) { 40 | return false 41 | } 42 | 43 | for _, n := range m.Allocation.MachineNetworks { 44 | n := n 45 | 46 | if n.ASN == 0 { 47 | continue 48 | } 49 | 50 | machineASNs[n.ASN] = nil 51 | } 52 | 53 | for _, machineFromAll := range c.Machines { 54 | machineFromAll := machineFromAll 55 | 56 | if machineFromAll.ID == m.ID { 57 | continue 58 | } 59 | otherMachine := machineFromAll 60 | 61 | if isNoFirewall(otherMachine) { 62 | continue 63 | } 64 | 65 | for _, n := range otherMachine.Allocation.MachineNetworks { 66 | n := n 67 | 68 | if n.ASN == 0 { 69 | continue 70 | } 71 | 72 | _, ok := machineASNs[n.ASN] 73 | if !ok { 74 | continue 75 | } 76 | 77 | machineASNs[n.ASN] = append(machineASNs[n.ASN], otherMachine) 78 | } 79 | } 80 | 81 | var asnList []uint32 82 | for asn := range machineASNs { 83 | asn := asn 84 | asnList = append(asnList, asn) 85 | } 86 | sort.Slice(asnList, func(i, j int) bool { 87 | return asnList[i] < asnList[j] 88 | }) 89 | 90 | for _, asn := range asnList { 91 | asn := asn 92 | 93 | overlappingMachines, ok := machineASNs[asn] 94 | if !ok || 
len(overlappingMachines) == 0 { 95 | continue 96 | } 97 | 98 | var sharedIDs []string 99 | for _, m := range overlappingMachines { 100 | m := m 101 | sharedIDs = append(sharedIDs, m.ID) 102 | } 103 | 104 | overlaps = append(overlaps, fmt.Sprintf("- ASN (%d) not unique, shared with %s", asn, sharedIDs)) 105 | } 106 | 107 | if len(overlaps) == 0 { 108 | return false 109 | } 110 | 111 | sort.Slice(overlaps, func(i, j int) bool { 112 | return overlaps[i] < overlaps[j] 113 | }) 114 | 115 | i.details = strings.Join(overlaps, "\n") 116 | 117 | return true 118 | } 119 | 120 | func (i *issueASNUniqueness) Details() string { 121 | return i.details 122 | } 123 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/issues/bmc-info-outdated.go: -------------------------------------------------------------------------------- 1 | package issues 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 8 | ) 9 | 10 | const ( 11 | TypeBMCInfoOutdated Type = "bmc-info-outdated" 12 | ) 13 | 14 | type ( 15 | issueBMCInfoOutdated struct { 16 | details string 17 | } 18 | ) 19 | 20 | func (i *issueBMCInfoOutdated) Details() string { 21 | return i.details 22 | } 23 | 24 | func (i *issueBMCInfoOutdated) Evaluate(m metal.Machine, ec metal.ProvisioningEventContainer, c *Config) bool { 25 | if m.IPMI.LastUpdated.IsZero() { 26 | i.details = "machine ipmi has never been set" 27 | return true 28 | } 29 | 30 | lastUpdated := time.Since(m.IPMI.LastUpdated) 31 | 32 | if lastUpdated > 20*time.Minute { 33 | i.details = fmt.Sprintf("last updated %s ago", lastUpdated.String()) 34 | return true 35 | } 36 | 37 | return false 38 | } 39 | 40 | func (*issueBMCInfoOutdated) Spec() *spec { 41 | return &spec{ 42 | Type: TypeBMCInfoOutdated, 43 | Severity: SeverityMajor, 44 | Description: "BMC has not been updated from either metal-hammer or metal-bmc", 45 | RefURL: "https://docs.metal-stack.io/stable/installation/troubleshoot/#bmc-info-outdated", 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/issues/bmc-without-ip.go: -------------------------------------------------------------------------------- 1 | package issues 2 | 3 | import "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 4 | 5 | const ( 6 | TypeBMCWithoutIP Type = "bmc-without-ip" 7 | ) 8 | 9 | type ( 10 | issueBMCWithoutIP struct{} 11 | ) 12 | 13 | func (i *issueBMCWithoutIP) Spec() *spec { 14 | return &spec{ 15 | Type: TypeBMCWithoutIP, 16 | Severity: SeverityMajor, 17 | Description: "BMC has no ip address", 18 | RefURL: "https://docs.metal-stack.io/stable/installation/troubleshoot/#bmc-without-ip", 19 | } 20 | } 21 | 22 | func (i *issueBMCWithoutIP) Evaluate(m metal.Machine, ec metal.ProvisioningEventContainer, c *Config) bool { 23 | return m.IPMI.Address == "" 24 | } 25 | 26 | func (i *issueBMCWithoutIP) Details() string { 27 | return "" 28 | } 29 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/issues/bmc-without-mac.go: -------------------------------------------------------------------------------- 1 | package issues 2 | 3 | import "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 4 | 5 | const ( 6 | TypeBMCWithoutMAC Type = "bmc-without-mac" 7 | ) 8 | 9 | type ( 10 | issueBMCWithoutMAC struct{} 11 | ) 12 | 13 | func (i *issueBMCWithoutMAC) Spec() *spec { 14 | return &spec{ 15 | Type: TypeBMCWithoutMAC, 16 
| Severity: SeverityMajor, 17 | Description: "BMC has no mac address", 18 | RefURL: "https://docs.metal-stack.io/stable/installation/troubleshoot/#bmc-without-mac", 19 | } 20 | } 21 | 22 | func (i *issueBMCWithoutMAC) Evaluate(m metal.Machine, ec metal.ProvisioningEventContainer, c *Config) bool { 23 | return m.IPMI.MacAddress == "" 24 | } 25 | 26 | func (i *issueBMCWithoutMAC) Details() string { 27 | return "" 28 | } 29 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/issues/crash-loop.go: -------------------------------------------------------------------------------- 1 | package issues 2 | 3 | import ( 4 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 5 | "github.com/metal-stack/metal-lib/pkg/pointer" 6 | ) 7 | 8 | const ( 9 | TypeCrashLoop Type = "crashloop" 10 | ) 11 | 12 | type ( 13 | issueCrashLoop struct{} 14 | ) 15 | 16 | func (i *issueCrashLoop) Spec() *spec { 17 | return &spec{ 18 | Type: TypeCrashLoop, 19 | Severity: SeverityMajor, 20 | Description: "machine is in a provisioning crash loop (⭕)", 21 | RefURL: "https://docs.metal-stack.io/stable/installation/troubleshoot/#crashloop", 22 | } 23 | } 24 | 25 | func (i *issueCrashLoop) Evaluate(m metal.Machine, ec metal.ProvisioningEventContainer, c *Config) bool { 26 | if ec.CrashLoop { 27 | if pointer.FirstOrZero(ec.Events).Event == metal.ProvisioningEventWaiting { 28 | // machines which are waiting are not considered to have issues 29 | } else { 30 | return true 31 | } 32 | } 33 | return false 34 | } 35 | 36 | func (i *issueCrashLoop) Details() string { 37 | return "" 38 | } 39 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/issues/failed-machine-reclaim.go: -------------------------------------------------------------------------------- 1 | package issues 2 | 3 | import ( 4 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 5 | "github.com/metal-stack/metal-lib/pkg/pointer" 6 | ) 7 | 8 | const ( 9 | TypeFailedMachineReclaim Type = "failed-machine-reclaim" 10 | ) 11 | 12 | type ( 13 | issueFailedMachineReclaim struct{} 14 | ) 15 | 16 | func (i *issueFailedMachineReclaim) Spec() *spec { 17 | return &spec{ 18 | Type: TypeFailedMachineReclaim, 19 | Severity: SeverityCritical, 20 | Description: "machine phones home but is not allocated", 21 | RefURL: "https://docs.metal-stack.io/stable/installation/troubleshoot/#failed-machine-reclaim", 22 | } 23 | } 24 | 25 | func (i *issueFailedMachineReclaim) Evaluate(m metal.Machine, ec metal.ProvisioningEventContainer, c *Config) bool { 26 | if ec.FailedMachineReclaim { 27 | return true 28 | } 29 | 30 | // compatibility: before the provisioning FSM was renewed, this state could be detected the following way, 31 | // so we should keep this condition 32 | if m.Allocation == nil && pointer.FirstOrZero(ec.Events).Event == metal.ProvisioningEventPhonedHome { 33 | return true 34 | } 35 | 36 | return false 37 | } 38 | 39 | func (i *issueFailedMachineReclaim) Details() string { 40 | return "" 41 | } 42 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/issues/last-event-error.go: -------------------------------------------------------------------------------- 1 | package issues 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 8 | ) 9 | 10 | const ( 11 | TypeLastEventError Type = "last-event-error" 12 | ) 13 | 14 | type ( 15 | issueLastEventError
struct { 16 | details string 17 | } 18 | ) 19 | 20 | func DefaultLastErrorThreshold() time.Duration { 21 | return 7 * 24 * time.Hour 22 | } 23 | 24 | func (i *issueLastEventError) Spec() *spec { 25 | return &spec{ 26 | Type: TypeLastEventError, 27 | Severity: SeverityMinor, 28 | Description: "the machine had an error during the provisioning lifecycle", 29 | RefURL: "https://docs.metal-stack.io/stable/installation/troubleshoot/#last-event-error", 30 | } 31 | } 32 | 33 | func (i *issueLastEventError) Evaluate(m metal.Machine, ec metal.ProvisioningEventContainer, c *Config) bool { 34 | if c.LastErrorThreshold == 0 { 35 | return false 36 | } 37 | 38 | if ec.LastErrorEvent != nil { 39 | timeSince := time.Since(time.Time(ec.LastErrorEvent.Time)) 40 | if timeSince < c.LastErrorThreshold { 41 | i.details = fmt.Sprintf("occurred %s ago", timeSince.String()) 42 | return true 43 | } 44 | } 45 | 46 | return false 47 | } 48 | 49 | func (i *issueLastEventError) Details() string { 50 | return i.details 51 | } 52 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/issues/liveliness-dead.go: -------------------------------------------------------------------------------- 1 | package issues 2 | 3 | import "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 4 | 5 | const ( 6 | TypeLivelinessDead Type = "liveliness-dead" 7 | ) 8 | 9 | type ( 10 | issueLivelinessDead struct{} 11 | ) 12 | 13 | func (i *issueLivelinessDead) Spec() *spec { 14 | return &spec{ 15 | Type: TypeLivelinessDead, 16 | Severity: SeverityMajor, 17 | Description: "the machine is not sending events anymore", 18 | RefURL: "https://docs.metal-stack.io/stable/installation/troubleshoot/#liveliness-dead", 19 | } 20 | } 21 | 22 | func (i *issueLivelinessDead) Evaluate(m metal.Machine, ec metal.ProvisioningEventContainer, c *Config) bool { 23 | return ec.Liveliness == metal.MachineLivelinessDead 24 | } 25 | 26 | func (i *issueLivelinessDead) Details() string { 27 | return "" 28 | } 29 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/issues/liveliness-not-available.go: -------------------------------------------------------------------------------- 1 | package issues 2 | 3 | import "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 4 | 5 | const ( 6 | TypeLivelinessNotAvailable Type = "liveliness-not-available" 7 | ) 8 | 9 | type ( 10 | issueLivelinessNotAvailable struct{} 11 | ) 12 | 13 | func (i *issueLivelinessNotAvailable) Spec() *spec { 14 | return &spec{ 15 | Type: TypeLivelinessNotAvailable, 16 | Severity: SeverityMinor, 17 | Description: "the machine liveliness is not available", 18 | RefURL: "https://docs.metal-stack.io/stable/installation/troubleshoot/#liveliness-not-available", 19 | } 20 | } 21 | 22 | func (i *issueLivelinessNotAvailable) Evaluate(m metal.Machine, ec metal.ProvisioningEventContainer, c *Config) bool { 23 | allowed := map[metal.MachineLiveliness]bool{ 24 | metal.MachineLivelinessAlive: true, 25 | metal.MachineLivelinessDead: true, 26 | metal.MachineLivelinessUnknown: true, 27 | } 28 | 29 | return !allowed[ec.Liveliness] 30 | } 31 | 32 | func (i *issueLivelinessNotAvailable) Details() string { 33 | return "" 34 | } 35 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/issues/liveliness-unknown.go: -------------------------------------------------------------------------------- 1 | package issues 2 | 3 | import 
"github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 4 | 5 | const ( 6 | TypeLivelinessUnknown Type = "liveliness-unknown" 7 | ) 8 | 9 | type ( 10 | issueLivelinessUnknown struct{} 11 | ) 12 | 13 | func (i *issueLivelinessUnknown) Spec() *spec { 14 | return &spec{ 15 | Type: TypeLivelinessUnknown, 16 | Severity: SeverityMajor, 17 | Description: "the machine is not sending LLDP alive messages anymore", 18 | RefURL: "https://docs.metal-stack.io/stable/installation/troubleshoot/#liveliness-unknown", 19 | } 20 | } 21 | 22 | func (i *issueLivelinessUnknown) Evaluate(m metal.Machine, ec metal.ProvisioningEventContainer, c *Config) bool { 23 | return ec.Liveliness == metal.MachineLivelinessUnknown 24 | } 25 | 26 | func (i *issueLivelinessUnknown) Details() string { 27 | return "" 28 | } 29 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/issues/no-event-container.go: -------------------------------------------------------------------------------- 1 | package issues 2 | 3 | import ( 4 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 5 | ) 6 | 7 | const ( 8 | TypeNoEventContainer Type = "no-event-container" 9 | ) 10 | 11 | type ( 12 | issueNoEventContainer struct{} 13 | ) 14 | 15 | func (i *issueNoEventContainer) Spec() *spec { 16 | return &spec{ 17 | Type: TypeNoEventContainer, 18 | Severity: SeverityMajor, 19 | Description: "machine has no event container", 20 | RefURL: "https://docs.metal-stack.io/stable/installation/troubleshoot/#no-event-container", 21 | } 22 | } 23 | 24 | func (i *issueNoEventContainer) Evaluate(m metal.Machine, ec metal.ProvisioningEventContainer, c *Config) bool { 25 | return ec.Base.ID == "" 26 | } 27 | 28 | func (i *issueNoEventContainer) Details() string { 29 | return "" 30 | } 31 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/issues/no-partition.go: -------------------------------------------------------------------------------- 1 | package issues 2 | 3 | import "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 4 | 5 | const ( 6 | TypeNoPartition Type = "no-partition" 7 | ) 8 | 9 | type ( 10 | issueNoPartition struct{} 11 | ) 12 | 13 | func (i *issueNoPartition) Spec() *spec { 14 | return &spec{ 15 | Type: TypeNoPartition, 16 | Severity: SeverityMajor, 17 | Description: "machine with no partition", 18 | RefURL: "https://docs.metal-stack.io/stable/installation/troubleshoot/#no-partition", 19 | } 20 | } 21 | 22 | func (i *issueNoPartition) Evaluate(m metal.Machine, ec metal.ProvisioningEventContainer, c *Config) bool { 23 | return m.PartitionID == "" 24 | } 25 | 26 | func (i *issueNoPartition) Details() string { 27 | return "" 28 | } 29 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/issues/non-distinct-bmc-ip.go: -------------------------------------------------------------------------------- 1 | package issues 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 7 | ) 8 | 9 | const ( 10 | TypeNonDistinctBMCIP Type = "bmc-no-distinct-ip" 11 | ) 12 | 13 | type ( 14 | issueNonDistinctBMCIP struct { 15 | details string 16 | } 17 | ) 18 | 19 | func (i *issueNonDistinctBMCIP) Spec() *spec { 20 | return &spec{ 21 | Type: TypeNonDistinctBMCIP, 22 | Severity: SeverityMajor, 23 | Description: "BMC IP address is not distinct", 24 | RefURL: 
"https://docs.metal-stack.io/stable/installation/troubleshoot/#bmc-no-distinct-ip", 25 | } 26 | } 27 | 28 | func (i *issueNonDistinctBMCIP) Evaluate(m metal.Machine, ec metal.ProvisioningEventContainer, c *Config) bool { 29 | if m.IPMI.Address == "" { 30 | return false 31 | } 32 | 33 | var ( 34 | bmcIP = m.IPMI.Address 35 | overlaps []string 36 | ) 37 | 38 | for _, machineFromAll := range c.Machines { 39 | machineFromAll := machineFromAll 40 | 41 | if machineFromAll.ID == m.ID { 42 | continue 43 | } 44 | otherMachine := machineFromAll 45 | 46 | if otherMachine.IPMI.Address == "" { 47 | continue 48 | } 49 | 50 | if bmcIP == otherMachine.IPMI.Address { 51 | overlaps = append(overlaps, otherMachine.ID) 52 | } 53 | } 54 | 55 | if len(overlaps) == 0 { 56 | return false 57 | } 58 | 59 | i.details = fmt.Sprintf("BMC IP (%s) not unique, shared with %s", bmcIP, overlaps) 60 | 61 | return true 62 | } 63 | 64 | func (i *issueNonDistinctBMCIP) Details() string { 65 | return i.details 66 | } 67 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/issues/severeties.go: -------------------------------------------------------------------------------- 1 | package issues 2 | 3 | import "fmt" 4 | 5 | const ( 6 | // SeverityMinor is an issue that should be checked from time to time but has no bad effects for the user. 7 | SeverityMinor Severity = "minor" 8 | // SeverityMajor is an issue where user experience is affected or provider resources are wasted. 9 | // overall functionality is still maintained though. major issues should be resolved as soon as possible. 10 | SeverityMajor Severity = "major" 11 | // SeverityCritical is an issue that can lead to disfunction of the system and need to be handled as quickly as possible. 
12 | SeverityCritical Severity = "critical" 13 | ) 14 | 15 | type ( 16 | Severity string 17 | ) 18 | 19 | func AllSevereties() []Severity { 20 | return []Severity{ 21 | SeverityMinor, 22 | SeverityMajor, 23 | SeverityCritical, 24 | } 25 | } 26 | 27 | func SeverityFromString(input string) (Severity, error) { 28 | switch Severity(input) { 29 | case SeverityCritical: 30 | return SeverityCritical, nil 31 | case SeverityMajor: 32 | return SeverityMajor, nil 33 | case SeverityMinor: 34 | return SeverityMinor, nil 35 | default: 36 | return "", fmt.Errorf("unknown issue severity: %s", input) 37 | } 38 | } 39 | 40 | func (s Severity) LowerThan(o Severity) bool { 41 | smap := map[Severity]int{ 42 | SeverityCritical: 10, 43 | SeverityMajor: 5, 44 | SeverityMinor: 0, 45 | } 46 | 47 | return smap[s] < smap[o] 48 | } 49 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/issues/types.go: -------------------------------------------------------------------------------- 1 | package issues 2 | 3 | import "fmt" 4 | 5 | type ( 6 | Type string 7 | ) 8 | 9 | func AllIssueTypes() []Type { 10 | return []Type{ 11 | TypeNoPartition, 12 | TypeLivelinessDead, 13 | TypeLivelinessUnknown, 14 | TypeLivelinessNotAvailable, 15 | TypeFailedMachineReclaim, 16 | TypeCrashLoop, 17 | TypeLastEventError, 18 | TypeBMCWithoutMAC, 19 | TypeBMCWithoutIP, 20 | TypeBMCInfoOutdated, 21 | TypeASNUniqueness, 22 | TypeNonDistinctBMCIP, 23 | TypeNoEventContainer, 24 | } 25 | } 26 | 27 | func NewIssueFromType(t Type) (issue, error) { 28 | switch t { 29 | case TypeNoPartition: 30 | return &issueNoPartition{}, nil 31 | case TypeLivelinessDead: 32 | return &issueLivelinessDead{}, nil 33 | case TypeLivelinessUnknown: 34 | return &issueLivelinessUnknown{}, nil 35 | case TypeLivelinessNotAvailable: 36 | return &issueLivelinessNotAvailable{}, nil 37 | case TypeFailedMachineReclaim: 38 | return &issueFailedMachineReclaim{}, nil 39 | case TypeCrashLoop: 40 | return &issueCrashLoop{}, nil 41 | case TypeLastEventError: 42 | return &issueLastEventError{}, nil 43 | case TypeBMCWithoutMAC: 44 | return &issueBMCWithoutMAC{}, nil 45 | case TypeBMCWithoutIP: 46 | return &issueBMCWithoutIP{}, nil 47 | case TypeBMCInfoOutdated: 48 | return &issueBMCInfoOutdated{}, nil 49 | case TypeASNUniqueness: 50 | return &issueASNUniqueness{}, nil 51 | case TypeNonDistinctBMCIP: 52 | return &issueNonDistinctBMCIP{}, nil 53 | case TypeNoEventContainer: 54 | return &issueNoEventContainer{}, nil 55 | default: 56 | return nil, fmt.Errorf("unknown issue type: %s", t) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/masterdata/masterdata.go: -------------------------------------------------------------------------------- 1 | package masterdata 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | v1 "github.com/metal-stack/masterdata-api/api/v1" 8 | mdm "github.com/metal-stack/masterdata-api/pkg/client" 9 | "github.com/metal-stack/metal-lib/pkg/healthstatus" 10 | ) 11 | 12 | type MasterdataHealthClient struct { 13 | mdc mdm.Client 14 | } 15 | 16 | func NewMasterdataHealthClient(mdc mdm.Client) *MasterdataHealthClient { 17 | return &MasterdataHealthClient{mdc: mdc} 18 | } 19 | 20 | func (mhc *MasterdataHealthClient) ServiceName() string { 21 | return "masterdata-api" 22 | } 23 | 24 | func (mhc *MasterdataHealthClient) Check(ctx context.Context) (healthstatus.HealthResult, error) { 25 | version, err := mhc.mdc.Version().Get(ctx, &v1.GetVersionRequest{}) 26 
| 27 | if err != nil { 28 | return healthstatus.HealthResult{ 29 | Status: healthstatus.HealthStatusUnhealthy, 30 | }, err 31 | } 32 | 33 | return healthstatus.HealthResult{ 34 | Status: healthstatus.HealthStatusHealthy, 35 | Message: fmt.Sprintf("connected to masterdata-api service version: %s rev: %s", version.Version, version.Revision), 36 | }, nil 37 | } 38 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/metal/errors.go: -------------------------------------------------------------------------------- 1 | package metal 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | ) 7 | 8 | var ( 9 | errNotFound = errors.New("NotFound") 10 | errConflict = errors.New("Conflict") 11 | // TODO refactor implementations of fmt.Errorf to metal.Internal() in datastore and service 12 | errInternal = errors.New("Internal") 13 | ) 14 | 15 | // NotFound creates a new notfound error with a given error message. 16 | func NotFound(format string, args ...interface{}) error { 17 | return fmt.Errorf("%w %s", errNotFound, fmt.Sprintf(format, args...)) 18 | } 19 | 20 | // IsNotFound checks if an error is a notfound error. 21 | func IsNotFound(e error) bool { 22 | return errors.Is(e, errNotFound) 23 | } 24 | 25 | // Conflict creates a new conflict error with a given error message. 26 | func Conflict(format string, args ...interface{}) error { 27 | return fmt.Errorf("%w %s", errConflict, fmt.Sprintf(format, args...)) 28 | } 29 | 30 | // IsConflict checks if an error is a conflict error. 31 | func IsConflict(e error) bool { 32 | return errors.Is(e, errConflict) 33 | } 34 | 35 | // Internal creates a new Internal error with a given error message. 36 | func Internal(format string, args ...interface{}) error { 37 | return fmt.Errorf("%w %s", errInternal, fmt.Sprintf(format, args...)) 38 | } 39 | 40 | // IsInternal checks if an error is an Internal error. 
41 | func IsInternal(e error) bool { 42 | return errors.Is(e, errInternal) 43 | } 44 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/metal/errors_test.go: -------------------------------------------------------------------------------- 1 | package metal 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | ) 7 | 8 | func TestNotFound(t *testing.T) { 9 | tests := []struct { 10 | name string 11 | format string 12 | args []interface{} 13 | wantErr bool 14 | }{ 15 | { 16 | name: "TestNotFound 1", 17 | format: "SomeFormat", 18 | wantErr: true, 19 | }, 20 | } 21 | for i := range tests { 22 | tt := tests[i] 23 | t.Run(tt.name, func(t *testing.T) { 24 | if err := NotFound(tt.format, tt.args...); (err != nil) != tt.wantErr { 25 | t.Errorf("NotFound() error = %v, wantErr %v", err, tt.wantErr) 26 | } 27 | }) 28 | } 29 | } 30 | 31 | func TestIsNotFound(t *testing.T) { 32 | tests := []struct { 33 | name string 34 | err error 35 | want bool 36 | }{ 37 | { 38 | name: "Test 1", 39 | err: errors.New("Some other Error"), 40 | want: false, 41 | }, 42 | { 43 | name: "Test 2", 44 | err: errNotFound, 45 | want: true, 46 | }, 47 | { 48 | name: "Test 3", 49 | err: nil, 50 | want: false, 51 | }, 52 | } 53 | for i := range tests { 54 | tt := tests[i] 55 | t.Run(tt.name, func(t *testing.T) { 56 | if got := IsNotFound(tt.err); got != tt.want { 57 | t.Errorf("IsNotFound() = %v, want %v", got, tt.want) 58 | } 59 | }) 60 | } 61 | } 62 | 63 | func TestIsConflict(t *testing.T) { 64 | tests := []struct { 65 | name string 66 | err error 67 | want bool 68 | }{ 69 | { 70 | name: "Test 1", 71 | err: errors.New("Some other Error"), 72 | want: false, 73 | }, 74 | { 75 | name: "Test 2", 76 | err: errConflict, 77 | want: true, 78 | }, 79 | { 80 | name: "Test 3", 81 | err: nil, 82 | want: false, 83 | }, 84 | } 85 | for i := range tests { 86 | tt := tests[i] 87 | t.Run(tt.name, func(t *testing.T) { 88 | if got := IsConflict(tt.err); got != tt.want { 89 | t.Errorf("IsConflict() = %v, want %v", got, tt.want) 90 | } 91 | }) 92 | } 93 | } 94 | 95 | func TestIsInternal(t *testing.T) { 96 | tests := []struct { 97 | name string 98 | err error 99 | want bool 100 | }{ 101 | { 102 | name: "Test 1", 103 | err: errors.New("Some other Error"), 104 | want: false, 105 | }, 106 | { 107 | name: "Test 2", 108 | err: errInternal, 109 | want: true, 110 | }, 111 | { 112 | name: "Test 3", 113 | err: nil, 114 | want: false, 115 | }, 116 | } 117 | for i := range tests { 118 | tt := tests[i] 119 | t.Run(tt.name, func(t *testing.T) { 120 | if got := IsInternal(tt.err); got != tt.want { 121 | t.Errorf("IsInternal() = %v, want %v", got, tt.want) 122 | } 123 | }) 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/metal/firmware.go: -------------------------------------------------------------------------------- 1 | package metal 2 | 3 | type FirmwareKind = string 4 | 5 | const ( 6 | FirmwareBIOS FirmwareKind = "bios" 7 | FirmwareBMC FirmwareKind = "bmc" 8 | ) 9 | 10 | var FirmwareKinds = []string{ 11 | FirmwareBIOS, 12 | FirmwareBMC, 13 | } 14 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/metal/image_test.go: -------------------------------------------------------------------------------- 1 | package metal 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | ) 7 | 8 | func TestImages_ByID(t *testing.T) { 9 | testImages := []Image{ 10 | { 11 | Base: Base{ 12 | ID: "1", 13 | Name: 
"Image 1", 14 | Description: "description 1", 15 | }, 16 | }, 17 | { 18 | Base: Base{ 19 | ID: "2", 20 | Name: "Image 2", 21 | Description: "description 2", 22 | }, 23 | }, 24 | { 25 | Base: Base{ 26 | ID: "3", 27 | Name: "Image 3", 28 | Description: "description 3", 29 | }, 30 | }, 31 | } 32 | 33 | imageMap := make(ImageMap) 34 | for i, f := range testImages { 35 | imageMap[f.ID] = testImages[i] 36 | } 37 | 38 | tests := []struct { 39 | name string 40 | ii Images 41 | want ImageMap 42 | }{ 43 | // Test Data Array (only 1 data): 44 | { 45 | name: "TestImages_ByID Test 1", 46 | ii: testImages, 47 | want: imageMap, 48 | }, 49 | } 50 | for i := range tests { 51 | tt := tests[i] 52 | t.Run(tt.name, func(t *testing.T) { 53 | if got := tt.ii.ByID(); !reflect.DeepEqual(got, tt.want) { 54 | t.Errorf("Images.ByID() = %v, want %v", got, tt.want) 55 | } 56 | }) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/metal/ip.go: -------------------------------------------------------------------------------- 1 | package metal 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "time" 7 | 8 | "github.com/metal-stack/metal-lib/pkg/tag" 9 | 10 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/tags" 11 | ) 12 | 13 | // IPType is the type of an ip. 14 | type IPType string 15 | 16 | // IPScope is the scope of an ip. 17 | type IPScope string 18 | 19 | const ( 20 | // TagIPSeparator is the separator character for key and values in IP-Tags 21 | TagIPSeparator = "=" 22 | 23 | // Ephemeral IPs will be cleaned up automatically on machine, network, project deletion 24 | Ephemeral IPType = "ephemeral" 25 | // Static IPs will not be cleaned up and can be re-used for machines, networks within a project 26 | Static IPType = "static" 27 | 28 | // ScopeEmpty IPs are not bound to a project, machine or cluster 29 | ScopeEmpty IPScope = "" 30 | // ScopeProject IPs can be assigned to machines or used by cluster services 31 | ScopeProject IPScope = "project" 32 | // ScopeMachine IPs are bound to the usage directly at machines 33 | ScopeMachine IPScope = "machine" 34 | ) 35 | 36 | // IP of a machine/firewall. 37 | type IP struct { 38 | IPAddress string `rethinkdb:"id" json:"id"` 39 | // AllocationID will be randomly generated during IP creation and helps identifying the point in time 40 | // when an IP was created. This is not the primary key! 41 | // This field can help to distinguish whether an IP address was re-acquired or 42 | // if it is still the same ip address as before. 
43 | AllocationUUID string `rethinkdb:"allocationuuid" json:"allocationuuid"` 44 | ParentPrefixCidr string `rethinkdb:"prefix" json:"prefix"` 45 | Name string `rethinkdb:"name" json:"name"` 46 | Description string `rethinkdb:"description" json:"description"` 47 | ProjectID string `rethinkdb:"projectid" json:"projectid"` 48 | NetworkID string `rethinkdb:"networkid" json:"networkid"` 49 | Type IPType `rethinkdb:"type" json:"type"` 50 | Tags []string `rethinkdb:"tags" json:"tags"` 51 | Created time.Time `rethinkdb:"created" json:"created"` 52 | Changed time.Time `rethinkdb:"changed" json:"changed"` 53 | } 54 | 55 | // GetID returns the ID of the entity 56 | func (ip *IP) GetID() string { 57 | return ip.IPAddress 58 | } 59 | 60 | // SetID sets the ID of the entity 61 | func (ip *IP) SetID(id string) { 62 | ip.IPAddress = id 63 | } 64 | 65 | // GetChanged returns the last changed timestamp of the entity 66 | func (ip *IP) GetChanged() time.Time { 67 | return ip.Changed 68 | } 69 | 70 | // SetChanged sets the last changed timestamp of the entity 71 | func (ip *IP) SetChanged(changed time.Time) { 72 | ip.Changed = changed 73 | } 74 | 75 | // GetCreated returns the creation timestamp of the entity 76 | func (ip *IP) GetCreated() time.Time { 77 | return ip.Created 78 | } 79 | 80 | // SetCreated sets the creation timestamp of the entity 81 | func (ip *IP) SetCreated(created time.Time) { 82 | ip.Created = created 83 | } 84 | 85 | // GetScope determines the scope of an ip address 86 | func (ip *IP) GetScope() IPScope { 87 | if ip.ProjectID == "" { 88 | return ScopeEmpty 89 | } 90 | for _, t := range ip.Tags { 91 | if strings.HasPrefix(t, tag.MachineID) { 92 | return ScopeMachine 93 | } 94 | } 95 | return ScopeProject 96 | } 97 | 98 | func (ip *IP) HasMachineId(id string) bool { 99 | t := tags.New(ip.Tags) 100 | return t.Has(IpTag(tag.MachineID, id)) 101 | } 102 | 103 | func (ip *IP) GetMachineIds() []string { 104 | ts := tags.New(ip.Tags) 105 | return ts.Values(tag.MachineID + TagIPSeparator) 106 | } 107 | 108 | func (ip *IP) AddMachineId(id string) { 109 | ts := tags.New(ip.Tags) 110 | t := IpTag(tag.MachineID, id) 111 | ts.Remove(tag.MachineID) 112 | ts.Add(t) 113 | ip.Tags = ts.Unique() 114 | } 115 | 116 | func (ip *IP) RemoveMachineId(id string) { 117 | ts := tags.New(ip.Tags) 118 | t := IpTag(tag.MachineID, id) 119 | ts.Remove(t) 120 | ip.Tags = ts.Unique() 121 | } 122 | 123 | func IpTag(key, value string) string { 124 | return fmt.Sprintf("%s%s%s", key, TagIPSeparator, value) 125 | } 126 | 127 | type IPs []IP 128 | 129 | type IPsMap map[string]IPs 130 | 131 | func (l IPs) ByProjectID() IPsMap { 132 | res := IPsMap{} 133 | for _, e := range l { 134 | res[e.ProjectID] = append(res[e.ProjectID], e) 135 | } 136 | return res 137 | } 138 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/metal/ip_test.go: -------------------------------------------------------------------------------- 1 | package metal 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/metal-stack/metal-lib/pkg/tag" 7 | 8 | "github.com/google/go-cmp/cmp" 9 | ) 10 | 11 | func TestAddMachineId(t *testing.T) { 12 | tests := []struct { 13 | name string 14 | ip IP 15 | expectedTags []string 16 | }{ 17 | { 18 | name: "ip without machine tag", 19 | ip: IP{}, 20 | expectedTags: []string{IpTag(tag.MachineID, "123")}, 21 | }, 22 | { 23 | name: "ip with empty machine tag", 24 | ip: IP{ 25 | Tags: []string{tag.MachineID}, 26 | }, 27 | expectedTags: []string{IpTag(tag.MachineID, "123")}, 28 | 
}, 29 | { 30 | name: "ip with other machine tag", 31 | ip: IP{ 32 | Tags: []string{IpTag(tag.MachineID, "1")}, 33 | }, 34 | expectedTags: []string{IpTag(tag.MachineID, "1"), IpTag(tag.MachineID, "123")}, 35 | }, 36 | } 37 | for i := range tests { 38 | tt := tests[i] 39 | t.Run(tt.name, func(t *testing.T) { 40 | tt.ip.AddMachineId("123") 41 | if got := tt.ip.Tags; !cmp.Equal(got, tt.expectedTags) { 42 | t.Errorf("%v", cmp.Diff(got, tt.expectedTags)) 43 | } 44 | }) 45 | } 46 | } 47 | 48 | func TestRemoveMachineId(t *testing.T) { 49 | tests := []struct { 50 | name string 51 | ip IP 52 | expectedTags []string 53 | }{ 54 | { 55 | name: "ip without machine tag", 56 | ip: IP{}, 57 | expectedTags: []string{}, 58 | }, 59 | { 60 | name: "ip with empty machine tag", 61 | ip: IP{ 62 | Tags: []string{tag.MachineID}, 63 | }, 64 | expectedTags: []string{tag.MachineID}, 65 | }, 66 | { 67 | name: "ip with other machine tag", 68 | ip: IP{ 69 | Tags: []string{IpTag(tag.MachineID, "1")}, 70 | }, 71 | expectedTags: []string{IpTag(tag.MachineID, "1")}, 72 | }, 73 | { 74 | name: "ip with matching machine tag", 75 | ip: IP{ 76 | Tags: []string{IpTag(tag.MachineID, "123")}, 77 | }, 78 | expectedTags: []string{}, 79 | }, 80 | } 81 | for i := range tests { 82 | tt := tests[i] 83 | t.Run(tt.name, func(t *testing.T) { 84 | tt.ip.RemoveMachineId("123") 85 | if got := tt.ip.Tags; !cmp.Equal(got, tt.expectedTags) { 86 | t.Errorf("%v", cmp.Diff(got, tt.expectedTags)) 87 | } 88 | }) 89 | } 90 | } 91 | 92 | func TestGetScope(t *testing.T) { 93 | tests := []struct { 94 | name string 95 | ip IP 96 | expectedScope IPScope 97 | }{ 98 | { 99 | name: "empty scope ip", 100 | ip: IP{ 101 | Tags: []string{IpTag(tag.MachineID, "102")}, 102 | }, 103 | expectedScope: ScopeEmpty, 104 | }, 105 | { 106 | name: "machine ip", 107 | ip: IP{ 108 | ProjectID: "1", 109 | Tags: []string{IpTag(tag.MachineID, "102")}, 110 | }, 111 | expectedScope: ScopeMachine, 112 | }, 113 | { 114 | name: "project ip", 115 | ip: IP{ 116 | ProjectID: "1", 117 | Tags: []string{}, 118 | }, 119 | expectedScope: ScopeProject, 120 | }, 121 | } 122 | 123 | for i := range tests { 124 | tt := tests[i] 125 | t.Run(tt.name, func(t *testing.T) { 126 | if got := tt.ip.GetScope(); got != tt.expectedScope { 127 | t.Errorf("IP.GetScope = %v, want %v", got, tt.expectedScope) 128 | } 129 | }) 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/metal/metal.go: -------------------------------------------------------------------------------- 1 | package metal 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/metal-stack/metal-lib/jwt/sec" 8 | 9 | "github.com/metal-stack/security" 10 | ) 11 | 12 | // These are our supported groups. 
13 | var ( 14 | // View Groupname 15 | ViewGroups = []security.ResourceAccess{ 16 | security.ResourceAccess("k8s_kaas-view"), // FIXME remove legacy, only for compatibility 17 | security.ResourceAccess("maas-all-all-view"), 18 | } 19 | 20 | // Edit Groupname 21 | EditGroups = []security.ResourceAccess{ 22 | security.ResourceAccess("k8s_kaas-edit"), // FIXME remove legacy, only for compatibility 23 | security.ResourceAccess("maas-all-all-edit"), 24 | } 25 | 26 | // Admin Groupname 27 | AdminGroups = []security.ResourceAccess{ 28 | security.ResourceAccess("k8s_kaas-admin"), // FIXME remove legacy, only for compatibility 29 | security.ResourceAccess("maas-all-all-admin"), 30 | } 31 | 32 | // Groups that have view permission 33 | ViewAccess = sec.MergeResourceAccess(ViewGroups, EditGroups, AdminGroups) 34 | // Groups that have edit permission 35 | EditAccess = sec.MergeResourceAccess(EditGroups, AdminGroups) 36 | // Groups that have admin permission 37 | AdminAccess = AdminGroups 38 | ) 39 | 40 | // EventType is the type for event types. 41 | type EventType string 42 | 43 | // NSQTopic . 44 | type NSQTopic struct { 45 | Name string 46 | PartitionAgnostic bool 47 | } 48 | 49 | // Some enums. 50 | const ( 51 | CREATE EventType = "create" 52 | UPDATE EventType = "update" 53 | DELETE EventType = "delete" 54 | COMMAND EventType = "command" 55 | ) 56 | 57 | var ( 58 | TopicMachine = NSQTopic{Name: "machine", PartitionAgnostic: true} 59 | TopicAllocation = NSQTopic{Name: "allocation", PartitionAgnostic: false} 60 | ) 61 | 62 | // Topics is a list of topics of which the metal-api is a producer. 63 | // metal-api will make sure these topics exist when it is started. 64 | var Topics = []NSQTopic{ 65 | TopicMachine, 66 | TopicAllocation, 67 | } 68 | 69 | // GetFQN gets the fully qualified name of a NSQTopic 70 | func (t NSQTopic) GetFQN(partitionID string) string { 71 | if !t.PartitionAgnostic { 72 | return t.Name 73 | } 74 | return fmt.Sprintf("%s-%s", partitionID, t.Name) 75 | } 76 | 77 | // Base implements common fields for most basic entity types (not all). 78 | type Base struct { 79 | ID string `rethinkdb:"id,omitempty" json:"id,omitempty"` 80 | Name string `rethinkdb:"name" json:"name"` 81 | Description string `rethinkdb:"description" json:"description"` 82 | Created time.Time `rethinkdb:"created" json:"created"` 83 | Changed time.Time `rethinkdb:"changed" json:"changed"` 84 | } 85 | 86 | // Entity is an interface that allows metal entities to be created and stored into the database with the generic creation and update functions. 
87 | type Entity interface { 88 | // GetID returns the entity's id 89 | GetID() string 90 | // SetID sets the entity's id 91 | SetID(id string) 92 | // GetChanged returns the entity's changed time 93 | GetChanged() time.Time 94 | // SetChanged sets the entity's changed time 95 | SetChanged(changed time.Time) 96 | // GetCreated returns the entity's creation time 97 | GetCreated() time.Time 98 | // SetCreated sets the entity's creation time 99 | SetCreated(created time.Time) 100 | } 101 | 102 | // GetID returns the ID of the entity 103 | func (b *Base) GetID() string { 104 | return b.ID 105 | } 106 | 107 | // SetID sets the ID of the entity 108 | func (b *Base) SetID(id string) { 109 | b.ID = id 110 | } 111 | 112 | // GetChanged returns the last changed timestamp of the entity 113 | func (b *Base) GetChanged() time.Time { 114 | return b.Changed 115 | } 116 | 117 | // SetChanged sets the last changed timestamp of the entity 118 | func (b *Base) SetChanged(changed time.Time) { 119 | b.Changed = changed 120 | } 121 | 122 | // GetCreated returns the creation timestamp of the entity 123 | func (b *Base) GetCreated() time.Time { 124 | return b.Created 125 | } 126 | 127 | // SetCreated sets the creation timestamp of the entity 128 | func (b *Base) SetCreated(created time.Time) { 129 | b.Created = created 130 | } 131 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/metal/partition.go: -------------------------------------------------------------------------------- 1 | package metal 2 | 3 | // A Partition represents a location. 4 | type Partition struct { 5 | Base 6 | BootConfiguration BootConfiguration `rethinkdb:"bootconfig" json:"bootconfig"` 7 | MgmtServiceAddress string `rethinkdb:"mgmtserviceaddr" json:"mgmtserviceaddr"` 8 | Labels map[string]string `rethinkdb:"labels" json:"labels"` 9 | DNSServers DNSServers `rethinkdb:"dns_servers" json:"dns_servers"` 10 | NTPServers NTPServers `rethinkdb:"ntp_servers" json:"ntp_servers"` 11 | } 12 | 13 | // BootConfiguration defines the metal-hammer initrd, kernel and commandline 14 | type BootConfiguration struct { 15 | ImageURL string `rethinkdb:"imageurl" json:"imageurl"` 16 | KernelURL string `rethinkdb:"kernelurl" json:"kernelurl"` 17 | CommandLine string `rethinkdb:"commandline" json:"commandline"` 18 | } 19 | 20 | // Partitions is a list of partitions. 21 | type Partitions []Partition 22 | 23 | // PartitionMap is an indexed map of partitions 24 | type PartitionMap map[string]Partition 25 | 26 | // ByID creates an indexed map of partitions where the id is the index. 
27 | func (sz Partitions) ByID() PartitionMap { 28 | res := make(PartitionMap) 29 | for i, s := range sz { 30 | res[s.ID] = sz[i] 31 | } 32 | return res 33 | } 34 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/metal/partition_test.go: -------------------------------------------------------------------------------- 1 | package metal 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | ) 7 | 8 | func TestPartitions_ByID(t *testing.T) { 9 | testPartitions := []Partition{ 10 | { 11 | Base: Base{ 12 | ID: "1", 13 | Name: "partition1", 14 | Description: "description 1", 15 | }, 16 | }, 17 | { 18 | Base: Base{ 19 | ID: "2", 20 | Name: "partition2", 21 | Description: "description 2", 22 | }, 23 | }, 24 | { 25 | Base: Base{ 26 | ID: "3", 27 | Name: "partition3", 28 | Description: "description 3", 29 | }, 30 | }, 31 | } 32 | 33 | tests := []struct { 34 | name string 35 | sz Partitions 36 | want PartitionMap 37 | }{ 38 | { 39 | name: "ByID Test 1", 40 | sz: testPartitions, 41 | want: map[string]Partition{testPartitions[0].ID: testPartitions[0], testPartitions[1].ID: testPartitions[1], testPartitions[2].ID: testPartitions[2]}, 42 | }, 43 | } 44 | for i := range tests { 45 | tt := tests[i] 46 | t.Run(tt.name, func(t *testing.T) { 47 | if got := tt.sz.ByID(); !reflect.DeepEqual(got, tt.want) { 48 | t.Errorf("Partitions.ByID() = %v, want %v", got, tt.want) 49 | } 50 | }) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/metal/provisioning_test.go: -------------------------------------------------------------------------------- 1 | package metal 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | ) 7 | 8 | func TestProvisioningEventContainer_Validate(t *testing.T) { 9 | now := time.Now() 10 | tests := []struct { 11 | name string 12 | container ProvisioningEventContainer 13 | wantErr bool 14 | }{ 15 | { 16 | name: "Validate empty container", 17 | container: ProvisioningEventContainer{ 18 | Events: ProvisioningEvents{}, 19 | }, 20 | wantErr: false, 21 | }, 22 | { 23 | name: "Validate sorted and consistent container", 24 | container: ProvisioningEventContainer{ 25 | Events: ProvisioningEvents{ 26 | ProvisioningEvent{ 27 | Time: now.Add(-2 * time.Minute), 28 | }, 29 | ProvisioningEvent{ 30 | Time: now.Add(-3 * time.Minute), 31 | }, 32 | ProvisioningEvent{ 33 | Time: now.Add(-4 * time.Minute), 34 | }, 35 | ProvisioningEvent{ 36 | Time: now.Add(-5 * time.Minute), 37 | }, 38 | }, 39 | LastEventTime: &now, 40 | }, 41 | wantErr: false, 42 | }, 43 | { 44 | name: "Validate container with one event", 45 | container: ProvisioningEventContainer{ 46 | Events: ProvisioningEvents{ 47 | ProvisioningEvent{ 48 | Time: now, 49 | }, 50 | }, 51 | LastEventTime: &now, 52 | }, 53 | wantErr: false, 54 | }, 55 | { 56 | name: "Validate container with empty last event time field", 57 | container: ProvisioningEventContainer{ 58 | Events: ProvisioningEvents{ 59 | ProvisioningEvent{ 60 | Time: now, 61 | }, 62 | }, 63 | }, 64 | wantErr: true, 65 | }, 66 | { 67 | name: "Validate unsorted container", 68 | container: ProvisioningEventContainer{ 69 | Events: ProvisioningEvents{ 70 | ProvisioningEvent{ 71 | Time: now.Add(-2 * time.Minute), 72 | }, 73 | ProvisioningEvent{ 74 | Time: now.Add(-4 * time.Minute), 75 | }, 76 | ProvisioningEvent{ 77 | Time: now.Add(-3 * time.Minute), 78 | }, 79 | ProvisioningEvent{ 80 | Time: now.Add(-5 * time.Minute), 81 | }, 82 | }, 83 | }, 84 | wantErr: true, 85 | }, 86 | { 87 | name: 
"Validate inconsistent last event times", 88 | container: ProvisioningEventContainer{ 89 | Events: ProvisioningEvents{ 90 | ProvisioningEvent{ 91 | Time: now.Add(1 * time.Minute), 92 | }, 93 | ProvisioningEvent{ 94 | Time: now.Add(-3 * time.Minute), 95 | }, 96 | ProvisioningEvent{ 97 | Time: now.Add(-4 * time.Minute), 98 | }, 99 | ProvisioningEvent{ 100 | Time: now.Add(-5 * time.Minute), 101 | }, 102 | }, 103 | LastEventTime: &now, 104 | }, 105 | wantErr: true, 106 | }, 107 | } 108 | for i := range tests { 109 | tt := tests[i] 110 | t.Run(tt.name, func(t *testing.T) { 111 | if err := tt.container.Validate(); (err != nil) != tt.wantErr { 112 | t.Errorf("ProvisioningEventContainer.Validate() error = %v, wantErr %v", err, tt.wantErr) 113 | } 114 | }) 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/metal/size_reservation.go: -------------------------------------------------------------------------------- 1 | package metal 2 | 3 | import ( 4 | "fmt" 5 | "slices" 6 | 7 | mdmv1 "github.com/metal-stack/masterdata-api/api/v1" 8 | ) 9 | 10 | // SizeReservation defines a reservation of a size for machine allocations 11 | type SizeReservation struct { 12 | Base 13 | SizeID string `rethinkdb:"sizeid" json:"sizeid"` 14 | Amount int `rethinkdb:"amount" json:"amount"` 15 | ProjectID string `rethinkdb:"projectid" json:"projectid"` 16 | PartitionIDs []string `rethinkdb:"partitionids" json:"partitionids"` 17 | Labels map[string]string `rethinkdb:"labels" json:"labels"` 18 | } 19 | 20 | type SizeReservations []SizeReservation 21 | 22 | func (rs *SizeReservations) BySize() map[string]SizeReservations { 23 | res := map[string]SizeReservations{} 24 | if rs == nil { 25 | return res 26 | } 27 | 28 | for _, rv := range *rs { 29 | res[rv.SizeID] = append(res[rv.SizeID], rv) 30 | } 31 | 32 | return res 33 | } 34 | 35 | func (rs *SizeReservations) ForPartition(partitionID string) SizeReservations { 36 | if rs == nil { 37 | return nil 38 | } 39 | 40 | var result SizeReservations 41 | for _, r := range *rs { 42 | r := r 43 | if slices.Contains(r.PartitionIDs, partitionID) { 44 | result = append(result, r) 45 | } 46 | } 47 | 48 | return result 49 | } 50 | 51 | func (rs *SizeReservations) Validate(sizes SizeMap, partitions PartitionMap, projects map[string]*mdmv1.Project) error { 52 | if rs == nil { 53 | return nil 54 | } 55 | 56 | for _, r := range *rs { 57 | err := r.Validate(sizes, partitions, projects) 58 | if err != nil { 59 | return err 60 | } 61 | } 62 | 63 | return nil 64 | } 65 | 66 | func (r *SizeReservation) Validate(sizes SizeMap, partitions PartitionMap, projects map[string]*mdmv1.Project) error { 67 | if r.Amount <= 0 { 68 | return fmt.Errorf("amount must be a positive integer") 69 | } 70 | 71 | if _, ok := sizes[r.SizeID]; !ok { 72 | return fmt.Errorf("size must exist before creating a size reservation") 73 | } 74 | 75 | if len(r.PartitionIDs) == 0 { 76 | return fmt.Errorf("at least one partition id must be specified") 77 | } 78 | ids := map[string]bool{} 79 | for _, partition := range r.PartitionIDs { 80 | ids[partition] = true 81 | if _, ok := partitions[partition]; !ok { 82 | return fmt.Errorf("partition must exist before creating a size reservation") 83 | } 84 | } 85 | if len(ids) != len(r.PartitionIDs) { 86 | return fmt.Errorf("partitions must not contain duplicates") 87 | } 88 | 89 | if r.ProjectID == "" { 90 | return fmt.Errorf("project id must be specified") 91 | } 92 | if _, ok := projects[r.ProjectID]; !ok { 93 | return 
fmt.Errorf("project must exist before creating a size reservation") 94 | } 95 | 96 | return nil 97 | } 98 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/metal/sizeimageconstraint.go: -------------------------------------------------------------------------------- 1 | package metal 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/Masterminds/semver/v3" 8 | ) 9 | 10 | // SizeImageConstraint expresses optional restrictions for specific size to image combinations 11 | // this might be required if the support for a specific hardware in a given size is only supported 12 | // with a newer version of the image. 13 | // 14 | // If the size in question is not found, no restrictions apply. 15 | // If the image in question is not found, no restrictions apply as well. 16 | // If the image in question is found, but does not match the given expression, machine creation must be forbidden. 17 | type SizeImageConstraint struct { 18 | Base 19 | // Images a map from imageID to semver compatible matcher string 20 | // example: 21 | // images: 22 | // ubuntu: ">= 20.04.20211011" 23 | // debian: ">= 10.0.20210101" 24 | Images map[string]string `rethinkdb:"images" json:"images"` 25 | } 26 | 27 | // SizeImageConstraints is a slice of ImageConstraint 28 | type SizeImageConstraints []SizeImageConstraint 29 | 30 | func (scs *SizeImageConstraints) Validate() error { 31 | for _, c := range *scs { 32 | err := c.Validate() 33 | if err != nil { 34 | return err 35 | } 36 | } 37 | return nil 38 | } 39 | 40 | func (sc *SizeImageConstraint) Validate() error { 41 | for os, vc := range sc.Images { 42 | // no pure wildcard in images 43 | if os == "*" { 44 | return fmt.Errorf("just '*' is not allowed as image os constraint") 45 | } 46 | // a single "*" is possible 47 | if strings.TrimSpace(vc) == "*" { 48 | continue 49 | } 50 | _, _, err := convertToOpAndVersion(vc) 51 | if err != nil { 52 | return err 53 | } 54 | } 55 | return nil 56 | } 57 | 58 | func (scs *SizeImageConstraints) Matches(size Size, image Image) error { 59 | for _, sc := range *scs { 60 | if sc.ID == size.ID { 61 | return sc.Matches(size, image) 62 | } 63 | } 64 | return nil 65 | } 66 | 67 | func (sc *SizeImageConstraint) Matches(size Size, image Image) error { 68 | if sc.ID != size.ID { 69 | return nil 70 | } 71 | for os, versionconstraint := range sc.Images { 72 | if os != image.OS { 73 | continue 74 | } 75 | version, err := semver.NewVersion(image.Version) 76 | if err != nil { 77 | return fmt.Errorf("version of image is invalid %w", err) 78 | } 79 | 80 | // FIXME is this a valid assumption 81 | if version.Patch() == 0 { 82 | return fmt.Errorf("no patch version given") 83 | } 84 | c, err := semver.NewConstraint(versionconstraint) 85 | if err != nil { 86 | return fmt.Errorf("versionconstraint %s is invalid %w", versionconstraint, err) 87 | } 88 | if !c.Check(version) { 89 | return fmt.Errorf("given size:%s with image:%s does violate image constraint:%s %s", size.ID, image.OS+"-"+image.Version, os, c.String()) 90 | } 91 | } 92 | return nil 93 | } 94 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/emicklei/go-restful/v3" 9 | "github.com/prometheus/client_golang/prometheus" 10 | "google.golang.org/grpc" 11 | ) 12 | 13 | var ( 14 | counter = 
prometheus.NewCounterVec( 15 | prometheus.CounterOpts{ 16 | Namespace: "metal", 17 | Subsystem: "api", 18 | Name: "requests_total", 19 | Help: "A counter for requests to the whole metal api.", 20 | }, 21 | []string{"code", "method"}, 22 | ) 23 | 24 | duration = prometheus.NewHistogramVec( 25 | prometheus.HistogramOpts{ 26 | Namespace: "metal", 27 | Subsystem: "api", 28 | Name: "request_duration_seconds", 29 | Help: "A histogram of latencies for requests.", 30 | Buckets: []float64{.25, .5, 1, 2.5, 5, 10}, 31 | }, 32 | []string{"route", "method"}, 33 | ) 34 | grpcDuration = prometheus.NewHistogramVec( 35 | prometheus.HistogramOpts{ 36 | Namespace: "metal", 37 | Subsystem: "api", 38 | Name: "grpc_request_duration_seconds", 39 | Help: "A histogram of latencies for gRPC requests.", 40 | Buckets: []float64{.25, .5, 1, 2.5, 5, 10}, 41 | }, 42 | []string{"method"}, 43 | ) 44 | ) 45 | 46 | func init() { 47 | prometheus.MustRegister(counter, duration, grpcDuration) 48 | } 49 | 50 | func RestfulMetrics(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) { 51 | n := time.Now() 52 | chain.ProcessFilter(req, resp) 53 | counter.WithLabelValues(fmt.Sprintf("%d", resp.StatusCode()), req.Request.Method).Inc() 54 | duration.WithLabelValues(req.SelectedRoutePath(), req.Request.Method).Observe(time.Since(n).Seconds()) 55 | } 56 | 57 | func GrpcMetrics(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { 58 | n := time.Now() 59 | resp, err = handler(ctx, req) 60 | grpcDuration.WithLabelValues(info.FullMethod).Observe(time.Since(n).Seconds()) 61 | return resp, err 62 | } 63 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/asn.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore" 7 | ) 8 | 9 | const ( 10 | // ASNMin is the minimum asn defined according to 11 | // https://en.wikipedia.org/wiki/Autonomous_system_(Internet) 12 | ASNMin = uint32(4200000000) 13 | 14 | // ASNBase is the offset for all machine ASNs 15 | ASNBase = uint32(4210000000) 16 | 17 | // ASNMax defines the maximum allowed asn 18 | // https://en.wikipedia.org/wiki/Autonomous_system_(Internet) 19 | ASNMax = uint32(4294967294) 20 | ) 21 | 22 | // acquireASN fetches a unique integer by using the existing integer pool and adding to ASNBase 23 | func acquireASN(ds *datastore.RethinkStore) (*uint32, error) { 24 | i, err := ds.GetASNPool().AcquireRandomUniqueInteger() 25 | if err != nil { 26 | return nil, err 27 | } 28 | asn := ASNBase + uint32(i) // nolint:gosec 29 | if asn > ASNMax { 30 | return nil, fmt.Errorf("unable to calculate asn, got an asn larger than ASNMax: %d > %d", asn, ASNMax) 31 | } 32 | return &asn, nil 33 | } 34 | 35 | // releaseASN will release the asn from the integerpool 36 | func releaseASN(ds *datastore.RethinkStore, asn uint32) error { 37 | if asn < ASNBase || asn > ASNMax { 38 | return fmt.Errorf("asn %d must not be smaller than %d or larger than %d", asn, ASNBase, ASNMax) 39 | } 40 | i := uint(asn - ASNBase) 41 | 42 | return ds.GetASNPool().ReleaseUniqueInteger(i) 43 | } 44 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/audit-service.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 
"log/slog" 5 | "net/http" 6 | 7 | v1 "github.com/metal-stack/metal-api/cmd/metal-api/internal/service/v1" 8 | "github.com/metal-stack/metal-lib/auditing" 9 | 10 | restfulspec "github.com/emicklei/go-restful-openapi/v2" 11 | restful "github.com/emicklei/go-restful/v3" 12 | "github.com/metal-stack/metal-lib/httperrors" 13 | ) 14 | 15 | type auditResource struct { 16 | webResource 17 | a auditing.Auditing 18 | } 19 | 20 | func NewAudit(log *slog.Logger, a auditing.Auditing) *restful.WebService { 21 | ir := auditResource{ 22 | webResource: webResource{ 23 | log: log, 24 | }, 25 | a: a, 26 | } 27 | 28 | return ir.webService() 29 | } 30 | 31 | func (r *auditResource) webService() *restful.WebService { 32 | ws := new(restful.WebService) 33 | ws. 34 | Path(BasePath + "v1/audit"). 35 | Consumes(restful.MIME_JSON). 36 | Produces(restful.MIME_JSON) 37 | 38 | tags := []string{"audit"} 39 | 40 | ws.Route(ws.POST("/find"). 41 | To(viewer(r.find)). 42 | Operation("findAuditTraces"). 43 | Doc("find all audit traces that match given properties"). 44 | Metadata(restfulspec.KeyOpenAPITags, tags). 45 | Metadata(auditing.Exclude, true). 46 | Reads(v1.AuditFindRequest{}). 47 | Writes([]v1.AuditResponse{}). 48 | Returns(http.StatusOK, "OK", []v1.AuditResponse{}). 49 | DefaultReturns("Error", httperrors.HTTPErrorResponse{})) 50 | 51 | return ws 52 | } 53 | 54 | func (r *auditResource) find(request *restful.Request, response *restful.Response) { 55 | if r.a == nil { 56 | r.sendError(request, response, httperrors.InternalServerError(featureDisabledErr)) 57 | return 58 | } 59 | 60 | var requestPayload v1.AuditFindRequest 61 | err := request.ReadEntity(&requestPayload) 62 | if err != nil { 63 | r.sendError(request, response, httperrors.BadRequest(err)) 64 | return 65 | } 66 | 67 | backendResult, err := r.a.Search(request.Request.Context(), auditing.EntryFilter{ 68 | Limit: requestPayload.Limit, 69 | From: requestPayload.From, 70 | To: requestPayload.To, 71 | Component: requestPayload.Component, 72 | RequestId: requestPayload.RequestId, 73 | Type: auditing.EntryType(requestPayload.Type), 74 | User: requestPayload.User, 75 | Tenant: requestPayload.Tenant, 76 | Project: requestPayload.Project, 77 | Detail: auditing.EntryDetail(requestPayload.Detail), 78 | Phase: auditing.EntryPhase(requestPayload.Phase), 79 | Path: requestPayload.Path, 80 | ForwardedFor: requestPayload.ForwardedFor, 81 | RemoteAddr: requestPayload.RemoteAddr, 82 | Body: requestPayload.Body, 83 | StatusCode: requestPayload.StatusCode, 84 | Error: requestPayload.Error, 85 | }) 86 | if err != nil { 87 | r.sendError(request, response, httperrors.InternalServerError(err)) 88 | return 89 | } 90 | 91 | result := []*v1.AuditResponse{} 92 | for _, e := range backendResult { 93 | e := e 94 | result = append(result, v1.NewAuditResponse(e)) 95 | } 96 | 97 | r.send(request, response, http.StatusOK, result) 98 | } 99 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/common_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "io" 8 | "net/http" 9 | "net/http/httptest" 10 | "strings" 11 | "testing" 12 | 13 | restful "github.com/emicklei/go-restful/v3" 14 | "github.com/metal-stack/security" 15 | "github.com/stretchr/testify/require" 16 | ) 17 | 18 | //nolint:deadcode,unused 19 | type emptyBody struct{} 20 | 21 | func webRequestPut(t require.TestingT, service *restful.WebService, user 
*security.User, request interface{}, path string, response interface{}) int { 22 | return webRequest(t, http.MethodPut, service, user, request, path, response) 23 | } 24 | 25 | func webRequestPost(t require.TestingT, service *restful.WebService, user *security.User, request interface{}, path string, response interface{}) int { 26 | return webRequest(t, http.MethodPost, service, user, request, path, response) 27 | } 28 | 29 | func webRequestDelete(t require.TestingT, service *restful.WebService, user *security.User, request interface{}, path string, response interface{}) int { 30 | return webRequest(t, http.MethodDelete, service, user, request, path, response) 31 | } 32 | 33 | func webRequestGet(t require.TestingT, service *restful.WebService, user *security.User, request interface{}, path string, response interface{}) int { 34 | return webRequest(t, http.MethodGet, service, user, request, path, response) 35 | } 36 | 37 | func webRequest(t require.TestingT, method string, service *restful.WebService, user *security.User, request interface{}, path string, response interface{}) int { 38 | container := restful.NewContainer().Add(service) 39 | 40 | jsonBody, err := json.Marshal(request) 41 | require.NoError(t, err) 42 | body := io.NopCloser(strings.NewReader(string(jsonBody))) 43 | createReq := httptest.NewRequest(method, path, body) 44 | createReq.Header.Set("Content-Type", "application/json") 45 | 46 | container.Filter(MockAuth(user)) 47 | 48 | w := httptest.NewRecorder() 49 | container.ServeHTTP(w, createReq) 50 | 51 | resp := w.Result() 52 | defer resp.Body.Close() 53 | 54 | err = json.NewDecoder(resp.Body).Decode(response) 55 | require.NoError(t, err) 56 | return resp.StatusCode 57 | } 58 | 59 | func genericWebRequest[E any](t *testing.T, service *restful.WebService, user *security.User, body any, method string, path string) (int, E) { 60 | var encoded []byte 61 | 62 | if body != nil { 63 | var err error 64 | encoded, err = json.Marshal(body) 65 | require.NoError(t, err) 66 | } 67 | 68 | req := httptest.NewRequest(method, path, bytes.NewBuffer(encoded)) 69 | 70 | req.Header.Add("Content-Type", "application/json") 71 | req.Header.Add("Accept", "application/json") 72 | 73 | recorder := httptest.NewRecorder() 74 | 75 | container := restful.NewContainer().Add(service) 76 | container.Filter(MockAuth(user)) 77 | container.ServeHTTP(recorder, req) 78 | 79 | res := recorder.Result() 80 | defer res.Body.Close() 81 | 82 | var got E 83 | err := json.Unmarshal(recorder.Body.Bytes(), &got) 84 | require.NoError(t, err, "unable to parse response into %T: %s", got, recorder.Body.String()) 85 | 86 | return recorder.Code, got 87 | } 88 | 89 | func MockAuth(user *security.User) restful.FilterFunction { 90 | return func(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) { 91 | rq := req.Request 92 | ctx := security.PutUserInContext(context.Background(), user) 93 | req.Request = rq.WithContext(ctx) 94 | chain.ProcessFilter(req, resp) 95 | } 96 | } 97 | 98 | type NopPublisher struct { 99 | } 100 | 101 | func (p NopPublisher) Publish(topic string, data interface{}) error { 102 | return nil 103 | } 104 | 105 | func (p NopPublisher) CreateTopic(topic string) error { 106 | return nil 107 | } 108 | 109 | func (p NopPublisher) Stop() {} 110 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/firmware-service_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 
4 | "sort" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/require" 8 | ) 9 | 10 | func TestInsertRevisions(t *testing.T) { 11 | // given 12 | paths := []string{ 13 | "bucket/v/b/vb1", 14 | "bucket/v/b/vb2", 15 | "bucket/v/b/vb3", 16 | "bucket/v/c/vc1", 17 | "bucket/v/c/vc2", 18 | "bucket/x/y/xy1", 19 | "bucket/x/y/xy2", 20 | } 21 | revisions := make(map[string]map[string][]string) 22 | 23 | // when 24 | for _, path := range paths { 25 | insertRevisions(path, revisions, "v", "b") 26 | } 27 | 28 | // then 29 | require.Len(t, revisions, 1) 30 | boardRevisions, ok := revisions["v"] 31 | require.True(t, ok) 32 | require.Len(t, boardRevisions, 1) 33 | rr, ok := boardRevisions["b"] 34 | require.True(t, ok) 35 | sort.Strings(rr) 36 | require.Equal(t, []string{"vb1", "vb2", "vb3"}, rr) 37 | 38 | // given 39 | revisions = make(map[string]map[string][]string) 40 | 41 | // when 42 | for _, path := range paths { 43 | insertRevisions(path, revisions, "", "b") 44 | } 45 | 46 | // then 47 | require.Len(t, revisions, 1) 48 | boardRevisions, ok = revisions["v"] 49 | require.True(t, ok) 50 | require.Len(t, boardRevisions, 1) 51 | rr, ok = boardRevisions["b"] 52 | require.True(t, ok) 53 | sort.Strings(rr) 54 | require.Equal(t, []string{"vb1", "vb2", "vb3"}, rr) 55 | 56 | // when 57 | for _, path := range paths { 58 | insertRevisions(path, revisions, "v", "") 59 | } 60 | 61 | // then 62 | require.Len(t, revisions, 1) 63 | boardRevisions, ok = revisions["v"] 64 | require.True(t, ok) 65 | require.Len(t, boardRevisions, 2) 66 | rr, ok = boardRevisions["b"] 67 | require.True(t, ok) 68 | sort.Strings(rr) 69 | require.Equal(t, []string{"vb1", "vb2", "vb3"}, rr) 70 | rr, ok = boardRevisions["c"] 71 | require.True(t, ok) 72 | sort.Strings(rr) 73 | require.Equal(t, []string{"vc1", "vc2"}, rr) 74 | 75 | // when 76 | for _, path := range paths { 77 | insertRevisions(path, revisions, "", "") 78 | } 79 | 80 | // then 81 | require.Len(t, revisions, 2) 82 | boardRevisions, ok = revisions["v"] 83 | require.True(t, ok) 84 | require.Len(t, boardRevisions, 2) 85 | rr, ok = boardRevisions["b"] 86 | require.True(t, ok) 87 | sort.Strings(rr) 88 | require.Equal(t, []string{"vb1", "vb2", "vb3"}, rr) 89 | rr, ok = boardRevisions["c"] 90 | require.True(t, ok) 91 | sort.Strings(rr) 92 | require.Equal(t, []string{"vc1", "vc2"}, rr) 93 | 94 | boardRevisions, ok = revisions["x"] 95 | require.True(t, ok) 96 | require.Len(t, boardRevisions, 1) 97 | rr, ok = boardRevisions["y"] 98 | require.True(t, ok) 99 | sort.Strings(rr) 100 | require.Equal(t, []string{"xy1", "xy2"}, rr) 101 | } 102 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/image-service_integration_test.go: -------------------------------------------------------------------------------- 1 | //go:build integration 2 | // +build integration 3 | 4 | package service 5 | 6 | import ( 7 | "context" 8 | "encoding/json" 9 | "io" 10 | "log/slog" 11 | "net/http" 12 | "net/http/httptest" 13 | "os" 14 | "strings" 15 | "testing" 16 | 17 | restful "github.com/emicklei/go-restful/v3" 18 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore" 19 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 20 | v1 "github.com/metal-stack/metal-api/cmd/metal-api/internal/service/v1" 21 | "github.com/metal-stack/metal-api/test" 22 | "github.com/stretchr/testify/assert" 23 | "github.com/stretchr/testify/require" 24 | ) 25 | 26 | func TestGetImagesIntegration(t *testing.T) { 27 | rethinkContainer, c, err := 
test.StartRethink(t) 28 | require.NoError(t, err) 29 | defer func() { 30 | _ = rethinkContainer.Terminate(context.Background()) 31 | }() 32 | 33 | log := slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelError})) 34 | 35 | ds := datastore.New(log, c.IP+":"+c.Port, c.DB, c.User, c.Password) 36 | ds.VRFPoolRangeMax = 1000 37 | ds.ASNPoolRangeMax = 1000 38 | 39 | err = ds.Connect() 40 | require.NoError(t, err) 41 | err = ds.Initialize() 42 | require.NoError(t, err) 43 | 44 | imageservice := NewImage(log, ds) 45 | container := restful.NewContainer().Add(imageservice) 46 | 47 | imageID := "test-image-1.0.0" 48 | imageName := "testimage" 49 | imageDesc := "Test Image" 50 | newImage := v1.ImageCreateRequest{ 51 | Common: v1.Common{ 52 | Identifiable: v1.Identifiable{ 53 | ID: imageID, 54 | }, 55 | Describable: v1.Describable{ 56 | Name: &imageName, 57 | Description: &imageDesc, 58 | }, 59 | }, 60 | URL: "https://www.google.com", // not good to rely on this page 61 | Features: []string{string(metal.ImageFeatureMachine)}, 62 | } 63 | 64 | ji, err := json.Marshal(newImage) 65 | require.NoError(t, err) 66 | body := io.NopCloser(strings.NewReader(string(ji))) 67 | createReq := httptest.NewRequest(http.MethodPut, "/v1/image", body) 68 | createReq.Header.Set("Content-Type", "application/json") 69 | 70 | container = injectAdmin(log, container, createReq) 71 | w := httptest.NewRecorder() 72 | container.ServeHTTP(w, createReq) 73 | 74 | resp := w.Result() 75 | defer resp.Body.Close() 76 | require.Equal(t, http.StatusCreated, resp.StatusCode, w.Body.String()) 77 | var result v1.ImageResponse 78 | err = json.NewDecoder(resp.Body).Decode(&result) 79 | require.NoError(t, err) 80 | assert.Equal(t, newImage.ID, result.ID) 81 | assert.Equal(t, newImage.Name, result.Name) 82 | assert.Equal(t, newImage.Description, result.Description) 83 | assert.Equal(t, newImage.URL, *result.URL) 84 | require.Len(t, result.Features, 1) 85 | assert.Equal(t, newImage.Features[0], result.Features[0]) 86 | } 87 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/s3client/s3client.go: -------------------------------------------------------------------------------- 1 | package s3client 2 | 3 | import ( 4 | "github.com/aws/aws-sdk-go/aws" 5 | "github.com/aws/aws-sdk-go/aws/client" 6 | "github.com/aws/aws-sdk-go/aws/credentials" 7 | "github.com/aws/aws-sdk-go/aws/session" 8 | "github.com/aws/aws-sdk-go/service/s3" 9 | "time" 10 | ) 11 | 12 | type Client struct { 13 | *s3.S3 14 | Url string 15 | Key string 16 | Secret string 17 | FirmwareBucket string 18 | } 19 | 20 | func New(url, key, secret, firmwareBucket string) (*Client, error) { 21 | c := &Client{ 22 | Url: url, 23 | Key: key, 24 | Secret: secret, 25 | FirmwareBucket: firmwareBucket, 26 | } 27 | s, err := c.newSession() 28 | if err != nil { 29 | return nil, err 30 | } 31 | c.S3 = s3.New(s) 32 | return c, nil 33 | } 34 | 35 | func (c *Client) newSession() (client.ConfigProvider, error) { 36 | dummyRegion := "dummy" // we don't use AWS S3, we don't need a proper region 37 | hostnameImmutable := true 38 | return session.NewSession(&aws.Config{ 39 | Region: &dummyRegion, 40 | Endpoint: &c.Url, 41 | Credentials: credentials.NewStaticCredentials(c.Key, c.Secret, ""), 42 | S3ForcePathStyle: &hostnameImmutable, 43 | Retryer: client.DefaultRetryer{ 44 | NumMaxRetries: 3, 45 | MinRetryDelay: 10 * time.Second, 46 | }, 47 | }) 48 | } 49 | 
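
A brief usage sketch for the S3 client above. The endpoint, credentials, and bucket name are illustrative placeholders, and because the package lives under internal/ it can only be imported from within this module; ListObjectsV2 works here because Client embeds *s3.S3 and therefore exposes the full aws-sdk-go API surface.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"

	"github.com/metal-stack/metal-api/cmd/metal-api/internal/service/s3client"
)

func main() {
	// placeholder endpoint and credentials for an S3-compatible store
	c, err := s3client.New("http://localhost:9000", "access-key", "secret-key", "metal-firmware")
	if err != nil {
		log.Fatal(err)
	}

	// list the contents of the configured firmware bucket via the embedded SDK client
	out, err := c.ListObjectsV2(&s3.ListObjectsV2Input{Bucket: aws.String(c.FirmwareBucket)})
	if err != nil {
		log.Fatal(err)
	}
	for _, obj := range out.Contents {
		fmt.Println(aws.StringValue(obj.Key))
	}
}
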
-------------------------------------------------------------------------------- /cmd/metal-api/internal/service/service_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "io" 7 | "log/slog" 8 | "net/http" 9 | "net/http/httptest" 10 | "testing" 11 | "time" 12 | 13 | "github.com/stretchr/testify/require" 14 | 15 | "github.com/emicklei/go-restful/v3" 16 | "github.com/metal-stack/metal-lib/httperrors" 17 | "github.com/metal-stack/metal-lib/rest" 18 | "github.com/metal-stack/security" 19 | ) 20 | 21 | var testUserDirectory = NewUserDirectory("") 22 | 23 | func injectViewer(log *slog.Logger, container *restful.Container, rq *http.Request) *restful.Container { 24 | return injectUser(log, testUserDirectory.viewer, container, rq) 25 | } 26 | 27 | func injectEditor(log *slog.Logger, container *restful.Container, rq *http.Request) *restful.Container { 28 | return injectUser(log, testUserDirectory.edit, container, rq) 29 | } 30 | 31 | func injectAdmin(log *slog.Logger, container *restful.Container, rq *http.Request) *restful.Container { 32 | return injectUser(log, testUserDirectory.admin, container, rq) 33 | } 34 | 35 | func injectUser(log *slog.Logger, u security.User, container *restful.Container, rq *http.Request) *restful.Container { 36 | hma := security.NewHMACAuth(u.Name, []byte{1, 2, 3}, security.WithUser(u)) 37 | usergetter := security.NewCreds(security.WithHMAC(hma)) 38 | container.Filter(rest.UserAuth(usergetter, log)) // FIXME 39 | var body []byte 40 | if rq.Body != nil { 41 | data, _ := io.ReadAll(rq.Body) 42 | body = data 43 | rq.Body.Close() 44 | rq.Body = io.NopCloser(bytes.NewReader(data)) 45 | } 46 | hma.AddAuth(rq, time.Now(), body) 47 | return container 48 | } 49 | 50 | func TestTenantEnsurer(t *testing.T) { 51 | e := NewTenantEnsurer(slog.Default(), []string{"pvdr", "Pv", "pv-DR"}, nil) 52 | require.True(t, e.allowed("pvdr")) 53 | require.True(t, e.allowed("Pv")) 54 | require.True(t, e.allowed("pv")) 55 | require.True(t, e.allowed("pv-DR")) 56 | require.True(t, e.allowed("PV-DR")) 57 | require.True(t, e.allowed("PV-dr")) 58 | require.False(t, e.allowed("")) 59 | require.False(t, e.allowed("abc")) 60 | } 61 | 62 | func TestAllowedPathSuffixes(t *testing.T) { 63 | foo := func(req *restful.Request, resp *restful.Response) { 64 | _ = resp.WriteHeaderAndEntity(http.StatusOK, nil) 65 | } 66 | 67 | e := NewTenantEnsurer(slog.Default(), []string{"a", "b", "c"}, []string{"health", "liveliness"}) 68 | ws := new(restful.WebService).Path("/").Consumes(restful.MIME_JSON).Produces(restful.MIME_JSON) 69 | ws.Filter(e.EnsureAllowedTenantFilter) 70 | health := ws.GET("health").To(foo).Returns(http.StatusOK, "OK", nil).DefaultReturns("Error", httperrors.HTTPErrorResponse{}) 71 | liveliness := ws.GET("liveliness").To(foo).Returns(http.StatusOK, "OK", nil).DefaultReturns("Error", httperrors.HTTPErrorResponse{}) 72 | machine := ws.GET("machine").To(foo).Returns(http.StatusOK, "OK", nil).DefaultReturns("Error", httperrors.HTTPErrorResponse{}) 73 | ws.Route(health) 74 | ws.Route(liveliness) 75 | ws.Route(machine) 76 | restful.DefaultContainer.Add(ws) 77 | 78 | // health must be allowed without tenant check 79 | httpRequest, _ := http.NewRequestWithContext(context.TODO(), "GET", "http://localhost/health", nil) 80 | httpRequest.Header.Set("Accept", "application/json") 81 | httpWriter := httptest.NewRecorder() 82 | 83 | restful.DefaultContainer.Dispatch(httpWriter, httpRequest) 84 | 85 | 
require.Equal(t, http.StatusOK, httpWriter.Code) 86 | 87 | // liveliness must be allowed without tenant check 88 | httpRequest, _ = http.NewRequestWithContext(context.TODO(), "GET", "http://localhost/liveliness", nil) 89 | httpRequest.Header.Set("Accept", "application/json") 90 | httpWriter = httptest.NewRecorder() 91 | 92 | restful.DefaultContainer.Dispatch(httpWriter, httpRequest) 93 | 94 | require.Equal(t, http.StatusOK, httpWriter.Code) 95 | 96 | // machine must not be allowed without tenant check 97 | httpRequest, _ = http.NewRequestWithContext(context.TODO(), "GET", "http://localhost/machine", nil) 98 | httpRequest.Header.Set("Accept", "application/json") 99 | httpWriter = httptest.NewRecorder() 100 | 101 | restful.DefaultContainer.Dispatch(httpWriter, httpRequest) 102 | 103 | require.Equal(t, http.StatusForbidden, httpWriter.Code) 104 | } 105 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/user-service.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "fmt" 5 | "log/slog" 6 | "net/http" 7 | 8 | v1 "github.com/metal-stack/metal-api/cmd/metal-api/internal/service/v1" 9 | "github.com/metal-stack/security" 10 | 11 | restfulspec "github.com/emicklei/go-restful-openapi/v2" 12 | restful "github.com/emicklei/go-restful/v3" 13 | "github.com/metal-stack/metal-lib/httperrors" 14 | ) 15 | 16 | type userResource struct { 17 | webResource 18 | userGetter security.UserGetter 19 | } 20 | 21 | // NewUser returns a webservice for user specific endpoints. 22 | func NewUser(log *slog.Logger, userGetter security.UserGetter) *restful.WebService { 23 | r := userResource{ 24 | webResource: webResource{ 25 | log: log, 26 | }, 27 | userGetter: userGetter, 28 | } 29 | return r.webService() 30 | } 31 | 32 | func (r *userResource) webService() *restful.WebService { 33 | ws := new(restful.WebService) 34 | ws. 35 | Path(BasePath + "v1/user"). 36 | Consumes(restful.MIME_JSON). 37 | Produces(restful.MIME_JSON) 38 | 39 | tags := []string{"user"} 40 | 41 | ws.Route(ws.GET("/me"). 42 | To(viewer(r.getMe)). 43 | Operation("getMe"). 44 | Doc("extract the connecting user from auth header"). 45 | Metadata(restfulspec.KeyOpenAPITags, tags). 46 | Writes(v1.User{}). 47 | Returns(http.StatusOK, "OK", v1.User{}). 
48 | DefaultReturns("Error", httperrors.HTTPErrorResponse{})) 49 | 50 | return ws 51 | } 52 | 53 | func (r *userResource) getMe(request *restful.Request, response *restful.Response) { 54 | u, err := r.userGetter.User(request.Request) 55 | if err != nil { 56 | r.sendError(request, response, httperrors.UnprocessableEntity(err)) 57 | return 58 | } 59 | 60 | if u == nil { 61 | r.sendError(request, response, httperrors.BadRequest(fmt.Errorf("unable to extract user from token, got nil"))) 62 | return 63 | } 64 | 65 | grps := []string{} 66 | for _, g := range u.Groups { 67 | grps = append(grps, string(g)) 68 | } 69 | user := &v1.User{ 70 | EMail: u.EMail, 71 | Name: u.Name, 72 | Tenant: u.Tenant, 73 | Issuer: u.Issuer, 74 | Subject: u.Subject, 75 | Groups: grps, 76 | } 77 | 78 | r.send(request, response, http.StatusOK, user) 79 | } 80 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/v1/audit.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/metal-stack/metal-lib/auditing" 9 | ) 10 | 11 | type AuditFindRequest struct { 12 | Limit int64 `json:"limit" optional:"true"` 13 | 14 | From time.Time `json:"from" optional:"true"` 15 | To time.Time `json:"to" optional:"true"` 16 | 17 | Component string `json:"component" optional:"true"` 18 | RequestId string `json:"rqid" optional:"true"` 19 | Type string `json:"type" optional:"true"` 20 | 21 | User string `json:"user" optional:"true"` 22 | Tenant string `json:"tenant" optional:"true"` 23 | Project string `json:"project" optional:"true"` 24 | 25 | Detail string `json:"detail" optional:"true"` 26 | Phase string `json:"phase" optional:"true"` 27 | 28 | Path string `json:"path" optional:"true"` 29 | ForwardedFor string `json:"forwarded_for" optional:"true"` 30 | RemoteAddr string `json:"remote_addr" optional:"true"` 31 | 32 | Body string `json:"body" optional:"true"` 33 | StatusCode int `json:"status_code" optional:"true"` 34 | 35 | Error string `json:"error" optional:"true"` 36 | } 37 | 38 | type AuditResponse struct { 39 | Component string `json:"component" optional:"true"` 40 | RequestId string `json:"rqid" optional:"true"` 41 | Type string `json:"type" optional:"true"` 42 | Timestamp time.Time `json:"timestamp" optional:"true"` 43 | 44 | User string `json:"user" optional:"true"` 45 | Tenant string `json:"tenant" optional:"true"` 46 | 47 | // HTTP method get, post, put, delete, ... 48 | // or for grpc unary, stream 49 | Detail string `json:"detail" optional:"true"` 50 | // e.g. Request, Response, Error, Opened, Close 51 | Phase string `json:"phase" optional:"true"` 52 | // /api/v1/... 
or the method name 53 | Path string `json:"path" optional:"true"` 54 | ForwardedFor string `json:"forwarded_for" optional:"true"` 55 | RemoteAddr string `json:"remote_addr" optional:"true"` 56 | 57 | Body string `json:"body" optional:"true"` 58 | StatusCode int `json:"status_code" optional:"true"` 59 | 60 | // Internal errors 61 | Error string `json:"error" optional:"true"` 62 | } 63 | 64 | func NewAuditResponse(e auditing.Entry) *AuditResponse { 65 | body := "" 66 | switch v := e.Body.(type) { 67 | case string: 68 | body = v 69 | case []byte: 70 | body = string(v) 71 | default: 72 | b, err := json.Marshal(v) 73 | if err == nil { 74 | body = string(b) 75 | } else { 76 | body = fmt.Sprintf("unknown body: %v", v) 77 | } 78 | } 79 | errStr := "" 80 | if e.Error != nil { 81 | b, err := json.Marshal(e.Error) 82 | if err == nil { 83 | errStr = string(b) 84 | } else { 85 | errStr = fmt.Sprintf("unknown error: %v", e.Error) 86 | } 87 | } 88 | 89 | return &AuditResponse{ 90 | Component: e.Component, 91 | RequestId: e.RequestId, 92 | Type: string(e.Type), 93 | Timestamp: e.Timestamp, 94 | User: e.User, 95 | Tenant: e.Tenant, 96 | Detail: string(e.Detail), 97 | Phase: string(e.Phase), 98 | Path: e.Path, 99 | ForwardedFor: e.ForwardedFor, 100 | RemoteAddr: e.RemoteAddr, 101 | Body: body, 102 | StatusCode: e.StatusCode, 103 | Error: errStr, 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/v1/common.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | // EmptyBody is useful because with go-restful you cannot define an insert / update endpoint 8 | // without specifying a payload for reading. It would otherwise immediately intercept the request and 9 | // return 406: Not Acceptable to the client.
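// A hypothetical usage sketch (route and path invented for illustration):
//
//	ws.Route(ws.POST("/restart").Reads(EmptyBody{}).Returns(http.StatusOK, "OK", nil))
//
// declaring Reads(EmptyBody{}) keeps content negotiation working for writes that carry no payload.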
10 | type EmptyBody struct{} 11 | 12 | type Identifiable struct { 13 | ID string `json:"id" description:"the unique ID of this entity" required:"true"` 14 | } 15 | 16 | type Describable struct { 17 | Name *string `json:"name,omitempty" description:"a readable name for this entity" optional:"true"` 18 | Description *string `json:"description,omitempty" description:"a description for this entity" optional:"true"` 19 | } 20 | 21 | type Common struct { 22 | Identifiable 23 | Describable 24 | } 25 | 26 | type Timestamps struct { 27 | Created time.Time `json:"created" description:"the creation time of this entity" readOnly:"true" optional:"true"` 28 | Changed time.Time `json:"changed" description:"the last changed timestamp of this entity" readOnly:"true" optional:"true"` 29 | } 30 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/v1/firewall.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | type FirewallCreateRequest struct { 4 | MachineAllocateRequest 5 | FirewallAllocateRequest 6 | } 7 | 8 | type FirewallAllocateRequest struct { 9 | FirewallRules *FirewallRules `json:"firewall_rules" description:"optional egress and ingress firewall rules to deploy during firewall allocation" optional:"true"` 10 | } 11 | 12 | type FirewallEgressRule struct { 13 | Protocol string `json:"protocol,omitempty" description:"the protocol for the rule, defaults to tcp" enum:"tcp|udp" optional:"true"` 14 | Ports []int `json:"ports" description:"the ports affected by this rule"` 15 | To []string `json:"to" description:"the cidrs affected by this rule"` 16 | Comment string `json:"comment,omitempty" description:"an optional comment describing what this rule is used for" optional:"true"` 17 | } 18 | 19 | type FirewallIngressRule struct { 20 | Protocol string `json:"protocol,omitempty" description:"the protocol for the rule, defaults to tcp" enum:"tcp|udp" optional:"true"` 21 | Ports []int `json:"ports" description:"the ports affected by this rule"` 22 | To []string `json:"to,omitempty" description:"the cidrs affected by this rule" optional:"true"` 23 | From []string `json:"from" description:"the cidrs affected by this rule"` 24 | Comment string `json:"comment,omitempty" description:"an optional comment describing what this rule is used for" optional:"true"` 25 | } 26 | 27 | type FirewallResponse struct { 28 | MachineResponse 29 | } 30 | 31 | type FirewallFindRequest struct { 32 | MachineFindRequest 33 | } 34 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/v1/firmware.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | type Firmware struct { 4 | Vendor string 5 | Board string 6 | BmcVersion string 7 | BiosVersion string 8 | Revision string 9 | } 10 | 11 | type FirmwaresResponse struct { 12 | Revisions map[string]VendorRevisions `json:"revisions" description:"list of firmwares per board per vendor per kind"` 13 | } 14 | 15 | type VendorRevisions struct { 16 | VendorRevisions map[string]BoardRevisions 17 | } 18 | 19 | type BoardRevisions struct { 20 | BoardRevisions map[string][]string 21 | } 22 | 23 | type MachineUpdateFirmwareRequest struct { 24 | Kind string `json:"kind" enum:"bios|bmc" description:"the firmware kind, i.e. 
[bios|bmc]"` 25 | Revision string `json:"revision" description:"the update revision"` 26 | Description string `json:"description" description:"a description of why the machine has been updated"` 27 | } 28 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/v1/image.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore" 7 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 8 | ) 9 | 10 | type ImageBase struct { 11 | URL *string `json:"url" modelDescription:"an image that can be attached to a machine" description:"the url of this image" optional:"true"` 12 | Features []string `json:"features" description:"features of this image" optional:"true"` 13 | ExpirationDate time.Time `json:"expirationDate" description:"expirationDate of this image" optional:"false"` 14 | Classification string `json:"classification" description:"classification of this image" optional:"true"` 15 | UsedBy []string `json:"usedby" description:"machines where this image is in use" optional:"true"` 16 | } 17 | 18 | type ImageCreateRequest struct { 19 | Common 20 | URL string `json:"url" description:"the url of this image"` 21 | Features []string `json:"features" description:"features of this image" optional:"true"` 22 | ExpirationDate *time.Time `json:"expirationDate" description:"expirationDate of this image" optional:"true"` 23 | Classification *string `json:"classification" description:"classification of this image" optional:"true"` 24 | } 25 | 26 | type ImageUpdateRequest struct { 27 | Common 28 | ImageBase 29 | ExpirationDate *time.Time `json:"expirationDate" description:"expirationDate of this image" optional:"true"` 30 | Classification *string `json:"classification" description:"classification of this image" optional:"true"` 31 | } 32 | 33 | // ImageFindRequest is used to find an image with different criteria.
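// It simply embeds datastore.ImageSearchQuery, so the find endpoint accepts the
// full set of search fields known to the datastore layer without re-mapping.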
34 | type ImageFindRequest struct { 35 | datastore.ImageSearchQuery 36 | } 37 | 38 | type ImageResponse struct { 39 | Common 40 | ImageBase 41 | Timestamps 42 | } 43 | 44 | func NewImageResponse(img *metal.Image) *ImageResponse { 45 | if img == nil { 46 | return nil 47 | } 48 | features := []string{} 49 | for k, v := range img.Features { 50 | if v { 51 | features = append(features, string(k)) 52 | } 53 | } 54 | return &ImageResponse{ 55 | Common: Common{ 56 | Identifiable: Identifiable{ 57 | ID: img.ID, 58 | }, 59 | Describable: Describable{ 60 | Name: &img.Name, 61 | Description: &img.Description, 62 | }, 63 | }, 64 | ImageBase: ImageBase{ 65 | URL: &img.URL, 66 | Features: features, 67 | ExpirationDate: img.ExpirationDate, 68 | Classification: string(img.Classification), 69 | }, 70 | Timestamps: Timestamps{ 71 | Created: img.Created, 72 | Changed: img.Changed, 73 | }, 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/v1/ip.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore" 5 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 6 | ) 7 | 8 | type IPBase struct { 9 | ProjectID string `json:"projectid" description:"the project this ip address belongs to"` 10 | NetworkID string `json:"networkid" description:"the network this ip address belongs to"` 11 | Type metal.IPType `json:"type" enum:"static|ephemeral" description:"the ip type, ephemeral leads to automatic cleanup of the ip address, static will enable re-use of the ip at a later point in time"` 12 | Tags []string `json:"tags" description:"free tags that you associate with this ip." optional:"true"` 13 | } 14 | 15 | type IPIdentifiable struct { 16 | IPAddress string `json:"ipaddress" modelDescription:"an ip address that can be attached to a machine" description:"the address (ipv4 or ipv6) of this ip" readonly:"true"` 17 | AllocationUUID string `json:"allocationuuid" description:"a unique identifier for this ip address allocation, can be used to distinguish between ip address allocations over time." readonly:"true"` 18 | } 19 | 20 | type IPAllocateRequest struct { 21 | Describable 22 | IPBase 23 | MachineID *string `json:"machineid" description:"the machine id this ip should be associated with" optional:"true"` 24 | AddressFamily *metal.AddressFamily `json:"addressfamily,omitempty" description:"the addressfamily to allocate an ip address from the given network, defaults to IPv4" enum:"IPv4|IPv6" optional:"true"` 25 | } 26 | 27 | type IPUpdateRequest struct { 28 | IPAddress string `json:"ipaddress" modelDescription:"an ip address that can be attached to a machine" description:"the address (ipv4 or ipv6) of this ip" readonly:"true"` 29 | Describable 30 | Type metal.IPType `json:"type" enum:"static|ephemeral" description:"the ip type, ephemeral leads to automatic cleanup of the ip address, static will enable re-use of the ip at a later point in time"` 31 | Tags []string `json:"tags" description:"free tags that you associate with this ip."
optional:"true"` 32 | } 33 | 34 | type IPFindRequest struct { 35 | datastore.IPSearchQuery 36 | } 37 | 38 | type IPResponse struct { 39 | Describable 40 | IPBase 41 | IPIdentifiable 42 | Timestamps 43 | } 44 | 45 | func NewIPResponse(ip *metal.IP) *IPResponse { 46 | tags := ip.Tags 47 | if tags == nil { 48 | tags = []string{} 49 | } 50 | return &IPResponse{ 51 | Describable: Describable{ 52 | Name: &ip.Name, 53 | Description: &ip.Description, 54 | }, 55 | IPBase: IPBase{ 56 | NetworkID: ip.NetworkID, 57 | ProjectID: ip.ProjectID, 58 | Type: ip.Type, 59 | Tags: tags, 60 | }, 61 | IPIdentifiable: IPIdentifiable{ 62 | IPAddress: ip.IPAddress, 63 | AllocationUUID: ip.AllocationUUID, 64 | }, 65 | Timestamps: Timestamps{ 66 | Created: ip.Created, 67 | Changed: ip.Changed, 68 | }, 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/v1/sizeimageconstraint.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 4 | 5 | type SizeImageConstraintBase struct { 6 | Images map[string]string `json:"images" description:"a list of images to which these constraints apply"` 7 | } 8 | 9 | type SizeImageConstraintResponse struct { 10 | Common 11 | SizeImageConstraintBase `json:"constraints" description:"a list of constraints that apply for this size"` 12 | } 13 | 14 | type SizeImageConstraintCreateRequest struct { 15 | Common 16 | SizeImageConstraintBase `json:"constraints" description:"a list of constraints that apply for this size" optional:"true"` 17 | } 18 | type SizeImageConstraintTryRequest struct { 19 | SizeID string `json:"size"` 20 | ImageID string `json:"image"` 21 | } 22 | type SizeImageConstraintUpdateRequest struct { 23 | Common 24 | SizeImageConstraintBase `json:"constraints" description:"a list of constraints that apply for this size" optional:"true"` 25 | } 26 | 27 | func NewSizeImageConstraint(s SizeImageConstraintCreateRequest) *metal.SizeImageConstraint { 28 | var ( 29 | name string 30 | description string 31 | ) 32 | if s.Common.Describable.Name != nil { 33 | name = *s.Common.Describable.Name 34 | } 35 | if s.Common.Describable.Description != nil { 36 | description = *s.Common.Describable.Description 37 | } 38 | return &metal.SizeImageConstraint{ 39 | Base: metal.Base{ 40 | ID: s.ID, 41 | Name: name, 42 | Description: description, 43 | }, 44 | Images: s.Images, 45 | } 46 | } 47 | 48 | func NewSizeImageConstraintResponse(s *metal.SizeImageConstraint) *SizeImageConstraintResponse { 49 | return &SizeImageConstraintResponse{ 50 | Common: Common{ 51 | Identifiable: Identifiable{ID: s.ID}, 52 | Describable: Describable{Name: &s.Name, Description: &s.Description}, 53 | }, 54 | SizeImageConstraintBase: SizeImageConstraintBase{ 55 | Images: s.Images, 56 | }, 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/v1/user.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | type User struct { 4 | EMail string 5 | Name string 6 | Groups []string 7 | Tenant string 8 | Issuer string 9 | Subject string 10 | } 11 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/v1/vpn.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import "time" 4 | 5 | type VPNResponse struct { 6 | Address
string `json:"address" description:"address of VPN's control plane"` 7 | AuthKey string `json:"auth_key" description:"auth key to connect to the VPN"` 8 | } 9 | 10 | type VPNRequest struct { 11 | Pid string `json:"pid" description:"project ID"` 12 | Ephemeral bool `json:"ephemeral" description:"specifies if auth key should be ephemeral"` 13 | Expiration *time.Duration `json:"expiration" description:"expiration time" optional:"true"` 14 | } 15 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/vpn-service_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "log/slog" 6 | "testing" 7 | 8 | "github.com/google/go-cmp/cmp" 9 | headscalev1 "github.com/juanfont/headscale/gen/go/headscale/v1" 10 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore" 11 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 12 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/testdata" 13 | "github.com/metal-stack/metal-lib/pkg/testcommon" 14 | r "gopkg.in/rethinkdb/rethinkdb-go.v6" 15 | ) 16 | 17 | func Test_EvaluateVPNConnected(t *testing.T) { 18 | tests := []struct { 19 | name string 20 | mockFn func(mock *r.Mock) 21 | headscaleMachines []*headscalev1.Node 22 | wantErr error 23 | }{ 24 | { 25 | name: "machines are correctly evaluated", 26 | mockFn: func(mock *r.Mock) { 27 | mock.On(r.DB("mockdb").Table("machine")).Return(metal.Machines{ 28 | { 29 | Base: metal.Base{ 30 | ID: "toggle", 31 | }, 32 | Allocation: &metal.MachineAllocation{ 33 | Project: "p1", 34 | VPN: &metal.MachineVPN{ 35 | Connected: false, 36 | }, 37 | }, 38 | }, 39 | { 40 | Base: metal.Base{ 41 | ID: "already-connected", 42 | }, 43 | Allocation: &metal.MachineAllocation{ 44 | Project: "p2", 45 | VPN: &metal.MachineVPN{ 46 | Connected: true, 47 | }, 48 | }, 49 | }, 50 | { 51 | Base: metal.Base{ 52 | ID: "no-vpn", 53 | }, 54 | Allocation: &metal.MachineAllocation{ 55 | Project: "p3", 56 | }, 57 | }, 58 | }, nil) 59 | 60 | // unfortunately, it's too hard to check the replace exactly for specific fields... 
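// r.MockAnything() matches any replacement document, so the expectation
// below only pins down that the "toggle" machine is written back at all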
61 | mock.On(r.DB("mockdb").Table("machine").Get("toggle").Replace(r.MockAnything())).Return(testdata.EmptyResult, nil) 62 | }, 63 | headscaleMachines: []*headscalev1.Node{ 64 | { 65 | Name: "toggle", 66 | User: &headscalev1.User{ 67 | Name: "previous-allocation", 68 | }, 69 | Online: false, 70 | }, 71 | { 72 | Name: "toggle", 73 | User: &headscalev1.User{ 74 | Name: "p1", 75 | }, 76 | Online: true, 77 | }, 78 | { 79 | Name: "already-connected", 80 | User: &headscalev1.User{ 81 | Name: "p2", 82 | }, 83 | Online: true, 84 | }, 85 | }, 86 | wantErr: nil, 87 | }, 88 | } 89 | for _, tt := range tests { 90 | t.Run(tt.name, func(t *testing.T) { 91 | ds, mock := datastore.InitMockDB(t) 92 | if tt.mockFn != nil { 93 | tt.mockFn(mock) 94 | } 95 | 96 | err := EvaluateVPNConnected(slog.Default(), ds, &headscaleTest{ms: tt.headscaleMachines}) 97 | if diff := cmp.Diff(tt.wantErr, err, testcommon.ErrorStringComparer()); diff != "" { 98 | t.Errorf("error diff (-want +got):\n%s", diff) 99 | } 100 | 101 | mock.AssertExpectations(t) 102 | }) 103 | } 104 | } 105 | 106 | type headscaleTest struct { 107 | ms []*headscalev1.Node 108 | } 109 | 110 | func (h *headscaleTest) NodesConnected(ctx context.Context) ([]*headscalev1.Node, error) { 111 | return h.ms, nil 112 | } 113 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/service/vrf.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore" 5 | ) 6 | 7 | // acquireRandomVRF will grab a unique but random vrf out of the vrfintegerpool 8 | func acquireRandomVRF(ds *datastore.RethinkStore) (*uint, error) { 9 | vrf, err := ds.GetVRFPool().AcquireRandomUniqueInteger() 10 | return &vrf, err 11 | } 12 | 13 | // acquireVRF will acquire the given vrf out of the vrfintegerpool; if it is not available, an error is returned 14 | func acquireVRF(ds *datastore.RethinkStore, vrf uint) error { 15 | _, err := ds.GetVRFPool().AcquireUniqueInteger(vrf) 16 | return err 17 | } 18 | 19 | // releaseVRF will return the given vrf to the vrfintegerpool for reuse 20 | func releaseVRF(ds *datastore.RethinkStore, vrf uint) error { 21 | return ds.GetVRFPool().ReleaseUniqueInteger(vrf) 22 | } 23 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/tags/tags.go: -------------------------------------------------------------------------------- 1 | package tags 2 | 3 | import ( 4 | "sort" 5 | "strings" 6 | ) 7 | 8 | // Tags holds tags. 9 | type Tags struct { 10 | tags []string 11 | } 12 | 13 | // New creates a new Tags instance. 14 | func New(tags []string) *Tags { 15 | return &Tags{ 16 | tags: tags, 17 | } 18 | } 19 | 20 | // Has checks whether the given tag is contained in the tags. 21 | func (t *Tags) Has(tag string) bool { 22 | for _, t := range t.tags { 23 | if t == tag { 24 | return true 25 | } 26 | } 27 | return false 28 | } 29 | 30 | // HasPrefix checks whether at least one of the tags has the given prefix.
31 | func (t *Tags) HasPrefix(prefix string) bool { 32 | for _, t := range t.tags { 33 | if strings.HasPrefix(t, prefix) { 34 | return true 35 | } 36 | } 37 | return false 38 | } 39 | 40 | // Add adds a tag 41 | func (t *Tags) Add(tag string) { 42 | t.tags = append(t.tags, tag) 43 | } 44 | 45 | // Remove removes a tag 46 | func (t *Tags) Remove(tag string) bool { 47 | tags := []string{} 48 | removed := false 49 | for _, t := range t.tags { 50 | if t == tag { 51 | removed = true 52 | continue 53 | } 54 | tags = append(tags, t) 55 | } 56 | if removed { 57 | t.tags = tags 58 | } 59 | return removed 60 | } 61 | 62 | // Values collects all the values that are contained with the given prefix. 63 | func (t *Tags) Values(prefix string) []string { 64 | values := []string{} 65 | for _, t := range t.tags { 66 | if strings.HasPrefix(t, prefix) { 67 | values = append(values, strings.TrimPrefix(t, prefix)) 68 | } 69 | } 70 | return values 71 | } 72 | 73 | // Unique returns the distinct tag values as sorted slice. 74 | func (t *Tags) Unique() []string { 75 | tagSet := make(map[string]bool) 76 | for _, t := range t.tags { 77 | tagSet[t] = true 78 | } 79 | uniqueTags := []string{} 80 | for k := range tagSet { 81 | uniqueTags = append(uniqueTags, k) 82 | } 83 | sort.Strings(uniqueTags) 84 | return uniqueTags 85 | } 86 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/tags/tags_test.go: -------------------------------------------------------------------------------- 1 | package tags 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/google/go-cmp/cmp" 8 | ) 9 | 10 | func Example() { 11 | t := New(nil) 12 | t.Add("k=1") 13 | t.Add("k=2") 14 | fmt.Println(t.Unique()) 15 | fmt.Println(t.Values("k=")) 16 | // Output: 17 | // [k=1 k=2] 18 | // [1 2] 19 | } 20 | 21 | func TestHas(t *testing.T) { 22 | tests := []struct { 23 | name string 24 | tags []string 25 | tag string 26 | wanted bool 27 | }{ 28 | { 29 | name: "empty", 30 | tags: []string{}, 31 | tag: "", 32 | wanted: false, 33 | }, 34 | { 35 | name: "with tag", 36 | tags: []string{"t"}, 37 | tag: "t", 38 | wanted: true, 39 | }, 40 | { 41 | name: "with other tags", 42 | tags: []string{"a", "b", "c"}, 43 | tag: "t", 44 | wanted: false, 45 | }, 46 | } 47 | for i := range tests { 48 | tt := tests[i] 49 | t.Run(tt.name, func(t *testing.T) { 50 | tags := New(tt.tags) 51 | got := tags.Has(tt.tag) 52 | if !cmp.Equal(got, tt.wanted) { 53 | t.Errorf("Test failed: %v", cmp.Diff(got, tt.wanted)) 54 | } 55 | }) 56 | } 57 | } 58 | 59 | func TestHasPrefix(t *testing.T) { 60 | tests := []struct { 61 | name string 62 | tags []string 63 | prefix string 64 | wanted bool 65 | }{ 66 | { 67 | name: "empty tags", 68 | tags: []string{}, 69 | prefix: "", 70 | wanted: false, 71 | }, 72 | { 73 | name: "tag with empty string", 74 | tags: []string{""}, 75 | prefix: "", 76 | wanted: true, 77 | }, 78 | { 79 | name: "a tag with prefix", 80 | tags: []string{"b", "c", "key=value"}, 81 | prefix: "key", 82 | wanted: true, 83 | }, 84 | } 85 | for i := range tests { 86 | tt := tests[i] 87 | t.Run(tt.name, func(t *testing.T) { 88 | tags := New(tt.tags) 89 | got := tags.HasPrefix(tt.prefix) 90 | if !cmp.Equal(got, tt.wanted) { 91 | t.Errorf("Test failed: %v", cmp.Diff(got, tt.wanted)) 92 | } 93 | }) 94 | } 95 | } 96 | 97 | func TestRemove(t *testing.T) { 98 | tests := []struct { 99 | name string 100 | tags []string 101 | delete string 102 | wantedTags []string 103 | wantedReturn bool 104 | }{ 105 | { 106 | name: "tag not there", 107 
| tags: []string{""}, 108 | delete: "test", 109 | wantedTags: []string{""}, 110 | wantedReturn: false, 111 | }, 112 | { 113 | name: "remove a tag", 114 | tags: []string{"2", "1", "2", "3"}, 115 | delete: "2", 116 | wantedTags: []string{"1", "3"}, 117 | wantedReturn: true, 118 | }, 119 | } 120 | for i := range tests { 121 | tt := tests[i] 122 | t.Run(tt.name, func(t *testing.T) { 123 | tags := New(tt.tags) 124 | gotReturn := tags.Remove(tt.delete) 125 | got := tags.Unique() 126 | if !cmp.Equal(got, tt.wantedTags) { 127 | t.Errorf("Test failed: %v", cmp.Diff(got, tt.wantedTags)) 128 | } 129 | if gotReturn != tt.wantedReturn { 130 | t.Errorf("expected %v but got %v", tt.wantedReturn, gotReturn) 131 | } 132 | }) 133 | } 134 | } 135 | 136 | func TestUnique(t *testing.T) { 137 | tests := []struct { 138 | name string 139 | tags []string 140 | wanted []string 141 | }{ 142 | { 143 | name: "empty", 144 | tags: []string{}, 145 | wanted: []string{}, 146 | }, 147 | { 148 | name: "some tags", 149 | tags: []string{"2", "1", "2"}, 150 | wanted: []string{"1", "2"}, 151 | }, 152 | } 153 | for i := range tests { 154 | tt := tests[i] 155 | t.Run(tt.name, func(t *testing.T) { 156 | tags := New(tt.tags) 157 | got := tags.Unique() 158 | if !cmp.Equal(got, tt.wanted) { 159 | t.Errorf("Test failed: %v", cmp.Diff(got, tt.wanted)) 160 | } 161 | }) 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /cmd/metal-api/internal/testdata/ipam.go: -------------------------------------------------------------------------------- 1 | package testdata 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "testing" 7 | 8 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/ipam" 9 | "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" 10 | r "gopkg.in/rethinkdb/rethinkdb-go.v6" 11 | ) 12 | 13 | // InitMockIpamData initializes mock data to be stored in the IPAM module 14 | func InitMockIpamData(dbMock *r.Mock, withIP bool) (ipam.IPAMer, error) { 15 | ipamer := ipam.InitTestIpam(&testing.T{}) 16 | 17 | ctx := context.Background() 18 | 19 | // start creating the prefixes in the IPAM 20 | for _, prefix := range prefixesIPAM { 21 | err := ipamer.CreatePrefix(ctx, prefix) 22 | if err != nil { 23 | return nil, fmt.Errorf("error creating ipam mock data: %w", err) 24 | } 25 | } 26 | for _, prefix := range []metal.Prefix{prefix1, prefix2, prefix3, superPrefix, superPrefixV6} { 27 | err := ipamer.CreatePrefix(ctx, prefix) 28 | if err != nil { 29 | return nil, fmt.Errorf("error creating ipam mock data: %w", err) 30 | } 31 | } 32 | 33 | NwIPAM = metal.Network{ 34 | Base: metal.Base{ 35 | ID: "4", 36 | Name: "IPAM Network", 37 | Description: "description IPAM", 38 | }, 39 | Prefixes: prefixesIPAM, 40 | } 41 | 42 | // now, let's get an ip from the IPAM for IPAMIP 43 | if withIP { 44 | ipAddress, err := ipamer.AllocateIP(ctx, prefixesIPAM[0]) 45 | if err != nil { 46 | return nil, fmt.Errorf("error creating ipam mock data: %w", err) 47 | } 48 | IPAMIP.IPAddress = ipAddress 49 | IPAMIP.ParentPrefixCidr = prefixesIPAM[0].String() 50 | IPAMIP.NetworkID = NwIPAM.ID 51 | TestIPs = append(TestIPs, IPAMIP) 52 | } else { 53 | if len(TestIPs) > 3 { 54 | TestIPs = TestIPs[:3] 55 | } 56 | IPAMIP.IPAddress = "" 57 | IPAMIP.ParentPrefixCidr = "" 58 | IPAMIP.NetworkID = "" 59 | } 60 | 61 | dbMock.On(r.DB("mockdb").Table("ip").Get(IPAMIP.IPAddress)).Return(IPAMIP, nil) 62 | dbMock.On(r.DB("mockdb").Table("network").Get(NwIPAM.ID)).Return(NwIPAM, nil) 63 | 64 | return ipamer, nil 65 | } 66 | 
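InitMockIpamData pairs naturally with the mocked rethinkdb from the datastore package. A minimal wiring sketch, assuming a test in the service package; the test name and assertions are invented for illustration, while InitMockDB and InitMockIpamData are the helpers shown in this repository:

package service

import (
	"testing"

	"github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore"
	"github.com/metal-stack/metal-api/cmd/metal-api/internal/testdata"
	"github.com/stretchr/testify/require"
)

// hypothetical wiring test combining the mocked rethinkdb with the mock IPAM
func TestMockIpamWiring(t *testing.T) {
	_, mock := datastore.InitMockDB(t)

	ipamer, err := testdata.InitMockIpamData(mock, true)
	require.NoError(t, err)
	require.NotNil(t, ipamer)

	// with withIP=true the seeded IPAMIP received an address from the test IPAM
	require.NotEmpty(t, testdata.IPAMIP.IPAddress)
}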
-------------------------------------------------------------------------------- /cmd/metal-api/internal/tools/visualize_fsm/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "os" 6 | 7 | "github.com/looplab/fsm" 8 | pfsm "github.com/metal-stack/metal-api/cmd/metal-api/internal/fsm" 9 | ) 10 | 11 | func main() { 12 | f := fsm.NewFSM("", pfsm.Events(), nil) 13 | dot := fsm.Visualize(f) 14 | if err := os.WriteFile("fsm.dot", []byte(dot), 0600); err != nil { 15 | log.Fatal(err) 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /pkg/grpc/wait.go: -------------------------------------------------------------------------------- 1 | package helper 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "log/slog" 9 | "strings" 10 | "time" 11 | 12 | v1 "github.com/metal-stack/metal-api/pkg/api/v1" 13 | "google.golang.org/grpc/codes" 14 | "google.golang.org/grpc/status" 15 | ) 16 | 17 | // WaitForAllocation can be used to call the wait method continuously until an allocation was made. 18 | // This is made for the metal-hammer and located here for better testability. 19 | func WaitForAllocation(ctx context.Context, log *slog.Logger, service v1.BootServiceClient, machineID string, timeout time.Duration) error { 20 | req := &v1.BootServiceWaitRequest{ 21 | MachineId: machineID, 22 | } 23 | 24 | for { 25 | stream, err := service.Wait(ctx, req) 26 | if err != nil { 27 | log.Error("failed waiting for allocation", "retry after", timeout, "error", err) 28 | 29 | if strings.Contains(err.Error(), "failed to verify certificate") { 30 | return fmt.Errorf("certificate changed, rebooting") 31 | } 32 | 33 | time.Sleep(timeout) 34 | continue 35 | } 36 | 37 | for { 38 | _, err := stream.Recv() 39 | if errors.Is(err, io.EOF) { 40 | log.Info("machine has been requested for allocation", "machineID", machineID) 41 | return nil 42 | } 43 | 44 | if err != nil { 45 | if e, ok := status.FromError(err); ok { 46 | log.Error("got error from wait call", "code", e.Code(), "message", e.Message(), "details", e.Details()) 47 | switch e.Code() { // nolint:exhaustive 48 | case codes.Unimplemented: 49 | return fmt.Errorf("metal-api breaking change detected, rebooting: %w", err) 50 | } 51 | } 52 | 53 | log.Error("failed stream receiving during waiting for allocation", "retry after", timeout, "error", err) 54 | 55 | time.Sleep(timeout) 56 | break 57 | } 58 | 59 | log.Info("wait for allocation...") 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /proto/Makefile: -------------------------------------------------------------------------------- 1 | MAKEFLAGS += --no-print-directory 2 | BUF_VERSION := 1.50.0 3 | 4 | _buf: 5 | docker run --rm \ 6 | --entrypoint sh \ 7 | -v $(PWD)/..:/workspace \ 8 | -w /workspace/proto \ 9 | bufbuild/buf:$(BUF_VERSION) \ 10 | -c "buf $(CMD) && chown -R $(shell id -u):$(shell id -g) /workspace" 11 | 12 | .PHONY: protolint 13 | protolint: 14 | @$(MAKE) _buf CMD="format -w" 15 | @$(MAKE) _buf CMD="lint -v" 16 | 17 | .PHONY: protoc 18 | protoc: protolint 19 | @$(MAKE) _buf CMD="generate -v" 20 | -------------------------------------------------------------------------------- /proto/api/v1/event.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package api.v1; 4 | 5 | import "google/protobuf/timestamp.proto"; 6 | 7 | option go_package = 
"./v1"; 8 | 9 | service EventService { 10 | rpc Send(EventServiceSendRequest) returns (EventServiceSendResponse) {} 11 | } 12 | 13 | message EventServiceSendRequest { 14 | map events = 1; 15 | } 16 | 17 | message EventServiceSendResponse { 18 | // number of events stored 19 | uint64 events = 1; 20 | // slice of machineIDs for which event was not published 21 | repeated string failed = 2; 22 | } 23 | 24 | message MachineProvisioningEvent { 25 | // timestamp when the event occurred 26 | google.protobuf.Timestamp time = 1; 27 | // the event type 28 | // must be one of metal.ProvisioningEventType, otherwise event will be skipped 29 | // TODO should be migrated to be an enum 30 | string event = 2; 31 | // an additional message describing the event more detailed 32 | string message = 3; 33 | } 34 | -------------------------------------------------------------------------------- /proto/buf.gen.yaml: -------------------------------------------------------------------------------- 1 | version: v2 2 | plugins: 3 | - remote: buf.build/protocolbuffers/go:v1.36.3 4 | out: ../pkg/api 5 | - remote: buf.build/grpc/go:v1.5.1 6 | out: ../pkg/api 7 | opt: require_unimplemented_servers=false 8 | -------------------------------------------------------------------------------- /proto/buf.yaml: -------------------------------------------------------------------------------- 1 | version: v2 2 | lint: 3 | use: 4 | - STANDARD 5 | except: 6 | - FIELD_NOT_REQUIRED 7 | - PACKAGE_NO_IMPORT_CYCLE 8 | - RPC_REQUEST_STANDARD_NAME 9 | - RPC_RESPONSE_STANDARD_NAME 10 | - SERVICE_SUFFIX 11 | disallow_comment_ignores: true 12 | breaking: 13 | except: 14 | - EXTENSION_NO_DELETE 15 | - FIELD_SAME_DEFAULT 16 | -------------------------------------------------------------------------------- /test/integration.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log/slog" 7 | "testing" 8 | 9 | "github.com/metal-stack/metal-lib/bus" 10 | "github.com/stretchr/testify/require" 11 | "github.com/testcontainers/testcontainers-go" 12 | testlog "github.com/testcontainers/testcontainers-go/log" 13 | "github.com/testcontainers/testcontainers-go/wait" 14 | ) 15 | 16 | type ConnectionDetails struct { 17 | Port string 18 | IP string 19 | DB string 20 | User string 21 | Password string 22 | } 23 | 24 | func StartRethink(t testing.TB) (container testcontainers.Container, c *ConnectionDetails, err error) { 25 | ctx := context.Background() 26 | var log testlog.Logger 27 | if t != nil { 28 | log = testlog.TestLogger(t) 29 | } 30 | req := testcontainers.ContainerRequest{ 31 | Image: "rethinkdb:2.4.4-bookworm-slim", 32 | ExposedPorts: []string{"8080/tcp", "28015/tcp"}, 33 | Env: map[string]string{"RETHINKDB_PASSWORD": "rethink"}, 34 | WaitingFor: wait.ForAll( 35 | wait.ForListeningPort("28015/tcp"), 36 | ), 37 | Cmd: []string{"rethinkdb", "--bind", "all", "--directory", "/tmp", "--initial-password", "rethink", "--io-threads", "500"}, 38 | } 39 | rtContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ 40 | ContainerRequest: req, 41 | Started: true, 42 | Logger: log, 43 | }) 44 | if err != nil { 45 | panic(err.Error()) 46 | } 47 | ip, err := rtContainer.Host(ctx) 48 | if err != nil { 49 | return rtContainer, nil, err 50 | } 51 | port, err := rtContainer.MappedPort(ctx, "28015") 52 | if err != nil { 53 | return rtContainer, nil, err 54 | } 55 | 56 | c = &ConnectionDetails{ 57 | IP: ip, 58 | Port: port.Port(), 59 | User: 
"admin", 60 | DB: "metal", 61 | Password: "rethink", 62 | } 63 | 64 | return rtContainer, c, err 65 | } 66 | 67 | func StartPostgres() (container testcontainers.Container, c *ConnectionDetails, err error) { 68 | ctx := context.Background() 69 | req := testcontainers.ContainerRequest{ 70 | Image: "postgres:16-alpine", 71 | ExposedPorts: []string{"5432/tcp"}, 72 | Env: map[string]string{"POSTGRES_PASSWORD": "password"}, 73 | WaitingFor: wait.ForAll( 74 | wait.ForLog("database system is ready to accept connections"), 75 | wait.ForListeningPort("5432/tcp"), 76 | ), 77 | Cmd: []string{"postgres", "-c", "max_connections=500"}, 78 | } 79 | pgContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ 80 | ContainerRequest: req, 81 | Started: true, 82 | }) 83 | if err != nil { 84 | panic(err.Error()) 85 | } 86 | ip, err := pgContainer.Host(ctx) 87 | if err != nil { 88 | return pgContainer, nil, err 89 | } 90 | port, err := pgContainer.MappedPort(ctx, "5432") 91 | if err != nil { 92 | return pgContainer, nil, err 93 | } 94 | c = &ConnectionDetails{ 95 | IP: ip, 96 | Port: port.Port(), 97 | User: "postgres", 98 | DB: "postgres", 99 | Password: "password", 100 | } 101 | 102 | return pgContainer, c, err 103 | } 104 | 105 | func StartNsqd(t *testing.T, log *slog.Logger) (testcontainers.Container, bus.Publisher, *bus.Consumer) { 106 | ctx := context.Background() 107 | 108 | c, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ 109 | ContainerRequest: testcontainers.ContainerRequest{ 110 | Image: "nsqio/nsq:v1.3.0", 111 | ExposedPorts: []string{"4150/tcp", "4151/tcp"}, 112 | WaitingFor: wait.ForAll( 113 | wait.ForListeningPort("4150/tcp"), 114 | wait.ForListeningPort("4151/tcp"), 115 | ), 116 | Cmd: []string{"nsqd"}, 117 | }, 118 | Started: true, 119 | Logger: testlog.TestLogger(t), 120 | }) 121 | require.NoError(t, err) 122 | 123 | ip, err := c.Host(ctx) 124 | require.NoError(t, err) 125 | 126 | tcpPort, err := c.MappedPort(ctx, "4150") 127 | require.NoError(t, err) 128 | httpPort, err := c.MappedPort(ctx, "4151") 129 | require.NoError(t, err) 130 | 131 | consumer, err := bus.NewConsumer(log, nil) 132 | require.NoError(t, err) 133 | 134 | tcpAddress := fmt.Sprintf("%s:%d", ip, tcpPort.Int()) 135 | httpAddress := fmt.Sprintf("%s:%d", ip, httpPort.Int()) 136 | 137 | consumer.With(bus.NSQDs(tcpAddress)) 138 | 139 | publisher, err := bus.NewPublisher(log, &bus.PublisherConfig{ 140 | TCPAddress: tcpAddress, 141 | HTTPEndpoint: httpAddress, 142 | }) 143 | require.NoError(t, err) 144 | 145 | return c, publisher, consumer 146 | } 147 | -------------------------------------------------------------------------------- /test/rest/firewall.rest: -------------------------------------------------------------------------------- 1 | @baseurl = {{scheme}}://{{host}}/v1/machine 2 | @firewallbaseurl = {{scheme}}://{{host}}/v1/firewall 3 | 4 | ### register a firewall 5 | # @name register 6 | POST {{baseurl}}/register 7 | Authorization: Metal-Admin bfe5650d0149046959e7e49105134877906ebd6e1be0136dd6c51cb095d4ea8d 8 | X-Date: 1985-04-12T23:20:50.52Z 9 | Content-Type: application/json 10 | 11 | { 12 | "partitionid": "vagrant-lab", 13 | "rackid": "Vagrant Rack 1", 14 | "hardware": { 15 | "cpu_cores": 1, 16 | "disks": [ 17 | { 18 | "name": "sda", 19 | "size": 2147483648 20 | } 21 | ], 22 | "memory": 536870912, 23 | "nics": [ 24 | { 25 | "features": [ 26 | ], 27 | "mac": "aa:aa:aa:aa:aa:aa", 28 | "name": "eth0", 29 | "vendor": "Dell", 30 | "neighbors": [ 31 | { 32 
| "mac":"11:11:11:11:11:11", 33 | "name":"switch" 34 | } 35 | ] 36 | }, 37 | { 38 | "features": [ 39 | ], 40 | "mac": "00:00:00:00:00:00", 41 | "name": "lo", 42 | "vendor": "Dell" 43 | } 44 | ] 45 | }, 46 | "uuid": "ae671b8b-a158-52c2-8c22-985ca0503873" 47 | } 48 | 49 | ### wait for a machine 50 | # @name wait 51 | GET {{baseurl}}/ae671b8b-a158-52c2-8c22-985ca0503873/wait 52 | Authorization: Metal-Admin 8d7b8a807d368b716ce7d712266b680edb77ff70d050be30e0bbf2e50e189b2b 53 | X-Date: 1985-04-12T23:20:50.52Z 54 | 55 | ### allocate a firewall 56 | # @name allocate 57 | POST {{firewallbaseurl}}/allocate 58 | Content-Type: application/json 59 | Authorization: Metal-Admin bfe5650d0149046959e7e49105134877906ebd6e1be0136dd6c51cb095d4ea8d 60 | X-Date: 1985-04-12T23:20:50.52Z 61 | 62 | { 63 | "description": "My first metal host", 64 | "partitionid": "vagrant-lab", 65 | "hostname": "metal-test-1", 66 | "imageid": "ubuntu-18.10-firewall", 67 | "name": "Metal Host 1", 68 | "projectid": "devops", 69 | "tenant": "dkb", 70 | "sizeid": "v1-small-x86", 71 | "ssh_pub_keys": [], 72 | "networks": [ 73 | {"networkid": "internet-vagrant-lab"} 74 | ], 75 | "ha": false, 76 | "ips": [] 77 | } 78 | 79 | ### free a machine 80 | # @name free 81 | DELETE {{baseurl}}/ae671b8b-a158-52c2-8c22-985ca0503873/free 82 | Authorization: Metal-Admin 60d4480107818d260233f835ff91ec85df194a2300b290e8aba4449246919d81 83 | X-Date: 1985-04-12T23:20:50.52Z 84 | 85 | ### get a firewall 86 | # @name get 87 | GET {{firewallbaseurl}}/ae671b8b-a158-52c2-8c22-985ca0503873 88 | Authorization: Metal-Admin 8d7b8a807d368b716ce7d712266b680edb77ff70d050be30e0bbf2e50e189b2b 89 | X-Date: 1985-04-12T23:20:50.52Z 90 | 91 | ### list firewalls 92 | # @name list 93 | GET {{firewallbaseurl}}/ 94 | Authorization: Metal-Admin 8d7b8a807d368b716ce7d712266b680edb77ff70d050be30e0bbf2e50e189b2b 95 | X-Date: 1985-04-12T23:20:50.52Z 96 | 97 | ### find a firewall 98 | # @name find 99 | GET {{firewallbaseurl}}/find?project=test 100 | Authorization: Metal-Admin 8d7b8a807d368b716ce7d712266b680edb77ff70d050be30e0bbf2e50e189b2b 101 | X-Date: 1985-04-12T23:20:50.52Z 102 | -------------------------------------------------------------------------------- /test/rest/images.rest: -------------------------------------------------------------------------------- 1 | @baseurl = {{scheme}}://{{host}}/v1/image 2 | 3 | ### get all images 4 | # @name allImages 5 | GET {{baseurl}} 6 | Authorization: Metal-Admin 8d7b8a807d368b716ce7d712266b680edb77ff70d050be30e0bbf2e50e189b2b 7 | X-Date: 1985-04-12T23:20:50.52Z 8 | 9 | ### get one image 10 | @imageid = {{allImages.response.body.$[0].id}} 11 | # @name getFirstImage 12 | GET {{baseurl}}/{{imageid}} 13 | Authorization: Metal-Admin 8d7b8a807d368b716ce7d712266b680edb77ff70d050be30e0bbf2e50e189b2b 14 | X-Date: 1985-04-12T23:20:50.52Z 15 | 16 | ### migrate images 17 | # @name migrateImages 18 | GET {{baseurl}}/migrate 19 | Authorization: Metal-Admin 8d7b8a807d368b716ce7d712266b680edb77ff70d050be30e0bbf2e50e189b2b 20 | X-Date: 1985-04-12T23:20:50.52Z 21 | 22 | ### delete an image 23 | # @name deleteImage 24 | DELETE {{baseurl}}/ubuntu-19.10 25 | Authorization: Metal-Admin 60d4480107818d260233f835ff91ec85df194a2300b290e8aba4449246919d81 26 | X-Date: 1985-04-12T23:20:50.52Z 27 | 28 | 29 | ### create an image 30 | # @name createImage 31 | PUT {{baseurl}} 32 | Content-Type: application/json 33 | Authorization: Metal-Admin ad24814d87cf57f35e1f075d02a7eb748d17536cbdff473c09be2b75df0ca4d0 34 | X-Date: 1985-04-12T23:20:50.52Z 35 | 36 | { 37 | "id": 
"ubuntu-19.10", 38 | "name": "Ubuntu 19.10", 39 | "description": "Ubuntu 19.10", 40 | "url": "http://images.metal-pod.io/metal-os/ubuntu/19.10/20200317/img.tar.lz4", 41 | "features": [ 42 | "machine" 43 | ], 44 | "expirationDate": "0001-01-01T00:00:00Z", 45 | "classification": "supported", 46 | "created": "2020-04-21T13:11:25.402Z", 47 | "changed": "2020-04-21T13:11:25.402Z" 48 | } -------------------------------------------------------------------------------- /test/rest/ips.rest: -------------------------------------------------------------------------------- 1 | @baseurl = {{scheme}}://{{host}}/v1/ip 2 | 3 | ### get all ips 4 | # @name getAll 5 | GET {{baseurl}} 6 | Authorization: Metal-Admin 8d7b8a807d368b716ce7d712266b680edb77ff70d050be30e0bbf2e50e189b2b 7 | X-Date: 1985-04-12T23:20:50.52Z 8 | 9 | ### allocate ip 10 | # @name allocateIP 11 | POST {{baseurl}}/allocate/185.24.0.5 12 | Content-Type: application/json 13 | Authorization: Metal-Admin bfe5650d0149046959e7e49105134877906ebd6e1be0136dd6c51cb095d4ea8d 14 | X-Date: 1985-04-12T23:20:50.52Z 15 | 16 | { 17 | "description": "an ip", 18 | "name": "test-ip1", 19 | "networkid": "internet-vagrant-lab", 20 | "projectid": "9b1a3fbe-8bcf-41f6-9e9a-29b3c6c5d3f5" 21 | } 22 | 23 | ### release ip 24 | # @name releaseIP 25 | POST {{baseurl}}/release/185.24.0.1 26 | Content-Type: application/json 27 | Authorization: Metal-Admin bfe5650d0149046959e7e49105134877906ebd6e1be0136dd6c51cb095d4ea8d 28 | X-Date: 1985-04-12T23:20:50.52Z -------------------------------------------------------------------------------- /test/rest/machine-groups.rest: -------------------------------------------------------------------------------- 1 | @baseurl = {{scheme}}://{{host}}/v1/machine-group 2 | 3 | ### get all machine groups 4 | # @name getAll 5 | GET {{baseurl}} 6 | Content-Type: application/json 7 | Authorization: Metal-Admin 8d7b8a807d368b716ce7d712266b680edb77ff70d050be30e0bbf2e50e189b2b 8 | X-Date: 1985-04-12T23:20:50.52Z 9 | 10 | ### get specific machine group 11 | # @name get 12 | GET {{baseurl}}/982d69e3-d770-4347-a21e-870eb43d65ae 13 | Content-Type: application/json 14 | Authorization: Metal-Admin 8d7b8a807d368b716ce7d712266b680edb77ff70d050be30e0bbf2e50e189b2b 15 | X-Date: 1985-04-12T23:20:50.52Z 16 | 17 | ### find specific machine group 18 | # @name find 19 | POST {{baseurl}}/find 20 | Content-Type: application/json 21 | Authorization: Metal-Admin bfe5650d0149046959e7e49105134877906ebd6e1be0136dd6c51cb095d4ea8d 22 | X-Date: 1985-04-12T23:20:50.52Z 23 | 24 | { 25 | "labels": {"application": "something"} 26 | } 27 | 28 | ### create a machine group 29 | # @name create 30 | PUT {{baseurl}} 31 | Content-Type: application/json 32 | Authorization: Metal-Admin ad24814d87cf57f35e1f075d02a7eb748d17536cbdff473c09be2b75df0ca4d0 33 | X-Date: 1985-04-12T23:20:50.52Z 34 | 35 | { 36 | "description": "a test machine group", 37 | "name": "test-machine-group", 38 | "partitionid": "vagrant-lab", 39 | "projectid": "9b1a3fbe-8bcf-41f6-9e9a-29b3c6c5d3f5", 40 | "sshpubkeys": [], 41 | "labels": {"application": "something"} 42 | } 43 | 44 | ### delete a machine group 45 | # @name delete 46 | DELETE {{baseurl}}/59f311d8-58dc-4c00-a92b-e1e4ee30fb78 47 | Content-Type: application/json 48 | Authorization: Metal-Admin 60d4480107818d260233f835ff91ec85df194a2300b290e8aba4449246919d81 49 | X-Date: 1985-04-12T23:20:50.52Z 50 | -------------------------------------------------------------------------------- /test/rest/metadata.rest: 
-------------------------------------------------------------------------------- 1 | @baseurl = {{scheme}}://{{host}}/v1 2 | 3 | ### check health 4 | # @name checkHealth 5 | GET {{baseurl}}/health 6 | 7 | ### get version 8 | # @name getVersion 9 | GET {{baseurl}}/version -------------------------------------------------------------------------------- /test/rest/networks.rest: -------------------------------------------------------------------------------- 1 | @baseurl = {{scheme}}://{{host}}/v1/network 2 | 3 | ### get all networks 4 | # @name getAll 5 | GET {{baseurl}} 6 | Authorization: Metal-Admin 8d7b8a807d368b716ce7d712266b680edb77ff70d050be30e0bbf2e50e189b2b 7 | X-Date: 1985-04-12T23:20:50.52Z 8 | 9 | ### find network 10 | POST {{baseurl}}/find 11 | Content-Type: application/json 12 | Authorization: Metal-Admin bfe5650d0149046959e7e49105134877906ebd6e1be0136dd6c51cb095d4ea8d 13 | X-Date: 1985-04-12T23:20:50.52Z 14 | 15 | { 16 | "partitionid": "vagrant" 17 | } 18 | 19 | ### create project super network 20 | # @name createProjectSuperNetwork 21 | PUT {{baseurl}}/ 22 | Content-Type: application/json 23 | Authorization: Metal-Admin ad24814d87cf57f35e1f075d02a7eb748d17536cbdff473c09be2b75df0ca4d0 24 | X-Date: 1985-04-12T23:20:50.52Z 25 | 26 | { 27 | "id": "tenant-super-network-vagrant", 28 | "description": "Project Super Network", 29 | "name": "projects", 30 | "nat": false, 31 | "partitionid": "vagrant", 32 | "prefixes": [ 33 | "10.0.0.0/16" 34 | ], 35 | "privatesuper": true 36 | } 37 | 38 | ### create internet network 39 | # @name createInternetNetwork 40 | PUT {{baseurl}}/ 41 | Content-Type: application/json 42 | Authorization: Metal-Admin ad24814d87cf57f35e1f075d02a7eb748d17536cbdff473c09be2b75df0ca4d0 43 | X-Date: 1985-04-12T23:20:50.52Z 44 | 45 | { 46 | "id": "internet-vagrant", 47 | "description": "Internet in Vagrant", 48 | "name": "vagrant internet", 49 | "nat": false, 50 | "partitionid": "vagrant", 51 | "prefixes": [ 52 | "185.24.0.0/16", 53 | "185.27.0.0/16" 54 | ], 55 | "privatesuper": false 56 | } 57 | 58 | ### create underlay network 59 | # @name createUnderlayNetwork 60 | PUT {{baseurl}}/ 61 | Content-Type: application/json 62 | Authorization: Metal-Admin ad24814d87cf57f35e1f075d02a7eb748d17536cbdff473c09be2b75df0ca4d0 63 | X-Date: 1985-04-12T23:20:50.52Z 64 | 65 | { 66 | "id": "underlay-vagrant", 67 | "description": "Underlay in Vagrant", 68 | "name": "vagrant underlay", 69 | "nat": false, 70 | "underlay": true, 71 | "partitionid": "vagrant", 72 | "vrf": 10000, 73 | "prefixes": [ 74 | "10.1.0.0/24" 75 | ], 76 | "privatesuper": false 77 | } 78 | 79 | ### allocate network 80 | # @name allocateNetwork 81 | POST {{baseurl}}/allocate 82 | Content-Type: application/json 83 | Authorization: Metal-Admin bfe5650d0149046959e7e49105134877906ebd6e1be0136dd6c51cb095d4ea8d 84 | X-Date: 1985-04-12T23:20:50.52Z 85 | 86 | { 87 | "projectid": "00000000-0000-0000-0000-000000000001", 88 | "partitionid": "vagrant" 89 | } 90 | 91 | ### release network 92 | # @name releaseNetwork 93 | POST {{baseurl}}/release/f900e906-8b65-4262-892a-4dd4f520735d 94 | Content-Type: application/json 95 | Authorization: Metal-Admin bfe5650d0149046959e7e49105134877906ebd6e1be0136dd6c51cb095d4ea8d 96 | X-Date: 1985-04-12T23:20:50.52Z 97 | 98 | 99 | ### delete network 100 | # @name deleteNetwork 101 | DELETE {{baseurl}}/tenant-super-network-vagrant 102 | Content-Type: application/json 103 | Authorization: Metal-Admin 60d4480107818d260233f835ff91ec85df194a2300b290e8aba4449246919d81 104 | X-Date: 1985-04-12T23:20:50.52Z 
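
### find networks by project
# hypothetical request added for illustration: "projectid" is assumed to be a
# valid network search field here, mirroring the find-by-partitionid example
# above; the id is the one used in the allocate example
# @name findNetworksByProject
POST {{baseurl}}/find
Content-Type: application/json
Authorization: Metal-Admin bfe5650d0149046959e7e49105134877906ebd6e1be0136dd6c51cb095d4ea8d
X-Date: 1985-04-12T23:20:50.52Z

{
    "projectid": "00000000-0000-0000-0000-000000000001"
}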
105 | -------------------------------------------------------------------------------- /test/rest/partitions.rest: -------------------------------------------------------------------------------- 1 | @baseurl = {{scheme}}://{{host}}/v1/partition 2 | 3 | ### get all partitions 4 | # @name getAll 5 | GET {{baseurl}} 6 | 7 | ### get vagrant-lab 8 | # @name getVagrantLab 9 | GET {{baseurl}}/vagrant-lab 10 | 11 | ### delete vagrant-lab 12 | # @name deleteVagrantLab 13 | DELETE {{baseurl}}/vagrant-lab 14 | Content-Type: application/json 15 | Authorization: Metal-Admin 60d4480107818d260233f835ff91ec85df194a2300b290e8aba4449246919d81 16 | X-Date: 1985-04-12T23:20:50.52Z 17 | -------------------------------------------------------------------------------- /test/rest/projects.rest: -------------------------------------------------------------------------------- 1 | @baseurl = {{scheme}}://{{host}}/v1/project 2 | 3 | ### get all projects 4 | # @name getAll 5 | GET {{baseurl}} 6 | Content-Type: application/json 7 | Authorization: Metal-Admin 8d7b8a807d368b716ce7d712266b680edb77ff70d050be30e0bbf2e50e189b2b 8 | X-Date: 1985-04-12T23:20:50.52Z 9 | 10 | ### get specific project 11 | # @name get 12 | GET {{baseurl}}/43d9eba2-e1f7-43f9-8054-1d3a9b59302c 13 | Content-Type: application/json 14 | Authorization: Metal-Admin 8d7b8a807d368b716ce7d712266b680edb77ff70d050be30e0bbf2e50e189b2b 15 | X-Date: 1985-04-12T23:20:50.52Z 16 | 17 | ### find specific project 18 | # @name find 19 | POST {{baseurl}}/find 20 | Content-Type: application/json 21 | Authorization: Metal-Admin bfe5650d0149046959e7e49105134877906ebd6e1be0136dd6c51cb095d4ea8d 22 | X-Date: 1985-04-12T23:20:50.52Z 23 | 24 | { 25 | "name": "project-1" 26 | } 27 | 28 | ### create a project 29 | # @name create 30 | PUT {{baseurl}} 31 | Content-Type: application/json 32 | Authorization: Metal-Admin ad24814d87cf57f35e1f075d02a7eb748d17536cbdff473c09be2b75df0ca4d0 33 | X-Date: 1985-04-12T23:20:50.52Z 34 | 35 | { 36 | "description": "a test project", 37 | "name": "project-1" 38 | } 39 | 40 | ### delete a project 41 | # @name delete 42 | DELETE {{baseurl}}/e4c35a15-3aed-4f57-9e17-f3767bcde02f 43 | Content-Type: application/json 44 | Authorization: Metal-Admin 60d4480107818d260233f835ff91ec85df194a2300b290e8aba4449246919d81 45 | X-Date: 1985-04-12T23:20:50.52Z 46 | -------------------------------------------------------------------------------- /test/rest/readme.md: -------------------------------------------------------------------------------- 1 | # Usage 2 | 3 | These tests are written for the **REST Client** plugin of Visual Studio Code. If you want to use these tests, install this plugin and make sure you have a block like this in your user settings of VSCode. 4 | 5 | ~~~json 6 | "rest-client.environmentVariables": { 7 | "$shared": {}, 8 | "local-metal" : { 9 | "scheme":"http", 10 | "host":"localhost:8080" 11 | }, 12 | } 13 | ~~~ 14 | 15 | You can then select this environment and simply execute the tests.
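A note on the Authorization headers used throughout these .rest files: they are HMAC tokens computed over the fixed X-Date value of each request, produced by the same metal-stack security package that service_test.go exercises. A minimal signing sketch; the key []byte{1, 2, 3} mirrors service_test.go and is an assumption here, real deployments configure their own keys:

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/metal-stack/security"
)

func main() {
	rq, _ := http.NewRequest(http.MethodGet, "http://localhost:8080/v1/partition", nil)

	// "Metal-Admin" is the auth type seen in the .rest files above; the key
	// is an illustrative assumption (service_test.go uses []byte{1, 2, 3})
	hma := security.NewHMACAuth("Metal-Admin", []byte{1, 2, 3})

	// AddAuth signs the request (nil body here) and sets the auth headers,
	// just like injectUser does in service_test.go
	hma.AddAuth(rq, time.Now(), nil)

	fmt.Println(rq.Header.Get("Authorization"))
}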
-------------------------------------------------------------------------------- /test/rest/sizes.rest: -------------------------------------------------------------------------------- 1 | @baseurl = {{scheme}}://{{host}}/v1/size 2 | 3 | ### get all sizes 4 | # @name getAll 5 | GET {{baseurl}} 6 | Authorization: Metal-View 8d7b8a807d368b716ce7d712266b680edb77ff70d050be30e0bbf2e50e189b2b 7 | X-Date: 1985-04-12T23:20:50.52Z 8 | 9 | ### get first size 10 | @sizeid = {{getAll.response.body.$[0].id}} 11 | # @name getFirstSize 12 | GET {{baseurl}}/{{sizeid}} 13 | Authorization: Metal-View 8d7b8a807d368b716ce7d712266b680edb77ff70d050be30e0bbf2e50e189b2b 14 | X-Date: 1985-04-12T23:20:50.52Z 15 | 16 | ### check if a size is found (success) 17 | POST {{baseurl}}/from-hardware 18 | Content-Type: application/json 19 | Authorization: Metal-View bfe5650d0149046959e7e49105134877906ebd6e1be0136dd6c51cb095d4ea8d 20 | X-Date: 1985-04-12T23:20:50.52Z 21 | 22 | { 23 | "cpu_cores": 1, 24 | "disks": [ 25 | { 26 | "name": "sda", 27 | "size": 12 28 | }, 29 | { 30 | "name": "sdb", 31 | "size": 13 32 | } 33 | ], 34 | "memory": 1024, 35 | "nics": [ 36 | { 37 | "features": [ 38 | ], 39 | "mac": "aa:aa:aa:aa:aa:aa", 40 | "name": "eth0", 41 | "vendor": "Dell", 42 | "neighbors": [ 43 | { 44 | "mac":"11:11:11:11:11:11", 45 | "name":"switch" 46 | } 47 | ] 48 | }, 49 | { 50 | "features": [ 51 | ], 52 | "mac": "00:00:00:00:00:00", 53 | "name": "lo", 54 | "vendor": "Dell" 55 | } 56 | ] 57 | } 58 | 59 | ### check if a size is found (no match) 60 | POST {{baseurl}}/from-hardware 61 | Content-Type: application/json 62 | 63 | { 64 | "cpu_cores": 1, 65 | "disks": [ 66 | { 67 | "name": "sda", 68 | "size": 12 69 | }, 70 | { 71 | "name": "sdb", 72 | "size": 13 73 | } 74 | ], 75 | "memory": 10240, 76 | "nics": [ 77 | { 78 | "features": [ 79 | ], 80 | "mac": "aa:aa:aa:aa:aa:aa", 81 | "name": "eth0", 82 | "vendor": "Dell", 83 | "neighbors": [ 84 | { 85 | "mac":"11:11:11:11:11:11", 86 | "name":"switch" 87 | } 88 | ] 89 | }, 90 | { 91 | "features": [ 92 | ], 93 | "mac": "00:00:00:00:00:00", 94 | "name": "lo", 95 | "vendor": "Dell" 96 | } 97 | ] 98 | } -------------------------------------------------------------------------------- /test/rest/switch.rest: -------------------------------------------------------------------------------- 1 | @baseurl = {{scheme}}://{{host}}/v1/switch 2 | 3 | ### get all switches 4 | # @name getAll 5 | GET {{baseurl}} 6 | Authorization: Metal-Admin 71f2cba9ea8ac77ef30c3fac795ea41208a3f7d5a9efe186c9975e5e531869b0 7 | X-Date: 1985-04-12T23:20:50.52Z 8 | 9 | ### register a switch 10 | # @name register 11 | POST {{baseurl}}/register 12 | Content-Type: application/json 13 | Authorization: Metal-Admin d64300dffcea83ffea5ae281d1a88543b9398841ae80cc8e0929e185e3c50dcd 14 | X-Date: 1985-04-12T23:20:50.52Z 15 | 16 | { 17 | "id": "switch1", 18 | "nics": [ 19 | { 20 | "mac": "11:11:11:11:11:11", 21 | "name": "eth1" 22 | }, 23 | { 24 | "mac": "11:11:11:11:11:13", 25 | "name": "eth2s0" 26 | } 27 | ], 28 | "partition_id": "vagrant-lab", 29 | "rack_id": "Vagrant Rack 1" 30 | } --------------------------------------------------------------------------------
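To close the loop with pkg/grpc/wait.go shown earlier: a minimal caller sketch for WaitForAllocation. The gRPC address and the insecure transport are illustrative assumptions (the metal-hammer learns the real endpoint from its boot configuration); the machine UUID is the one used in the .rest examples above:

package main

import (
	"context"
	"log/slog"
	"time"

	v1 "github.com/metal-stack/metal-api/pkg/api/v1"
	helper "github.com/metal-stack/metal-api/pkg/grpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// assumed endpoint and credentials, for illustration only
	conn, err := grpc.NewClient("metal-api:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := v1.NewBootServiceClient(conn)

	// blocks until the machine is allocated, retrying every 5 seconds on errors
	err = helper.WaitForAllocation(context.Background(), slog.Default(), client, "ae671b8b-a158-52c2-8c22-985ca0503873", 5*time.Second)
	if err != nil {
		panic(err)
	}
}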