├── .dockerignore ├── .github └── workflows │ ├── release.yaml │ └── validate-ipfs.yaml ├── .gitignore ├── .golangci.yaml ├── .readthedocs.yaml ├── Dockerfile ├── Makefile ├── PROJECT ├── README.md ├── api └── v1alpha1 │ ├── circuitrelay_types.go │ ├── groupversion_info.go │ ├── ipfscluster_types.go │ └── zz_generated.deepcopy.go ├── bundle.Dockerfile ├── bundle ├── manifests │ ├── cluster.ipfs.io_circuitrelays.yaml │ ├── cluster.ipfs.io_ipfsclusters.yaml │ ├── ipfs-operator-controller-manager-metrics-service_v1_service.yaml │ ├── ipfs-operator-manager-config_v1_configmap.yaml │ ├── ipfs-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml │ └── ipfs-operator.clusterserviceversion.yaml ├── metadata │ └── annotations.yaml └── tests │ └── scorecard │ └── config.yaml ├── config ├── crd │ ├── bases │ │ ├── cluster.ipfs.io_circuitrelays.yaml │ │ └── cluster.ipfs.io_ipfsclusters.yaml │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── patches │ │ ├── cainjection_in_circuitrelays.yaml │ │ ├── cainjection_in_ipfs.yaml │ │ ├── cainjection_in_ipfsclusters.yaml │ │ ├── webhook_in_circuitrelays.yaml │ │ ├── webhook_in_ipfs.yaml │ │ └── webhook_in_ipfsclusters.yaml ├── default │ ├── kustomization.yaml │ ├── manager_auth_proxy_patch.yaml │ └── manager_config_patch.yaml ├── manager │ ├── controller_manager_config.yaml │ ├── kustomization.yaml │ └── manager.yaml ├── manifests │ ├── bases │ │ └── ipfs-operator.clusterserviceversion.yaml │ └── kustomization.yaml ├── prometheus │ ├── kustomization.yaml │ └── monitor.yaml ├── rbac │ ├── auth_proxy_client_clusterrole.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── circuitrelay_editor_role.yaml │ ├── circuitrelay_viewer_role.yaml │ ├── ipfs_editor_role.yaml │ ├── ipfs_viewer_role.yaml │ ├── ipfscluster_editor_role.yaml │ ├── ipfscluster_viewer_role.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── 
role.yaml │ ├── role_binding.yaml │ └── service_account.yaml ├── samples │ ├── cluster_v1alpha1_circuitrelay.yaml │ ├── cluster_v1alpha1_ipfscluster.yaml │ └── kustomization.yaml └── scorecard │ ├── bases │ └── config.yaml │ ├── kustomization.yaml │ └── patches │ ├── basic.config.yaml │ └── olm.config.yaml ├── controllers ├── circuitrelay.go ├── circuitrelay_controller.go ├── configmap.go ├── ipfs-cluster-operator.code-workspace ├── ipfs_util.go ├── ipfscluster_controller.go ├── ipfscluster_controller_test.go ├── scripts │ ├── config.go │ ├── config_test.go │ └── scripts_suite_test.go ├── secret.go ├── service.go ├── serviceaccount.go ├── statefulset.go ├── suite_test.go └── utils │ └── utils.go ├── docs ├── Makefile ├── build │ ├── doctrees │ │ ├── environment.pickle │ │ ├── getting_started.doctree │ │ ├── hacking.doctree │ │ ├── index.doctree │ │ └── your_first_cluster.doctree │ └── html │ │ ├── .buildinfo │ │ ├── _sources │ │ ├── getting_started.rst.txt │ │ ├── hacking.rst.txt │ │ ├── index.rst.txt │ │ └── your_first_cluster.rst.txt │ │ ├── _static │ │ ├── _sphinx_javascript_frameworks_compat.js │ │ ├── basic.css │ │ ├── css │ │ │ ├── badge_only.css │ │ │ ├── fonts │ │ │ │ ├── Roboto-Slab-Bold.woff │ │ │ │ ├── Roboto-Slab-Bold.woff2 │ │ │ │ ├── Roboto-Slab-Regular.woff │ │ │ │ ├── Roboto-Slab-Regular.woff2 │ │ │ │ ├── fontawesome-webfont.eot │ │ │ │ ├── fontawesome-webfont.svg │ │ │ │ ├── fontawesome-webfont.ttf │ │ │ │ ├── fontawesome-webfont.woff │ │ │ │ ├── fontawesome-webfont.woff2 │ │ │ │ ├── lato-bold-italic.woff │ │ │ │ ├── lato-bold-italic.woff2 │ │ │ │ ├── lato-bold.woff │ │ │ │ ├── lato-bold.woff2 │ │ │ │ ├── lato-normal-italic.woff │ │ │ │ ├── lato-normal-italic.woff2 │ │ │ │ ├── lato-normal.woff │ │ │ │ └── lato-normal.woff2 │ │ │ └── theme.css │ │ ├── doctools.js │ │ ├── documentation_options.js │ │ ├── file.png │ │ ├── jquery-3.6.0.js │ │ ├── jquery.js │ │ ├── js │ │ │ ├── badge_only.js │ │ │ ├── html5shiv-printshiv.min.js │ │ │ ├── 
html5shiv.min.js │ │ │ └── theme.js │ │ ├── language_data.js │ │ ├── minus.png │ │ ├── plus.png │ │ ├── pygments.css │ │ ├── searchtools.js │ │ ├── underscore-1.13.1.js │ │ └── underscore.js │ │ ├── genindex.html │ │ ├── getting_started.html │ │ ├── hacking.html │ │ ├── index.html │ │ ├── objects.inv │ │ ├── search.html │ │ ├── searchindex.js │ │ └── your_first_cluster.html └── source │ ├── conf.py │ ├── getting_started.rst │ ├── hacking.rst │ ├── index.rst │ ├── requirements.txt │ └── your_first_cluster.rst ├── examples ├── collab-follow-small.yaml ├── collab-follow.yaml ├── ipfs-medium-private.yaml ├── ipfs-small-private.yaml ├── ipfs-small.yaml └── ipfs.yaml ├── go.mod ├── go.sum ├── hack ├── boilerplate.go.txt ├── run-in-kind.sh ├── setup-kind-cluster.sh └── utils.sh ├── helm ├── gen.go └── ipfs-operator │ ├── Chart.yaml │ ├── crds │ ├── cluster.ipfs.io_circuitrelays.yaml │ └── cluster.ipfs.io_ipfsclusters.yaml │ ├── templates │ ├── ClusterRole-ipfs-operator-manager-role.yaml │ ├── ClusterRole-ipfs-operator-metrics-reader.yaml │ ├── ClusterRole-ipfs-operator-proxy-role.yaml │ ├── ClusterRoleBinding-ipfs-operator-manager-rolebinding.yaml │ ├── ClusterRoleBinding-ipfs-operator-proxy-rolebinding.yaml │ ├── ConfigMap-ipfs-operator-manager-config.yaml │ ├── Deployment-ipfs-operator-controller-manager.yaml │ ├── Namespace-ipfs-operator-system.yaml │ ├── Role-ipfs-operator-leader-election-role.yaml │ ├── RoleBinding-ipfs-operator-leader-election-rolebinding.yaml │ ├── Service-ipfs-operator-controller-manager-metrics-service.yaml │ ├── ServiceAccount-ipfs-operator-controller-manager.yaml │ └── _helpers.tpl │ └── values.yaml ├── main.go ├── pyproject.toml ├── test-kuttl ├── e2e-copy │ └── private-network │ │ ├── 00-create-ipfs-cluster.yaml │ │ ├── 05-assert-private-ipfs-network.yaml │ │ ├── 10-test-data-creation.sh │ │ ├── 10-test-data-creation.yaml │ │ ├── README.md │ │ └── utils.sh ├── e2e-release │ └── cluster-follow │ │ ├── 00-create-cluster.yaml │ │ ├── 
05-assert-running.yaml │ │ ├── 10-validate-contents.sh │ │ ├── 10-validate-contents.yaml │ │ ├── README.md │ │ └── utils.sh ├── e2e │ ├── ipfs │ │ ├── 00-assert.yaml │ │ ├── 05-create-ipfs-cluster.yaml │ │ ├── 10-assert.yaml │ │ ├── 15-test-data-creation.sh │ │ ├── 15-test-data-creation.yaml │ │ ├── README.md │ │ └── utils.sh │ └── private-network │ │ ├── 00-create-ipfs-cluster.yaml │ │ ├── 05-assert-private.yaml │ │ ├── 10-test-data-creation.sh │ │ ├── 10-test-data-creation.yaml │ │ ├── README.md │ │ └── utils.sh ├── kuttl-test-release.yaml └── kuttl-test.yaml ├── tools.go └── version.mk /.dockerignore: -------------------------------------------------------------------------------- 1 | # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file 2 | # Ignore build and test binaries. 3 | bin/ 4 | testbin/ 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | bin 9 | !docs/build/ 10 | docs/build/* 11 | testbin/* 12 | 13 | # Test binary, build with `go test -c` 14 | *.test 15 | 16 | # Output of the go coverage tool, specifically when used with LiteIDE 17 | *.out 18 | *.profile 19 | profile.json 20 | 21 | # Kubernetes Generated files - skip generated files, except for vendored files 22 | 23 | !vendor/**/zz_generated.* 24 | 25 | # editor and IDE paraphernalia 26 | .idea 27 | *.swp 28 | *.swo 29 | *~ 30 | 31 | # kubeconfig generated during test-e2e 32 | test-kuttl/kubeconfig 33 | 34 | # don't include vendor/ 35 | vendor/* 36 | 37 | # use this directory for files during development that shouldn't be pushed 38 | temp/* 39 | 40 | report.json -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 
# .readthedocs.yml 3 | # Read the Docs configuration file 4 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 5 | version: 2 6 | # Build documentation in the docs/ directory with Sphinx 7 | sphinx: 8 | builder: html 9 | configuration: docs/source/conf.py 10 | # Optionally build your docs in additional formats such as PDF 11 | formats: 12 | - pdf 13 | # Optionally set the version of Python and requirements required to build your 14 | # docs 15 | python: 16 | # version: 3.7 17 | install: 18 | - requirements: docs/source/requirements.txt 19 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the manager binary 2 | FROM golang:1.18 as builder 3 | 4 | # these are reasonable defaults which accommodate 90% of cases 5 | ARG arch=amd64 6 | ARG platform=linux 7 | 8 | WORKDIR /workspace 9 | # Copy the Go Modules manifests 10 | COPY go.mod go.mod 11 | COPY go.sum go.sum 12 | # cache deps before building and copying source so that we don't need to re-download as much 13 | # and so that source changes don't invalidate our downloaded layer 14 | RUN go mod download 15 | 16 | # Copy the go source 17 | COPY main.go main.go 18 | COPY api/ api/ 19 | COPY controllers/ controllers/ 20 | 21 | # Build 22 | RUN CGO_ENABLED=0 GOOS=${platform} GOARCH=${arch} go build -a -o manager main.go 23 | 24 | # Use distroless as minimal base image to package the manager binary 25 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 26 | FROM gcr.io/distroless/static:nonroot 27 | WORKDIR / 28 | COPY --from=builder /workspace/manager . 
29 | USER 65532:65532 30 | 31 | ENTRYPOINT ["/manager"] 32 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | domain: ipfs.io 2 | layout: 3 | - go.kubebuilder.io/v3 4 | plugins: 5 | manifests.sdk.operatorframework.io/v2: {} 6 | scorecard.sdk.operatorframework.io/v2: {} 7 | projectName: ipfs-operator 8 | repo: github.com/redhat-et/ipfs-operator 9 | resources: 10 | - api: 11 | crdVersion: v1 12 | namespaced: true 13 | controller: true 14 | domain: ipfs.io 15 | group: cluster 16 | kind: CircuitRelay 17 | path: github.com/redhat-et/ipfs-operator/api/v1alpha1 18 | version: v1alpha1 19 | - api: 20 | crdVersion: v1 21 | namespaced: true 22 | controller: true 23 | domain: ipfs.io 24 | group: cluster 25 | kind: IpfsCluster 26 | path: github.com/redhat-et/ipfs-operator/api/v1alpha1 27 | version: v1alpha1 28 | version: "3" 29 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # IPFS Operator 2 | This operator is still heavily in progress. 3 | 4 | # Running 5 | This operator can be deployed either with or without OLM installed. 6 | 7 | ## With OLM 8 | ```bash 9 | operator-sdk run bundle quay.io/redhat-et-ipfs/ipfs-operator-bundle:v0.0.1 -n ipfs-operator-system 10 | ``` 11 | 12 | ## Without OLM 13 | ```bash 14 | make deploy 15 | ``` 16 | 17 | # Deploying an IPFS cluster 18 | The value for URL must be changed to match your Kubernetes environment. The public bool defines if a load balancer should be created. This load balancer allows for ipfs gets to be done from systems outside of the Kubernetes environment. 
19 | 20 | ```yaml 21 | apiVersion: cluster.ipfs.io/v1alpha1 22 | kind: Ipfs 23 | metadata: 24 | name: ipfs-sample-1 25 | spec: 26 | ipfsStorage: 2Gi 27 | clusterStorage: 2Gi 28 | ``` 29 | Once the values match your environment run the following. 30 | ```bash 31 | kubectl create -n default -f ifps.yaml 32 | ``` 33 | 34 | ### Running in KIND 35 | 36 | An easy way to test and modify changes to the operator is by running it in a local KIND cluster. 37 | To bootstrap a KIND cluster, you can run `hack/setup-kind-cluster.sh`, which will install all of the 38 | required components to operate an IPFS cluster. 39 | 40 | To deploy the operator in this repository into the cluster, you can run `hack/run-in-kind.sh` which 41 | will build the source code and inject it into the cluster. 42 | If you make subsequent changes, you will need to re-run `hack/run-in-kind.sh` and kill the previous 43 | operator manager by running `kubectl delete pod -A -n ipfs-operator-system` in order to redploy the updated image. 44 | 45 | ### Testing Local Changes 46 | 47 | If you're developing the operator and would like to test your changes locally, you can do this by 48 | running the kuttl end-to-end tests with `make test-e2e` after redploying the operator. 49 | 50 | -------------------------------------------------------------------------------- /api/v1alpha1/circuitrelay_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1alpha1 18 | 19 | import ( 20 | "github.com/libp2p/go-libp2p/core/peer" 21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 | 23 | ma "github.com/multiformats/go-multiaddr" 24 | ) 25 | 26 | // This is intended to mimic peer.AddrInfo. 27 | type AddrInfoBasicType struct { 28 | ID string `json:"id"` 29 | Addrs []string `json:"addrs"` 30 | addrInfo peer.AddrInfo `json:"-"` 31 | } 32 | 33 | func (a *AddrInfoBasicType) Parse() error { 34 | id, err := peer.Decode(a.ID) 35 | if err != nil { 36 | return err 37 | } 38 | addrs := make([]ma.Multiaddr, len(a.Addrs)) 39 | for i, addr := range a.Addrs { 40 | var maddr ma.Multiaddr 41 | maddr, err = ma.NewMultiaddr(addr) 42 | if err != nil { 43 | return err 44 | } 45 | addrs[i] = maddr 46 | } 47 | ai := peer.AddrInfo{ 48 | ID: id, 49 | Addrs: addrs, 50 | } 51 | a.addrInfo = ai 52 | return nil 53 | } 54 | 55 | func (a *AddrInfoBasicType) AddrInfo() *peer.AddrInfo { 56 | return &a.addrInfo 57 | } 58 | 59 | func (a *AddrInfoBasicType) DeepCopyInto(out *AddrInfoBasicType) { 60 | addrs := make([]string, len(a.Addrs)) 61 | 62 | copy(addrs, a.Addrs) 63 | out.ID = a.ID 64 | out.Addrs = addrs 65 | } 66 | 67 | func (a *AddrInfoBasicType) DeepCopy() *AddrInfoBasicType { 68 | var out *AddrInfoBasicType 69 | a.DeepCopyInto(out) 70 | return out 71 | } 72 | 73 | // KeyRef Defines a reference to a specific key on a certain secret. 74 | type KeyRef struct { 75 | KeyName string `json:"keyName"` 76 | SecretName string `json:"secretName"` 77 | } 78 | 79 | // CircuitRelaySpec Defines a specification for the RelayCircuit launched by Kubernetes. 80 | type CircuitRelaySpec struct { 81 | // SwarmKeyRef points to a multicodec-encoded v1 PSK stored within a secret somewhere. 
82 | // +optional 83 | SwarmKeyRef *KeyRef `json:"swarmKeyRef,omitempty"` 84 | } 85 | 86 | type CircuitRelayStatus struct { 87 | AddrInfo AddrInfoBasicType `json:"addrInfo"` 88 | } 89 | 90 | //+kubebuilder:object:root=true 91 | //+kubebuilder:subresource:status 92 | 93 | // CircuitRelay is the Schema for the circuitrelays API. 94 | type CircuitRelay struct { 95 | metav1.TypeMeta `json:",inline"` 96 | metav1.ObjectMeta `json:"metadata,omitempty"` 97 | 98 | Spec CircuitRelaySpec `json:"spec,omitempty"` 99 | Status CircuitRelayStatus `json:"status,omitempty"` 100 | } 101 | 102 | //+kubebuilder:object:root=true 103 | 104 | // CircuitRelayList contains a list of CircuitRelay. 105 | type CircuitRelayList struct { 106 | metav1.TypeMeta `json:",inline"` 107 | metav1.ListMeta `json:"metadata,omitempty"` 108 | Items []CircuitRelay `json:"items"` 109 | } 110 | 111 | func init() { 112 | SchemeBuilder.Register(&CircuitRelay{}, &CircuitRelayList{}) 113 | } 114 | -------------------------------------------------------------------------------- /api/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | // Package v1alpha1 contains API Schema definitions for the cluster v1alpha1 API group 18 | // +kubebuilder:object:generate=true 19 | // +groupName=cluster.ipfs.io 20 | package v1alpha1 21 | 22 | import ( 23 | "k8s.io/apimachinery/pkg/runtime/schema" 24 | "sigs.k8s.io/controller-runtime/pkg/scheme" 25 | ) 26 | 27 | // Define the version information about this operator. 28 | const ( 29 | Group = "cluster.ipfs.io" 30 | Version = "v1alpha1" 31 | ) 32 | 33 | var ( 34 | // GroupVersion is group version used to register these objects 35 | GroupVersion = schema.GroupVersion{Group: Group, Version: Version} 36 | 37 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 38 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 39 | 40 | // AddToScheme adds the types in this group-version to the given scheme. 41 | AddToScheme = SchemeBuilder.AddToScheme 42 | ) 43 | -------------------------------------------------------------------------------- /api/v1alpha1/ipfscluster_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package v1alpha1 18 | 19 | import ( 20 | corev1 "k8s.io/api/core/v1" 21 | "k8s.io/apimachinery/pkg/api/resource" 22 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 23 | ) 24 | 25 | const ( 26 | // ConditionReconciled is a status condition type that indicates whether the 27 | // CR has been successfully reconciled. 28 | ConditionReconciled string = "Reconciled" 29 | // ReconciledReasonComplete indicates the CR was successfully reconciled. 30 | ReconciledReasonComplete string = "ReconcileComplete" 31 | // ReconciledReasonError indicates an error was encountered while 32 | // reconciling the CR. 33 | ReconciledReasonError string = "ReconcileError" 34 | ) 35 | 36 | type ReproviderStrategy string 37 | 38 | const ( 39 | // ReproviderStrategyAll Announces the CID of every stored block. 40 | ReproviderStrategyAll ReproviderStrategy = "all" 41 | // ReproviderStrategyPinned Only announces the pinned CIDs recursively. 42 | ReproviderStrategyPinned ReproviderStrategy = "pinned" 43 | // ReproviderStrategyRoots Only announces the root block of explicitly pinned CIDs. 44 | ReproviderStrategyRoots ReproviderStrategy = "roots" 45 | ) 46 | 47 | type ReprovideSettings struct { 48 | // Strategy specifies the reprovider strategy, defaults to 'all'. 49 | // +kubebuilder:validation:Enum={all,pinned,roots} 50 | // +optional 51 | Strategy ReproviderStrategy `json:"strategy,omitempty"` 52 | // Interval sets the time between rounds of reproviding 53 | // local content to the routing system. Defaults to '12h'. 54 | // +optional 55 | Interval string `json:"interval,omitempty"` 56 | } 57 | 58 | type followParams struct { 59 | Name string `json:"name"` 60 | Template string `json:"template"` 61 | } 62 | 63 | // NetworkConfig defines the configuration structure used for networking. 64 | type NetworkConfig struct { 65 | // circuitRelays defines how many CircuitRelays should be created. 
66 | CircuitRelays int32 `json:"circuitRelays"` 67 | // public is a switch which defines whether this IPFSCluster will use 68 | // the global IPFS network or create its own. 69 | // +kubebuilder:default:=true 70 | Public bool `json:"public,omitempty"` 71 | } 72 | 73 | // IpfsClusterSpec defines the desired state of the IpfsCluster. 74 | type IpfsClusterSpec struct { 75 | // ipfsStorage defines the total storage to be allocated by this resource. 76 | IpfsStorage resource.Quantity `json:"ipfsStorage"` 77 | // clusterStorage defines the amount of storage to be used by IPFS Cluster. 78 | ClusterStorage resource.Quantity `json:"clusterStorage"` 79 | // replicas sets the number of replicas of IPFS Cluster nodes we should be running. 80 | Replicas int32 `json:"replicas"` 81 | // networking defines network configuration settings. 82 | Networking NetworkConfig `json:"networking"` 83 | // follows defines the list of other IPFS Clusters this one should follow. 84 | // +optional 85 | Follows []*followParams `json:"follows,omitempty"` 86 | // ipfsResources specifies the resource requirements for each IPFS container. If this 87 | // value is omitted, then the operator will automatically determine these settings 88 | // based on the storage sizes used. 89 | // +optional 90 | IPFSResources *corev1.ResourceRequirements `json:"ipfsResources,omitempty"` 91 | // reprovider Describes the settings that each IPFS node 92 | // should use when reproviding content. 93 | // +optional 94 | Reprovider ReprovideSettings `json:"reprovider,omitempty"` 95 | } 96 | 97 | type IpfsClusterStatus struct { 98 | Conditions []metav1.Condition `json:"conditions,omitempty"` 99 | CircuitRelays []string `json:"circuitRelays,omitempty"` 100 | } 101 | 102 | //+kubebuilder:object:root=true 103 | //+kubebuilder:subresource:status 104 | 105 | // IpfsCluster is the Schema for the ipfs API. 
106 | type IpfsCluster struct { 107 | metav1.TypeMeta `json:",inline"` 108 | metav1.ObjectMeta `json:"metadata,omitempty"` 109 | 110 | Spec IpfsClusterSpec `json:"spec,omitempty"` 111 | Status IpfsClusterStatus `json:"status,omitempty"` 112 | } 113 | 114 | //+kubebuilder:object:root=true 115 | 116 | // IpfsList contains a list of Ipfs. 117 | type IpfsClusterList struct { 118 | metav1.TypeMeta `json:",inline"` 119 | metav1.ListMeta `json:"metadata,omitempty"` 120 | Items []IpfsCluster `json:"items"` 121 | } 122 | 123 | func init() { 124 | SchemeBuilder.Register(&IpfsCluster{}, &IpfsClusterList{}) 125 | } 126 | -------------------------------------------------------------------------------- /bundle.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM scratch 2 | 3 | # Core bundle labels. 4 | LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 5 | LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ 6 | LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ 7 | LABEL operators.operatorframework.io.bundle.package.v1=ipfs-operator 8 | LABEL operators.operatorframework.io.bundle.channels.v1=alpha 9 | LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.26.0 10 | LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 11 | LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3 12 | 13 | # Labels for testing. 14 | LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 15 | LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ 16 | 17 | # Copy files to locations specified by labels. 
18 | COPY bundle/manifests /manifests/ 19 | COPY bundle/metadata /metadata/ 20 | COPY bundle/tests/scorecard /tests/scorecard/ 21 | -------------------------------------------------------------------------------- /bundle/manifests/cluster.ipfs.io_circuitrelays.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | annotations: 5 | controller-gen.kubebuilder.io/version: v0.8.0 6 | creationTimestamp: null 7 | name: circuitrelays.cluster.ipfs.io 8 | spec: 9 | group: cluster.ipfs.io 10 | names: 11 | kind: CircuitRelay 12 | listKind: CircuitRelayList 13 | plural: circuitrelays 14 | singular: circuitrelay 15 | scope: Namespaced 16 | versions: 17 | - name: v1alpha1 18 | schema: 19 | openAPIV3Schema: 20 | description: CircuitRelay is the Schema for the circuitrelays API. 21 | properties: 22 | apiVersion: 23 | description: 'APIVersion defines the versioned schema of this representation 24 | of an object. Servers should convert recognized schemas to the latest 25 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 26 | type: string 27 | kind: 28 | description: 'Kind is a string value representing the REST resource this 29 | object represents. Servers may infer this from the endpoint the client 30 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 31 | type: string 32 | metadata: 33 | type: object 34 | spec: 35 | description: CircuitRelaySpec Defines a specification for the RelayCircuit 36 | launched by Kubernetes. 37 | properties: 38 | swarmKeyRef: 39 | description: SwarmKeyRef points to a multicodec-encoded v1 PSK stored 40 | within a secret somewhere. 
41 | properties: 42 | keyName: 43 | type: string 44 | secretName: 45 | type: string 46 | required: 47 | - keyName 48 | - secretName 49 | type: object 50 | type: object 51 | status: 52 | properties: 53 | addrInfo: 54 | description: This is intended to mimic peer.AddrInfo. 55 | properties: 56 | addrs: 57 | items: 58 | type: string 59 | type: array 60 | id: 61 | type: string 62 | required: 63 | - addrs 64 | - id 65 | type: object 66 | required: 67 | - addrInfo 68 | type: object 69 | type: object 70 | served: true 71 | storage: true 72 | subresources: 73 | status: {} 74 | status: 75 | acceptedNames: 76 | kind: "" 77 | plural: "" 78 | conditions: [] 79 | storedVersions: [] 80 | -------------------------------------------------------------------------------- /bundle/manifests/ipfs-operator-controller-manager-metrics-service_v1_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | control-plane: controller-manager 7 | name: ipfs-operator-controller-manager-metrics-service 8 | spec: 9 | ports: 10 | - name: https 11 | port: 8443 12 | protocol: TCP 13 | targetPort: https 14 | selector: 15 | control-plane: controller-manager 16 | status: 17 | loadBalancer: {} 18 | -------------------------------------------------------------------------------- /bundle/manifests/ipfs-operator-manager-config_v1_configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | controller_manager_config.yaml: | 4 | apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 5 | kind: ControllerManagerConfig 6 | health: 7 | healthProbeBindAddress: :8081 8 | metrics: 9 | bindAddress: 127.0.0.1:8080 10 | webhook: 11 | port: 9443 12 | leaderElection: 13 | leaderElect: true 14 | resourceName: 658003f6.ipfs.io 15 | kind: ConfigMap 16 | metadata: 17 | name: ipfs-operator-manager-config 18 | 
-------------------------------------------------------------------------------- /bundle/manifests/ipfs-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | creationTimestamp: null 5 | name: ipfs-operator-metrics-reader 6 | rules: 7 | - nonResourceURLs: 8 | - /metrics 9 | verbs: 10 | - get 11 | -------------------------------------------------------------------------------- /bundle/metadata/annotations.yaml: -------------------------------------------------------------------------------- 1 | annotations: 2 | # Core bundle annotations. 3 | operators.operatorframework.io.bundle.mediatype.v1: registry+v1 4 | operators.operatorframework.io.bundle.manifests.v1: manifests/ 5 | operators.operatorframework.io.bundle.metadata.v1: metadata/ 6 | operators.operatorframework.io.bundle.package.v1: ipfs-operator 7 | operators.operatorframework.io.bundle.channels.v1: alpha 8 | operators.operatorframework.io.metrics.builder: operator-sdk-v1.26.0 9 | operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 10 | operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3 11 | 12 | # Annotations for testing. 
13 | operators.operatorframework.io.test.mediatype.v1: scorecard+v1 14 | operators.operatorframework.io.test.config.v1: tests/scorecard/ 15 | -------------------------------------------------------------------------------- /bundle/tests/scorecard/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scorecard.operatorframework.io/v1alpha3 2 | kind: Configuration 3 | metadata: 4 | name: config 5 | stages: 6 | - parallel: true 7 | tests: 8 | - entrypoint: 9 | - scorecard-test 10 | - basic-check-spec 11 | image: quay.io/operator-framework/scorecard-test:v1.18.1 12 | labels: 13 | suite: basic 14 | test: basic-check-spec-test 15 | storage: 16 | spec: 17 | mountPath: {} 18 | - entrypoint: 19 | - scorecard-test 20 | - olm-bundle-validation 21 | image: quay.io/operator-framework/scorecard-test:v1.18.1 22 | labels: 23 | suite: olm 24 | test: olm-bundle-validation-test 25 | storage: 26 | spec: 27 | mountPath: {} 28 | - entrypoint: 29 | - scorecard-test 30 | - olm-crds-have-validation 31 | image: quay.io/operator-framework/scorecard-test:v1.18.1 32 | labels: 33 | suite: olm 34 | test: olm-crds-have-validation-test 35 | storage: 36 | spec: 37 | mountPath: {} 38 | - entrypoint: 39 | - scorecard-test 40 | - olm-crds-have-resources 41 | image: quay.io/operator-framework/scorecard-test:v1.18.1 42 | labels: 43 | suite: olm 44 | test: olm-crds-have-resources-test 45 | storage: 46 | spec: 47 | mountPath: {} 48 | - entrypoint: 49 | - scorecard-test 50 | - olm-spec-descriptors 51 | image: quay.io/operator-framework/scorecard-test:v1.18.1 52 | labels: 53 | suite: olm 54 | test: olm-spec-descriptors-test 55 | storage: 56 | spec: 57 | mountPath: {} 58 | - entrypoint: 59 | - scorecard-test 60 | - olm-status-descriptors 61 | image: quay.io/operator-framework/scorecard-test:v1.18.1 62 | labels: 63 | suite: olm 64 | test: olm-status-descriptors-test 65 | storage: 66 | spec: 67 | mountPath: {} 68 | storage: 69 | spec: 70 | mountPath: {} 
71 | -------------------------------------------------------------------------------- /config/crd/bases/cluster.ipfs.io_circuitrelays.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.8.0 7 | creationTimestamp: null 8 | name: circuitrelays.cluster.ipfs.io 9 | spec: 10 | group: cluster.ipfs.io 11 | names: 12 | kind: CircuitRelay 13 | listKind: CircuitRelayList 14 | plural: circuitrelays 15 | singular: circuitrelay 16 | scope: Namespaced 17 | versions: 18 | - name: v1alpha1 19 | schema: 20 | openAPIV3Schema: 21 | description: CircuitRelay is the Schema for the circuitrelays API. 22 | properties: 23 | apiVersion: 24 | description: 'APIVersion defines the versioned schema of this representation 25 | of an object. Servers should convert recognized schemas to the latest 26 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 27 | type: string 28 | kind: 29 | description: 'Kind is a string value representing the REST resource this 30 | object represents. Servers may infer this from the endpoint the client 31 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 32 | type: string 33 | metadata: 34 | type: object 35 | spec: 36 | description: CircuitRelaySpec Defines a specification for the RelayCircuit 37 | launched by Kubernetes. 38 | properties: 39 | swarmKeyRef: 40 | description: SwarmKeyRef points to a multicodec-encoded v1 PSK stored 41 | within a secret somewhere. 
42 | properties: 43 | keyName: 44 | type: string 45 | secretName: 46 | type: string 47 | required: 48 | - keyName 49 | - secretName 50 | type: object 51 | type: object 52 | status: 53 | properties: 54 | addrInfo: 55 | description: This is intended to mimic peer.AddrInfo. 56 | properties: 57 | addrs: 58 | items: 59 | type: string 60 | type: array 61 | id: 62 | type: string 63 | required: 64 | - addrs 65 | - id 66 | type: object 67 | required: 68 | - addrInfo 69 | type: object 70 | type: object 71 | served: true 72 | storage: true 73 | subresources: 74 | status: {} 75 | status: 76 | acceptedNames: 77 | kind: "" 78 | plural: "" 79 | conditions: [] 80 | storedVersions: [] 81 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/cluster.ipfs.io_circuitrelays.yaml 6 | - bases/cluster.ipfs.io_ipfsclusters.yaml 7 | #+kubebuilder:scaffold:crdkustomizeresource 8 | 9 | patchesStrategicMerge: 10 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 11 | # patches here are for enabling the conversion webhook for each CRD 12 | #- patches/webhook_in_ipfs.yaml 13 | #- patches/webhook_in_circuitrelays.yaml 14 | #- patches/webhook_in_ipfsclusters.yaml 15 | #+kubebuilder:scaffold:crdkustomizewebhookpatch 16 | 17 | # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. 
18 | # patches here are for enabling the CA injection for each CRD 19 | #- patches/cainjection_in_ipfs.yaml 20 | #- patches/cainjection_in_circuitrelays.yaml 21 | #- patches/cainjection_in_ipfsclusters.yaml 22 | #+kubebuilder:scaffold:crdkustomizecainjectionpatch 23 | 24 | # the following config is for teaching kustomize how to do kustomization for CRDs. 25 | configurations: 26 | - kustomizeconfig.yaml 27 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | version: v1 8 | group: apiextensions.k8s.io 9 | path: spec/conversion/webhook/clientConfig/service/name 10 | 11 | namespace: 12 | - kind: CustomResourceDefinition 13 | version: v1 14 | group: apiextensions.k8s.io 15 | path: spec/conversion/webhook/clientConfig/service/namespace 16 | create: false 17 | 18 | varReference: 19 | - path: metadata/annotations 20 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_circuitrelays.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: circuitrelays.cluster.ipfs.io 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_ipfs.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the 
CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: ipfs.cluster.ipfs.io 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_ipfsclusters.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: ipfsclusters.cluster.ipfs.io 8 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_circuitrelays.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: circuitrelays.cluster.ipfs.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_ipfs.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: ipfs.cluster.ipfs.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | 
-------------------------------------------------------------------------------- /config/crd/patches/webhook_in_ipfsclusters.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: ipfsclusters.cluster.ipfs.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: ipfs-operator-system 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: ipfs-operator- 10 | 11 | # Labels to add to all resources and selectors. 12 | #commonLabels: 13 | # someName: someValue 14 | 15 | bases: 16 | - ../crd 17 | - ../rbac 18 | - ../manager 19 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 20 | # crd/kustomization.yaml 21 | #- ../webhook 22 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 23 | #- ../certmanager 24 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 25 | #- ../prometheus 26 | 27 | patchesStrategicMerge: 28 | # Protect the /metrics endpoint by putting it behind auth. 29 | # If you want your controller-manager to expose the /metrics 30 | # endpoint w/o any authn/z, please comment the following line. 
31 | - manager_auth_proxy_patch.yaml 32 | 33 | # Mount the controller config file for loading manager configurations 34 | # through a ComponentConfig type 35 | #- manager_config_patch.yaml 36 | 37 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 38 | # crd/kustomization.yaml 39 | #- manager_webhook_patch.yaml 40 | 41 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 42 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 43 | # 'CERTMANAGER' needs to be enabled to use ca injection 44 | #- webhookcainjection_patch.yaml 45 | 46 | # the following config is for teaching kustomize how to do var substitution 47 | vars: 48 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 49 | #- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR 50 | # objref: 51 | # kind: Certificate 52 | # group: cert-manager.io 53 | # version: v1 54 | # name: serving-cert # this name should match the one in certificate.yaml 55 | # fieldref: 56 | # fieldpath: metadata.namespace 57 | #- name: CERTIFICATE_NAME 58 | # objref: 59 | # kind: Certificate 60 | # group: cert-manager.io 61 | # version: v1 62 | # name: serving-cert # this name should match the one in certificate.yaml 63 | #- name: SERVICE_NAMESPACE # namespace of the service 64 | # objref: 65 | # kind: Service 66 | # version: v1 67 | # name: webhook-service 68 | # fieldref: 69 | # fieldpath: metadata.namespace 70 | #- name: SERVICE_NAME 71 | # objref: 72 | # kind: Service 73 | # version: v1 74 | # name: webhook-service 75 | -------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch injects a sidecar container which is an HTTP proxy for the 2 | controller manager, which performs RBAC
authorization against the Kubernetes API using SubjectAccessReviews. 3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: controller-manager 7 | namespace: system 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: kube-rbac-proxy 13 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 14 | args: 15 | - "--secure-listen-address=0.0.0.0:8443" 16 | - "--upstream=http://127.0.0.1:8080/" 17 | - "--logtostderr=true" 18 | - "--v=0" 19 | ports: 20 | - containerPort: 8443 21 | protocol: TCP 22 | name: https 23 | resources: 24 | limits: 25 | cpu: 500m 26 | memory: 128Mi 27 | requests: 28 | cpu: 5m 29 | memory: 64Mi 30 | - name: manager 31 | args: 32 | - "--health-probe-bind-address=:8081" 33 | - "--metrics-bind-address=127.0.0.1:8080" 34 | - "--leader-elect" 35 | -------------------------------------------------------------------------------- /config/default/manager_config_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | args: 12 | - "--config=controller_manager_config.yaml" 13 | volumeMounts: 14 | - name: manager-config 15 | mountPath: /controller_manager_config.yaml 16 | subPath: controller_manager_config.yaml 17 | volumes: 18 | - name: manager-config 19 | configMap: 20 | name: manager-config 21 | -------------------------------------------------------------------------------- /config/manager/controller_manager_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 2 | kind: ControllerManagerConfig 3 | health: 4 | healthProbeBindAddress: :8081 5 | metrics: 6 | bindAddress: 127.0.0.1:8080 7 | webhook: 8 | port: 9443 9 | leaderElection: 10 | leaderElect: true 11 | resourceName: 658003f6.ipfs.io 12 | 
-------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | 4 | generatorOptions: 5 | disableNameSuffixHash: true 6 | 7 | configMapGenerator: 8 | - files: 9 | - controller_manager_config.yaml 10 | name: manager-config 11 | apiVersion: kustomize.config.k8s.io/v1beta1 12 | kind: Kustomization 13 | images: 14 | - name: controller 15 | newName: quay.io/redhat-et-ipfs/ipfs-operator 16 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: system 7 | --- 8 | apiVersion: apps/v1 9 | kind: Deployment 10 | metadata: 11 | name: controller-manager 12 | namespace: system 13 | labels: 14 | control-plane: controller-manager 15 | spec: 16 | selector: 17 | matchLabels: 18 | control-plane: controller-manager 19 | replicas: 1 20 | template: 21 | metadata: 22 | annotations: 23 | kubectl.kubernetes.io/default-container: manager 24 | labels: 25 | control-plane: controller-manager 26 | spec: 27 | securityContext: 28 | runAsNonRoot: true 29 | containers: 30 | - command: 31 | - /manager 32 | args: 33 | - --leader-elect 34 | imagePullPolicy: IfNotPresent 35 | image: controller:v0.0.1 36 | name: manager 37 | securityContext: 38 | allowPrivilegeEscalation: false 39 | livenessProbe: 40 | httpGet: 41 | path: /healthz 42 | port: 8081 43 | initialDelaySeconds: 15 44 | periodSeconds: 20 45 | readinessProbe: 46 | httpGet: 47 | path: /readyz 48 | port: 8081 49 | initialDelaySeconds: 5 50 | periodSeconds: 10 51 | # TODO(user): Configure the resources accordingly based on the project requirements. 
52 | # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ 53 | resources: 54 | limits: 55 | cpu: 100m 56 | memory: 300Mi 57 | requests: 58 | cpu: 100m 59 | memory: 20Mi 60 | serviceAccountName: controller-manager 61 | terminationGracePeriodSeconds: 10 62 | -------------------------------------------------------------------------------- /config/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # These resources constitute the fully configured set of manifests 2 | # used to generate the 'manifests/' directory in a bundle. 3 | resources: 4 | - bases/ipfs-operator.clusterserviceversion.yaml 5 | - ../default 6 | - ../samples 7 | - ../scorecard 8 | 9 | # [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix. 10 | # Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager. 11 | # These patches remove the unnecessary "cert" volume and its manager container volumeMount. 12 | #patchesJson6902: 13 | #- target: 14 | # group: apps 15 | # version: v1 16 | # kind: Deployment 17 | # name: controller-manager 18 | # namespace: system 19 | # patch: |- 20 | # # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs. 21 | # # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment. 22 | # - op: remove 23 | # path: /spec/template/spec/containers/1/volumeMounts/0 24 | # # Remove the "cert" volume, since OLM will create and mount a set of certs. 25 | # # Update the indices in this path if adding or removing volumes in the manager's Deployment. 
26 | # - op: remove 27 | # path: /spec/template/spec/volumes/0 28 | -------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | control-plane: controller-manager 8 | name: controller-manager-metrics-monitor 9 | namespace: system 10 | spec: 11 | endpoints: 12 | - path: /metrics 13 | port: https 14 | scheme: https 15 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 16 | tlsConfig: 17 | insecureSkipVerify: true 18 | selector: 19 | matchLabels: 20 | control-plane: controller-manager 21 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: metrics-reader 5 | rules: 6 | - nonResourceURLs: 7 | - "/metrics" 8 | verbs: 9 | - get 10 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: proxy-role 5 | rules: 6 | - apiGroups: 7 | - authentication.k8s.io 8 | resources: 9 | - tokenreviews 10 | verbs: 11 | - create 12 | - apiGroups: 13 | - authorization.k8s.io 14 | resources: 15 | - subjectaccessreviews 16 | verbs: 17 | - create 18 | 
-------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: proxy-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: proxy-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: controller-manager-metrics-service 7 | namespace: system 8 | spec: 9 | ports: 10 | - name: https 11 | port: 8443 12 | protocol: TCP 13 | targetPort: https 14 | selector: 15 | control-plane: controller-manager 16 | -------------------------------------------------------------------------------- /config/rbac/circuitrelay_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit circuitrelays. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: circuitrelay-editor-role 6 | rules: 7 | - apiGroups: 8 | - cluster.ipfs.io 9 | resources: 10 | - circuitrelays 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - cluster.ipfs.io 21 | resources: 22 | - circuitrelays/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/circuitrelay_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view circuitrelays. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: circuitrelay-viewer-role 6 | rules: 7 | - apiGroups: 8 | - cluster.ipfs.io 9 | resources: 10 | - circuitrelays 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - cluster.ipfs.io 17 | resources: 18 | - circuitrelays/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/ipfs_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit ipfs. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: ipfs-editor-role 6 | rules: 7 | - apiGroups: 8 | - cluster.ipfs.io 9 | resources: 10 | - ipfs 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - cluster.ipfs.io 21 | resources: 22 | - ipfs/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/ipfs_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view ipfs. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: ipfs-viewer-role 6 | rules: 7 | - apiGroups: 8 | - cluster.ipfs.io 9 | resources: 10 | - ipfs 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - cluster.ipfs.io 17 | resources: 18 | - ipfs/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/ipfscluster_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit ipfsclusters. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: ipfscluster-editor-role 6 | rules: 7 | - apiGroups: 8 | - cluster.ipfs.io 9 | resources: 10 | - ipfsclusters 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - cluster.ipfs.io 21 | resources: 22 | - ipfsclusters/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/ipfscluster_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view ipfsclusters. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: ipfscluster-viewer-role 6 | rules: 7 | - apiGroups: 8 | - cluster.ipfs.io 9 | resources: 10 | - ipfsclusters 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - cluster.ipfs.io 17 | resources: 18 | - ipfsclusters/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | # All RBAC will be applied under this service account in 3 | # the deployment namespace. You may comment out this resource 4 | # if your manager will use a service account that exists at 5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding 6 | # subjects if changing service account names. 7 | - service_account.yaml 8 | - role.yaml 9 | - role_binding.yaml 10 | - leader_election_role.yaml 11 | - leader_election_role_binding.yaml 12 | # Comment the following 4 lines if you want to disable 13 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 14 | # which protects your /metrics endpoint. 
15 | - auth_proxy_service.yaml 16 | - auth_proxy_role.yaml 17 | - auth_proxy_role_binding.yaml 18 | - auth_proxy_client_clusterrole.yaml 19 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - coordination.k8s.io 21 | resources: 22 | - leases 23 | verbs: 24 | - get 25 | - list 26 | - watch 27 | - create 28 | - update 29 | - patch 30 | - delete 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | verbs: 36 | - create 37 | - patch 38 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | creationTimestamp: null 6 | name: manager-role 7 | rules: 8 | - apiGroups: 9 | - '*' 10 | resources: 11 | - '*' 12 | verbs: 13 | - get 14 | - list 15 | - apiGroups: 16 | - apps 17 | resources: 18 | - deployments 19 | verbs: 20 | - create 21 | - 
delete 22 | - get 23 | - list 24 | - patch 25 | - update 26 | - watch 27 | - apiGroups: 28 | - apps 29 | resources: 30 | - statefulsets 31 | verbs: 32 | - create 33 | - delete 34 | - get 35 | - list 36 | - patch 37 | - update 38 | - watch 39 | - apiGroups: 40 | - cluster.ipfs.io 41 | resources: 42 | - circuitrelays 43 | verbs: 44 | - create 45 | - delete 46 | - get 47 | - list 48 | - patch 49 | - update 50 | - watch 51 | - apiGroups: 52 | - cluster.ipfs.io 53 | resources: 54 | - circuitrelays/finalizers 55 | verbs: 56 | - update 57 | - apiGroups: 58 | - cluster.ipfs.io 59 | resources: 60 | - circuitrelays/status 61 | verbs: 62 | - get 63 | - patch 64 | - update 65 | - apiGroups: 66 | - cluster.ipfs.io 67 | resources: 68 | - ipfsclusters 69 | verbs: 70 | - create 71 | - delete 72 | - get 73 | - list 74 | - patch 75 | - update 76 | - watch 77 | - apiGroups: 78 | - cluster.ipfs.io 79 | resources: 80 | - ipfsclusters/finalizers 81 | verbs: 82 | - update 83 | - apiGroups: 84 | - cluster.ipfs.io 85 | resources: 86 | - ipfsclusters/status 87 | verbs: 88 | - get 89 | - patch 90 | - update 91 | - apiGroups: 92 | - "" 93 | resources: 94 | - configmaps 95 | verbs: 96 | - create 97 | - delete 98 | - get 99 | - list 100 | - patch 101 | - update 102 | - watch 103 | - apiGroups: 104 | - "" 105 | resources: 106 | - persistentvolumeclaims 107 | verbs: 108 | - create 109 | - delete 110 | - get 111 | - list 112 | - patch 113 | - update 114 | - watch 115 | - apiGroups: 116 | - "" 117 | resources: 118 | - secrets 119 | verbs: 120 | - create 121 | - delete 122 | - get 123 | - list 124 | - patch 125 | - update 126 | - watch 127 | - apiGroups: 128 | - "" 129 | resources: 130 | - serviceaccounts 131 | verbs: 132 | - create 133 | - delete 134 | - get 135 | - list 136 | - patch 137 | - update 138 | - watch 139 | - apiGroups: 140 | - "" 141 | resources: 142 | - services 143 | verbs: 144 | - create 145 | - delete 146 | - get 147 | - list 148 | - patch 149 | - update 150 | - watch 151 | - 
apiGroups: 152 | - networking.k8s.io 153 | resources: 154 | - ingresses 155 | verbs: 156 | - create 157 | - delete 158 | - get 159 | - list 160 | - patch 161 | - update 162 | - watch 163 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: manager-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | -------------------------------------------------------------------------------- /config/samples/cluster_v1alpha1_circuitrelay.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.ipfs.io/v1alpha1 2 | kind: CircuitRelay 3 | metadata: 4 | name: circuitrelay-sample 5 | spec: 6 | # TODO(user): Add fields here 7 | -------------------------------------------------------------------------------- /config/samples/cluster_v1alpha1_ipfscluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.ipfs.io/v1alpha1 2 | kind: IpfsCluster 3 | metadata: 4 | name: ipfscluster-sample 5 | spec: 6 | # TODO(user): Add fields here 7 | -------------------------------------------------------------------------------- /config/samples/kustomization.yaml: -------------------------------------------------------------------------------- 1 | ## Append samples you want in your CSV to this file 
as resources ## 2 | resources: 3 | - cluster_v1alpha1_circuitrelay.yaml 4 | - cluster_v1alpha1_ipfscluster.yaml 5 | #+kubebuilder:scaffold:manifestskustomizesamples 6 | -------------------------------------------------------------------------------- /config/scorecard/bases/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scorecard.operatorframework.io/v1alpha3 2 | kind: Configuration 3 | metadata: 4 | name: config 5 | stages: 6 | - parallel: true 7 | tests: [] 8 | -------------------------------------------------------------------------------- /config/scorecard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - bases/config.yaml 3 | patchesJson6902: 4 | - path: patches/basic.config.yaml 5 | target: 6 | group: scorecard.operatorframework.io 7 | version: v1alpha3 8 | kind: Configuration 9 | name: config 10 | - path: patches/olm.config.yaml 11 | target: 12 | group: scorecard.operatorframework.io 13 | version: v1alpha3 14 | kind: Configuration 15 | name: config 16 | #+kubebuilder:scaffold:patchesJson6902 17 | -------------------------------------------------------------------------------- /config/scorecard/patches/basic.config.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /stages/0/tests/- 3 | value: 4 | entrypoint: 5 | - scorecard-test 6 | - basic-check-spec 7 | image: quay.io/operator-framework/scorecard-test:v1.18.1 8 | labels: 9 | suite: basic 10 | test: basic-check-spec-test 11 | -------------------------------------------------------------------------------- /config/scorecard/patches/olm.config.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /stages/0/tests/- 3 | value: 4 | entrypoint: 5 | - scorecard-test 6 | - olm-bundle-validation 7 | image: quay.io/operator-framework/scorecard-test:v1.18.1 8 | labels: 9 
| suite: olm 10 | test: olm-bundle-validation-test 11 | - op: add 12 | path: /stages/0/tests/- 13 | value: 14 | entrypoint: 15 | - scorecard-test 16 | - olm-crds-have-validation 17 | image: quay.io/operator-framework/scorecard-test:v1.18.1 18 | labels: 19 | suite: olm 20 | test: olm-crds-have-validation-test 21 | - op: add 22 | path: /stages/0/tests/- 23 | value: 24 | entrypoint: 25 | - scorecard-test 26 | - olm-crds-have-resources 27 | image: quay.io/operator-framework/scorecard-test:v1.18.1 28 | labels: 29 | suite: olm 30 | test: olm-crds-have-resources-test 31 | - op: add 32 | path: /stages/0/tests/- 33 | value: 34 | entrypoint: 35 | - scorecard-test 36 | - olm-spec-descriptors 37 | image: quay.io/operator-framework/scorecard-test:v1.18.1 38 | labels: 39 | suite: olm 40 | test: olm-spec-descriptors-test 41 | - op: add 42 | path: /stages/0/tests/- 43 | value: 44 | entrypoint: 45 | - scorecard-test 46 | - olm-status-descriptors 47 | image: quay.io/operator-framework/scorecard-test:v1.18.1 48 | labels: 49 | suite: olm 50 | test: olm-status-descriptors-test 51 | -------------------------------------------------------------------------------- /controllers/circuitrelay.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | clusterv1alpha1 "github.com/redhat-et/ipfs-operator/api/v1alpha1" 8 | corev1 "k8s.io/api/core/v1" 9 | ctrl "sigs.k8s.io/controller-runtime" 10 | "sigs.k8s.io/controller-runtime/pkg/client" 11 | "sigs.k8s.io/controller-runtime/pkg/log" 12 | ctrllog "sigs.k8s.io/controller-runtime/pkg/log" 13 | ) 14 | 15 | func (r *IpfsClusterReconciler) EnsureCircuitRelay( 16 | ctx context.Context, 17 | m *clusterv1alpha1.IpfsCluster, 18 | secret *corev1.Secret, 19 | ) (err error) { 20 | log := ctrllog.FromContext(ctx) 21 | if err = r.createCircuitRelays(ctx, m, secret); err != nil { 22 | return fmt.Errorf("cannot create circuit relays: %w", err) 23 | } 24 | // 
Check the status of circuit relays. 25 | // wait for them to complte so we can determine announce addresses. 26 | for _, relayName := range m.Status.CircuitRelays { 27 | relay := clusterv1alpha1.CircuitRelay{} 28 | relay.Name = relayName 29 | relay.Namespace = m.Namespace 30 | if err = r.Client.Get(ctx, client.ObjectKeyFromObject(&relay), &relay); err != nil { 31 | return fmt.Errorf("could not lookup circuitRelay %q: %w", relayName, err) 32 | } 33 | if relay.Status.AddrInfo.ID == "" { 34 | log.Info("relay is not ready yet. Will continue waiting.", "relay", relayName) 35 | return fmt.Errorf("relay is not ready yet") 36 | } 37 | } 38 | if err = r.Status().Update(ctx, m); err != nil { 39 | return err 40 | } 41 | return nil 42 | } 43 | 44 | // createCircuitRelays Creates the necessary amount of circuit relays if any are missing. 45 | // FIXME: if we change the number of CircuitRelays, we should update 46 | // the IPFS config file as well. 47 | func (r *IpfsClusterReconciler) createCircuitRelays( 48 | ctx context.Context, 49 | instance *clusterv1alpha1.IpfsCluster, 50 | secret *corev1.Secret, 51 | ) error { 52 | logger := log.FromContext(ctx, "context", "createCircuitRelays", "instance", instance) 53 | // do nothing 54 | if len(instance.Status.CircuitRelays) >= int(instance.Spec.Networking.CircuitRelays) { 55 | logger.Info("we have enough circuitRelays, skipping creation") 56 | // FIXME: handle scale-down of circuit relays 57 | return nil 58 | } 59 | logger.Info("creating more circuitRelays") 60 | // create the CircuitRelays 61 | for i := 0; int32(i) < instance.Spec.Networking.CircuitRelays; i++ { 62 | name := fmt.Sprintf("%s-%d", instance.Name, i) 63 | relay := clusterv1alpha1.CircuitRelay{} 64 | relay.Name = name 65 | relay.Namespace = instance.Namespace 66 | // include the private swarm key, if one is being provided 67 | if secret != nil { 68 | relay.Spec.SwarmKeyRef = &clusterv1alpha1.KeyRef{ 69 | KeyName: KeySwarmKey, 70 | SecretName: secret.Name, 71 | } 72 | } 73 
| if err := ctrl.SetControllerReference(instance, &relay, r.Scheme); err != nil { 74 | return fmt.Errorf( 75 | "cannot set controller reference for new circuitRelay: %w, circuitRelay: %s", 76 | err, relay.Name, 77 | ) 78 | } 79 | if err := r.Create(ctx, &relay); err != nil { 80 | return fmt.Errorf("cannot create new circuitRelay: %w", err) 81 | } 82 | instance.Status.CircuitRelays = append(instance.Status.CircuitRelays, relay.Name) 83 | } 84 | if err := r.Status().Update(ctx, instance); err != nil { 85 | return err 86 | } 87 | return nil 88 | } 89 | -------------------------------------------------------------------------------- /controllers/configmap.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | corev1 "k8s.io/api/core/v1" 8 | "k8s.io/apimachinery/pkg/api/errors" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | ctrl "sigs.k8s.io/controller-runtime" 11 | 12 | "github.com/alecthomas/units" 13 | "github.com/ipfs/kubo/config" 14 | "github.com/libp2p/go-libp2p/core/peer" 15 | ma "github.com/multiformats/go-multiaddr" 16 | clusterv1alpha1 "github.com/redhat-et/ipfs-operator/api/v1alpha1" 17 | "github.com/redhat-et/ipfs-operator/controllers/scripts" 18 | "sigs.k8s.io/controller-runtime/pkg/client" 19 | ctrllog "sigs.k8s.io/controller-runtime/pkg/log" 20 | ) 21 | 22 | const ( 23 | // ScriptConfigureIPFS Defines the script run by the IPFS containers 24 | // in order to initialize their state. 25 | ScriptConfigureIPFS = "configure-ipfs.sh" 26 | // ScriptIPFSClusterEntryPoint Defines a shell script used as the entrypoint 27 | // for the IPFS Cluster container. 28 | ScriptIPFSClusterEntryPoint = "entrypoint.sh" 29 | ) 30 | 31 | // EnsureConfigMapScripts Returns a mutate function which loads the given configMap with scripts that 32 | // customize the startup of the IPFS containers depending on the values from the given IPFS cluster resource. 
func (r *IpfsClusterReconciler) EnsureConfigMapScripts(
	ctx context.Context,
	m *clusterv1alpha1.IpfsCluster,
	relayPeers []peer.AddrInfo,
	relayStatic []ma.Multiaddr,
	bootstrapPeers []string,
) (*corev1.ConfigMap, error) {
	var err error
	log := ctrllog.FromContext(ctx)
	// ConfigMap name is derived from the cluster name so each cluster gets
	// its own scripts ConfigMap in the same namespace.
	cmName := "ipfs-cluster-scripts-" + m.Name
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      cmName,
			Namespace: m.Namespace,
		},
	}
	// NotFound is tolerated here: CreateOrUpdate below will create the
	// ConfigMap if it does not exist yet.
	err = r.Get(ctx, client.ObjectKeyFromObject(cm), cm)
	if err != nil && !errors.IsNotFound(err) {
		return nil, fmt.Errorf("could not get configmap: %w", err)
	}

	op, err := ctrl.CreateOrUpdate(ctx, r.Client, cm, func() error {
		// TODO: compute these values in another function & place them here
		// convert multiaddrs to strings
		relayStaticStrs := make([]string, len(relayStatic))
		for i, maddr := range relayStatic {
			relayStaticStrs[i] = maddr.String()
		}

		// Relay client configuration passed into the generated configure
		// script; enables the node to dial through the static relays.
		relayConfig := config.RelayClient{
			Enabled:      config.True,
			StaticRelays: relayStaticStrs,
		}

		// compute storage sizes of IPFS volumes
		// AsInt64 fails for quantities that overflow/need scaling; fall back
		// to the decimal representation's value in that case.
		sizei64, ok := m.Spec.IpfsStorage.AsInt64()
		if !ok {
			sizei64 = m.Spec.IpfsStorage.ToDec().Value()
		}
		maxStorage := MaxIPFSStorage(sizei64)
		maxStorageS := fmt.Sprintf("%dB", maxStorage)
		bloomFilterSize := scripts.CalculateBloomFilterSize(maxStorage)

		// reprovider settings, defaulted when unset in the spec
		reproviderStrategy := m.Spec.Reprovider.Strategy
		if reproviderStrategy == "" {
			reproviderStrategy = clusterv1alpha1.ReproviderStrategyAll
		}
		reproviderInterval := m.Spec.Reprovider.Interval
		if reproviderInterval == "" {
			reproviderInterval = "12h"
		}

		// get the config script
		configScript, internalErr := scripts.CreateConfigureScript(
			maxStorageS,
			relayPeers,
			relayConfig,
			bloomFilterSize,
			reproviderInterval,
			string(reproviderStrategy),
			bootstrapPeers,
		)
		if internalErr != nil {
			return fmt.Errorf("could not create config script: %w", internalErr)
		}

		// The ConfigMap carries both the cluster container entrypoint and
		// the generated IPFS configure script.
		cm.Data = map[string]string{
			ScriptIPFSClusterEntryPoint: scripts.IPFSClusterEntrypoint,
			ScriptConfigureIPFS:         configScript,
		}
		if internalErr = ctrl.SetControllerReference(m, cm, r.Scheme); internalErr != nil {
			return fmt.Errorf("failed to set controller reference: %w", internalErr)
		}
		return nil
	})
	if err != nil {
		log.Error(err, "failed to createorupdate configmap", "operation", op, "configmap", cm)
		return nil, fmt.Errorf("could not create or update configmap: %w", err)
	}
	log.Info("completed createorupdate configmap", "operation", op, "configMap", cm)
	return cm, nil
}

// staticAddrsFromRelayPeers Extracts all of the static addresses from the
// given list of relayPeers.
func staticAddrsFromRelayPeers(relayPeers []peer.AddrInfo) ([]ma.Multiaddr, error) {
	relayStatic := make([]ma.Multiaddr, 0)
	for _, addrInfo := range relayPeers {
		// Build the trailing /p2p/<peerID> component once per peer ...
		p2ppart, err := ma.NewMultiaddr("/p2p/" + addrInfo.ID.String())
		if err != nil {
			return nil, fmt.Errorf("could not create p2p component: %w", err)
		}
		// ... then append it to every transport address the peer advertises.
		for _, addr := range addrInfo.Addrs {
			fullMa := addr.Encapsulate(p2ppart)
			relayStatic = append(relayStatic, fullMa)
		}
	}
	return relayStatic, nil
}

// getCircuitInfo Gets address info from the list of CircuitRelays
// and returns a list of AddrInfo.
func (r *IpfsClusterReconciler) getCircuitInfo(
	ctx context.Context,
	ipfs *clusterv1alpha1.IpfsCluster,
) ([]peer.AddrInfo, error) {
	log := ctrllog.FromContext(ctx)
	relayPeers := []peer.AddrInfo{}
	for _, relayName := range ipfs.Status.CircuitRelays {
		relay := clusterv1alpha1.CircuitRelay{}
		relay.Name = relayName
		relay.Namespace = ipfs.Namespace
		// OPTIMIZE: do this asynchronously?
		if err := r.Get(ctx, client.ObjectKeyFromObject(&relay), &relay); err != nil {
			return nil, fmt.Errorf("could not lookup circuitRelay: %w", err)
		}
		// A relay with unparseable AddrInfo is skipped (logged, not fatal)
		// so one bad relay does not block config generation for the rest.
		if err := relay.Status.AddrInfo.Parse(); err != nil {
			log.Error(err, "could not parse AddrInfo. Information will not be included in config", "relay", relayName)
			continue
		}
		addrInfo := relay.Status.AddrInfo.AddrInfo()
		relayPeers = append(relayPeers, *addrInfo)
	}
	return relayPeers, nil
}

// MaxIPFSStorage Accepts a storage quantity and returns with a
// calculated value to be used for setting the Max IPFS storage value
// in bytes.
func MaxIPFSStorage(ipfsStorage int64) (storageMaxGB int64) {
	var reducedSize units.Base2Bytes
	// if the disk is big, use a bigger percentage of it.
	// > 8 TiB: use 90% of the disk; otherwise use 80%.
	if units.Base2Bytes(ipfsStorage) > units.Tebibyte*8 {
		reducedSize = units.Base2Bytes(ipfsStorage) * 9 / 10
	} else {
		reducedSize = units.Base2Bytes(ipfsStorage) * 8 / 10
	}
	return int64(reducedSize)
}
--------------------------------------------------------------------------------
/controllers/ipfs-cluster-operator.code-workspace:
--------------------------------------------------------------------------------
{
	"folders": [
		{
			"path": ".."
5 | }, 6 | { 7 | "path": "../../volsync" 8 | }, 9 | { 10 | "path": "../../../workdir/ipfs-cluster" 11 | } 12 | ], 13 | "settings": {} 14 | } -------------------------------------------------------------------------------- /controllers/ipfs_util.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/libp2p/go-libp2p/core/peer" 7 | ma "github.com/multiformats/go-multiaddr" 8 | "github.com/redhat-et/ipfs-operator/api/v1alpha1" 9 | ) 10 | 11 | // EnsureRelayCircuitInfo Returns information about the configured CircuitRelays. 12 | func (r *IpfsClusterReconciler) EnsureRelayCircuitInfo(ctx context.Context, ipfs *v1alpha1.IpfsCluster) ( 13 | relayPeers []peer.AddrInfo, relayStatic []ma.Multiaddr, err error) { 14 | if relayPeers, err = r.getCircuitInfo(ctx, ipfs); err != nil { 15 | return 16 | } 17 | if relayStatic, err = staticAddrsFromRelayPeers(relayPeers); err != nil { 18 | return 19 | } 20 | return 21 | } 22 | -------------------------------------------------------------------------------- /controllers/scripts/config_test.go: -------------------------------------------------------------------------------- 1 | package scripts_test 2 | 3 | import ( 4 | . "github.com/onsi/ginkgo/v2" 5 | . 
"github.com/onsi/gomega" 6 | "github.com/redhat-et/ipfs-operator/controllers/scripts" 7 | ) 8 | 9 | var _ = Describe("Bloom Filter", func() { 10 | 11 | When("storage values are provided in decimal SI", func() { 12 | It("returns the correct value", func() { 13 | testCases := []struct { 14 | Input int64 15 | ExpectedBytes int64 16 | }{ 17 | { 18 | // N = 2 19 | Input: int64(2 * scripts.BloomBlockSize), 20 | ExpectedBytes: 4, 21 | }, 22 | { 23 | // N = 1, same size as block 24 | Input: int64(1 * scripts.BloomBlockSize), 25 | ExpectedBytes: 2, 26 | }, 27 | { 28 | // N = 57 29 | Input: int64(57 * scripts.BloomBlockSize), 30 | ExpectedBytes: 107, 31 | }, 32 | { 33 | // N = 0, invalid value 34 | Input: int64(0), 35 | ExpectedBytes: 0, 36 | }, 37 | { 38 | // N != 0, but N < 1 39 | Input: int64(1), 40 | ExpectedBytes: 0, 41 | }, 42 | { 43 | // Size just 1 Byte smaller than N = 1 44 | Input: int64(1*scripts.BloomBlockSize - 1), 45 | ExpectedBytes: 0, 46 | }, 47 | } 48 | for _, test := range testCases { 49 | output := scripts.CalculateBloomFilterSize(test.Input) 50 | Expect(output).To(Equal(test.ExpectedBytes)) 51 | } 52 | }) 53 | }) 54 | }) 55 | -------------------------------------------------------------------------------- /controllers/scripts/scripts_suite_test.go: -------------------------------------------------------------------------------- 1 | package scripts_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo/v2" 7 | . 
"github.com/onsi/gomega" 8 | ) 9 | 10 | func TestScripts(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "Scripts Suite") 13 | } 14 | -------------------------------------------------------------------------------- /controllers/secret.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strconv" 7 | "strings" 8 | 9 | corev1 "k8s.io/api/core/v1" 10 | "k8s.io/apimachinery/pkg/api/errors" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | ctrl "sigs.k8s.io/controller-runtime" 13 | "sigs.k8s.io/controller-runtime/pkg/client" 14 | 15 | peer "github.com/libp2p/go-libp2p/core/peer" 16 | clusterv1alpha1 "github.com/redhat-et/ipfs-operator/api/v1alpha1" 17 | "github.com/redhat-et/ipfs-operator/controllers/utils" 18 | ) 19 | 20 | const ( 21 | KeyClusterSecret = "CLUSTER_SECRET" 22 | KeyBootstrapPeerPrivateKey = "BOOTSTRAP_PEER_PRIV_KEY" 23 | KeyBootstrapPeerID = "BOOTSTRAP_PEER_ID" 24 | KeySwarmKey = "SWARM_KEY" 25 | KeyPeerIDPrefix = "peerID-" 26 | KeyPrivateKeyPrefix = "privateKey-" 27 | ) 28 | 29 | func (r *IpfsClusterReconciler) EnsureSecretConfig( 30 | ctx context.Context, 31 | m *clusterv1alpha1.IpfsCluster, 32 | ) (*corev1.Secret, error) { 33 | secName := "ipfs-cluster-" + m.Name 34 | 35 | expectedSecret := &corev1.Secret{ 36 | ObjectMeta: metav1.ObjectMeta{ 37 | Name: secName, 38 | Namespace: m.Namespace, 39 | }, 40 | } 41 | // find secret 42 | err := r.Get(ctx, client.ObjectKeyFromObject(expectedSecret), expectedSecret) 43 | if err != nil { 44 | // test for unhandled errors 45 | if !errors.IsNotFound(err) { 46 | return nil, fmt.Errorf("could not get secret: %w", err) 47 | } 48 | err = r.createNewSecret(ctx, m, expectedSecret) 49 | if err != nil { 50 | return nil, fmt.Errorf("could not create new secret: %w", err) 51 | } 52 | return expectedSecret, nil 53 | } 54 | // secret exists. 
55 | // test if we need to add more identities 56 | op, err := ctrl.CreateOrUpdate(ctx, r.Client, expectedSecret, func() error { 57 | numIdentities := countIdentities(expectedSecret) 58 | if numIdentities != m.Spec.Replicas { 59 | // create more identities if needed, otherwise they will be reused 60 | // when scaling down and then up again 61 | if numIdentities < m.Spec.Replicas { 62 | // create more 63 | err = generateNewIdentities(expectedSecret, numIdentities, m.Spec.Replicas) 64 | if err != nil { 65 | return fmt.Errorf("could not generate more identities: %w", err) 66 | } 67 | } 68 | } 69 | if ctrlErr := ctrl.SetControllerReference(m, expectedSecret, r.Scheme); ctrlErr != nil { 70 | return ctrlErr 71 | } 72 | return nil 73 | }) 74 | fmt.Printf("completed operation %q on secret\n", op) 75 | if err != nil { 76 | fmt.Printf("could not update secret on operation: %q\n", op) 77 | return nil, fmt.Errorf("could not update secret: %w", err) 78 | } 79 | fmt.Printf("succeeded updating secret on operation %q\n", op) 80 | return expectedSecret, nil 81 | } 82 | 83 | // createNewSecret Attempts to create an entirely new secret containing information relevant to IPFS Cluster. 84 | func (r *IpfsClusterReconciler) createNewSecret(ctx context.Context, m *clusterv1alpha1.IpfsCluster, 85 | secret *corev1.Secret) (err error) { 86 | // secret is not found. 
87 | // initialize new secret 88 | secret.Data = make(map[string][]byte, 0) 89 | var clusterSecret, bootstrapPrivateKey string 90 | var swarmKey string 91 | var peerID peer.ID 92 | 93 | // save data in secret 94 | if clusterSecret, err = utils.NewClusterSecret(); err != nil { 95 | return fmt.Errorf("could not create cluster secret: %w", err) 96 | } 97 | if peerID, bootstrapPrivateKey, err = utils.GenerateIdentity(); err != nil { 98 | return fmt.Errorf("could not create new ipfs identity: %w", err) 99 | } 100 | err = generateNewIdentities(secret, 0, m.Spec.Replicas) 101 | if err != nil { 102 | return fmt.Errorf("could not place new identities in secret: %w", err) 103 | } 104 | if swarmKey, err = utils.NewSwarmKey(); err != nil { 105 | return fmt.Errorf("could not create swarm key: %w", err) 106 | } 107 | 108 | secret.Data[KeyClusterSecret] = []byte(clusterSecret) 109 | secret.Data[KeyBootstrapPeerPrivateKey] = []byte(bootstrapPrivateKey) 110 | secret.StringData[KeySwarmKey] = swarmKey 111 | secret.StringData[KeyBootstrapPeerID] = peerID.String() 112 | 113 | // ensure reference is set 114 | if err = ctrl.SetControllerReference(m, secret, r.Scheme); err != nil { 115 | return err 116 | } 117 | err = r.Create(ctx, secret) 118 | return 119 | } 120 | 121 | // countIdentities Counts the amount of unique peer identities present in the secret. 122 | func countIdentities(secret *corev1.Secret) int32 { 123 | var count int32 124 | for key := range secret.Data { 125 | if strings.Contains(key, KeyPeerIDPrefix) { 126 | count++ 127 | } 128 | } 129 | return count 130 | } 131 | 132 | // generateNewIdentities Populates the secret data with new Peer IDs 133 | // and private keys which are mapped based on the replica number. 
134 | func generateNewIdentities(secret *corev1.Secret, start, n int32) error { 135 | if secret.StringData == nil { 136 | secret.StringData = make(map[string]string, 0) 137 | } 138 | for i := start; i < n; i++ { 139 | // generate new private key & peer id 140 | peerID, privKey, err := utils.GenerateIdentity() 141 | if err != nil { 142 | return err 143 | } 144 | peerIDKey := KeyPeerIDPrefix + strconv.Itoa(int(i)) 145 | secret.StringData[peerIDKey] = peerID.String() 146 | secretKey := KeyPrivateKeyPrefix + strconv.Itoa(int(i)) 147 | secret.StringData[secretKey] = privKey 148 | } 149 | return nil 150 | } 151 | -------------------------------------------------------------------------------- /controllers/service.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | corev1 "k8s.io/api/core/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/util/intstr" 10 | ctrl "sigs.k8s.io/controller-runtime" 11 | 12 | clusterv1alpha1 "github.com/redhat-et/ipfs-operator/api/v1alpha1" 13 | "sigs.k8s.io/controller-runtime/pkg/log" 14 | ) 15 | 16 | // ensureServiceCluster Returns the existing IPFS cluster service object or an error. 
17 | func (r *IpfsClusterReconciler) ensureServiceCluster( 18 | ctx context.Context, 19 | m *clusterv1alpha1.IpfsCluster, 20 | ) (*corev1.Service, error) { 21 | logger := log.FromContext(ctx) 22 | svcName := "ipfs-cluster-" + m.Name 23 | svc := &corev1.Service{ 24 | ObjectMeta: metav1.ObjectMeta{ 25 | Name: svcName, 26 | Namespace: m.Namespace, 27 | // TODO: annotations for external dns 28 | }, 29 | } 30 | 31 | logger.Info("creating or updating svc") 32 | op, err := ctrl.CreateOrUpdate(ctx, r.Client, svc, func() error { 33 | svc.Spec = corev1.ServiceSpec{} 34 | svc.Spec.Ports = []corev1.ServicePort{ 35 | { 36 | Name: "swarm", 37 | Protocol: corev1.ProtocolTCP, 38 | Port: portSwarm, 39 | TargetPort: intstr.FromString("swarm"), 40 | }, 41 | { 42 | Name: "swarm-udp", 43 | Protocol: corev1.ProtocolUDP, 44 | Port: portSwarmUDP, 45 | TargetPort: intstr.FromString("swarm-udp"), 46 | }, 47 | { 48 | Name: "ws", 49 | Protocol: corev1.ProtocolTCP, 50 | Port: portWS, 51 | TargetPort: intstr.FromString("ws"), 52 | }, 53 | { 54 | Name: "http", 55 | Protocol: corev1.ProtocolTCP, 56 | Port: portHTTP, 57 | TargetPort: intstr.FromString("http"), 58 | }, 59 | { 60 | Name: "api-http", 61 | Protocol: corev1.ProtocolTCP, 62 | Port: portAPIHTTP, 63 | TargetPort: intstr.FromString("api-http"), 64 | }, 65 | { 66 | Name: "proxy-http", 67 | Protocol: corev1.ProtocolTCP, 68 | Port: portProxyHTTP, 69 | TargetPort: intstr.FromString("proxy-http"), 70 | }, 71 | { 72 | Name: "cluster-swarm", 73 | Protocol: corev1.ProtocolTCP, 74 | Port: portClusterSwarm, 75 | TargetPort: intstr.FromString("cluster-swarm"), 76 | }, 77 | } 78 | svc.Spec.Selector = map[string]string{ 79 | "app.kubernetes.io/name": "ipfs-cluster-" + m.Name, 80 | } 81 | if err := ctrl.SetControllerReference(m, svc, r.Scheme); err != nil { 82 | return err 83 | } 84 | return nil 85 | }) 86 | if err != nil { 87 | logger.Error(err, "failed on operation", "operation", op) 88 | return nil, fmt.Errorf("failed to create service: %w", err) 89 
| } 90 | logger.Info("completed operation", "operation", op) 91 | return svc, nil 92 | } 93 | -------------------------------------------------------------------------------- /controllers/serviceaccount.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "context" 5 | 6 | "sigs.k8s.io/controller-runtime/pkg/log" 7 | 8 | clusterv1alpha1 "github.com/redhat-et/ipfs-operator/api/v1alpha1" 9 | corev1 "k8s.io/api/core/v1" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | ctrl "sigs.k8s.io/controller-runtime" 12 | ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 13 | ) 14 | 15 | func (r *IpfsClusterReconciler) ensureSA(ctx context.Context, m *clusterv1alpha1.IpfsCluster) (*corev1.ServiceAccount, 16 | error) { 17 | logger := log.FromContext(ctx) 18 | logger.Info("ensuring service account") 19 | // Define a new Service Account object 20 | sa := &corev1.ServiceAccount{ 21 | ObjectMeta: metav1.ObjectMeta{ 22 | Name: "ipfs-cluster-" + m.Name, 23 | Namespace: m.Namespace, 24 | }, 25 | } 26 | res, err := ctrlutil.CreateOrUpdate(ctx, r.Client, sa, func() error { 27 | if err := ctrl.SetControllerReference(m, sa, r.Scheme); err != nil { 28 | return err 29 | } 30 | return nil 31 | }) 32 | if err != nil { 33 | logger.Error(err, "failed to create serviceaccount") 34 | return nil, err 35 | } 36 | logger.Info("created serviceaccount", "result", res) 37 | return sa, nil 38 | } 39 | -------------------------------------------------------------------------------- /controllers/suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package controllers_test 18 | 19 | import ( 20 | "path/filepath" 21 | "testing" 22 | 23 | . "github.com/onsi/ginkgo/v2" 24 | . "github.com/onsi/gomega" 25 | "k8s.io/client-go/kubernetes/scheme" 26 | "sigs.k8s.io/controller-runtime/pkg/client" 27 | "sigs.k8s.io/controller-runtime/pkg/envtest" 28 | logf "sigs.k8s.io/controller-runtime/pkg/log" 29 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 30 | 31 | clusterv1alpha1 "github.com/redhat-et/ipfs-operator/api/v1alpha1" 32 | //+kubebuilder:scaffold:imports 33 | ) 34 | 35 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 36 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 

// k8sClient talks to the envtest apiserver; testEnv manages its lifecycle.
var k8sClient client.Client
var testEnv *envtest.Environment

// TestAPIs is the standard Go test entry point that hands off to Ginkgo.
func TestAPIs(t *testing.T) {
	RegisterFailHandler(Fail)

	RunSpecs(t, "Controller Suite")
}

var _ = BeforeSuite(func() {
	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

	By("bootstrapping test environment")
	// envtest starts a local apiserver/etcd and installs the project's CRDs.
	testEnv = &envtest.Environment{
		CRDDirectoryPaths:     []string{filepath.Join("..", "config", "crd", "bases")},
		ErrorIfCRDPathMissing: true,
	}

	cfg, err := testEnv.Start()
	Expect(err).NotTo(HaveOccurred())
	Expect(cfg).NotTo(BeNil())

	// register the operator's API types before creating the client
	err = clusterv1alpha1.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	//+kubebuilder:scaffold:scheme

	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	Expect(err).NotTo(HaveOccurred())
	Expect(k8sClient).NotTo(BeNil())
})

var _ = AfterSuite(func() {
	By("tearing down the test environment")
	err := testEnv.Stop()
	Expect(err).NotTo(HaveOccurred())
})
--------------------------------------------------------------------------------
/controllers/utils/utils.go:
--------------------------------------------------------------------------------
package utils

import (
	"context"
	"crypto/rand"
	"encoding/base64"
	"encoding/hex"
	"fmt"

	"github.com/alecthomas/units"
	"github.com/go-logr/logr"

	ci "github.com/libp2p/go-libp2p/core/crypto"

	peer "github.com/libp2p/go-libp2p/core/peer"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// CreateOrPatchTrackedObjects Goes through the map of tracked objects and attempts to
// apply the ctrl.createOrPatch function to each one.
This function will return a
// boolean indicating whether or not the requeue should be set to true.
func CreateOrPatchTrackedObjects(
	ctx context.Context,
	trackedObjects map[client.Object]controllerutil.MutateFn,
	kubeClient client.Client,
	log logr.Logger,
) bool {
	var requeue bool
	var err error
	for obj, mut := range trackedObjects {
		var result controllerutil.OperationResult
		kind := obj.GetObjectKind().GroupVersionKind()
		name := obj.GetName()
		result, err = controllerutil.CreateOrPatch(ctx, kubeClient, obj, mut)
		if err != nil {
			// A failure does not abort the loop: the remaining objects are
			// still attempted, and the caller is asked to requeue.
			log.Error(err, "error creating object", "objname", name, "objKind", kind.Kind, "result", result)
			requeue = true
		} else {
			log.Info("object changed", "objName", name, "objKind", kind.Kind, "result", result)
		}
	}
	return requeue
}

// ErrFunc Returns a function which returns the provided error when called.
func ErrFunc(err error) controllerutil.MutateFn {
	return func() error {
		return err
	}
}

// IPFSContainerResources Returns the resource requests/requirements for running a single IPFS Container
// depending on the storage requested by the user.
func IPFSContainerResources(ipfsStorageBytes int64) (ipfsResources corev1.ResourceRequirements) {
	// Determine resource constraints from how much we are storing.
	// For every TiB of storage, request 1G of memory and limit usage at 2x
	// that amount. The memory floor is 2G.
	// The CPU request starts at 250m and increases by 500m for every TiB of
	// storage, with the limit at 2x the request. Many block storage providers
	// cap volumes at 16TB, so the biggest node we would allocate requests
	// 16G of RAM and 8250m of CPU, and may use up to twice that.

	ipfsStorageTB := ipfsStorageBytes / int64(units.Tebibyte)
	ipfsMilliCoresMin := 250 + (500 * ipfsStorageTB)
	ipfsRAMGBMin := ipfsStorageTB
	// Enforce the documented 2G floor. (The previous code checked `< 2` but
	// assigned 1, making the effective floor 1G and contradicting the comment.)
	if ipfsRAMGBMin < 2 {
		ipfsRAMGBMin = 2
	}

	ipfsRAMMinQuantity := resource.NewScaledQuantity(ipfsRAMGBMin, resource.Giga)
	ipfsRAMMaxQuantity := resource.NewScaledQuantity(2*ipfsRAMGBMin, resource.Giga)
	ipfsCoresMinQuantity := resource.NewScaledQuantity(ipfsMilliCoresMin, resource.Milli)
	ipfsCoresMaxQuantity := resource.NewScaledQuantity(2*ipfsMilliCoresMin, resource.Milli)

	ipfsResources = corev1.ResourceRequirements{
		Requests: corev1.ResourceList{
			corev1.ResourceMemory: *ipfsRAMMinQuantity,
			corev1.ResourceCPU:    *ipfsCoresMinQuantity,
		},
		Limits: corev1.ResourceList{
			corev1.ResourceMemory: *ipfsRAMMaxQuantity,
			corev1.ResourceCPU:    *ipfsCoresMaxQuantity,
		},
	}
	return
}

// randomKey Returns n cryptographically-secure random bytes.
func randomKey(n int) (buf []byte, err error) {
	buf = make([]byte, n)
	if _, err = rand.Read(buf); err != nil {
		return nil, err
	}
	return buf, nil
}

// NewClusterSecret Returns a new IPFS Cluster secret:
// 32 random bytes, hex-encoded.
func NewClusterSecret() (string, error) {
	buf, err := randomKey(32)
	if err != nil {
		return "", err
	}
	return hex.EncodeToString(buf), nil
}

// NewSwarmKey Generates and returns a key used for hosting a private swarm,
// in the standard three-line /key/swarm/psk/1.0.0 file format.
func NewSwarmKey() (string, error) {
	const swarmPrefix = "/key/swarm/psk/1.0.0"
	const multiBase = "/base16/"
	buf, err := randomKey(32)
	if err != nil {
		return "", err
	}
	key := hex.EncodeToString(buf)
	swarmKey := fmt.Sprintf("%s\n%s\n%s", swarmPrefix, multiBase, key)
	return swarmKey, nil
}

// NewKey Generates a new Ed25519 private key and returns it along with the
// derived peer identity.
func NewKey() (ci.PrivKey, peer.ID, error) {
	// Ed25519 keys have a fixed size; GenerateKeyPair ignores the bits
	// argument for this key type, so this constant has no effect.
	const edDSAKeyLen = 4096
	priv, pub, err := ci.GenerateKeyPair(ci.Ed25519, edDSAKeyLen)
	if err != nil {
		return nil, "", err
	}
	peerid, err := peer.IDFromPublicKey(pub)
	if err != nil {
		return nil, "", err
	}
	return priv, peerid, nil
}

// GenerateIdentity Generates a new key and returns the peer ID and private key
// encoded as a base64 string using standard encoding, or an error if the key could not be generated.
func GenerateIdentity() (peerid peer.ID, privStr string, err error) {
	var privateKey ci.PrivKey
	privateKey, peerid, err = NewKey()
	if err != nil {
		return "", "", fmt.Errorf("cannot generate new key: %w", err)
	}
	// marshal to the protobuf wire format before base64-encoding
	privBytes, err := ci.MarshalPrivateKey(privateKey)
	if err != nil {
		return "", "", fmt.Errorf("cannot get bytes from private key: %w", err)
	}
	privStr = base64.StdEncoding.EncodeToString(privBytes)
	return
}
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line, and also
# from the environment for the first two.
6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/build/doctrees/environment.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/doctrees/environment.pickle -------------------------------------------------------------------------------- /docs/build/doctrees/getting_started.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/doctrees/getting_started.doctree -------------------------------------------------------------------------------- /docs/build/doctrees/hacking.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/doctrees/hacking.doctree -------------------------------------------------------------------------------- /docs/build/doctrees/index.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/doctrees/index.doctree -------------------------------------------------------------------------------- 
/docs/build/doctrees/your_first_cluster.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/doctrees/your_first_cluster.doctree -------------------------------------------------------------------------------- /docs/build/html/.buildinfo: -------------------------------------------------------------------------------- 1 | # Sphinx build info version 1 2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 3 | config: 2723b11e89b02bc2c195e1cc24e85eee 4 | tags: 645f666f9bcd5a90fca523b33c5a78b7 5 | -------------------------------------------------------------------------------- /docs/build/html/_sources/getting_started.rst.txt: -------------------------------------------------------------------------------- 1 | Getting Started 2 | =================================== 3 | 4 | This document explains methods that can be used to install the operator onto an existing kubernetes cluster. 5 | 6 | No matter which method you choose, the same operator will be installed. 7 | 8 | **With helm** 9 | :: 10 | helm install ipfs-operator ./helm/ipfs-operator 11 | 12 | **manually** 13 | :: 14 | make install 15 | 16 | 17 | 18 | Confirm that the operator is installed. 19 | 20 | When the operator is installed, a new namespace will be created. Verify the operator is running in the `ipfs-operator` namespace. 21 | 22 | Once the operator is installed, you can proceed with installing your first cluster. 23 | -------------------------------------------------------------------------------- /docs/build/html/_sources/hacking.rst.txt: -------------------------------------------------------------------------------- 1 | Hacking 2 | =================================== 3 | 4 | Have you found a bug in the operator, or do you have an additional feature? That's great! Here are some tips.
5 | 6 | 7 | Running your own images. 8 | 9 | If you have edited the code and would like to install it in your own cluster, you will first need an account on an image host like dockerhub. Once you have this, you can build, push, and install the image using the **IMG** environment variable 10 | :: 11 | export IMG=your-image-repo/ipfs-operator:version 12 | make docker-build 13 | make docker-push 14 | make install 15 | 16 | 17 | Creating a pull request 18 | 19 | Pull requests are welcome and encouraged. Please make pull requests against https://github.com/redhat-et/ipfs-operator. 20 | -------------------------------------------------------------------------------- /docs/build/html/_sources/index.rst.txt: -------------------------------------------------------------------------------- 1 | IPFS Operator 2 | =================================== 3 | 4 | **IPFS Operator** is a `Kubernetes Operator ` 5 | designed to assist with running `IPFS Cluster ` 6 | 7 | IPFS cluster is used as the storage backend for many important and interesting projects. Some of the community projects 8 | hosted using IPFS cluster are listed `here `. The largest known clusters are host to tens of millions of pins and host nearly 1PB of objects. 9 | 10 | The IPFS Operator brings this technology to kubernetes. This operator enables kubernetes users to create clusters consisting of hundreds of peers quickly and easily. See _getting_started to begin. 11 | 12 | .. note:: 13 | 14 | This project is under active development. 15 | 16 | Contents 17 | -------- 18 | 19 | .. toctree:: 20 | 21 | getting_started 22 | your_first_cluster 23 | hacking 24 | -------------------------------------------------------------------------------- /docs/build/html/_sources/your_first_cluster.rst.txt: -------------------------------------------------------------------------------- 1 | Your First Cluster 2 | =================================== 3 | 4 | To create a cluster, we need to create one using the ipfs operator CRD.
5 | 6 | Create a file with the following information 7 | 8 | .. code-block:: yaml 9 | 10 | apiVersion: cluster.ipfs.io/v1alpha1 11 | kind: Ipfs 12 | metadata: 13 | name: ipfs-sample-1 14 | spec: 15 | url: apps.example.com 16 | ipfsStorage: 2Gi 17 | clusterStorage: 2Gi 18 | replicas: 5 19 | public: true 20 | 21 | 22 | Adjust the storage requirements to meet your needs. 23 | 24 | Once you have made the necessary adjustments, apply it to your cluster with kubectl 25 | .. code-block:: bash 26 | 27 | kubectl create namespace my-cluster 28 | kubectl -n my-cluster apply -f ipfs.yaml 29 | 30 | Verify that the cluster has started by viewing the status of the cluster. 31 | .. code-block:: bash 32 | 33 | kubectl -n my-cluster get ipfs ipfs-sample-1 34 | -------------------------------------------------------------------------------- /docs/build/html/_static/_sphinx_javascript_frameworks_compat.js: -------------------------------------------------------------------------------- 1 | /* 2 | * _sphinx_javascript_frameworks_compat.js 3 | * ~~~~~~~~~~ 4 | * 5 | * Compatability shim for jQuery and underscores.js. 6 | * 7 | * WILL BE REMOVED IN Sphinx 6.0 8 | * xref RemovedInSphinx60Warning 9 | * 10 | */ 11 | 12 | /** 13 | * select a different prefix for underscore 14 | */ 15 | $u = _.noConflict(); 16 | 17 | 18 | /** 19 | * small helper function to urldecode strings 20 | * 21 | * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL 22 | */ 23 | jQuery.urldecode = function(x) { 24 | if (!x) { 25 | return x 26 | } 27 | return decodeURIComponent(x.replace(/\+/g, ' ')); 28 | }; 29 | 30 | /** 31 | * small helper function to urlencode strings 32 | */ 33 | jQuery.urlencode = encodeURIComponent; 34 | 35 | /** 36 | * This function returns the parsed url parameters of the 37 | * current request. Multiple values per key are supported, 38 | * it will always return arrays of strings for the value parts.
39 | */ 40 | jQuery.getQueryParameters = function(s) { 41 | if (typeof s === 'undefined') 42 | s = document.location.search; 43 | var parts = s.substr(s.indexOf('?') + 1).split('&'); 44 | var result = {}; 45 | for (var i = 0; i < parts.length; i++) { 46 | var tmp = parts[i].split('=', 2); 47 | var key = jQuery.urldecode(tmp[0]); 48 | var value = jQuery.urldecode(tmp[1]); 49 | if (key in result) 50 | result[key].push(value); 51 | else 52 | result[key] = [value]; 53 | } 54 | return result; 55 | }; 56 | 57 | /** 58 | * highlight a given string on a jquery object by wrapping it in 59 | * span elements with the given class name. 60 | */ 61 | jQuery.fn.highlightText = function(text, className) { 62 | function highlight(node, addItems) { 63 | if (node.nodeType === 3) { 64 | var val = node.nodeValue; 65 | var pos = val.toLowerCase().indexOf(text); 66 | if (pos >= 0 && 67 | !jQuery(node.parentNode).hasClass(className) && 68 | !jQuery(node.parentNode).hasClass("nohighlight")) { 69 | var span; 70 | var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); 71 | if (isInSVG) { 72 | span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); 73 | } else { 74 | span = document.createElement("span"); 75 | span.className = className; 76 | } 77 | span.appendChild(document.createTextNode(val.substr(pos, text.length))); 78 | node.parentNode.insertBefore(span, node.parentNode.insertBefore( 79 | document.createTextNode(val.substr(pos + text.length)), 80 | node.nextSibling)); 81 | node.nodeValue = val.substr(0, pos); 82 | if (isInSVG) { 83 | var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); 84 | var bbox = node.parentElement.getBBox(); 85 | rect.x.baseVal.value = bbox.x; 86 | rect.y.baseVal.value = bbox.y; 87 | rect.width.baseVal.value = bbox.width; 88 | rect.height.baseVal.value = bbox.height; 89 | rect.setAttribute('class', className); 90 | addItems.push({ 91 | "parent": node.parentNode, 92 | "target": rect}); 93 | } 94 | } 95 | } 96 
| else if (!jQuery(node).is("button, select, textarea")) { 97 | jQuery.each(node.childNodes, function() { 98 | highlight(this, addItems); 99 | }); 100 | } 101 | } 102 | var addItems = []; 103 | var result = this.each(function() { 104 | highlight(this, addItems); 105 | }); 106 | for (var i = 0; i < addItems.length; ++i) { 107 | jQuery(addItems[i].parent).before(addItems[i].target); 108 | } 109 | return result; 110 | }; 111 | 112 | /* 113 | * backward compatibility for jQuery.browser 114 | * This will be supported until firefox bug is fixed. 115 | */ 116 | if (!jQuery.browser) { 117 | jQuery.uaMatch = function(ua) { 118 | ua = ua.toLowerCase(); 119 | 120 | var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || 121 | /(webkit)[ \/]([\w.]+)/.exec(ua) || 122 | /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || 123 | /(msie) ([\w.]+)/.exec(ua) || 124 | ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || 125 | []; 126 | 127 | return { 128 | browser: match[ 1 ] || "", 129 | version: match[ 2 ] || "0" 130 | }; 131 | }; 132 | jQuery.browser = {}; 133 | jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; 134 | } 135 | -------------------------------------------------------------------------------- /docs/build/html/_static/css/badge_only.css: -------------------------------------------------------------------------------- 1 | .fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) 
format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions 
.rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/Roboto-Slab-Bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/css/fonts/Roboto-Slab-Bold.woff -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/Roboto-Slab-Bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/Roboto-Slab-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/css/fonts/Roboto-Slab-Regular.woff -------------------------------------------------------------------------------- 
/docs/build/html/_static/css/fonts/Roboto-Slab-Regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/css/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/css/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/css/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/css/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/lato-bold-italic.woff: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/css/fonts/lato-bold-italic.woff -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/lato-bold-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/css/fonts/lato-bold-italic.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/lato-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/css/fonts/lato-bold.woff -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/lato-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/css/fonts/lato-bold.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/lato-normal-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/css/fonts/lato-normal-italic.woff -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/lato-normal-italic.woff2: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/css/fonts/lato-normal-italic.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/lato-normal.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/css/fonts/lato-normal.woff -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/lato-normal.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/css/fonts/lato-normal.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/documentation_options.js: -------------------------------------------------------------------------------- 1 | var DOCUMENTATION_OPTIONS = { 2 | URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), 3 | VERSION: '0.0', 4 | LANGUAGE: 'en', 5 | COLLAPSE_INDEX: false, 6 | BUILDER: 'html', 7 | FILE_SUFFIX: '.html', 8 | LINK_SUFFIX: '.html', 9 | HAS_SOURCE: true, 10 | SOURCELINK_SUFFIX: '.txt', 11 | NAVIGATION_WITH_KEYS: false, 12 | SHOW_SEARCH_SUMMARY: true, 13 | ENABLE_SEARCH_SHORTCUTS: true, 14 | }; -------------------------------------------------------------------------------- /docs/build/html/_static/file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/file.png -------------------------------------------------------------------------------- /docs/build/html/_static/js/badge_only.js: 
-------------------------------------------------------------------------------- 1 | !function(e){var t={};function r(n){if(t[n])return t[n].exports;var o=t[n]={i:n,l:!1,exports:{}};return e[n].call(o.exports,o,o.exports,r),o.l=!0,o.exports}r.m=e,r.c=t,r.d=function(e,t,n){r.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},r.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.t=function(e,t){if(1&t&&(e=r(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(r.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)r.d(n,o,function(t){return e[t]}.bind(null,o));return n},r.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return r.d(t,"a",t),t},r.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},r.p="",r(r.s=4)}({4:function(e,t,r){}}); -------------------------------------------------------------------------------- /docs/build/html/_static/js/html5shiv-printshiv.min.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @preserve HTML5 Shiv 3.7.3-pre | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed 3 | */ 4 | !function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return 
e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var 
f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){p=!0,q=!0}}();var y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document); -------------------------------------------------------------------------------- /docs/build/html/_static/js/html5shiv.min.js: -------------------------------------------------------------------------------- 1 | /** 2 | * 
@preserve HTML5 Shiv 3.7.3 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed 3 | */ 4 | !function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="",k="hidden"in 
a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); -------------------------------------------------------------------------------- /docs/build/html/_static/js/theme.js: -------------------------------------------------------------------------------- 1 | !function(n){var e={};function t(i){if(e[i])return e[i].exports;var o=e[i]={i:i,l:!1,exports:{}};return n[i].call(o.exports,o,o.exports,t),o.l=!0,o.exports}t.m=n,t.c=e,t.d=function(n,e,i){t.o(n,e)||Object.defineProperty(n,e,{enumerable:!0,get:i})},t.r=function(n){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(n,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(n,"__esModule",{value:!0})},t.t=function(n,e){if(1&e&&(n=t(n)),8&e)return n;if(4&e&&"object"==typeof n&&n&&n.__esModule)return n;var i=Object.create(null);if(t.r(i),Object.defineProperty(i,"default",{enumerable:!0,value:n}),2&e&&"string"!=typeof n)for(var o in n)t.d(i,o,function(e){return n[e]}.bind(null,o));return i},t.n=function(n){var e=n&&n.__esModule?function(){return n.default}:function(){return n};return t.d(e,"a",e),e},t.o=function(n,e){return Object.prototype.hasOwnProperty.call(n,e)},t.p="",t(t.s=0)}([function(n,e,t){t(1),n.exports=t(3)},function(n,e,t){(function(){var e="undefined"!=typeof 
window?window.jQuery:t(2);n.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(n){var t=this;void 0===n&&(n=!0),t.isRunning||(t.isRunning=!0,e((function(e){t.init(e),t.reset(),t.win.on("hashchange",t.reset),n&&t.win.on("scroll",(function(){t.linkScroll||t.winScroll||(t.winScroll=!0,requestAnimationFrame((function(){t.onScroll()})))})),t.win.on("resize",(function(){t.winResize||(t.winResize=!0,requestAnimationFrame((function(){t.onResize()})))})),t.onResize()})))},enableSticky:function(){this.enable(!0)},init:function(n){n(document);var e=this;this.navBar=n("div.wy-side-scroll:first"),this.win=n(window),n(document).on("click","[data-toggle='wy-nav-top']",(function(){n("[data-toggle='wy-nav-shift']").toggleClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift")})).on("click",".wy-menu-vertical .current ul li a",(function(){var t=n(this);n("[data-toggle='wy-nav-shift']").removeClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift"),e.toggleCurrent(t),e.hashChange()})).on("click","[data-toggle='rst-current-version']",(function(){n("[data-toggle='rst-versions']").toggleClass("shift-up")})),n("table.docutils:not(.field-list,.footnote,.citation)").wrap("
"),n("table.docutils.footnote").wrap("
"),n("table.docutils.citation").wrap("
"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n(''),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t0 63 | var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 64 | var mgr1 = "^(" + C + ")?" 
+ V + C + V + C; // [C]VCVC... is m>1 65 | var s_v = "^(" + C + ")?" + v; // vowel in stem 66 | 67 | this.stemWord = function (w) { 68 | var stem; 69 | var suffix; 70 | var firstch; 71 | var origword = w; 72 | 73 | if (w.length < 3) 74 | return w; 75 | 76 | var re; 77 | var re2; 78 | var re3; 79 | var re4; 80 | 81 | firstch = w.substr(0,1); 82 | if (firstch == "y") 83 | w = firstch.toUpperCase() + w.substr(1); 84 | 85 | // Step 1a 86 | re = /^(.+?)(ss|i)es$/; 87 | re2 = /^(.+?)([^s])s$/; 88 | 89 | if (re.test(w)) 90 | w = w.replace(re,"$1$2"); 91 | else if (re2.test(w)) 92 | w = w.replace(re2,"$1$2"); 93 | 94 | // Step 1b 95 | re = /^(.+?)eed$/; 96 | re2 = /^(.+?)(ed|ing)$/; 97 | if (re.test(w)) { 98 | var fp = re.exec(w); 99 | re = new RegExp(mgr0); 100 | if (re.test(fp[1])) { 101 | re = /.$/; 102 | w = w.replace(re,""); 103 | } 104 | } 105 | else if (re2.test(w)) { 106 | var fp = re2.exec(w); 107 | stem = fp[1]; 108 | re2 = new RegExp(s_v); 109 | if (re2.test(stem)) { 110 | w = stem; 111 | re2 = /(at|bl|iz)$/; 112 | re3 = new RegExp("([^aeiouylsz])\\1$"); 113 | re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); 114 | if (re2.test(w)) 115 | w = w + "e"; 116 | else if (re3.test(w)) { 117 | re = /.$/; 118 | w = w.replace(re,""); 119 | } 120 | else if (re4.test(w)) 121 | w = w + "e"; 122 | } 123 | } 124 | 125 | // Step 1c 126 | re = /^(.+?)y$/; 127 | if (re.test(w)) { 128 | var fp = re.exec(w); 129 | stem = fp[1]; 130 | re = new RegExp(s_v); 131 | if (re.test(stem)) 132 | w = stem + "i"; 133 | } 134 | 135 | // Step 2 136 | re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; 137 | if (re.test(w)) { 138 | var fp = re.exec(w); 139 | stem = fp[1]; 140 | suffix = fp[2]; 141 | re = new RegExp(mgr0); 142 | if (re.test(stem)) 143 | w = stem + step2list[suffix]; 144 | } 145 | 146 | // Step 3 147 | re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; 148 | if (re.test(w)) { 149 | var fp 
= re.exec(w); 150 | stem = fp[1]; 151 | suffix = fp[2]; 152 | re = new RegExp(mgr0); 153 | if (re.test(stem)) 154 | w = stem + step3list[suffix]; 155 | } 156 | 157 | // Step 4 158 | re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; 159 | re2 = /^(.+?)(s|t)(ion)$/; 160 | if (re.test(w)) { 161 | var fp = re.exec(w); 162 | stem = fp[1]; 163 | re = new RegExp(mgr1); 164 | if (re.test(stem)) 165 | w = stem; 166 | } 167 | else if (re2.test(w)) { 168 | var fp = re2.exec(w); 169 | stem = fp[1] + fp[2]; 170 | re2 = new RegExp(mgr1); 171 | if (re2.test(stem)) 172 | w = stem; 173 | } 174 | 175 | // Step 5 176 | re = /^(.+?)e$/; 177 | if (re.test(w)) { 178 | var fp = re.exec(w); 179 | stem = fp[1]; 180 | re = new RegExp(mgr1); 181 | re2 = new RegExp(meq1); 182 | re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); 183 | if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) 184 | w = stem; 185 | } 186 | re = /ll$/; 187 | re2 = new RegExp(mgr1); 188 | if (re.test(w) && re2.test(w)) { 189 | re = /.$/; 190 | w = w.replace(re,""); 191 | } 192 | 193 | // and turn initial Y back to y 194 | if (firstch == "y") 195 | w = firstch.toLowerCase() + w.substr(1); 196 | return w; 197 | } 198 | } 199 | 200 | -------------------------------------------------------------------------------- /docs/build/html/_static/minus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/minus.png -------------------------------------------------------------------------------- /docs/build/html/_static/plus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/_static/plus.png -------------------------------------------------------------------------------- 
/docs/build/html/_static/pygments.css: -------------------------------------------------------------------------------- 1 | pre { line-height: 125%; } 2 | td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } 3 | span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } 4 | td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } 5 | span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } 6 | .highlight .hll { background-color: #ffffcc } 7 | .highlight { background: #f8f8f8; } 8 | .highlight .c { color: #3D7B7B; font-style: italic } /* Comment */ 9 | .highlight .err { border: 1px solid #FF0000 } /* Error */ 10 | .highlight .k { color: #008000; font-weight: bold } /* Keyword */ 11 | .highlight .o { color: #666666 } /* Operator */ 12 | .highlight .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */ 13 | .highlight .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */ 14 | .highlight .cp { color: #9C6500 } /* Comment.Preproc */ 15 | .highlight .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */ 16 | .highlight .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */ 17 | .highlight .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */ 18 | .highlight .gd { color: #A00000 } /* Generic.Deleted */ 19 | .highlight .ge { font-style: italic } /* Generic.Emph */ 20 | .highlight .gr { color: #E40000 } /* Generic.Error */ 21 | .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ 22 | .highlight .gi { color: #008400 } /* Generic.Inserted */ 23 | .highlight .go { color: #717171 } /* Generic.Output */ 24 | .highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ 25 | .highlight .gs { font-weight: bold } /* Generic.Strong */ 26 | .highlight .gu { color: #800080; font-weight: bold } /* 
Generic.Subheading */ 27 | .highlight .gt { color: #0044DD } /* Generic.Traceback */ 28 | .highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */ 29 | .highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */ 30 | .highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */ 31 | .highlight .kp { color: #008000 } /* Keyword.Pseudo */ 32 | .highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */ 33 | .highlight .kt { color: #B00040 } /* Keyword.Type */ 34 | .highlight .m { color: #666666 } /* Literal.Number */ 35 | .highlight .s { color: #BA2121 } /* Literal.String */ 36 | .highlight .na { color: #687822 } /* Name.Attribute */ 37 | .highlight .nb { color: #008000 } /* Name.Builtin */ 38 | .highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */ 39 | .highlight .no { color: #880000 } /* Name.Constant */ 40 | .highlight .nd { color: #AA22FF } /* Name.Decorator */ 41 | .highlight .ni { color: #717171; font-weight: bold } /* Name.Entity */ 42 | .highlight .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */ 43 | .highlight .nf { color: #0000FF } /* Name.Function */ 44 | .highlight .nl { color: #767600 } /* Name.Label */ 45 | .highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ 46 | .highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */ 47 | .highlight .nv { color: #19177C } /* Name.Variable */ 48 | .highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ 49 | .highlight .w { color: #bbbbbb } /* Text.Whitespace */ 50 | .highlight .mb { color: #666666 } /* Literal.Number.Bin */ 51 | .highlight .mf { color: #666666 } /* Literal.Number.Float */ 52 | .highlight .mh { color: #666666 } /* Literal.Number.Hex */ 53 | .highlight .mi { color: #666666 } /* Literal.Number.Integer */ 54 | .highlight .mo { color: #666666 } /* Literal.Number.Oct */ 55 | .highlight .sa { color: #BA2121 } /* Literal.String.Affix */ 56 | .highlight .sb { 
color: #BA2121 } /* Literal.String.Backtick */ 57 | .highlight .sc { color: #BA2121 } /* Literal.String.Char */ 58 | .highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */ 59 | .highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */ 60 | .highlight .s2 { color: #BA2121 } /* Literal.String.Double */ 61 | .highlight .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */ 62 | .highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */ 63 | .highlight .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */ 64 | .highlight .sx { color: #008000 } /* Literal.String.Other */ 65 | .highlight .sr { color: #A45A77 } /* Literal.String.Regex */ 66 | .highlight .s1 { color: #BA2121 } /* Literal.String.Single */ 67 | .highlight .ss { color: #19177C } /* Literal.String.Symbol */ 68 | .highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */ 69 | .highlight .fm { color: #0000FF } /* Name.Function.Magic */ 70 | .highlight .vc { color: #19177C } /* Name.Variable.Class */ 71 | .highlight .vg { color: #19177C } /* Name.Variable.Global */ 72 | .highlight .vi { color: #19177C } /* Name.Variable.Instance */ 73 | .highlight .vm { color: #19177C } /* Name.Variable.Magic */ 74 | .highlight .il { color: #666666 } /* Literal.Number.Integer.Long */ -------------------------------------------------------------------------------- /docs/build/html/genindex.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Index — IPFS Operator 0.0 documentation 7 | 8 | 9 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 |
25 | 50 | 51 |
55 | 56 |
57 |
58 |
59 |
    60 |
  • »
  • 61 |
  • Index
  • 62 |
  • 63 |
  • 64 |
65 |
66 |
67 |
68 |
69 | 70 | 71 |

Index

72 | 73 |
74 | 75 |
76 | 77 | 78 |
79 |
80 |
81 | 82 |
83 | 84 |
85 |

© Copyright 2022.

86 |
87 | 88 | Built with Sphinx using a 89 | theme 90 | provided by Read the Docs. 91 | 92 | 93 |
94 |
95 |
96 |
97 |
98 | 103 | 104 | 105 | -------------------------------------------------------------------------------- /docs/build/html/getting_started.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | Getting Started — IPFS Operator 0.0 documentation 8 | 9 | 10 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 |
28 | 53 | 54 |
58 | 59 |
60 |
61 |
62 | 69 |
70 |
71 |
72 |
73 | 74 |
75 |

Getting Started

76 |

This document explains methods that can be used to install the operator onto an existing kubernetes cluster.

77 |

No matter which method you choose, the same operator will be installed.

78 |

With helm 79 | :: 80 | helm install ipfs-operator ./helm/ipfs-operator

81 |

manually 82 | :: 83 | make install

84 |

Confirm that the operator is installed.

85 |

When the operator is installed, a new namespace will be created. Verify the operator is running in the ipfs-operator namespace.

86 |

Once the operator is installed, you can proceed with installing your first cluster.

87 |
88 | 89 | 90 |
91 |
92 | 109 |
110 |
111 |
112 |
113 | 118 | 119 | 120 | -------------------------------------------------------------------------------- /docs/build/html/hacking.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | Hacking — IPFS Operator 0.0 documentation 8 | 9 | 10 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 |
27 | 52 | 53 |
57 | 58 |
59 |
60 |
61 | 68 |
69 |
70 |
71 |
72 | 73 |
74 |

Hacking

75 |

Have you found a bug in the operator, or do you have an additional feature? That’s great! Here are some tips.

76 |

Running your own images.

77 |

If you have edited the code and would like to install it in your own cluster, you will first need an account on an image host like Docker Hub. Once you have this, you can build, push, and install the image using the IMG environment variable

78 |
export IMG=your-image-repo/ipfs-operator:version
 79 | make docker-build
 80 | make docker-push
 81 | make install
 82 | 
83 |
84 |

Creating a pull request

85 |

Pull requests are welcome and encouraged. Please make pull requests against https://github.com/redhat-et/ipfs-operator.

86 |
87 | 88 | 89 |
90 |
91 |
94 | 95 |
96 | 97 |
98 |

© Copyright 2022.

99 |
100 | 101 | Built with Sphinx using a 102 | theme 103 | provided by Read the Docs. 104 | 105 | 106 |
107 |
108 |
109 |
110 |
111 | 116 | 117 | 118 | -------------------------------------------------------------------------------- /docs/build/html/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | IPFS Operator — IPFS Operator 0.0 documentation 8 | 9 | 10 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 |
27 | 52 | 53 |
57 | 58 |
59 |
60 |
61 | 68 |
69 |
70 |
71 |
72 | 73 |
74 |

IPFS Operator

75 |

IPFS Operator is a Kubernetes Operator <https://kubernetes.io/docs/concepts/extend-kubernetes/operator/> 76 | designed to assist with running IPFS Cluster <https://cluster.ipfs.io>

77 |

IPFS cluster is used as the storage backend for many important and interesting projects. Some of the community projects 78 | hosted using IPFS cluster are listed here <https://collab.ipfscluster.io/>. The largest known clusters are host to tens of millions of pins and host nearly 1PB of objects.

79 |

The IPFS Operator brings this technology to kubernetes. This operator enables kubernetes users to create clusters consisting of hundreds of peers quickly and easily. See _getting_started to begin.

80 |
81 |

Note

82 |

This project is under active development.

83 |
84 |
85 |

Contents

86 |
87 | 92 |
93 |
94 |
95 | 96 | 97 |
98 |
99 |
102 | 103 |
104 | 105 |
106 |

© Copyright 2022.

107 |
108 | 109 | Built with Sphinx using a 110 | theme 111 | provided by Read the Docs. 112 | 113 | 114 |
115 |
116 |
117 |
118 |
119 | 124 | 125 | 126 | -------------------------------------------------------------------------------- /docs/build/html/objects.inv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-cluster/ipfs-operator/a3c6dbbbb009ead783ef9b9223bb6bd58c90e200/docs/build/html/objects.inv -------------------------------------------------------------------------------- /docs/build/html/search.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Search — IPFS Operator 0.0 documentation 7 | 8 | 9 | 10 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 |
28 | 53 | 54 |
58 | 59 |
60 |
61 |
62 |
    63 |
  • »
  • 64 |
  • Search
  • 65 |
  • 66 |
  • 67 |
68 |
69 |
70 |
71 |
72 | 73 | 80 | 81 | 82 |
83 | 84 |
85 | 86 |
87 |
88 |
89 | 90 |
91 | 92 |
93 |

© Copyright 2022.

94 |
95 | 96 | Built with Sphinx using a 97 | theme 98 | provided by Read the Docs. 99 | 100 | 101 |
102 |
103 |
104 |
105 |
106 | 111 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | -------------------------------------------------------------------------------- /docs/build/html/searchindex.js: -------------------------------------------------------------------------------- 1 | Search.setIndex({"docnames": ["getting_started", "hacking", "index", "your_first_cluster"], "filenames": ["getting_started.rst", "hacking.rst", "index.rst", "your_first_cluster.rst"], "titles": ["Getting Started", "Hacking", "IPFS Operator", "Your First Cluster"], "terms": {"thi": [0, 1, 2], "document": 0, "explain": 0, "method": 0, "can": [0, 1], "us": [0, 1, 2, 3], "instal": [0, 1], "oper": [0, 1, 3], "onto": 0, "an": [0, 1], "exist": 0, "kubernet": [0, 2], "cluster": [0, 1, 2], "No": 0, "matter": 0, "which": 0, "you": [0, 1, 3], "choos": 0, "same": 0, "With": 0, "helm": 0, "ipf": [0, 1, 3], "manual": 0, "make": [0, 1], "confirm": 0, "i": [0, 2], "when": 0, "new": 0, "namespac": [0, 3], "creat": [0, 1, 2, 3], "verifi": [0, 3], "run": [0, 1, 2], "onc": [0, 1, 3], "proce": 0, "your": [0, 1, 2], "first": [0, 1, 2], "have": [1, 3], "found": 1, "bug": 1, "do": 1, "addit": 1, "featur": 1, "That": 1, "": 1, "great": 1, "here": [1, 2], "ar": [1, 2], "some": [1, 2], "tip": 1, "own": 1, "imag": 1, "If": 1, "edit": 1, "code": [1, 3], "would": 1, "like": 1, "need": [1, 3], "account": 1, "host": [1, 2], "dockerhub": 1, "build": 1, "push": 1, "img": 1, "environ": 1, "variabl": 1, "export": 1, "repo": 1, "version": 1, "docker": 1, "pull": 1, "request": 1, "welcom": 1, "encourag": 1, "pleas": 1, "reqeust": 1, "against": 1, "http": [1, 2], "github": 1, "com": [1, 3], "redhat": 1, "et": 1, "io": [2, 3], "doc": 2, "concept": 2, "extend": 2, "design": 2, "assist": 2, "storag": [2, 3], "backend": 2, "mani": 2, "import": 2, "interest": 2, "project": 2, "commun": 2, "list": 2, "collab": 2, "ipfsclust": 2, "The": 2, "largest": 2, "known": 2, "ten": 2, "million": 2, "pin": 2, "nearli": 2, "1pb": 2, "object": 2, "bring": 2, "technologi": 2, 
"enabl": 2, "user": 2, "consist": 2, "hundr": 2, "peer": 2, "quickli": 2, "easili": 2, "see": 2, "_getting_start": 2, "begin": 2, "under": 2, "activ": 2, "develop": 2, "get": 2, "start": [2, 3], "hack": 2, "we": 3, "crd": 3, "file": 3, "follow": 3, "inform": 3, "apivers": 3, "v1alpha1": 3, "kind": 3, "metadata": 3, "name": 3, "sampl": 3, "1": 3, "spec": 3, "url": 3, "app": 3, "exampl": 3, "ipfsstorag": 3, "2gi": 3, "clusterstorag": 3, "replica": 3, "5": 3, "public": 3, "true": 3, "adjust": 3, "requir": 3, "meet": 3, "made": 3, "necessari": 3, "appli": 3, "kubectl": 3, "block": 3, "bash": 3, "my_clust": 3, "n": 3, "f": 3, "yaml": 3, "ha": 3, "view": 3, "statu": 3, "my_namespac": 3}, "objects": {}, "objtypes": {}, "objnames": {}, "titleterms": {"get": 0, "start": 0, "hack": 1, "ipf": 2, "oper": 2, "content": 2, "your": 3, "first": 3, "cluster": 3}, "envversion": {"sphinx.domains.c": 2, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 6, "sphinx.domains.index": 1, "sphinx.domains.javascript": 2, "sphinx.domains.math": 2, "sphinx.domains.python": 3, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.intersphinx": 1, "sphinx": 56}}) -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 
2 | 3 | # -- Project information 4 | 5 | project = 'IPFS Operator' 6 | copyright = '2022' 7 | author = 'IPFS Operator Dev Team' 8 | 9 | release = '0.0' 10 | version = '0.0.0' 11 | 12 | # -- General configuration 13 | 14 | extensions = [ 15 | 'sphinx.ext.duration', 16 | 'sphinx.ext.doctest', 17 | 'sphinx.ext.autodoc', 18 | 'sphinx.ext.autosummary', 19 | 'sphinx.ext.autosectionlabel', 20 | 'sphinx.ext.intersphinx', 21 | # 'autoapi.extension', 22 | ] 23 | 24 | # autoapi_type='go' 25 | # autoapi_dirs=['../'] 26 | 27 | intersphinx_mapping = { 28 | 'python': ('https://docs.python.org/3/', None), 29 | 'sphinx': ('https://www.sphinx-doc.org/en/master/', None), 30 | } 31 | intersphinx_disabled_domains = ['std'] 32 | 33 | templates_path = ['_templates'] 34 | 35 | # -- Options for HTML output 36 | 37 | html_theme = 'sphinx_rtd_theme' 38 | 39 | # -- Options for EPUB output 40 | epub_show_urls = 'footnote' 41 | -------------------------------------------------------------------------------- /docs/source/getting_started.rst: -------------------------------------------------------------------------------- 1 | Getting Started 2 | =================================== 3 | 4 | This document explains methods that can be used to install the operator onto an existing kubernetes cluster. 5 | 6 | No matter which method you choose, the same operator will be installed. 7 | 8 | **With helm** 9 | 10 | 11 | .. code-block:: bash 12 | 13 | helm install ipfs-operator ./helm/ipfs-operator 14 | 15 | **manually** 16 | 17 | .. code-block:: bash 18 | 19 | make install 20 | 21 | 22 | 23 | Confirm that the operator is installed. 24 | 25 | When the operator is installed, a new namespace will be created. Verify the operator is running in the `ipfs-operator` namespace. 26 | 27 | Once the operator is installed, you can proceed with installing your first cluster. 
28 | -------------------------------------------------------------------------------- /docs/source/hacking.rst: -------------------------------------------------------------------------------- 1 | Hacking 2 | =================================== 3 | 4 | Have you have found a bug in the operator or do you have an additional feature? That's great! Here are some tips. 5 | 6 | Running your own images 7 | ======================= 8 | 9 | If you have edited the code, you would like to install it in your own cluster, you will first need an account on an image host like dockerhub. Once you have this, you can build, push, and install the image using the **IMG** environment variable 10 | 11 | .. code-block:: bash 12 | 13 | export IMG=your-image-repo/ipfs-operator:version 14 | make docker-build 15 | make docker-push 16 | make install 17 | 18 | Creating a pull request 19 | ======================= 20 | 21 | Pull requests are welcome and encouraged. Please make pull reqeusts against https://github.com/redhat-et/ipfs-operator. 22 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | IPFS Operator 2 | =================================== 3 | 4 | **IPFS Operator** is a `Kubernetes Operator `_ 5 | designed to assist with running `IPFS Cluster `_ 6 | 7 | IPFS cluster is used as the storage backend for many important and interesting projects. Some of the community projects 8 | hosted using IPFS cluster are listed `here `_. The largest known clusters are host to tens of millions of pins and host nearly 1PB of objects. 9 | 10 | The IPFS Operator brings this technology to kubernetes. This operator enables kubernetes users to create clusters consisting of hundreds of peers quickly and easily. See :ref:`Getting Started` to begin. 11 | 12 | .. note:: 13 | 14 | This project is under active development. 15 | 16 | Contents 17 | -------- 18 | 19 | .. 
toctree:: 20 | 21 | getting_started 22 | your_first_cluster 23 | hacking 24 | -------------------------------------------------------------------------------- /docs/source/requirements.txt: -------------------------------------------------------------------------------- 1 | mypy 2 | plantweb 3 | pylint 4 | rstcheck 5 | sphinx_rtd_theme 6 | sphinx-tabs 7 | sphinx-toolbox 8 | -------------------------------------------------------------------------------- /docs/source/your_first_cluster.rst: -------------------------------------------------------------------------------- 1 | Your First Cluster 2 | =================================== 3 | 4 | Create a cluster, we need to create a cluster using the ipfs operator CRD. 5 | 6 | Create a file with the following information 7 | 8 | .. code-block:: yaml 9 | 10 | apiVersion: cluster.ipfs.io/v1alpha1 11 | kind: IpfsCluster 12 | metadata: 13 | name: ipfs-sample-1 14 | spec: 15 | ipfsStorage: 2Gi 16 | clusterStorage: 2Gi 17 | replicas: 5 18 | follows: [] 19 | networking: 20 | circuitRelays: 1 21 | 22 | 23 | Adjust the storage requirements to meet your needs. 24 | 25 | Once you have made the necessary adjustments, apply it to your cluster with kubectl 26 | 27 | .. code-block:: bash 28 | 29 | kubectl create namespace mycluster 30 | kubectl -n mycluster apply -f ipfs.yaml 31 | 32 | Verify that the cluster has started by viewing the status of the cluster. 33 | 34 | .. 
code-block:: bash 35 | 36 | kubectl -n mycluster status ipfs-sample-1 37 | -------------------------------------------------------------------------------- /examples/collab-follow-small.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: cluster.ipfs.io/v1alpha1 4 | kind: Ipfs 5 | metadata: 6 | name: ipfs-sample-collab 7 | spec: 8 | ipfsStorage: 2Gi 9 | clusterStorage: 1Gi 10 | replicas: 1 11 | networking: 12 | circuitRelays: 1 13 | follows: 14 | - name: gutenberg_es 15 | template: gutenberg-es.collab.ipfscluster.io -------------------------------------------------------------------------------- /examples/collab-follow.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cluster.ipfs.io/v1alpha1 3 | kind: IpfsCluster 4 | metadata: 5 | name: ipfs-sample-collab 6 | spec: 7 | ipfsStorage: 5Ti 8 | clusterStorage: 20Gi 9 | replicas: 5 10 | follows: 11 | - name: filecoin 12 | template: filecoin.collab.ipfscluster.io 13 | - name: gutenberg_es 14 | template: gutenberg-es.collab.ipfscluster.io 15 | - name: ipfs-websites 16 | template: ipfs-websites.collab.ipfscluster.io 17 | - name: pkg.pacman.store 18 | template: cluster.pkg.pacman.store 19 | - name: ravencoin 20 | template: ipfs-collab.ravencoin.network 21 | - name: wikipedia 22 | template: wikipedia.collab.ipfscluster.io 23 | networking: 24 | circuitRelays: 1 25 | -------------------------------------------------------------------------------- /examples/ipfs-medium-private.yaml: -------------------------------------------------------------------------------- 1 | # This IPFS resource is designed for running on laptops 2 | # without many resources. 
3 | --- 4 | apiVersion: cluster.ipfs.io/v1alpha1 5 | kind: IpfsCluster 6 | metadata: 7 | name: ipfs-cluster-medium 8 | spec: 9 | ipfsStorage: 2Gi 10 | clusterStorage: 1Gi 11 | replicas: 2 12 | follows: [] 13 | networking: 14 | circuitRelays: 1 15 | public: false 16 | -------------------------------------------------------------------------------- /examples/ipfs-small-private.yaml: -------------------------------------------------------------------------------- 1 | # This IPFS resource is designed for running on laptops 2 | # without many resources. 3 | --- 4 | apiVersion: cluster.ipfs.io/v1alpha1 5 | kind: IpfsCluster 6 | metadata: 7 | name: ipfs-cluster-small 8 | spec: 9 | ipfsStorage: 2Gi 10 | clusterStorage: 1Gi 11 | replicas: 1 12 | follows: [] 13 | networking: 14 | circuitRelays: 1 15 | public: false -------------------------------------------------------------------------------- /examples/ipfs-small.yaml: -------------------------------------------------------------------------------- 1 | # This IPFS resource is designed for running on laptops 2 | # without many resources. 3 | --- 4 | apiVersion: cluster.ipfs.io/v1alpha1 5 | kind: IpfsCluster 6 | metadata: 7 | name: ipfs-cluster-small 8 | spec: 9 | ipfsStorage: 2Gi 10 | clusterStorage: 1Gi 11 | replicas: 1 12 | follows: [] 13 | networking: 14 | circuitRelays: 1 15 | -------------------------------------------------------------------------------- /examples/ipfs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cluster.ipfs.io/v1alpha1 3 | kind: IpfsCluster 4 | metadata: 5 | name: ipfs-sample 6 | spec: 7 | ipfsStorage: 50Gi 8 | clusterStorage: 5Gi 9 | replicas: 2 10 | follows: [] 11 | networking: 12 | circuitRelays: 2 13 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 
3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ -------------------------------------------------------------------------------- /hack/run-in-kind.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | set -e -o pipefail 4 | 5 | # Declare globals 6 | if ! [[ "${KIND_TAG}" == "" ]]; then 7 | echo "❗ Using KIND_TAG: ${KIND_TAG} ❗" 8 | fi 9 | 10 | KIND_TAG="${KIND_TAG:-local-build}" 11 | 12 | 13 | # process arguments 14 | skipBuild="false" 15 | skipTag="false" 16 | 17 | for arg in "$@"; do 18 | case "${arg}" in 19 | "--skip-build") 20 | skipBuild="true" 21 | ;; 22 | "--skip-tag") 23 | skipTag="true" 24 | ;; 25 | esac 26 | done 27 | 28 | 29 | ################################ 30 | # Makes sure the given commands are installed 31 | # before continuing. 32 | # Globals: 33 | # None 34 | # Arguments: 35 | # args: (list) commands to check for 36 | # Returns: 37 | # 0 if all commands are installed, 1 otherwise 38 | ################################ 39 | function check_cmd() { 40 | for cmd in "$@"; do 41 | if ! 
command -v $cmd >/dev/null 2>&1; then 42 | echo "Error: $cmd is not installed" 43 | exit 1 44 | fi 45 | done 46 | } 47 | 48 | # make sure that helm, kind, and docker are installed 49 | check_cmd helm docker kind 50 | 51 | # load images into kind 52 | IMAGES=( 53 | "quay.io/redhat-et-ipfs/ipfs-operator" 54 | ) 55 | 56 | 57 | # build the two images 58 | if [[ "${skipBuild}" == false ]]; then 59 | make docker-build 60 | else 61 | echo "⏩ skipping build" 62 | fi 63 | 64 | # tag the latest images 65 | if [[ "${skipTag}" == false ]]; then 66 | for i in "${IMAGES[@]}"; do 67 | docker tag "${i}:latest" "${i}:${KIND_TAG}" 68 | kind load docker-image "${i}:${KIND_TAG}" 69 | done 70 | else 71 | echo "⏩ skipping tag" 72 | fi 73 | 74 | 75 | # using helm, install the IPFS Cluster Operator into the current cluster 76 | helm upgrade --install \ 77 | --debug \ 78 | --set image.tag="${KIND_TAG}" \ 79 | --wait --timeout=300s \ 80 | ipfs-cluster ./helm/ipfs-operator 81 | 82 | # TODO: implement auto-deletion of previous operator pod 83 | # # if there is an existing operator pod running, delete it so we can properly restart 84 | # kubectl delete pod -l app=ipfs-operator -------------------------------------------------------------------------------- /hack/setup-kind-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e -o pipefail 3 | 4 | # imports utils 5 | source ./hack/utils.sh 6 | 7 | # run commands or install 8 | case "${1:-}" in 9 | "check") 10 | log "Checking for required commands" 11 | check_cmd docker kind kubectl 12 | log "All required commands are installed" 13 | ;; 14 | "metallb") 15 | log "Installing metallb..." 
16 | install_metallb 17 | log "✅ installed" 18 | ;; 19 | *) 20 | check_cmd docker kind kubectl 21 | log "Setting up a kind cluster" 22 | kind create cluster --name "${CLUSTER_NAME:-kind}" 23 | install_metallb 24 | ;; 25 | esac 26 | -------------------------------------------------------------------------------- /hack/utils.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################ 4 | # Makes sure the given commands are installed 5 | # before continuing. 6 | # Globals: 7 | # None 8 | # Arguments: 9 | # args: (list) commands to check for 10 | # Returns: 11 | # 0 if all commands are installed, 1 otherwise 12 | ################################ 13 | function check_cmd() { 14 | for cmd in "$@"; do 15 | if ! command -v $cmd >/dev/null 2>&1; then 16 | echo "Error: $cmd is not installed" 17 | exit 1 18 | fi 19 | done 20 | } 21 | 22 | ###################################### 23 | # Prints out the given message to STDOUT 24 | # with timestamped formatting. 25 | # Globals: 26 | # None 27 | # Arguments: 28 | # msg: (string) message to print 29 | # Returns: 30 | # None 31 | ###################################### 32 | function log() { 33 | local msg="$1" 34 | # get the current time 35 | local time=$(date +%Y-%m-%dT%H:%M:%S%z) 36 | # print the message with printf 37 | printf "[%s] %s\n" "${time}" "${msg}" 38 | } 39 | 40 | ###################################### 41 | # Installs MetalLB into the default namespace 42 | # for the current cluster. 
43 | # Globals: 44 | # None 45 | # Arguments: 46 | # None 47 | # Returns: 48 | # 1 if MetalLB is not installed, 0 otherwise 49 | ###################################### 50 | function install_metallb() { 51 | local metalLBVersion='v0.12.1' 52 | local metalLBManifests="https://raw.githubusercontent.com/metallb/metallb/${metalLBVersion}/manifests" 53 | local metalLBNamespaceURL="${metalLBManifests}/namespace.yaml" 54 | local metalLBURL="${metalLBManifests}/metallb.yaml" 55 | local metalLBNamespace="metallb-system" 56 | 57 | # create the metallb namespace 58 | if ! [[ $(kubectl apply -f "${metalLBNamespaceURL}") ]]; then 59 | log "Failed to create metallb namespace" 60 | return 1 61 | fi 62 | 63 | # create the manifest 64 | if ! [[ $(kubectl apply -f "${metalLBURL}") ]]; then 65 | log "Failed to create metallb manifest" 66 | return 1 67 | fi 68 | 69 | # wait for all pods with the label 'app=metallb' to be ready 70 | # HACK: find a way to wait on the parent condition for the pods instead of 71 | # waiting 30 seconds before trying to wait on the pods themselves. 72 | log "💤 Sleeping for 30 seconds to allow the metallb namespace to be initialized" 73 | sleep 30 74 | log "📦 Waiting for pods to be ready..." 75 | kubectl wait --for=condition=ready pod -l app=metallb -n "${metalLBNamespace}" 76 | 77 | # allocate a group of subnets to be used for the MetalLB instances 78 | log "📢 Allocating network addresses" 79 | local ipamConfig=$(docker network inspect -f '{{.IPAM.Config}}' kind) 80 | local subnet=$(echo "${ipamConfig}" | awk '{ print $1 }') 81 | local regex="([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/([0-9]+)" 82 | if ! [[ $subnet =~ $regex ]]; then 83 | log "could not match" 84 | return 1 85 | fi 86 | 87 | # create the subnets in MetalLB 88 | local net1="${BASH_REMATCH[1]}" 89 | local net2="${BASH_REMATCH[2]}" 90 | local net3="${BASH_REMATCH[3]}" 91 | local net4="${BASH_REMATCH[4]}" 92 | local subnetMask="${BASH_REMATCH[5]}" 93 | 94 | if ! 
[[ "${subnetMask}" -eq "16" ]]; then 95 | log "subnet unsupported" 96 | return 1 97 | fi 98 | 99 | # create a single subnet, grant 255 addresses to LB 100 | cat < 1 { 61 | templateBase = os.Args[1] 62 | } 63 | 64 | objs, err := DecodeObjs() 65 | if err != nil { 66 | panic(err) 67 | } 68 | 69 | for _, obj := range objs { 70 | var f *os.File 71 | // ignore if these values are not set 72 | var ok bool 73 | var kind, name string 74 | var meta map[any]any 75 | if kind, ok = obj["kind"].(string); !ok { 76 | log.Printf("no kind found in %v, skipping\n", obj) 77 | continue 78 | } 79 | if meta, ok = obj["metadata"].(map[any]any); !ok { 80 | log.Printf("no metadata could be found\n") 81 | continue 82 | } 83 | if name, ok = meta["name"].(string); !ok { 84 | log.Printf("name not found") 85 | continue 86 | } 87 | 88 | fileName := fmt.Sprintf("%s-%s.yaml", kind, name) 89 | baseName := templateBase 90 | if kind == "CustomResourceDefinition" { 91 | baseName = crdBase 92 | } 93 | 94 | if err = ModifyObj(obj); err != nil { 95 | log.Printf("error modifying obj %s: %v\n", fileName, err) 96 | os.Exit(1) 97 | } 98 | 99 | if f, err = os.Create(fmt.Sprintf("%s/%s", baseName, fileName)); err != nil { 100 | log.Printf("error creating file %s: %v\n", fileName, err) 101 | os.Exit(1) 102 | } 103 | defer f.Close() 104 | enc := yaml.NewEncoder(f) 105 | if err = enc.Encode(obj); err != nil { 106 | log.Printf("error encoding obj %s: %v\n", fileName, err) 107 | return 108 | } 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /helm/ipfs-operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: ipfs-operator 3 | type: application 4 | version: "0.0.2" 5 | appVersion: "v0.0.1" 6 | -------------------------------------------------------------------------------- /helm/ipfs-operator/crds/cluster.ipfs.io_circuitrelays.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.8.0 7 | creationTimestamp: null 8 | name: circuitrelays.cluster.ipfs.io 9 | spec: 10 | group: cluster.ipfs.io 11 | names: 12 | kind: CircuitRelay 13 | listKind: CircuitRelayList 14 | plural: circuitrelays 15 | singular: circuitrelay 16 | scope: Namespaced 17 | versions: 18 | - name: v1alpha1 19 | schema: 20 | openAPIV3Schema: 21 | description: CircuitRelay is the Schema for the circuitrelays API. 22 | properties: 23 | apiVersion: 24 | description: 'APIVersion defines the versioned schema of this representation 25 | of an object. Servers should convert recognized schemas to the latest 26 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 27 | type: string 28 | kind: 29 | description: 'Kind is a string value representing the REST resource this 30 | object represents. Servers may infer this from the endpoint the client 31 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 32 | type: string 33 | metadata: 34 | type: object 35 | spec: 36 | description: CircuitRelaySpec Defines a specification for the RelayCircuit 37 | launched by Kubernetes. 38 | properties: 39 | swarmKeyRef: 40 | description: SwarmKeyRef points to a multicodec-encoded v1 PSK stored 41 | within a secret somewhere. 42 | properties: 43 | keyName: 44 | type: string 45 | secretName: 46 | type: string 47 | required: 48 | - keyName 49 | - secretName 50 | type: object 51 | type: object 52 | status: 53 | properties: 54 | addrInfo: 55 | description: This is intended to mimic peer.AddrInfo. 
56 | properties: 57 | addrs: 58 | items: 59 | type: string 60 | type: array 61 | id: 62 | type: string 63 | required: 64 | - addrs 65 | - id 66 | type: object 67 | required: 68 | - addrInfo 69 | type: object 70 | type: object 71 | served: true 72 | storage: true 73 | subresources: 74 | status: {} 75 | status: 76 | acceptedNames: 77 | kind: "" 78 | plural: "" 79 | conditions: [] 80 | storedVersions: [] 81 | -------------------------------------------------------------------------------- /helm/ipfs-operator/templates/ClusterRole-ipfs-operator-manager-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | annotations: {} 5 | creationTimestamp: null 6 | labels: {} 7 | name: ipfs-operator-manager-role 8 | rules: 9 | - apiGroups: 10 | - '*' 11 | resources: 12 | - '*' 13 | verbs: 14 | - get 15 | - list 16 | - apiGroups: 17 | - apps 18 | resources: 19 | - deployments 20 | verbs: 21 | - create 22 | - delete 23 | - get 24 | - list 25 | - patch 26 | - update 27 | - watch 28 | - apiGroups: 29 | - apps 30 | resources: 31 | - statefulsets 32 | verbs: 33 | - create 34 | - delete 35 | - get 36 | - list 37 | - patch 38 | - update 39 | - watch 40 | - apiGroups: 41 | - cluster.ipfs.io 42 | resources: 43 | - circuitrelays 44 | verbs: 45 | - create 46 | - delete 47 | - get 48 | - list 49 | - patch 50 | - update 51 | - watch 52 | - apiGroups: 53 | - cluster.ipfs.io 54 | resources: 55 | - circuitrelays/finalizers 56 | verbs: 57 | - update 58 | - apiGroups: 59 | - cluster.ipfs.io 60 | resources: 61 | - circuitrelays/status 62 | verbs: 63 | - get 64 | - patch 65 | - update 66 | - apiGroups: 67 | - cluster.ipfs.io 68 | resources: 69 | - ipfsclusters 70 | verbs: 71 | - create 72 | - delete 73 | - get 74 | - list 75 | - patch 76 | - update 77 | - watch 78 | - apiGroups: 79 | - cluster.ipfs.io 80 | resources: 81 | - ipfsclusters/finalizers 82 | verbs: 83 | - update 84 | - 
apiGroups: 85 | - cluster.ipfs.io 86 | resources: 87 | - ipfsclusters/status 88 | verbs: 89 | - get 90 | - patch 91 | - update 92 | - apiGroups: 93 | - "" 94 | resources: 95 | - configmaps 96 | verbs: 97 | - create 98 | - delete 99 | - get 100 | - list 101 | - patch 102 | - update 103 | - watch 104 | - apiGroups: 105 | - "" 106 | resources: 107 | - persistentvolumeclaims 108 | verbs: 109 | - create 110 | - delete 111 | - get 112 | - list 113 | - patch 114 | - update 115 | - watch 116 | - apiGroups: 117 | - "" 118 | resources: 119 | - secrets 120 | verbs: 121 | - create 122 | - delete 123 | - get 124 | - list 125 | - patch 126 | - update 127 | - watch 128 | - apiGroups: 129 | - "" 130 | resources: 131 | - serviceaccounts 132 | verbs: 133 | - create 134 | - delete 135 | - get 136 | - list 137 | - patch 138 | - update 139 | - watch 140 | - apiGroups: 141 | - "" 142 | resources: 143 | - services 144 | verbs: 145 | - create 146 | - delete 147 | - get 148 | - list 149 | - patch 150 | - update 151 | - watch 152 | - apiGroups: 153 | - networking.k8s.io 154 | resources: 155 | - ingresses 156 | verbs: 157 | - create 158 | - delete 159 | - get 160 | - list 161 | - patch 162 | - update 163 | - watch 164 | -------------------------------------------------------------------------------- /helm/ipfs-operator/templates/ClusterRole-ipfs-operator-metrics-reader.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | annotations: {} 5 | labels: {} 6 | name: ipfs-operator-metrics-reader 7 | rules: 8 | - nonResourceURLs: 9 | - /metrics 10 | verbs: 11 | - get 12 | -------------------------------------------------------------------------------- /helm/ipfs-operator/templates/ClusterRole-ipfs-operator-proxy-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 
4 | annotations: {} 5 | labels: {} 6 | name: ipfs-operator-proxy-role 7 | rules: 8 | - apiGroups: 9 | - authentication.k8s.io 10 | resources: 11 | - tokenreviews 12 | verbs: 13 | - create 14 | - apiGroups: 15 | - authorization.k8s.io 16 | resources: 17 | - subjectaccessreviews 18 | verbs: 19 | - create 20 | -------------------------------------------------------------------------------- /helm/ipfs-operator/templates/ClusterRoleBinding-ipfs-operator-manager-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | annotations: {} 5 | labels: {} 6 | name: ipfs-operator-manager-rolebinding 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: ClusterRole 10 | name: ipfs-operator-manager-role 11 | subjects: 12 | - kind: ServiceAccount 13 | name: ipfs-operator-controller-manager 14 | namespace: ipfs-operator-system 15 | -------------------------------------------------------------------------------- /helm/ipfs-operator/templates/ClusterRoleBinding-ipfs-operator-proxy-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | annotations: {} 5 | labels: {} 6 | name: ipfs-operator-proxy-rolebinding 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: ClusterRole 10 | name: ipfs-operator-proxy-role 11 | subjects: 12 | - kind: ServiceAccount 13 | name: ipfs-operator-controller-manager 14 | namespace: ipfs-operator-system 15 | -------------------------------------------------------------------------------- /helm/ipfs-operator/templates/ConfigMap-ipfs-operator-manager-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | controller_manager_config.yaml: | 4 | apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 5 | kind: 
ControllerManagerConfig 6 | health: 7 | healthProbeBindAddress: :8081 8 | metrics: 9 | bindAddress: 127.0.0.1:8080 10 | webhook: 11 | port: 9443 12 | leaderElection: 13 | leaderElect: true 14 | resourceName: 658003f6.ipfs.io 15 | kind: ConfigMap 16 | metadata: 17 | annotations: {} 18 | labels: {} 19 | name: ipfs-operator-manager-config 20 | namespace: ipfs-operator-system 21 | -------------------------------------------------------------------------------- /helm/ipfs-operator/templates/Deployment-ipfs-operator-controller-manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: {} 5 | labels: 6 | control-plane: controller-manager 7 | name: ipfs-operator-controller-manager 8 | namespace: ipfs-operator-system 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | control-plane: controller-manager 14 | template: 15 | metadata: 16 | annotations: 17 | kubectl.kubernetes.io/default-container: manager 18 | labels: 19 | control-plane: controller-manager 20 | spec: 21 | containers: 22 | - args: 23 | - --secure-listen-address=0.0.0.0:8443 24 | - --upstream=http://127.0.0.1:8080/ 25 | - --logtostderr=true 26 | - --v=0 27 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 28 | name: kube-rbac-proxy 29 | ports: 30 | - containerPort: 8443 31 | name: https 32 | protocol: TCP 33 | resources: 34 | limits: 35 | cpu: 500m 36 | memory: 128Mi 37 | requests: 38 | cpu: 5m 39 | memory: 64Mi 40 | - args: 41 | - --health-probe-bind-address=:8081 42 | - --metrics-bind-address=127.0.0.1:8080 43 | - --leader-elect 44 | command: 45 | - /manager 46 | image: "{{ include "container-image" (list . 
.Values.image) }}" 47 | imagePullPolicy: {{ .Values.image.pullPolicy }} 48 | livenessProbe: 49 | httpGet: 50 | path: /healthz 51 | port: 8081 52 | initialDelaySeconds: 15 53 | periodSeconds: 20 54 | name: manager 55 | readinessProbe: 56 | httpGet: 57 | path: /readyz 58 | port: 8081 59 | initialDelaySeconds: 5 60 | periodSeconds: 10 61 | resources: 62 | limits: 63 | cpu: 100m 64 | memory: 300Mi 65 | requests: 66 | cpu: 100m 67 | memory: 20Mi 68 | securityContext: 69 | allowPrivilegeEscalation: false 70 | securityContext: 71 | runAsNonRoot: true 72 | serviceAccountName: ipfs-operator-controller-manager 73 | terminationGracePeriodSeconds: 10 74 | -------------------------------------------------------------------------------- /helm/ipfs-operator/templates/Namespace-ipfs-operator-system.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | annotations: {} 5 | labels: 6 | control-plane: controller-manager 7 | name: ipfs-operator-system 8 | -------------------------------------------------------------------------------- /helm/ipfs-operator/templates/Role-ipfs-operator-leader-election-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | annotations: {} 5 | labels: {} 6 | name: ipfs-operator-leader-election-role 7 | namespace: ipfs-operator-system 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - configmaps 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - create 18 | - update 19 | - patch 20 | - delete 21 | - apiGroups: 22 | - coordination.k8s.io 23 | resources: 24 | - leases 25 | verbs: 26 | - get 27 | - list 28 | - watch 29 | - create 30 | - update 31 | - patch 32 | - delete 33 | - apiGroups: 34 | - "" 35 | resources: 36 | - events 37 | verbs: 38 | - create 39 | - patch 40 | 
-------------------------------------------------------------------------------- /helm/ipfs-operator/templates/RoleBinding-ipfs-operator-leader-election-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | annotations: {} 5 | labels: {} 6 | name: ipfs-operator-leader-election-rolebinding 7 | namespace: ipfs-operator-system 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: Role 11 | name: ipfs-operator-leader-election-role 12 | subjects: 13 | - kind: ServiceAccount 14 | name: ipfs-operator-controller-manager 15 | namespace: ipfs-operator-system 16 | -------------------------------------------------------------------------------- /helm/ipfs-operator/templates/Service-ipfs-operator-controller-manager-metrics-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: {} 5 | labels: 6 | control-plane: controller-manager 7 | name: ipfs-operator-controller-manager-metrics-service 8 | namespace: ipfs-operator-system 9 | spec: 10 | ports: 11 | - name: https 12 | port: 8443 13 | protocol: TCP 14 | targetPort: https 15 | selector: 16 | control-plane: controller-manager 17 | -------------------------------------------------------------------------------- /helm/ipfs-operator/templates/ServiceAccount-ipfs-operator-controller-manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | annotations: {} 5 | labels: {} 6 | name: ipfs-operator-controller-manager 7 | namespace: ipfs-operator-system 8 | -------------------------------------------------------------------------------- /helm/ipfs-operator/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Define container-image so 
that we can override which image is used 3 | */}} 4 | {{- define "container-image" -}} 5 | {{- $ := index . 0 }} 6 | {{- with index . 1 }} 7 | {{- if .image -}} 8 | {{ .image }} 9 | {{- else -}} 10 | {{ .repository }}:{{ .tag | default $.Chart.AppVersion }} 11 | {{- end -}} 12 | {{- end -}} 13 | {{- end -}} 14 | -------------------------------------------------------------------------------- /helm/ipfs-operator/values.yaml: -------------------------------------------------------------------------------- 1 | # This helm chart has no configurable options at the moment. 2 | # This will change in the future. 3 | # 4 | # To re-generate this helm chart, run `make helm-template` 5 | 6 | # image defines the main controller image to be used for the manager container. 7 | image: 8 | repository: quay.io/redhat-et-ipfs/ipfs-operator 9 | pullPolicy: IfNotPresent 10 | # tag value or the default 11 | tag: "" 12 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package main 18 | 19 | import ( 20 | "flag" 21 | "os" 22 | 23 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 24 | // to ensure that exec-entrypoint and run can make use of them. 
25 | _ "k8s.io/client-go/plugin/pkg/client/auth" 26 | 27 | "k8s.io/apimachinery/pkg/runtime" 28 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 29 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 30 | ctrl "sigs.k8s.io/controller-runtime" 31 | "sigs.k8s.io/controller-runtime/pkg/healthz" 32 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 33 | 34 | clusterv1alpha1 "github.com/redhat-et/ipfs-operator/api/v1alpha1" 35 | "github.com/redhat-et/ipfs-operator/controllers" 36 | //+kubebuilder:scaffold:imports 37 | ) 38 | 39 | const ( 40 | MgrPort = 9443 41 | ) 42 | 43 | // define flag names. 44 | const ( 45 | metricsAddrFlagName = "metrics-bind-address" 46 | probeAddrFlagName = "health-probe-bind-address" 47 | leaderElectFlagName = "leader-elect" 48 | ) 49 | 50 | // define flag defaults. 51 | const ( 52 | defaultMetricsAddr = ":8080" 53 | defaultProbeAddr = ":8081" 54 | defaultLeaderElect = false 55 | ) 56 | 57 | var ( 58 | scheme = runtime.NewScheme() 59 | setupLog = ctrl.Log.WithName("setup") 60 | ) 61 | 62 | func init() { 63 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 64 | utilruntime.Must(clusterv1alpha1.AddToScheme(scheme)) 65 | //+kubebuilder:scaffold:scheme 66 | } 67 | 68 | // addCommandLineFlags Creates the flags to be consumed by the binary on startup, 69 | // and binds their values to the provided arguments. 70 | func addCommandLineFlags(metricsAddr, probeAddr *string, enableLeaderElection *bool) { 71 | flag.StringVar(metricsAddr, metricsAddrFlagName, defaultMetricsAddr, 72 | "The address the metric endpoint binds to.", 73 | ) 74 | flag.StringVar(probeAddr, probeAddrFlagName, defaultProbeAddr, 75 | "The address the probe endpoint binds to.", 76 | ) 77 | flag.BoolVar(enableLeaderElection, leaderElectFlagName, defaultLeaderElect, 78 | "Enable leader election for controller manager. 
"+ 79 | "Enabling this will ensure there is only one active controller manager.", 80 | ) 81 | opts := zap.Options{ 82 | Development: true, 83 | } 84 | opts.BindFlags(flag.CommandLine) 85 | flag.Parse() 86 | 87 | ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) 88 | } 89 | 90 | func main() { 91 | var metricsAddr, probeAddr string 92 | var enableLeaderElection bool 93 | 94 | // set the command line flags 95 | addCommandLineFlags(&metricsAddr, &probeAddr, &enableLeaderElection) 96 | 97 | mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ 98 | Scheme: scheme, 99 | MetricsBindAddress: metricsAddr, 100 | Port: MgrPort, 101 | HealthProbeBindAddress: probeAddr, 102 | LeaderElection: enableLeaderElection, 103 | LeaderElectionID: "658003f6.ipfs.io", 104 | }) 105 | if err != nil { 106 | setupLog.Error(err, "unable to start manager") 107 | os.Exit(1) 108 | } 109 | 110 | if err = (&controllers.IpfsClusterReconciler{ 111 | Client: mgr.GetClient(), 112 | Scheme: mgr.GetScheme(), 113 | }).SetupWithManager(mgr); err != nil { 114 | setupLog.Error(err, "unable to create controller", "controller", "Ipfs") 115 | os.Exit(1) 116 | } 117 | if err = (&controllers.CircuitRelayReconciler{ 118 | Client: mgr.GetClient(), 119 | Scheme: mgr.GetScheme(), 120 | }).SetupWithManager(mgr); err != nil { 121 | setupLog.Error(err, "unable to create controller", "controller", "CircuitRelay") 122 | os.Exit(1) 123 | } 124 | if err = (&controllers.IpfsClusterReconciler{ 125 | Client: mgr.GetClient(), 126 | Scheme: mgr.GetScheme(), 127 | }).SetupWithManager(mgr); err != nil { 128 | setupLog.Error(err, "unable to create controller", "controller", "IpfsCluster") 129 | os.Exit(1) 130 | } 131 | //+kubebuilder:scaffold:builder 132 | 133 | if err = mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { 134 | setupLog.Error(err, "unable to set up health check") 135 | os.Exit(1) 136 | } 137 | if err = mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { 138 | setupLog.Error(err, "unable to 
set up ready check") 139 | os.Exit(1) 140 | } 141 | 142 | setupLog.Info("starting manager") 143 | if err = mgr.Start(ctrl.SetupSignalHandler()); err != nil { 144 | setupLog.Error(err, "problem running manager") 145 | os.Exit(1) 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "IPFS operator" 3 | authors = [{name = "IPFS Operator dev team", email = ""}] 4 | dynamic = ["version", "description"] 5 | -------------------------------------------------------------------------------- /test-kuttl/e2e-copy/private-network/00-create-ipfs-cluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cluster.ipfs.io/v1alpha1 3 | kind: IpfsCluster 4 | metadata: 5 | name: private-ipfs 6 | spec: 7 | ipfsStorage: 2Gi 8 | clusterStorage: 2Gi 9 | replicas: 2 10 | follows: [] 11 | ipfsResources: 12 | limits: 13 | cpu: 100m 14 | memory: 128M 15 | networking: 16 | public: false 17 | circuitRelays: 1 18 | -------------------------------------------------------------------------------- /test-kuttl/e2e-copy/private-network/05-assert-private-ipfs-network.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kuttl.dev/v1beta1 3 | kind: TestAssert 4 | collectors: 5 | - type: command 6 | command: kubectl -n "$NAMESPACE" describe all,ipfscluster 7 | - type: command 8 | command: kubectl -n ipfs-operator-system logs deployment/ipfs-operator-controller-manager 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: ipfs-cluster-private-ipfs 14 | status: 15 | currentReplicas: 2 16 | readyReplicas: 2 17 | -------------------------------------------------------------------------------- /test-kuttl/e2e-copy/private-network/10-test-data-creation.sh: 
#!/bin/bash
# End-to-end check for the private network: write a file on one
# ipfs-cluster replica, add it to the cluster, and fetch it by CID
# from a second replica.
# Globals:
#   NAMESPACE - test namespace supplied by the kuttl test harness.
set -eo pipefail

echo "whoami: $(whoami)"

# imports
echo "sourcing utils.sh"
source './utils.sh'

echo "checking to see if awk is installed: '$(which awk)'"
echo "checking if grep is installed: '$(which grep)'"

main() {
  echo "this is the first message once the script begins to run"
  # get the name of an IPFS Cluster pod
  crdName='private-ipfs'
  labelValue="ipfs-cluster-${crdName}"
  labelName='app.kubernetes.io/name'
  ipfsClusterPodName=$(kubectl get pod -n "${NAMESPACE}" -l "${labelName}=${labelValue}" -o jsonpath='{.items[0].metadata.name}')

  # write a file to the ipfs-cluster container in the pod.
  # NOTE: declare and assign separately so a failing `kubectl exec` is not
  # masked by `local` (ShellCheck SC2155) and still trips `set -e`.
  echo "writing a file to ${ipfsClusterPodName}"
  local results
  results=$(kubectl exec -n "${NAMESPACE}" "${ipfsClusterPodName}" -c ipfs-cluster -- sh -c 'echo "hello from ${HOSTNAME} at $(date)" > /tmp/testfile.txt')

  echo "list out the contents of temp, check for testfile"
  results=$(kubectl exec -n "${NAMESPACE}" "${ipfsClusterPodName}" -c ipfs-cluster -- sh -c 'ls -al /tmp | grep testfile')
  echo "results: '${results}'"
  echo "contents of new file:"
  local contents
  contents=$(kubectl exec -n "${NAMESPACE}" "${ipfsClusterPodName}" -c ipfs-cluster -- sh -c 'cat /tmp/testfile.txt')
  echo "file contents: '${contents}'"
  echo "checking for ipfs-cluster-ctl"
  local ipfsClusterCMD
  ipfsClusterCMD=$(kubectl exec -n "${NAMESPACE}" "${ipfsClusterPodName}" -c ipfs-cluster -- sh -c 'which ipfs-cluster-ctl')
  echo "ipfs-cluster-ctl: ${ipfsClusterCMD}"
  echo "checking permissions to use ipfs-cluster-ctl:"
  local ipfsClusterCtlPerms
  ipfsClusterCtlPerms=$(kubectl exec -n "${NAMESPACE}" "${ipfsClusterPodName}" -c ipfs-cluster -- sh -c 'ls -al /usr/local/bin/ipfs-cluster-ctl')
  echo "perms: ${ipfsClusterCtlPerms}"
  echo "checking container user:"
  local ctrUser
  ctrUser=$(kubectl exec -n "${NAMESPACE}" "${ipfsClusterPodName}" -c ipfs-cluster -- sh -c 'whoami')
  echo "container user: ${ctrUser}"

  # delete the lockfile if it exists
  kubectl exec -n "${NAMESPACE}" "${ipfsClusterPodName}" -c ipfs-cluster -- sh -c 'if [ -e /data/ipfs/repo.lock ]; then rm /data/ipfs/repo.lock; fi'

  # grab the content ID, retrying until the add succeeds; the CID is taken
  # from the second whitespace-separated field of the add command's output
  echo "grabbing the content ID"
  myCID=""
  until [[ -n $myCID ]]; do
    myCID=$(kubectl exec -n "${NAMESPACE}" "${ipfsClusterPodName}" -c ipfs-cluster -- sh -c 'if [ -e /data/ipfs/repo.lock ]; then rm /data/ipfs/repo.lock; fi; ipfs-cluster-ctl add /tmp/testfile.txt' | awk '{print $2}') || true
    # only sleep and announce a retry when the add actually failed
    if [[ -z $myCID ]]; then
      sleep 5
      echo "trying again"
    fi
  done
  echo "content ID is: ${myCID}"

  # read the value back from the second replica (index 1)
  echo "getting the other ipfs cluster podname"
  ipfsClusterPodname2=$(kubectl get pod -n "${NAMESPACE}" -l "${labelName}=${labelValue}" -o jsonpath='{.items[1].metadata.name}')

  ipfsCommand="if [ -e /data/ipfs/repo.lock ]; then rm /data/ipfs/repo.lock; fi; ipfs get --output /tmp/myfile.txt -- ${myCID}"

  echo "reading a file from ${ipfsClusterPodname2} using command: '${ipfsCommand}'"
  until kubectl exec -n "${NAMESPACE}" "${ipfsClusterPodname2}" -c ipfs -- sh -c "${ipfsCommand}"; do
    echo "failed, sleeping..."
    sleep 5
  done

  echo "success!"
}

main
#!/bin/bash
# Verify that the cluster-follow pod has replicated the collaborative
# cluster's pinned content.
# Globals:
#   NAMESPACE - test namespace supplied by the kuttl test harness.
set -eo pipefail

# imports
source './utils.sh'

########################################
# Count the amount of pins in the IPFS node
# Arguments:
#   Name of IPFS pod
#   Name of follow container (optional; when omitted, kubectl targets the
#   pod's default container — the original call site passes one argument)
# Globals:
#   NAMESPACE
# Returns:
#   Number of recursively pinned nodes (0 when nothing is pinned yet)
########################################
count_pins() {
  declare podName="${1}"
  declare followContainerName="${2:-}"
  declare pinTypeRecursive="recursive"
  declare pins=""
  declare -a containerFlag=()

  # only pass -c when a container was requested; an empty "-c ''" makes
  # kubectl fail and, under `set -e`, aborts the whole script
  if [[ -n "${followContainerName}" ]]; then
    containerFlag=(-c "${followContainerName}")
  fi

  # `grep` exits non-zero when no recursive pins exist yet; with `pipefail`
  # that would kill the retry loop, so swallow the failure and report 0
  pins=$(kubectl exec "${podName}" -n "${NAMESPACE}" "${containerFlag[@]}" -- ipfs pin ls | grep -i "${pinTypeRecursive}") || true
  if [[ -z "${pins}" ]]; then
    echo 0
    return
  fi
  readarray -d $'\n' -t pinArray <<< "${pins}"
  echo "${#pinArray[*]}"
}

main() {
  log "verifying contents"

  # default number of pins
  declare -i DEFAULT_PINS=2

  # get the name of an IPFS Cluster pod
  declare crdName='cluster-1'
  declare labelValue="ipfs-cluster-${crdName}"
  declare labelName='app.kubernetes.io/name'
  declare ipfsClusterPodName
  ipfsClusterPodName=$(kubectl get pod -n "${NAMESPACE}" -l "${labelName}=${labelValue}" -o jsonpath='{.items[0].metadata.name}')

  # poll until the pod reports more pins than the default set
  declare -i numPins=0
  while [[ "${numPins}" -le "${DEFAULT_PINS}" ]]; do
    numPins=$(count_pins "${ipfsClusterPodName}")
    log "current number of pins: ${numPins}"
    if [[ "${numPins}" -le "${DEFAULT_PINS}" ]]; then
      log "not enough pins, sleeping..."
      sleep 5
    fi
  done
}

main
########################################
# logs a message to stdout, prefixed with a timestamp.
# Globals:
#   input - set (not local) to the joined message.
# Arguments:
#   A list of strings to print out.
########################################
log() {
  input="${*}"
  # plain `echo` (no -e) printed the previous trailing "\n" as two literal
  # characters; the stray escape is dropped, matching the other utils.sh
  # helpers in this test suite
  echo '['$(date)"]: ${input}"
}
#!/bin/bash
# End-to-end check: add a file through ipfs-cluster on one replica and
# fetch it by CID from a second replica over IPFS.
# Globals:
#   NAMESPACE - test namespace supplied by the kuttl test harness.
set -eo pipefail

# imports
source './utils.sh'

main() {
  # get the name of an IPFS Cluster pod
  crdName='ipfs-sample-1'
  labelValue="ipfs-cluster-${crdName}"
  labelName='app.kubernetes.io/name'
  ipfsClusterPodName=$(kubectl get pod -n "${NAMESPACE}" -l "${labelName}=${labelValue}" -o jsonpath='{.items[0].metadata.name}')

  # write a file to the ipfs-cluster container in the pod
  log "writing a file to ${ipfsClusterPodName}"
  kubectl exec -n "${NAMESPACE}" "${ipfsClusterPodName}" -c ipfs-cluster -- sh -c 'echo "hello from ${HOSTNAME} at $(date)" > /tmp/testfile.txt'
  # the CID is taken from the second whitespace-separated field of the
  # add command's output
  myCID=$(kubectl exec -n "${NAMESPACE}" "${ipfsClusterPodName}" -c ipfs-cluster -- sh -c 'ipfs-cluster-ctl add /tmp/testfile.txt' | awk '{print $2}')

  # read the value from the second replica (index 1) via the ipfs container
  ipfsClusterPodname2=$(kubectl get pod -n "${NAMESPACE}" -l "${labelName}=${labelValue}" -o jsonpath='{.items[1].metadata.name}')
  log "reading a file from ${ipfsClusterPodname2}"
  ipfsCommand="ipfs get --output /tmp/myfile.txt -- ${myCID}"
  # fails the test (via set -e) if the content is not retrievable
  kubectl exec -n "${NAMESPACE}" "${ipfsClusterPodname2}" -c ipfs -- sh -c "${ipfsCommand}"
}

main
########################################
# logs a message to stdout.
# Globals:
#   input - set (not local) to the joined message; leaks into the caller's
#           scope.
# Arguments:
#   A list of strings to print out.
# Outputs:
#   "[<timestamp>]: <message>" on stdout.
########################################
log() {
  input="${*}"
  # $(date) is left unquoted, so echo re-joins its words with single spaces
  echo '['$(date)"]: ${input}"
}
#!/bin/bash
# End-to-end check for the private network: add a file through
# ipfs-cluster on one replica and fetch it by CID from a second replica.
# Globals:
#   NAMESPACE - test namespace supplied by the kuttl test harness.
set -eo pipefail

# imports
source './utils.sh'



main() {
  # get the name of an IPFS Cluster pod
  crdName='private-ipfs'
  labelValue="ipfs-cluster-${crdName}"
  labelName='app.kubernetes.io/name'
  ipfsClusterPodName=$(kubectl get pod -n "${NAMESPACE}" -l "${labelName}=${labelValue}" -o jsonpath='{.items[0].metadata.name}')

  # write a file to the ipfs-cluster container in the pod
  log "writing a file to ${ipfsClusterPodName}"
  kubectl exec -n "${NAMESPACE}" "${ipfsClusterPodName}" -c ipfs-cluster -- sh -c 'echo "hello from ${HOSTNAME} at $(date)" > /tmp/testfile.txt'
  # the CID is taken from the second whitespace-separated field of the
  # add command's output
  myCID=$(kubectl exec -n "${NAMESPACE}" "${ipfsClusterPodName}" -c ipfs-cluster -- sh -c 'ipfs-cluster-ctl add /tmp/testfile.txt' | awk '{print $2}')

  # read the value from the second replica (index 1) via the ipfs container
  ipfsClusterPodname2=$(kubectl get pod -n "${NAMESPACE}" -l "${labelName}=${labelValue}" -o jsonpath='{.items[1].metadata.name}')
  log "reading a file from ${ipfsClusterPodname2}"
  ipfsCommand="ipfs get --output /tmp/myfile.txt -- ${myCID}"
  # fails the test (via set -e) if the content is not retrievable
  kubectl exec -n "${NAMESPACE}" "${ipfsClusterPodname2}" -c ipfs -- sh -c "${ipfsCommand}"
}

main
########################################
# logs a message to stdout.
# Globals:
#   input - set (not local) to the joined message; leaks into the caller's
#           scope.
# Arguments:
#   A list of strings to print out.
# Outputs:
#   "[<timestamp>]: <message>" on stdout.
########################################
log() {
  input="${*}"
  # $(date) is left unquoted, so echo re-joins its words with single spaces
  echo '['$(date)"]: ${input}"
}
our go.mod file 8 | import ( 9 | _ "github.com/onsi/ginkgo/v2/ginkgo" 10 | ) 11 | -------------------------------------------------------------------------------- /version.mk: -------------------------------------------------------------------------------- 1 | # This file contains information about versioning that is used when building containers. 2 | 3 | VERSION := 0.0.1 4 | HEAD_HASH := $(shell git rev-parse --short HEAD) 5 | # whether or not this repository has any commits, or this is directly from the commit# 6 | DIRTY := $(shell git diff --quiet || echo '-dirty') 7 | # the full version string, to be consumed by containers 8 | BUILD_VERSION := v$(VERSION)+$(HEAD_HASH)$(DIRTY) 9 | 10 | BUILD_DATE := $(shell date -u '+%Y-%m-%dT%H:%M:%S.%NZ') --------------------------------------------------------------------------------