├── .dockerignore ├── .github ├── dependabot.yml └── workflows │ ├── docker.yml │ ├── helm-oci.yml │ └── rust.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── Dockerfile ├── LICENSE.md ├── README.md ├── SECURITY.md ├── charts └── chisel-operator │ ├── .helmignore │ ├── Chart.yaml │ ├── templates │ ├── _helpers.tpl │ ├── crds │ │ ├── exit-node-provisioner.yaml │ │ └── exit-node.yaml │ ├── deployment.yaml │ └── serviceaccount.yaml │ └── values.yaml ├── deploy ├── clusterrole.yaml ├── clusterrolebinding.yaml ├── crd │ ├── exit-node-provisioner.yaml │ ├── exit-node.yaml │ └── kustomization.yaml ├── deployment.yaml ├── kustomization.yaml ├── namespace.yaml └── serviceaccount.yaml ├── example ├── cloud-provider.yaml ├── exit-node-cloud.yaml ├── exit-node.yaml └── whoami.yaml ├── kustomization.yaml ├── site ├── .gitignore ├── README.md ├── astro.config.mjs ├── package.json ├── pnpm-lock.yaml ├── public │ └── favicon.svg ├── src │ ├── assets │ │ └── houston.webp │ ├── content │ │ ├── config.ts │ │ └── docs │ │ │ ├── cloud │ │ │ ├── aws.md │ │ │ ├── digitalocean.md │ │ │ └── linode.md │ │ │ ├── guides │ │ │ ├── exposing-a-service.md │ │ │ ├── installation.md │ │ │ ├── self-host-exit-node.md │ │ │ └── using-cloud-provisioning.md │ │ │ ├── index.mdx │ │ │ └── reference │ │ │ ├── exitnode.md │ │ │ └── exitnodeprovisioner.md │ └── env.d.ts └── tsconfig.json └── src ├── bin └── crdgen.rs ├── cloud ├── aws.rs ├── cloud_init.rs ├── digitalocean.rs ├── linode.rs ├── mod.rs └── pwgen.rs ├── daemon.rs ├── deployment.rs ├── error.rs ├── lib.rs ├── main.rs ├── ops.rs └── util.rs /.dockerignore: -------------------------------------------------------------------------------- 1 | target/ 2 | .env 3 | Dockerfile 4 | *.log -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "cargo" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "weekly" 12 | - package-ecosystem: "npm" # See documentation for possible values 13 | directory: "/site" # Location of package manifests 14 | schedule: 15 | interval: "weekly" 16 | -------------------------------------------------------------------------------- /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | name: Docker 2 | 3 | on: 4 | schedule: 5 | - cron: "15 14 * * *" 6 | push: 7 | paths: 8 | - "Dockerfile" 9 | - "src/**" 10 | - "Cargo.lock" 11 | - "Cargo.toml" 12 | - ".dockerignore" 13 | - ".github/workflows/docker.yml" 14 | branches: ["main", "stable"] 15 | # Publish semver tags as releases. 
16 | tags: ["v*.*.*"] 17 | pull_request: 18 | branches: ["main"] 19 | workflow_dispatch: 20 | 21 | jobs: 22 | docker: 23 | uses: FyraLabs/actions/.github/workflows/docker.yml@main 24 | with: 25 | publish: ${{ github.event_name != 'pull_request' }} 26 | permissions: 27 | contents: read 28 | packages: write 29 | # This is used to complete the identity challenge 30 | # with sigstore/fulcio when running outside of PRs. 31 | id-token: write 32 | -------------------------------------------------------------------------------- /.github/workflows/helm-oci.yml: -------------------------------------------------------------------------------- 1 | name: Helm OCI 2 | 3 | on: 4 | schedule: 5 | - cron: "15 14 * * *" 6 | push: 7 | paths: 8 | - "charts/**" 9 | - ".github/workflows/helm-oci.yml" 10 | 11 | branches: ["main", "stable"] 12 | # Publish semver tags as releases. 13 | # tags: ["v*.*.*"] 14 | pull_request: 15 | branches: ["main"] 16 | workflow_dispatch: 17 | 18 | jobs: 19 | helm: 20 | if: contains(github.ref, 'refs/tags/v') || contains(github.ref, 'refs/heads/stable') 21 | permissions: 22 | contents: read 23 | packages: write 24 | # This is used to complete the identity challenge 25 | # with sigstore/fulcio when running outside of PRs. 26 | id-token: write 27 | # uses: appany/helm-oci-chart-releaser@v0.3.0 28 | runs-on: ubuntu-latest 29 | steps: 30 | - uses: actions/checkout@v4 31 | - uses: appany/helm-oci-chart-releaser@v0.3.0 32 | with: 33 | name: chisel-operator 34 | repository: fyralabs/chisel-operator 35 | tag: 0.1.0 36 | path: charts/chisel-operator 37 | registry: ghcr.io 38 | registry_username: ${{ github.actor }} 39 | registry_password: ${{ secrets.GITHUB_TOKEN }} 40 | # update_dependencies: true 41 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | pull_request: 6 | 7 | jobs: 8 | rust: 9 | permissions: 10 | security-events: write 11 | uses: FyraLabs/actions/.github/workflows/rust.yml@main -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | 3 | 4 | /test 5 | 6 | .env 7 | .vscode/ 8 | *.log -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "chisel-operator" 3 | version = "0.6.0-beta.1" 4 | edition = "2021" 5 | description = "Chisel tunnel operator for Kubernetes" 6 | authors = [ 7 | "Pornpipat 'Cappy Ishihara' Popum ", 8 | "Lleyton Grey ", 9 | ] 10 | categories = ["network-programming", "config"] 11 | license = "GPL-3.0" 12 | keywords = [ 13 | "kubernetes", 14 | "tunnel", 15 | "chisel", 16 | "k8s-operator", 17 | "operator", 18 | "http", 19 | "tcp", 20 | "udp", 21 | "networking", 22 | ] 23 | default-run = "chisel-operator" 24 | 25 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 26 | 27 | [dependencies] 28 | kube = { version = "0.98", features = [ 29 | "runtime", 30 | "derive", 31 | "rustls-tls", 32 | "jsonpatch", 33 | "client", 34 | ], default-features = false } 35 | k8s-openapi = { version = "0.24", features = ["v1_32"] } 36 | serde = { version = "1.0.204", features = ["derive"] } 37 | serde_json = "1" 38 | serde_yaml = "0.9" 39 | dotenvy = "0.15" 40 | tokio = { version = "1", features = ["full"] }
41 | tracing = { version = "0.1", features = ["log", "async-await"] } 42 | color-eyre = "0.6" 43 | schemars = "0.8" 44 | futures = "0.3" 45 | thiserror = "1.0" 46 | # pretty_env_logger = "0.5.0" 47 | tracing-subscriber = { version = "0.3", features = ["json", "env-filter"] } 48 | tracing-logfmt = "0.3.4" 49 | uuid = "1.15" 50 | digitalocean-rs = { version = "0.1.9", default-features = false, features = [ 51 | "default-rustls", 52 | ] } 53 | rand = { version = "0.8.5", features = ["log", "serde"] } 54 | async-trait = "0.1.80" 55 | names = "0.14.0" 56 | linode-rs = { version = "0.1.3", default-features = false, features = [ 57 | "default-rustls", 58 | ] } 59 | base64 = "0.22.0" 60 | trait_enum = "0.5.0" 61 | aws-config = { version = "1.1.1", default-features = false, features = [ 62 | "rt-tokio", 63 | "behavior-version-latest", 64 | ] } 65 | aws-sdk-ec2 = { version = "1.13.0", default-features = false, features = [ 66 | "rt-tokio", 67 | "behavior-version-latest", 68 | ] } 69 | aws-sdk-ssm = { version = "1.49.0", default-features = false, features = [ 70 | "rt-tokio", 71 | "behavior-version-latest", 72 | ] } 73 | aws-smithy-runtime = { version = "1.7.8", default-features = false, features = [ 74 | "client", 75 | "connector-hyper-0-14-x", 76 | ] } 77 | hyper-rustls = { version = "0.24.2", features = [ 78 | "http2", 79 | "webpki-roots", 80 | "webpki-tokio", 81 | ] } 82 | itertools = "0.14.0" 83 | # opentelemetry = { version = "0.18.0", features = ["trace", "rt-tokio"] } 84 | # opentelemetry-otlp = { version = "0.11.0", features = ["tokio"] } 85 | # tonic = { version = "0.8.3" } 86 | # tracing-opentelemetry = "0.18.0" 87 | # opentelemetry_api = "0.18.0" 88 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rust:latest AS build-env 2 | RUN apt update 3 | RUN apt install -y libssl-dev mold 4 | WORKDIR /app 5 | COPY . /app 6 | ENV CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse 7 | ENV RUSTFLAGS="-C link-arg=-fuse-ld=mold" 8 | # build in release mode; the binary lands in /app/target/release so the next stage can copy it 9 | RUN --mount=type=cache,target=/root/.cargo \ 10 | cargo build --release 11 | 12 | FROM redhat/ubi9-micro:latest 13 | # RUN useradd -u 1001 chisel 14 | USER 1001 15 | COPY --from=build-env --chown=1001 /app/target/release/chisel-operator /usr/bin/chisel-operator 16 | CMD ["chisel-operator"] 17 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users.
We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". 
"Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. 
For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 
204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 
268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. 
But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 
387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. 
You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. 
"Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 
564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 
628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | <one line to give the program's name and a brief idea of what it does.> 635 | Copyright (C) <year> <name of author> 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see <https://www.gnu.org/licenses/>. 649 | 650 | Also add information on how to contact you by electronic and paper mail. 651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | <program> Copyright (C) <year> <name of author> 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | <https://www.gnu.org/licenses/>. 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | <https://www.gnu.org/philosophy/why-not-lgpl.html>. 675 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Chisel Kubernetes Operator ⚒️ 2 | 3 | Use a VPS (or any other machine) as a reverse proxy for your Kubernetes cluster, without paying the extra $25 a month! 4 | 5 | This is a Kubernetes operator for Chisel. It allows you to use Chisel as a LoadBalancer provider for your Kubernetes cluster, similar to [inlets-operator](https://github.com/inlets/inlets-operator). 6 | 7 | View the documentation: https://chisel.fyralabs.com 8 | 9 | ## Features 10 | 11 | - Automatic provisioning of exit nodes on cloud providers 12 | - Free and open-source 13 | - TCP and UDP support 14 | - **INFINITE** tunnels! (You can create as many tunnels as you want, as long as you have enough bandwidth)
15 | - Use any machine as an exit node 16 | - Hybrid cloud support (You can use multiple cloud providers at once) 17 | 18 | ## TODO 19 | 20 | - [x] Authentication 21 | - [x] Multiple tunnel services per exit node (so you don't have to pay for multiple VMs) 22 | - [x] Extra configuration options 23 | - [x] TCP/UDP support 24 | - [ ] Multiple IPs per exit node 25 | - [x] Multiple exit nodes support 26 | - [x] Cloud provisioner (like inletsctl/inlets-operator) 27 | 28 | ## Why? 29 | 30 | ### The issue 31 | 32 | If you want to expose a service to the internet, you need a public IP address. 33 | 34 | However, if you're running a cluster inside a NATed network (like a home network), you can't just expose your service to the internet. You need to port forward your service to the internet. This might be fine, but then there's another problem: 35 | 36 | The world is running out of IPv4 addresses. ISPs are starting to charge extra for public IP addresses, and most home networks are locked behind CGNAT, which means paying extra for a public IP address. 37 | 38 | You could just use an IPv6 address, but most ISPs don't support IPv6 yet, and K8s with IPv6 is kind of a pain to set up. 39 | 40 | > Disclaimer: We are not responsible for any problems arising from abuse of this software. Please proxy responsibly. 41 | > See [LICENSE.md](LICENSE.md) for more details. 42 | 43 | ### The other issue 44 | 45 | You might say, "What about Inlets?" Inlets is a great solution, but it comes with a couple of caveats: 46 | 47 | - You need to pay for an inlets PRO license to even use it (it's a proprietary solution) 48 | - You still need to pay for the exit node on top of the inlets PRO license 49 | 50 | ### The solution 51 | 52 | Introducing the Fyra Labs Chisel Operator! This operator provides a free and open-source replacement for inlets! 53 | 54 | This operator makes use of the [Chisel] tunnel to expose your services to the internet through SSH. And you can use any machine as an exit node! 55 | 56 | [Chisel]: https://github.com/jpillora/chisel 57 | 58 | Since Chisel routes traffic through SSH, all traffic is encrypted and secure. The Chisel Operator also supports automatic provisioning of exit nodes on cloud providers, so you get basically the same functionality, but free! 59 | 60 | --- 61 | 62 | While this code is free and open-source, we still accept donations! If you really like this project, please consider donating to us on [GitHub Sponsors](https://github.com/sponsors/FyraLabs) :3 63 | 64 | ## How does it work? 65 | 66 | This operator works similarly to inlets-operator. It watches for `LoadBalancer` services, then allocates an exit node's IP address for that service and creates a Chisel client deployment inside the cluster. The Chisel client then connects to the Chisel server running on the exit node, and the service is exposed on the exit node's IP address.
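To make that concrete, here is a hypothetical sketch of what an exposed service ends up looking like once the operator has reconciled it — the service name and IP are placeholder values, not anything the operator mandates:

```yaml
# A plain LoadBalancer service; the operator fills in the status.
apiVersion: v1
kind: Service
metadata:
  name: whoami
spec:
  type: LoadBalancer
  selector:
    app: whoami
  ports:
    - port: 80
      targetPort: 80
status:
  loadBalancer:
    ingress:
      - ip: 203.0.113.10 # the allocated exit node's public IP
```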
67 | 68 | ## Alternatives 69 | 70 | ### SaaS solutions 71 | 72 | - [Cloudflare (Argo) Tunnel](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps) - Cloudflare's solution to this problem. It is free and also open-source, but it only works on Cloudflare-managed domains and requires you to use Cloudflare's DNS service. It comes with a couple of caveats: 73 | - Only HTTP and HTTPS are supported for exposing services. If you want to expose a TCP service, you must connect to it through Cloudflare Tunnel on the client. 74 | - According to Cloudflare's [Terms of Service](https://www.cloudflare.com/terms/), you are not allowed to use Cloudflare's proxies to stream video or audio content. This means that you cannot use Cloudflare Tunnel to expose your Plex or Jellyfin server, or any other media streaming service. This is also the reason I started this project. 75 | - [ngrok](https://ngrok.com/) - ngrok is a proprietary solution that allows you to expose your local service to the internet. It is free to use, but it comes with a couple of caveats: 76 | - Only HTTP and HTTPS are supported for exposing services. TCP traffic is supported through a paid plan. 77 | - Limited bandwidth 78 | - Custom domains are only available on a paid plan 79 | 80 | ### Self-hosted solutions 81 | 82 | - Run Chisel manually on your exit node - This is the most straightforward solution. You can simply run Chisel manually on your exit node without using this operator. However, this solution is hard to automate, which is the point of this project. 83 | - [frp](https://github.com/fatedier/frp) - Fast reverse proxy; requires manual configuration of server and client. 84 | - [inlets](https://inlets.dev/) - Bite the bullet and pay for an inlets PRO license. inlets-pro allows you to automatically provision exit nodes on cloud providers, but it is a proprietary solution and requires you to pay a monthly fee. 85 | - [rathole](https://github.com/rapiz1/rathole) - Similar to frp, written in Rust. 86 | 87 | ### VPNs and overlay networks 88 | 89 | - [Tailscale](https://tailscale.com/) - VPN solution that connects your devices into one big overlay network. Also has Funnel, a reverse proxy solution that allows you to expose your local service to the internet. A self-hostable control plane is available, but the default is Tailscale's hosted control plane. 90 | - ZeroTier - Similar to Tailscale; BSD-licensed; can connect to multiple networks at once. 91 | 92 | --- 93 | 94 | Find more alternatives [here](https://github.com/anderspitman/awesome-tunneling). 95 | 96 | ## How do I use it? 97 | 98 | There are two ways to use this operator: 99 | 100 | - Manually provision the exit (reverse proxy) node, and let the operator manage the Chisel client deployment 101 | - Let the operator provision exit nodes on a cloud provider of your choice. The operator currently supports the following cloud providers: 102 | - DigitalOcean 103 | - Linode (currently only in regions with the Metadata service) 104 | - AWS 105 | 106 | ## Cluster Installation 107 | Install using the Kustomize config from the stable branch: 108 | 109 | ```bash 110 | kubectl apply -k https://github.com/FyraLabs/chisel-operator?ref=stable 111 | ``` 112 | 113 | Or if you would like to go straight to the latest commit: 114 | 115 | ```bash 116 | kubectl apply -k https://github.com/FyraLabs/chisel-operator 117 | ``` 118 | 119 | A Helm chart is also available as an OCI artifact. You can install it using the following command: 120 | 121 | ```bash 122 | helm install chisel-operator oci://ghcr.io/fyralabs/chisel-operator/chisel-operator 123 | ``` 124 | 125 | ## Usage 126 | 127 | ### Operator-managed exit nodes 128 | 129 | This operator can automatically provision exit nodes on cloud providers. 130 | 131 | To use this feature, you must first create an `ExitNodeProvisioner` resource. This resource contains the configuration for the cloud provider, and the operator will use it to provision exit nodes.
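The provisioner authenticates to the cloud provider with an API token stored in a Kubernetes secret in the same namespace — for DigitalOcean, under the key `DIGITALOCEAN_TOKEN`, which the `auth` field in the example below references by secret name. A minimal sketch of that secret, with a placeholder token value:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: digitalocean # matches the provisioner's `auth` field below
  namespace: default # must be the ExitNodeProvisioner's namespace
stringData:
  DIGITALOCEAN_TOKEN: dop_v1_replace_me # placeholder, not a real token
```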
132 | 133 | ```yaml 134 | apiVersion: chisel-operator.io/v1 135 | kind: ExitNodeProvisioner 136 | metadata: 137 | name: digitalocean 138 | namespace: default 139 | spec: 140 | # Cloud provider configuration, must be one per resource 141 | # Valid values are DigitalOcean, Linode, AWS 142 | DigitalOcean: 143 | # Reference to a secret containing the DigitalOcean API token 144 | # with key `DIGITALOCEAN_TOKEN` 145 | # Must be in the same namespace as the ExitNodeProvisioner 146 | auth: digitalocean 147 | region: sgp1 148 | ``` 149 | 150 | Now, you can go one of two routes: 151 | 152 | #### Automatic provisioning per service 153 | 154 | Chisel Operator can automatically allocate cloud exit nodes for services 155 | if you set an annotation on a `LoadBalancer` service. 156 | 157 | ```yaml 158 | apiVersion: v1 159 | kind: Service 160 | metadata: 161 | name: whoami 162 | annotations: 163 | # If the provisioner is in another namespace, you need to specify that 164 | # for example, with a namespace of example, the value would be example/digitalocean 165 | chisel-operator.io/exit-node-provisioner: "digitalocean" 166 | spec: 167 | selector: 168 | app: whoami 169 | ports: 170 | - port: 80 171 | targetPort: 80 172 | type: LoadBalancer 173 | ``` 174 | 175 | This will create a new `ExitNode` resource named after the service, and the operator will automatically allocate an exit node for that service. 176 | 177 | This is useful if you want to allocate an entire exit node for a single service. 178 | 179 | #### Manually-allocated, but operator-managed exit nodes 180 | 181 | You can also manually allocate exit nodes, but still let the operator manage the Chisel client deployment. This is useful if you want to allocate a single exit node for multiple services, say if you're on a budget and don't want to pay for a separate exit node for each service. 182 | 183 | To do this, create an `ExitNode` resource with the annotation `chisel-operator.io/exit-node-provisioner` set to the name (and namespace, if the provisioner is in a different namespace) of the `ExitNodeProvisioner` resource. 184 | 185 | ```yaml 186 | apiVersion: chisel-operator.io/v1 187 | kind: ExitNode 188 | metadata: 189 | name: my-exit-node 190 | namespace: default 191 | annotations: 192 | # If the provisioner is in another namespace, you need to specify that 193 | # for example, with a namespace of example, the value would be example/digitalocean 194 | chisel-operator.io/exit-node-provisioner: "digitalocean" 195 | spec: 196 | # IP address of exit node 197 | # In this case, we will leave this as a blank string, and let the operator allocate an IP address for us 198 | host: "" 199 | # Control plane socket port 200 | port: 9090 201 | # Name of the secret containing the auth key 202 | # This is not required, but recommended 203 | # If not set, the operator will automatically generate a secret for you 204 | # auth: cloud-test-auth 205 | ``` 206 |
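The commented-out `auth` field above names a secret holding the tunnel credentials. As described under Best Practices below, that secret lives in the same namespace as the `ExitNode` and carries a single key called `auth` whose value is a plain-text `username:password` string — a hypothetical sketch:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: cloud-test-auth # referenced by the ExitNode's `auth` field
  namespace: default # must match the ExitNode's namespace
stringData:
  auth: chisel:use-a-long-random-password # username:password, placeholder values
```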
208 | 
209 | ```yaml
210 | apiVersion: v1
211 | kind: Service
212 | metadata:
213 |   name: whoami
214 |   annotations:
215 |     chisel-operator.io/exit-node-name: "my-exit-node"
216 | spec:
217 |   selector:
218 |     app: whoami
219 |   ports:
220 |     - port: 80
221 |       targetPort: 80
222 |   type: LoadBalancer
223 | ```
224 | 
225 | > NOTE: You can also use this for manually-provisioned exit nodes.
226 | 
227 | > NOTE: If you do not specify the annotation, the operator will allocate the first available exit node for you.
228 | 
229 | ### Provisioning Chisel manually
230 | 
231 | > NOTE: You can skip this step if you're using the cloud provisioner.
232 | 
233 | Install the Chisel binary on the machine using this script:
234 | 
235 | ```bash
236 | curl https://i.jpillora.com/chisel! | bash
237 | ```
238 | 
239 | **OPTIONAL:** Create a systemd service for Chisel so it can run in the background. Create a file called `/etc/systemd/system/chisel.service` with the following contents:
240 | 
241 | ```ini
242 | [Unit]
243 | Description=Chisel Tunnel
244 | Wants=network-online.target
245 | After=network-online.target
246 | StartLimitIntervalSec=0
247 | 
248 | [Install]
249 | WantedBy=multi-user.target
250 | 
251 | [Service]
252 | Restart=always
253 | RestartSec=1
254 | User=root
255 | # You can add any additional flags here
256 | # This example uses port 9090 for the tunnel socket. `--reverse` is required for our use case.
257 | # `foo:bar` is an example of the authentication credentials.
258 | # The format is `username:password`.
259 | # You may also create an auth file with the `--authfile` flag.
260 | ExecStart=/usr/local/bin/chisel server --port=9090 --reverse --auth foo:bar
261 | ```
262 | 
263 | Then run `systemctl daemon-reload` and `systemctl enable --now chisel.service` to enable and start the service.
264 | 
265 | ## Best Practices
266 | 
267 | You should always secure your Chisel server with a username and password. You can authenticate to the server by creating a secret in the same namespace as the `ExitNode` with a key called `auth`, and setting the `auth` field in the `ExitNode` to the name of the secret. The secret value should be a `username:password` string in plain text.
268 | 
269 | You should use the public IP address of your exit node as the `host` field in the `ExitNode` resource, because the operator does not currently support using a domain name as the `host` field. This will be fixed in the future.
270 | 
271 | ### Exposing services
272 | 
273 | It is recommended that you use an Ingress controller to expose your services. This greatly simplifies the process of exposing other services, as you only need to expose the Ingress controller's HTTP(S) ports.
274 | 
275 | ## How do I contribute?
276 | 
277 | Feel free to open a pull request or an issue if you have any suggestions or improvements. We're open to any ideas!
278 | 
279 | ## Legal
280 | 
281 | Fyra Labs disclaims all liability related to usage of chisel-operator. Please proxy responsibly. See [LICENSE.md](LICENSE.md) for additional details. Contact abuse@fyralabs.com with complaints.
282 | 
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 | 
3 | ## Supported Versions
4 | 
5 | Only the latest version is supported.
6 | 
7 | ## Reporting a Vulnerability
8 | 
9 | Please report vulnerabilities to `security@fyralabs.com` or on GitHub.
10 | -------------------------------------------------------------------------------- /charts/chisel-operator/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /charts/chisel-operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: chisel-operator 3 | description: Kubernetes Operator for deploying Chisel reverse proxies 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 0.1.0 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 23 | # It is recommended to use it with quotes. 24 | appVersion: "v0.6.0-beta.1" 25 | -------------------------------------------------------------------------------- /charts/chisel-operator/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "chisel-operator.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "chisel-operator.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 
28 | */}} 29 | {{- define "chisel-operator.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "chisel-operator.labels" -}} 37 | helm.sh/chart: {{ include "chisel-operator.chart" . }} 38 | {{ include "chisel-operator.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "chisel-operator.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "chisel-operator.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "chisel-operator.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "chisel-operator.fullname" .) .Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /charts/chisel-operator/templates/crds/exit-node-provisioner.yaml: -------------------------------------------------------------------------------- 1 | 2 | 3 | {{- if .Values.createCrds }} 4 | apiVersion: apiextensions.k8s.io/v1 5 | kind: CustomResourceDefinition 6 | metadata: 7 | name: exitnodeprovisioners.chisel-operator.io 8 | spec: 9 | group: chisel-operator.io 10 | names: 11 | categories: [] 12 | kind: ExitNodeProvisioner 13 | plural: exitnodeprovisioners 14 | shortNames: [] 15 | singular: exitnodeprovisioner 16 | scope: Namespaced 17 | versions: 18 | - additionalPrinterColumns: [] 19 | name: v1 20 | schema: 21 | openAPIV3Schema: 22 | description: Auto-generated derived type for ExitNodeProvisionerSpec via `CustomResource` 23 | properties: 24 | spec: 25 | description: ExitNodeProvisioner is a custom resource that represents a Chisel exit node provisioner on a cloud provider. 
26 | oneOf: 27 | - required: 28 | - DigitalOcean 29 | - required: 30 | - Linode 31 | - required: 32 | - AWS 33 | properties: 34 | AWS: 35 | properties: 36 | auth: 37 | description: Reference to a secret containing the AWS access key ID and secret access key, under the `access_key_id` and `secret_access_key` secret keys 38 | type: string 39 | region: 40 | description: Region ID for the AWS region to provision the exit node in See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html 41 | type: string 42 | security_group: 43 | description: Security group name to use for the exit node, uses the default security group if not specified 44 | nullable: true 45 | type: string 46 | size: 47 | default: t2.micro 48 | description: Size for the EC2 instance See https://aws.amazon.com/ec2/instance-types/ 49 | type: string 50 | required: 51 | - auth 52 | - region 53 | type: object 54 | DigitalOcean: 55 | properties: 56 | auth: 57 | description: Reference to a secret containing the DigitalOcean API token, under the `DIGITALOCEAN_TOKEN` secret key 58 | type: string 59 | region: 60 | default: '' 61 | description: Region ID of the DigitalOcean datacenter to provision the exit node in If empty, DigitalOcean will randomly select a region for you, which might not be what you want See https://slugs.do-api.dev/ 62 | type: string 63 | size: 64 | default: s-1vcpu-1gb 65 | description: Size for the DigitalOcean droplet See https://slugs.do-api.dev/ 66 | type: string 67 | ssh_fingerprints: 68 | default: [] 69 | description: SSH key fingerprints to add to the exit node 70 | items: 71 | type: string 72 | type: array 73 | required: 74 | - auth 75 | type: object 76 | Linode: 77 | properties: 78 | auth: 79 | description: Name of the secret containing the Linode API token, under the `LINODE_TOKEN` secret key 80 | type: string 81 | region: 82 | description: Region ID of the Linode datacenter to provision the exit node in See https://api.linode.com/v4/regions 83 | type: string 84 | size: 85 | default: g6-nanode-1 86 | description: Size for the Linode instance See https://api.linode.com/v4/linode/ 87 | type: string 88 | required: 89 | - auth 90 | - region 91 | type: object 92 | type: object 93 | required: 94 | - spec 95 | title: ExitNodeProvisioner 96 | type: object 97 | served: true 98 | storage: true 99 | subresources: {} 100 | 101 | {{- end }} -------------------------------------------------------------------------------- /charts/chisel-operator/templates/crds/exit-node.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.createCrds -}} 2 | 3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: exitnodes.chisel-operator.io 7 | spec: 8 | group: chisel-operator.io 9 | names: 10 | categories: [] 11 | kind: ExitNode 12 | plural: exitnodes 13 | shortNames: [] 14 | singular: exitnode 15 | scope: Namespaced 16 | versions: 17 | - additionalPrinterColumns: [] 18 | name: v1 19 | schema: 20 | openAPIV3Schema: 21 | description: Auto-generated derived type for ExitNodeSpec via `CustomResource` 22 | properties: 23 | spec: 24 | description: ExitNode is a custom resource that represents a Chisel exit node. It will be used as the reverse proxy for all services in the cluster. 
25 | properties: 26 | auth: 27 | description: Optional authentication secret name to connect to the control plane 28 | nullable: true 29 | type: string 30 | chisel_image: 31 | description: Optional value for the chisel client image used to connect to the chisel server If not provided, jpillora/chisel:latest is used 32 | nullable: true 33 | type: string 34 | default_route: 35 | default: false 36 | description: Optional boolean value for whether to make the exit node the default route for the cluster If true, the exit node will be the default route for the cluster default value is false 37 | type: boolean 38 | external_host: 39 | description: Optional real external hostname/IP of exit node If not provided, the host field will be used 40 | nullable: true 41 | type: string 42 | fingerprint: 43 | description: Optional but highly recommended fingerprint to perform host-key validation against the server's public key 44 | nullable: true 45 | type: string 46 | host: 47 | description: Hostname or IP address of the chisel server 48 | type: string 49 | port: 50 | description: Control plane port of the chisel server 51 | format: uint16 52 | minimum: 0.0 53 | type: integer 54 | required: 55 | - host 56 | - port 57 | type: object 58 | status: 59 | nullable: true 60 | properties: 61 | id: 62 | nullable: true 63 | type: string 64 | ip: 65 | type: string 66 | name: 67 | type: string 68 | provider: 69 | type: string 70 | required: 71 | - ip 72 | - name 73 | - provider 74 | type: object 75 | required: 76 | - spec 77 | title: ExitNode 78 | type: object 79 | served: true 80 | storage: true 81 | subresources: 82 | status: {} 83 | 84 | {{- end -}} -------------------------------------------------------------------------------- /charts/chisel-operator/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "chisel-operator.fullname" . }} 5 | labels: 6 | name: {{- include "chisel-operator.labels" . | nindent 4 }} 7 | spec: 8 | replicas: {{ .Values.replicaCount}} 9 | selector: 10 | matchLabels: 11 | {{- include "chisel-operator.selectorLabels" . | nindent 6 }} 12 | template: 13 | metadata: 14 | {{- with .Values.podAnnotations }} 15 | annotations: 16 | {{- toYaml . | nindent 8 }} 17 | {{- end }} 18 | labels: 19 | {{- include "chisel-operator.selectorLabels" . | nindent 8 }} 20 | spec: 21 | {{- if .Values.serviceAccount.create }} 22 | serviceAccountName: {{ include "chisel-operator.serviceAccountName" . }} 23 | automountServiceAccountToken: true # This is required 24 | {{- end }} 25 | {{- with .Values.imagePullSecrets }} 26 | imagePullSecrets: 27 | {{- toYaml . | nindent 8 }} 28 | {{- end }} 29 | containers: 30 | - name: {{ .Chart.Name }} 31 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 32 | imagePullPolicy: {{ .Values.image.pullPolicy }} 33 | resources: 34 | {{- toYaml .Values.resources | nindent 12 }} 35 | 36 | {{- with .Values.nodeSelector }} 37 | nodeSelector: 38 | {{- toYaml . | nindent 8 }} 39 | {{- end }} 40 | {{- with .Values.affinity }} 41 | affinity: 42 | {{- toYaml . | nindent 8 }} 43 | {{- end }} 44 | {{- with .Values.tolerations }} 45 | tolerations: 46 | {{- toYaml . 
| nindent 8 }} 47 | {{- end }} 48 | 49 | -------------------------------------------------------------------------------- /charts/chisel-operator/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "chisel-operator.serviceAccountName" . }} 6 | labels: 7 | {{- include "chisel-operator.labels" . | nindent 4 }} 8 | {{- with .Values.serviceAccount.annotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | --- 13 | apiVersion: rbac.authorization.k8s.io/v1 14 | kind: ClusterRole 15 | metadata: 16 | name: {{ include "chisel-operator.serviceAccountName" . }} 17 | labels: 18 | {{- include "chisel-operator.labels" . | nindent 4 }} 19 | {{- with .Values.serviceAccount.annotations }} 20 | annotations: 21 | {{- toYaml . | nindent 4 }} 22 | {{- end }} 23 | rules: 24 | - apiGroups: ["apps"] 25 | resources: ["deployments", "deployments/*"] 26 | verbs: ["*"] 27 | - apiGroups: [""] 28 | resources: ["services", "services/status", "services/finalizers"] 29 | verbs: ["get", "list", "watch", "update", "patch"] 30 | - apiGroups: ["chisel-operator.io"] 31 | resources: ["*"] 32 | verbs: ["*"] 33 | - apiGroups: [""] 34 | resources: ["secrets"] 35 | verbs: ["get", "list", "create", "update", "patch", "delete"] 36 | 37 | --- 38 | apiVersion: rbac.authorization.k8s.io/v1 39 | kind: ClusterRoleBinding 40 | metadata: 41 | name: {{ include "chisel-operator.serviceAccountName" . }} 42 | labels: 43 | {{- include "chisel-operator.labels" . | nindent 4 }} 44 | {{- with .Values.serviceAccount.annotations }} 45 | annotations: 46 | {{- toYaml . | nindent 4 }} 47 | {{- end }} 48 | subjects: 49 | - kind: ServiceAccount 50 | name: {{ include "chisel-operator.serviceAccountName" . }} 51 | namespace: {{ .Release.Namespace }} 52 | roleRef: 53 | kind: ClusterRole 54 | name: {{ include "chisel-operator.serviceAccountName" . }} 55 | apiGroup: rbac.authorization.k8s.io 56 | 57 | 58 | {{- end }} 59 | -------------------------------------------------------------------------------- /charts/chisel-operator/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for chisel-operator. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 1 # Right now only 1 replica is supported 6 | # LeaderElection and multiple replicas may be supported in the future. 7 | # For now, we recommend running only 1 replica else Chisel Operator may constantly 8 | # recreate resources, wasting your API resources and costing you money. 9 | 10 | image: 11 | repository: ghcr.io/fyralabs/chisel-operator 12 | pullPolicy: IfNotPresent 13 | # Overrides the image tag whose default is the chart appVersion. 14 | # tag: 15 | 16 | imagePullSecrets: [] 17 | nameOverride: "" 18 | fullnameOverride: "" 19 | 20 | serviceAccount: 21 | # Specifies whether a service account should be created 22 | create: true 23 | # Annotations to add to the service account 24 | annotations: {} 25 | # The name of the service account to use. 26 | # If not set and create is true, a name is generated using the fullname template 27 | name: "" 28 | 29 | podAnnotations: {} 30 | 31 | 32 | resources: {} 33 | # We usually recommend not to specify default resources and to leave this as a conscious 34 | # choice for the user. 
This also increases chances charts run on environments with little 35 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 36 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 37 | # limits: 38 | # cpu: 100m 39 | # memory: 128Mi 40 | # requests: 41 | # cpu: 100m 42 | # memory: 128Mi 43 | 44 | nodeSelector: {} 45 | 46 | tolerations: [] 47 | 48 | affinity: {} 49 | 50 | # Create CRDs for Chisel Operator 51 | createCrds: true 52 | -------------------------------------------------------------------------------- /deploy/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: chisel-operator 5 | rules: 6 | - apiGroups: ["apps"] 7 | resources: ["deployments", "deployments/*"] 8 | verbs: ["*"] 9 | - apiGroups: [""] 10 | resources: ["services", "services/status", "services/finalizers"] 11 | verbs: ["get", "list", "watch", "update", "patch"] 12 | - apiGroups: ["chisel-operator.io"] 13 | resources: ["*"] 14 | verbs: ["*"] 15 | - apiGroups: [""] 16 | resources: ["secrets"] 17 | verbs: ["get", "list", "create", "update", "patch", "delete"] 18 | -------------------------------------------------------------------------------- /deploy/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: chisel-operator 5 | subjects: 6 | - kind: ServiceAccount 7 | name: chisel-operator 8 | namespace: chisel-operator-system 9 | roleRef: 10 | kind: ClusterRole 11 | name: chisel-operator 12 | apiGroup: rbac.authorization.k8s.io 13 | -------------------------------------------------------------------------------- /deploy/crd/exit-node-provisioner.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: exitnodeprovisioners.chisel-operator.io 5 | spec: 6 | group: chisel-operator.io 7 | names: 8 | categories: [] 9 | kind: ExitNodeProvisioner 10 | plural: exitnodeprovisioners 11 | shortNames: [] 12 | singular: exitnodeprovisioner 13 | scope: Namespaced 14 | versions: 15 | - additionalPrinterColumns: [] 16 | name: v1 17 | schema: 18 | openAPIV3Schema: 19 | description: Auto-generated derived type for ExitNodeProvisionerSpec via `CustomResource` 20 | properties: 21 | spec: 22 | description: ExitNodeProvisioner is a custom resource that represents a Chisel exit node provisioner on a cloud provider. 
23 | oneOf: 24 | - required: 25 | - DigitalOcean 26 | - required: 27 | - Linode 28 | - required: 29 | - AWS 30 | properties: 31 | AWS: 32 | properties: 33 | auth: 34 | description: Reference to a secret containing the AWS access key ID and secret access key, under the `access_key_id` and `secret_access_key` secret keys 35 | type: string 36 | region: 37 | description: Region ID for the AWS region to provision the exit node in See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html 38 | type: string 39 | security_group: 40 | description: Security group name to use for the exit node, uses the default security group if not specified 41 | nullable: true 42 | type: string 43 | size: 44 | default: t2.micro 45 | description: Size for the EC2 instance See https://aws.amazon.com/ec2/instance-types/ 46 | type: string 47 | required: 48 | - auth 49 | - region 50 | type: object 51 | DigitalOcean: 52 | properties: 53 | auth: 54 | description: Reference to a secret containing the DigitalOcean API token, under the `DIGITALOCEAN_TOKEN` secret key 55 | type: string 56 | region: 57 | default: '' 58 | description: Region ID of the DigitalOcean datacenter to provision the exit node in If empty, DigitalOcean will randomly select a region for you, which might not be what you want See https://slugs.do-api.dev/ 59 | type: string 60 | size: 61 | default: s-1vcpu-1gb 62 | description: Size for the DigitalOcean droplet See https://slugs.do-api.dev/ 63 | type: string 64 | ssh_fingerprints: 65 | default: [] 66 | description: SSH key fingerprints to add to the exit node 67 | items: 68 | type: string 69 | type: array 70 | required: 71 | - auth 72 | type: object 73 | Linode: 74 | properties: 75 | auth: 76 | description: Name of the secret containing the Linode API token, under the `LINODE_TOKEN` secret key 77 | type: string 78 | region: 79 | description: Region ID of the Linode datacenter to provision the exit node in See https://api.linode.com/v4/regions 80 | type: string 81 | size: 82 | default: g6-nanode-1 83 | description: Size for the Linode instance See https://api.linode.com/v4/linode/ 84 | type: string 85 | required: 86 | - auth 87 | - region 88 | type: object 89 | type: object 90 | required: 91 | - spec 92 | title: ExitNodeProvisioner 93 | type: object 94 | served: true 95 | storage: true 96 | subresources: {} 97 | -------------------------------------------------------------------------------- /deploy/crd/exit-node.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: exitnodes.chisel-operator.io 5 | spec: 6 | group: chisel-operator.io 7 | names: 8 | categories: [] 9 | kind: ExitNode 10 | plural: exitnodes 11 | shortNames: [] 12 | singular: exitnode 13 | scope: Namespaced 14 | versions: 15 | - additionalPrinterColumns: [] 16 | name: v1 17 | schema: 18 | openAPIV3Schema: 19 | description: Auto-generated derived type for ExitNodeSpec via `CustomResource` 20 | properties: 21 | spec: 22 | description: ExitNode is a custom resource that represents a Chisel exit node. It will be used as the reverse proxy for all services in the cluster. 
23 | properties: 24 | auth: 25 | description: Optional authentication secret name to connect to the control plane 26 | nullable: true 27 | type: string 28 | chisel_image: 29 | description: Optional value for the chisel client image used to connect to the chisel server If not provided, jpillora/chisel:latest is used 30 | nullable: true 31 | type: string 32 | default_route: 33 | default: false 34 | description: Optional boolean value for whether to make the exit node the default route for the cluster If true, the exit node will be the default route for the cluster default value is false 35 | type: boolean 36 | external_host: 37 | description: Optional real external hostname/IP of exit node If not provided, the host field will be used 38 | nullable: true 39 | type: string 40 | fingerprint: 41 | description: Optional but highly recommended fingerprint to perform host-key validation against the server's public key 42 | nullable: true 43 | type: string 44 | host: 45 | description: Hostname or IP address of the chisel server 46 | type: string 47 | port: 48 | description: Control plane port of the chisel server 49 | format: uint16 50 | minimum: 0.0 51 | type: integer 52 | required: 53 | - host 54 | - port 55 | type: object 56 | status: 57 | nullable: true 58 | properties: 59 | id: 60 | nullable: true 61 | type: string 62 | ip: 63 | type: string 64 | name: 65 | type: string 66 | provider: 67 | type: string 68 | required: 69 | - ip 70 | - name 71 | - provider 72 | type: object 73 | required: 74 | - spec 75 | title: ExitNode 76 | type: object 77 | served: true 78 | storage: true 79 | subresources: 80 | status: {} 81 | -------------------------------------------------------------------------------- /deploy/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - exit-node.yaml 3 | - exit-node-provisioner.yaml 4 | -------------------------------------------------------------------------------- /deploy/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: chisel-operator 5 | namespace: chisel-operator-system 6 | labels: 7 | app: chisel-operator 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: chisel-operator 12 | replicas: 1 13 | template: 14 | metadata: 15 | labels: 16 | app: chisel-operator 17 | spec: 18 | serviceAccountName: chisel-operator 19 | automountServiceAccountToken: true 20 | containers: 21 | - name: chisel-operator 22 | image: ghcr.io/fyralabs/chisel-operator:v0.6.0-beta.1 23 | env: 24 | - name: RUST_LOG 25 | value: "debug" 26 | resources: 27 | requests: 28 | cpu: 100m 29 | memory: 100Mi 30 | limits: 31 | cpu: 100m 32 | memory: 100Mi 33 | -------------------------------------------------------------------------------- /deploy/kustomization.yaml: -------------------------------------------------------------------------------- 1 | 2 | resources: 3 | - ./crd 4 | - namespace.yaml 5 | - clusterrole.yaml 6 | - clusterrolebinding.yaml 7 | - deployment.yaml 8 | - serviceaccount.yaml 9 | -------------------------------------------------------------------------------- /deploy/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: chisel-operator-system 5 | -------------------------------------------------------------------------------- /deploy/serviceaccount.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: chisel-operator 5 | namespace: chisel-operator-system 6 | -------------------------------------------------------------------------------- /example/cloud-provider.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: chisel-operator.io/v1 2 | kind: ExitNodeProvisioner 3 | metadata: 4 | name: digitalocean 5 | namespace: default 6 | spec: 7 | DigitalOcean: 8 | auth: digitalocean 9 | region: sgp1 -------------------------------------------------------------------------------- /example/exit-node-cloud.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: chisel-operator.io/v1 2 | kind: ExitNode 3 | metadata: 4 | name: my-exit-node 5 | namespace: default 6 | annotations: 7 | chisel-operator.io/exit-node-provisioner: "digitalocean" 8 | spec: 9 | # IP address of exit node 10 | host: "" 11 | # Control plane socket port 12 | port: 9090 13 | # Name of the secret containing the auth key 14 | # Create a secret with a key named "auth" and put the value there 15 | # auth: SECRET-NAME 16 | -------------------------------------------------------------------------------- /example/exit-node.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: chisel-operator.io/v1 2 | kind: ExitNode 3 | metadata: 4 | name: my-exit-node 5 | namespace: default 6 | spec: 7 | # IP address of exit node 8 | host: "192.168.1.1" 9 | # Control plane socket port 10 | port: 9090 11 | # Name of the secret containing the auth key 12 | # Create a secret with a key named "auth" and put the value there 13 | # auth: SECRET-NAME 14 | -------------------------------------------------------------------------------- /example/whoami.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: whoami 5 | namespace: default 6 | # annotations: 7 | # chisel-operator.io/exit-node-name: "my-exit-node" 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: whoami 13 | template: 14 | metadata: 15 | labels: 16 | app: whoami 17 | spec: 18 | containers: 19 | - name: whoami 20 | image: containous/whoami 21 | ports: 22 | - containerPort: 80 23 | --- 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | name: whoami 28 | # annotations: 29 | # chisel-operator.io/exit-node-name: "my-exit-node" 30 | spec: 31 | selector: 32 | app: whoami 33 | ports: 34 | - port: 80 35 | targetPort: 80 36 | type: LoadBalancer 37 | 38 | -------------------------------------------------------------------------------- /kustomization.yaml: -------------------------------------------------------------------------------- 1 | namespace: chisel-operator-system 2 | 3 | resources: 4 | - ./deploy -------------------------------------------------------------------------------- /site/.gitignore: -------------------------------------------------------------------------------- 1 | # build output 2 | dist/ 3 | # generated types 4 | .astro/ 5 | 6 | # dependencies 7 | node_modules/ 8 | 9 | # logs 10 | npm-debug.log* 11 | yarn-debug.log* 12 | yarn-error.log* 13 | pnpm-debug.log* 14 | 15 | 16 | # environment variables 17 | .env 18 | .env.production 19 | 20 | # macOS-specific files 21 | .DS_Store 22 | -------------------------------------------------------------------------------- /site/README.md: 
-------------------------------------------------------------------------------- 1 | # Starlight Starter Kit: Basics 2 | 3 | [![Built with Starlight](https://astro.badg.es/v2/built-with-starlight/tiny.svg)](https://starlight.astro.build) 4 | 5 | ``` 6 | npm create astro@latest -- --template starlight 7 | ``` 8 | 9 | [![Open in StackBlitz](https://developer.stackblitz.com/img/open_in_stackblitz.svg)](https://stackblitz.com/github/withastro/starlight/tree/main/examples/basics) 10 | [![Open with CodeSandbox](https://assets.codesandbox.io/github/button-edit-lime.svg)](https://codesandbox.io/p/sandbox/github/withastro/starlight/tree/main/examples/basics) 11 | [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fwithastro%2Fstarlight%2Ftree%2Fmain%2Fexamples%2Fbasics&project-name=my-starlight-docs&repository-name=my-starlight-docs) 12 | 13 | > 🧑‍🚀 **Seasoned astronaut?** Delete this file. Have fun! 14 | 15 | ## 🚀 Project Structure 16 | 17 | Inside of your Astro + Starlight project, you'll see the following folders and files: 18 | 19 | ``` 20 | . 21 | ├── public/ 22 | ├── src/ 23 | │ ├── assets/ 24 | │ ├── content/ 25 | │ │ ├── docs/ 26 | │ │ └── config.ts 27 | │ └── env.d.ts 28 | ├── astro.config.mjs 29 | ├── package.json 30 | └── tsconfig.json 31 | ``` 32 | 33 | Starlight looks for `.md` or `.mdx` files in the `src/content/docs/` directory. Each file is exposed as a route based on its file name. 34 | 35 | Images can be added to `src/assets/` and embedded in Markdown with a relative link. 36 | 37 | Static assets, like favicons, can be placed in the `public/` directory. 38 | 39 | ## 🧞 Commands 40 | 41 | All commands are run from the root of the project, from a terminal: 42 | 43 | | Command | Action | 44 | | :------------------------ | :----------------------------------------------- | 45 | | `npm install` | Installs dependencies | 46 | | `npm run dev` | Starts local dev server at `localhost:4321` | 47 | | `npm run build` | Build your production site to `./dist/` | 48 | | `npm run preview` | Preview your build locally, before deploying | 49 | | `npm run astro ...` | Run CLI commands like `astro add`, `astro check` | 50 | | `npm run astro -- --help` | Get help using the Astro CLI | 51 | 52 | ## 👀 Want to learn more? 53 | 54 | Check out [Starlight’s docs](https://starlight.astro.build/), read [the Astro documentation](https://docs.astro.build), or jump into the [Astro Discord server](https://astro.build/chat). 
55 | -------------------------------------------------------------------------------- /site/astro.config.mjs: -------------------------------------------------------------------------------- 1 | import { defineConfig } from "astro/config"; 2 | import starlight from "@astrojs/starlight"; 3 | 4 | // https://astro.build/config 5 | export default defineConfig({ 6 | site: "https://chisel.fyralabs.com", 7 | integrations: [ 8 | starlight({ 9 | title: "Chisel Operator", 10 | editLink: { 11 | baseUrl: "https://github.com/FyraLabs/chisel-operator/edit/main/site/", 12 | }, 13 | social: { 14 | github: "https://github.com/fyralabs/chisel-operator", 15 | discord: "https://discord.com/invite/5fdPuxTg5Q", 16 | matrix: "https://matrix.to/#/#hub:fyralabs.com", 17 | twitter: "https://twitter.com/teamfyralabs", 18 | mastodon: "https://fedi.fyralabs.com/@hq", 19 | }, 20 | head: [ 21 | { 22 | tag: "script", 23 | attrs: { 24 | src: "https://plausible.fyralabs.com/js/script.js", 25 | "data-domain": "chisel.fyralabs.com", 26 | defer: true, 27 | }, 28 | }, 29 | ], 30 | sidebar: [ 31 | { 32 | label: "Guides", 33 | items: [ 34 | { label: "Installation", link: "/guides/installation/" }, 35 | { 36 | label: "Exposing a Service", 37 | link: "/guides/exposing-a-service/", 38 | }, 39 | { 40 | label: "Self Hosting an Exit Node", 41 | link: "/guides/self-host-exit-node/", 42 | }, 43 | { 44 | label: "Using Cloud Provisioning", 45 | link: "/guides/using-cloud-provisioning/", 46 | }, 47 | ], 48 | }, 49 | { 50 | label: "Reference", 51 | autogenerate: { directory: "reference" }, 52 | }, 53 | { 54 | label: "Cloud Provisioning Reference", 55 | autogenerate: { directory: "cloud" }, 56 | }, 57 | ], 58 | }), 59 | ], 60 | }); 61 | -------------------------------------------------------------------------------- /site/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "site", 3 | "type": "module", 4 | "version": "0.0.1", 5 | "scripts": { 6 | "dev": "astro dev", 7 | "start": "astro dev", 8 | "build": "astro check && astro build", 9 | "preview": "astro preview", 10 | "astro": "astro" 11 | }, 12 | "dependencies": { 13 | "@astrojs/starlight": "^0.21.3", 14 | "astro": "^4.16.18", 15 | "sharp": "^0.33.5", 16 | "@astrojs/check": "^0.8.1", 17 | "typescript": "^5.4.4" 18 | } 19 | } -------------------------------------------------------------------------------- /site/public/favicon.svg: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /site/src/assets/houston.webp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FyraLabs/chisel-operator/8d7a157a74be0c0b48c1ac8e2c7da1884b21aef0/site/src/assets/houston.webp -------------------------------------------------------------------------------- /site/src/content/config.ts: -------------------------------------------------------------------------------- 1 | import { defineCollection } from 'astro:content'; 2 | import { docsSchema, i18nSchema } from '@astrojs/starlight/schema'; 3 | 4 | export const collections = { 5 | docs: defineCollection({ schema: docsSchema() }), 6 | i18n: defineCollection({ type: 'data', schema: i18nSchema() }), 7 | }; 8 | -------------------------------------------------------------------------------- /site/src/content/docs/cloud/aws.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: AWS 
3 | description: A reference page about the AWS exit node provisioner.
4 | ---
5 | 
6 | ## Fields
7 | 
8 | | path           | type                 | description |
9 | | -------------- | -------------------- | ----------- |
10 | | auth           | string               | Reference to a secret containing the AWS access key ID and secret access key, under the `access_key_id` and `secret_access_key` secret keys |
11 | | region         | string               | Region ID for the AWS region to provision the exit node in. See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html |
12 | | security_group | string?              | Security group name to use for the exit node, uses the default security group if not specified |
13 | | size           | string? = "t2.micro" | Size for the EC2 instance. See https://aws.amazon.com/ec2/instance-types/ |
14 | 
15 | ## Examples
16 | 
17 | ```yaml
18 | apiVersion: chisel-operator.io/v1
19 | kind: ExitNodeProvisioner
20 | metadata:
21 |   name: aws-provisioner
22 |   namespace: default
23 | spec:
24 |   AWS:
25 |     auth: aws-auth
26 |     region: us-east-1
27 | ---
28 | apiVersion: v1
29 | kind: Secret
30 | metadata:
31 |   name: aws-auth
32 |   namespace: default
33 | type: Opaque
34 | stringData:
35 |   access_key_id: xxxxx
36 |   secret_access_key: xxxxx
37 | ```
38 | 
--------------------------------------------------------------------------------
/site/src/content/docs/cloud/digitalocean.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: DigitalOcean
3 | description: A reference page about the DigitalOcean exit node provisioner.
4 | ---
5 | 
6 | ## Fields
7 | 
8 | | path             | type                    | description |
9 | | ---------------- | ----------------------- | ----------- |
10 | | auth             | string                  | Reference to a secret containing the DigitalOcean API token, under the `DIGITALOCEAN_TOKEN` secret key |
11 | | region           | string?                 | Region ID of the DigitalOcean datacenter to provision the exit node in. If empty, DigitalOcean will randomly select a region for you, which might not be what you want. See https://slugs.do-api.dev/ |
12 | | size             | string? = "s-1vcpu-1gb" | Size for the DigitalOcean droplet. See https://slugs.do-api.dev/ |
13 | | ssh_fingerprints | string[]? = []          | SSH key fingerprints to add to the exit node |
14 | 
15 | ## Examples
16 | 
17 | ```yaml
18 | apiVersion: chisel-operator.io/v1
19 | kind: ExitNodeProvisioner
20 | metadata:
21 |   name: digitalocean-provisioner
22 |   namespace: default
23 | spec:
24 |   DigitalOcean:
25 |     auth: digitalocean-auth
26 |     region: nyc2
27 | ---
28 | apiVersion: v1
29 | kind: Secret
30 | metadata:
31 |   name: digitalocean-auth
32 |   namespace: default
33 | type: Opaque
34 | stringData:
35 |   DIGITALOCEAN_TOKEN: xxxxx
36 | ```
37 | 
--------------------------------------------------------------------------------
/site/src/content/docs/cloud/linode.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Linode
3 | description: A reference page about the Linode exit node provisioner.
4 | ---
5 | 
6 | ## Fields
7 | 
8 | | path   | type                    | description |
9 | | ------ | ----------------------- | ----------- |
10 | | auth   | string                  | Name of the secret containing the Linode API token, under the `LINODE_TOKEN` secret key |
11 | | region | string                  | Region ID of the Linode datacenter to provision the exit node in. See https://api.linode.com/v4/regions |
12 | | size   | string? = "g6-nanode-1" | Size for the Linode instance. See https://api.linode.com/v4/linode/ |
13 | 
14 | ## Examples
15 | 
16 | ```yaml
17 | apiVersion: chisel-operator.io/v1
18 | kind: ExitNodeProvisioner
19 | metadata:
20 |   name: linode-provisioner
21 |   namespace: default
22 | spec:
23 |   Linode:
24 |     auth: linode-auth
25 |     region: us-east
26 | ---
27 | apiVersion: v1
28 | kind: Secret
29 | metadata:
30 |   name: linode-auth
31 |   namespace: default
32 | type: Opaque
33 | stringData:
34 |   LINODE_TOKEN: xxxxx
35 | ```
36 | 
--------------------------------------------------------------------------------
/site/src/content/docs/guides/exposing-a-service.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Exposing a Service
3 | description: A guide on how to get started with Chisel Operator.
4 | ---
5 | 
6 | Once you have an `ExitNode` or `ExitNodeProvisioner` set up in your cluster, you're ready to begin exposing services!
7 | 
8 | Here's an example service:
9 | 
10 | ```yaml
11 | apiVersion: v1
12 | kind: Service
13 | metadata:
14 |   name: whoami
15 |   # annotations:
16 |   #   chisel-operator.io/exit-node-name: "my-exit-node"
17 | spec:
18 |   selector:
19 |     app: whoami
20 |   ports:
21 |     - port: 80
22 |       targetPort: 80
23 |   type: LoadBalancer
24 | ```
25 | 
26 | As you can see, the type of this service is `LoadBalancer`, which is required for Chisel Operator to pick up the service.
27 | Note that Chisel Operator acts on all LoadBalancer services in the cluster by default.
28 | 
29 | There's also a commented-out annotation, `chisel-operator.io/exit-node-name`.
30 | By default, Chisel Operator will automatically select a random, unused `ExitNode` on the cluster if a cloud provisioner or exit node annotation is not set.
31 | If you'd like to force the service to a particular exit node, you can uncomment the annotation, setting it to the name of the `ExitNode` to target.
32 | 
33 | > As of Chisel Operator 0.4.0, you can force multiple services to use the same exit node by setting the `chisel-operator.io/exit-node-name` annotation to the same value on each service. This lets you group services together on a single exit node, saving resources by running one exit node for multiple services; see the example below.
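Here is a minimal sketch of two services pinned to the same exit node. It assumes an `ExitNode` named `my-exit-node` already exists; the `whoami` and `httpbin` workloads are hypothetical stand-ins for your own Deployments:

```yaml
# Both services set the same exit-node-name, so they share one exit node.
apiVersion: v1
kind: Service
metadata:
  name: whoami
  annotations:
    chisel-operator.io/exit-node-name: "my-exit-node"
spec:
  selector:
    app: whoami
  ports:
    - port: 80
      targetPort: 80
  type: LoadBalancer
---
apiVersion: v1
kind: Service
metadata:
  name: httpbin
  annotations:
    chisel-operator.io/exit-node-name: "my-exit-node"
spec:
  selector:
    app: httpbin
  ports:
    - port: 8080
      targetPort: 8080
  type: LoadBalancer
```

Both services are then tunneled through the same Chisel server, so you only pay for (or maintain) a single exit node.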
34 | 
35 | 
36 | 
37 | Let's look at another example, this time using the automatic cloud provisioning functionality:
38 | 
39 | ```yaml
40 | apiVersion: v1
41 | kind: Service
42 | metadata:
43 |   name: whoami
44 |   annotations:
45 |     chisel-operator.io/exit-node-provisioner: "my-do-provisioner"
46 | spec:
47 |   selector:
48 |     app: whoami
49 |   ports:
50 |     - port: 80
51 |       targetPort: 80
52 |   type: LoadBalancer
53 | ```
54 | 
55 | The only difference in the cloud case is the `chisel-operator.io/exit-node-provisioner` annotation, pointing to the name of the `ExitNodeProvisioner` resource you would like to use.
56 | 
57 | Chisel Operator will automatically use the specified provisioner to create a server in the configured cloud, populating and managing a corresponding `ExitNode` resource in your cluster, which gets assigned to this service.
58 | 
59 | Please note that if the provisioner is in a different namespace than the service resource, you'll have to specify that in the annotation value.
60 | For example, if the provisioner is in the `testing` namespace and has the name `my-do-provisioner`, the annotation value would be: `testing/my-do-provisioner`.
61 | 
62 | That's all for now!
63 | 
--------------------------------------------------------------------------------
/site/src/content/docs/guides/installation.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Installation
3 | description: A guide on how to install Chisel Operator.
4 | ---
5 | 
6 | Getting started with Chisel Operator is easy! We support the current version of Kubernetes, plus the two previous versions. You can install the operator with either Kustomize or Helm.
7 | 
8 | ## Kustomize
9 | Install using the Kustomize config from the stable branch:
10 | 
11 | ```bash
12 | kubectl apply -k https://github.com/FyraLabs/chisel-operator?ref=stable
13 | ```
14 | 
15 | Or if you would like to go straight to the latest commit:
16 | 
17 | ```bash
18 | kubectl apply -k https://github.com/FyraLabs/chisel-operator
19 | ```
20 | 
21 | ## Helm
22 | 
23 | To install using Helm, you can use the Chisel Operator Helm chart from the OCI registry:
24 | 
25 | ```bash
26 | helm install chisel-operator oci://ghcr.io/fyralabs/chisel-operator/chisel-operator
27 | ```
28 | 
29 | You can configure the Helm chart values by creating a `values.yaml` file and passing it to the `helm install` command:
30 | 
31 | ```bash
32 | helm install chisel-operator oci://ghcr.io/fyralabs/chisel-operator/chisel-operator -f values.yaml
33 | ```
34 | 
35 | See [the Helm chart directory](https://github.com/FyraLabs/chisel-operator/tree/main/charts/chisel-operator) for more information on the Helm chart.
36 | 
--------------------------------------------------------------------------------
/site/src/content/docs/guides/self-host-exit-node.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Self-hosting an Exit Node
3 | description: A guide on how you can self-host your own Chisel exit node.
4 | ---
5 | 
6 | First, you'll need a machine where you can run [Chisel](https://github.com/jpillora/chisel), the software that Chisel Operator uses to tunnel to your server.
7 | We assume that you're running a Linux distribution with systemd.
8 | 
9 | To install Chisel, you can use your distribution's Chisel package or the official install script.
10 | For the sake of this guide, we'll be using the install script:
11 | 
12 | ```bash
13 | curl https://i.jpillora.com/chisel! | bash
14 | ```
15 | 
16 | You'll probably want to make a systemd service to manage the Chisel process.
17 | On the system, you can create a file called `/etc/systemd/system/chisel.service` with the following content:
18 | 
19 | ```ini
20 | [Unit]
21 | Description=Chisel Tunnel
22 | Wants=network-online.target
23 | After=network-online.target
24 | StartLimitIntervalSec=0
25 | 
26 | [Install]
27 | WantedBy=multi-user.target
28 | 
29 | [Service]
30 | Restart=always
31 | RestartSec=1
32 | User=root
33 | # You can add any additional flags here
34 | # This example uses port 9090 for the tunnel socket. `--reverse` is required for our use case.
35 | # `foo:bar` is an example of the authentication credentials.
36 | # The format is `username:password`.
37 | # You may also create an auth file with the `--authfile` flag.
38 | ExecStart=/usr/local/bin/chisel server --port=9090 --reverse --auth foo:bar
39 | ```
40 | 
41 | Then run `systemctl daemon-reload` and `systemctl enable --now chisel.service` to enable and start the service. The Chisel server will be accessible on all addresses on port `9090`, although you may need to configure your firewall settings to allow this.
42 | 
43 | Now, we can finally let Chisel Operator know about our exit node by creating a corresponding `ExitNode` resource:
44 | 
45 | ```yaml
46 | apiVersion: chisel-operator.io/v1
47 | kind: ExitNode
48 | metadata:
49 |   name: my-exit-node
50 |   namespace: default
51 | spec:
52 |   # IP address of exit node
53 |   host: "192.168.1.1" # Set to the public IP of your exit node!
54 |   # Control plane socket port
55 |   port: 9090
56 |   # Name of the secret containing the auth key
57 |   # Create a secret with a key named "auth" and put the value there
58 |   auth: my-exit-node-secret
59 | ```
60 | 
61 | We'll also need to create a secret with our credentials:
62 | 
63 | ```yaml
64 | apiVersion: v1
65 | kind: Secret
66 | metadata:
67 |   name: my-exit-node-secret
68 |   namespace: default
69 | type: Opaque
70 | stringData:
71 |   auth: user:password
72 | ```
73 | 
74 | And congratulations, you're ready to start tunneling services! That wasn't too hard, was it?
75 | 
--------------------------------------------------------------------------------
/site/src/content/docs/guides/using-cloud-provisioning.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Using Cloud Provisioning
3 | description: A guide on how you can use cloud provisioning to automatically set up exit nodes.
4 | ---
5 | 
6 | Chisel Operator makes it easy to integrate with your preferred cloud provider using our exit node provisioning functionality.
7 | Let's look at an example of how this works.
8 | 
9 | First, we'll want to create an `ExitNodeProvisioner` resource.
10 | For this guide, I'll be using DigitalOcean, but if you'd like to use a different provider, please look at the reference for the provisioner's config:
11 | 
12 | ```yaml
13 | apiVersion: chisel-operator.io/v1
14 | kind: ExitNodeProvisioner
15 | metadata:
16 |   name: digitalocean
17 |   namespace: default
18 | spec:
19 |   DigitalOcean:
20 |     auth: digitalocean-auth
21 |     region: sgp1
22 | ```
23 | 
24 | Most provisioners will also require a form of authentication.
25 | In the case of DigitalOcean, you need a personal access token with read/write permissions, which can be created in the API tab of the dashboard.
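As an aside, a secret like the one we're about to define can also be created straight from the command line. Here's a sketch of the imperative route, where `xxxxx` is a placeholder for your token:

```bash
# Creates the digitalocean-auth secret with the token under the expected key
kubectl create secret generic digitalocean-auth \
  --namespace default \
  --from-literal=DIGITALOCEAN_TOKEN=xxxxx
```

Either way, the operator only needs the secret to exist with the right key.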
26 | 
27 | Next, we'll create a secret with our token, using the secret key expected for the provisioner, in this case `DIGITALOCEAN_TOKEN`:
28 | 
29 | ```yaml
30 | apiVersion: v1
31 | kind: Secret
32 | metadata:
33 |   name: digitalocean-auth
34 |   namespace: default
35 | type: Opaque
36 | stringData:
37 |   DIGITALOCEAN_TOKEN: xxxxx
38 | ```
39 | 
40 | And that's it, we're ready for provisioning!
41 | 
--------------------------------------------------------------------------------
/site/src/content/docs/index.mdx:
--------------------------------------------------------------------------------
1 | ---
2 | title: Chisel Operator
3 | description: The easiest way to tunnel any Kubernetes cluster to the internet.
4 | template: splash
5 | hero:
6 |   tagline: The easiest way to tunnel any Kubernetes cluster to the internet.
7 |   # image:
8 |   #   file: ../../assets/houston.webp
9 |   actions:
10 |     - text: Get Started
11 |       link: /guides/installation
12 |       icon: right-arrow
13 |       variant: primary
14 |     - text: Join our community
15 |       link: https://discord.com/invite/5fdPuxTg5Q
16 |       icon: external
17 | ---
18 | 
19 | import { Card, CardGrid } from "@astrojs/starlight/components";
20 | 
21 | {/* ## Next steps */}
22 | 
23 | 
24 | 
25 | Chisel Operator integrates with your favorite cloud provider to
26 | automatically create and manage tunnels.
27 | 
28 | 
29 | Chisel Operator is proudly libre and open source software licensed under the
30 | GPLv3.
31 | 
32 | 
33 | Once the initial setup is complete, Chisel Operator handles the rest.
34 | 
35 | 
36 | Whether you're behind CGNAT or an enterprise network, Chisel Operator works
37 | in any environment.
38 | 
39 | 
40 | 
--------------------------------------------------------------------------------
/site/src/content/docs/reference/exitnode.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: ExitNode
3 | description: A reference page about the ExitNode resource.
4 | ---
5 | 
6 | An `ExitNode` is a resource representing a Chisel exit node that the operator can use for tunneling.
7 | It contains the configuration required to connect to the remote Chisel server.
8 | 
9 | ## Fields
10 | 
11 | | path          | type             | description |
12 | | ------------- | ---------------- | ----------- |
13 | | auth          | string?          | Optional authentication secret name to connect to the control plane |
14 | | chisel_image  | string?          | Optional value for the chisel client image used to connect to the chisel server. If not provided, jpillora/chisel:latest is used |
15 | | default_route | boolean? = false | Optional boolean value for whether to make the exit node the default route for the cluster. If true, the exit node will be the default route for the cluster. The default value is false |
16 | | external_host | string?          | Optional real external hostname/IP of exit node. If not provided, the host field will be used |
17 | | fingerprint   | string?          | Optional but highly recommended fingerprint to perform host-key validation against the server's public key |
18 | | host          | string           | Hostname or IP address of the chisel server |
19 | | port          | uint16           | Control plane port of the chisel server |
20 | 
21 | ## Annotations
22 | 
23 | | name                                     | description |
24 | | ---------------------------------------- | ----------- |
25 | | chisel-operator.io/exit-node-provisioner | The exit node provisioner to use to provision this node. Example: "default/my-exit-node-provisioner". Most users won't need to use this unless they want to pre-provision nodes. |
26 | 
27 | ## Examples
28 | 
29 | ```yaml
30 | apiVersion: chisel-operator.io/v1
31 | kind: ExitNode
32 | metadata:
33 |   name: my-exit-node
34 |   namespace: default
35 | spec:
36 |   # IP address of exit node
37 |   host: "192.168.1.1"
38 |   # Control plane socket port
39 |   port: 9090
40 |   # Name of the secret containing the auth key
41 |   # Create a secret with a key named "auth" and put the value there
42 |   # auth: SECRET-NAME
43 | ```
44 | 
45 | ```yaml
46 | apiVersion: chisel-operator.io/v1
47 | kind: ExitNode
48 | metadata:
49 |   name: my-exit-node
50 |   namespace: default
51 |   annotations:
52 |     chisel-operator.io/exit-node-provisioner: "digitalocean"
53 | spec:
54 |   # IP address of exit node
55 |   host: ""
56 |   # Control plane socket port
57 |   port: 9090
58 |   # Name of the secret containing the auth key
59 |   # Create a secret with a key named "auth" and put the value there
60 |   # This value is now required for security reasons; if there's no secret,
61 |   # Chisel Operator will fail.
62 |   auth: SECRET-NAME
63 | ```
64 | 
--------------------------------------------------------------------------------
/site/src/content/docs/reference/exitnodeprovisioner.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: ExitNodeProvisioner
3 | description: A reference page about the ExitNodeProvisioner resource.
4 | ---
5 | 
6 | An `ExitNodeProvisioner` is a resource representing an external provider that Chisel Operator can use to automatically provision exit nodes.
7 | It contains the configuration required to provision nodes on the external provider.
8 | 
9 | ## Fields
10 | 
11 | The fields of the `ExitNodeProvisioner` are dependent on the provider you would like to use.
12 | To see the fields for a provisioner, please see the provider-specific documentation.
13 | 14 | ## Examples 15 | 16 | ```yaml 17 | apiVersion: chisel-operator.io/v1 18 | kind: ExitNodeProvisioner 19 | metadata: 20 | name: digitalocean-provisioner 21 | namespace: default 22 | spec: 23 | DigitalOcean: 24 | auth: digitalocean-auth 25 | region: sgp1 26 | ``` 27 | 28 | ```yaml 29 | apiVersion: chisel-operator.io/v1 30 | kind: ExitNodeProvisioner 31 | metadata: 32 | name: linode-provider 33 | namespace: default 34 | spec: 35 | Linode: 36 | auth: linode-auth 37 | region: us-east 38 | ``` 39 | -------------------------------------------------------------------------------- /site/src/env.d.ts: -------------------------------------------------------------------------------- 1 | /// <reference path="../.astro/types.d.ts" /> 2 | /// <reference types="astro/client" /> 3 | -------------------------------------------------------------------------------- /site/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "astro/tsconfigs/strictest" 3 | } -------------------------------------------------------------------------------- /src/bin/crdgen.rs: -------------------------------------------------------------------------------- 1 | use chisel_operator::ops; 2 | use kube::CustomResourceExt; 3 | use std::fs::File; 4 | use std::io::prelude::*; 5 | 6 | // todo: Make this a cargo xtask, maybe? 7 | 8 | fn main() { 9 | let mut exit_node_provisioner = File::create("deploy/crd/exit-node-provisioner.yaml").unwrap(); 10 | exit_node_provisioner 11 | .write_all( 12 | serde_yaml::to_string(&ops::ExitNodeProvisioner::crd()) 13 | .unwrap() 14 | .as_bytes(), 15 | ) 16 | .unwrap(); 17 | 18 | let mut exit_node = File::create("deploy/crd/exit-node.yaml").unwrap(); 19 | exit_node 20 | .write_all( 21 | serde_yaml::to_string(&ops::ExitNode::crd()) 22 | .unwrap() 23 | .as_bytes(), 24 | ) 25 | .unwrap(); 26 | } 27 | -------------------------------------------------------------------------------- /src/cloud/aws.rs: -------------------------------------------------------------------------------- 1 | use super::{cloud_init::generate_cloud_init_config, Provisioner}; 2 | use crate::{ 3 | cloud::CHISEL_PORT, 4 | ops::{parse_provisioner_label_value, ExitNode, ExitNodeStatus, EXIT_NODE_PROVISIONER_LABEL}, 5 | }; 6 | use async_trait::async_trait; 7 | use aws_config::{BehaviorVersion, Region}; 8 | use aws_sdk_ec2::types::{Tag, TagSpecification}; 9 | use aws_smithy_runtime::client::http::hyper_014::HyperClientBuilder; 10 | use base64::Engine; 11 | use color_eyre::eyre::anyhow; 12 | use k8s_openapi::api::core::v1::Secret; 13 | use kube::ResourceExt; 14 | use schemars::JsonSchema; 15 | use serde::{Deserialize, Serialize}; 16 | use tracing::{debug, warn}; 17 | 18 | const DEFAULT_SIZE: &str = "t2.micro"; 19 | const UBUNTU_AMI_SSM_KEY: &str = 20 | "/aws/service/canonical/ubuntu/server/24.04/stable/current/amd64/hvm/ebs-gp2/ami-id"; 21 | 22 | fn default_size() -> String { 23 | String::from(DEFAULT_SIZE) 24 | } 25 | 26 | #[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] 27 | pub struct AWSProvisioner { 28 | /// Reference to a secret containing the AWS access key ID and secret access key, under the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` secret keys 29 | pub auth: String, 30 | /// Region ID for the AWS region to provision the exit node in 31 | /// See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html 32 | pub region: String, 33 | /// Security group name to use for the exit node, uses the default security group if not specified 34 | pub security_group: Option<String>, 35 | /// Size for the EC2 instance 36 |
/// See https://aws.amazon.com/ec2/instance-types/ 37 | #[serde(default = "default_size")] 38 | pub size: String, 39 | } 40 | 41 | #[derive(Serialize, Deserialize, Debug, Clone)] 42 | pub struct AWSIdentity { 43 | access_key_id: String, 44 | secret_access_key: String, 45 | pub region: String, 46 | } 47 | 48 | impl AWSIdentity { 49 | pub fn new(access_key_id: String, secret_access_key: String, region: String) -> Self { 50 | Self { 51 | access_key_id, 52 | secret_access_key, 53 | region, 54 | } 55 | } 56 | /// Generate an AWS config from the access key ID and secret access key 57 | pub async fn generate_aws_config(self) -> color_eyre::Result<aws_config::SdkConfig> { 58 | // We use our own hyper client and TLS config in order to use webpki-roots instead of the system ones 59 | // This lets us use a sane set of root certificates instead of the ones that come with the OS 60 | let tls_connector = hyper_rustls::HttpsConnectorBuilder::new() 61 | .with_webpki_roots() 62 | .https_only() 63 | .enable_http1() 64 | .enable_http2() 65 | .build(); 66 | 67 | let hyper_client = HyperClientBuilder::new().build(tls_connector); 68 | 69 | // set access key id and secret access key as environment variables 70 | std::env::set_var("AWS_ACCESS_KEY_ID", &self.access_key_id); 71 | std::env::set_var("AWS_SECRET_ACCESS_KEY", &self.secret_access_key); 72 | let region: String = self.region.clone(); 73 | Ok(aws_config::defaults(BehaviorVersion::latest()) 74 | .region(Region::new(region)) 75 | .http_client(hyper_client) 76 | .load() 77 | .await) 78 | } 79 | 80 | // This code is very unholy, but thanks to Jeff Bezos for making the AWS SDK so complicated 81 | pub fn from_secret(secret: &Secret, region: String) -> color_eyre::Result<Self> { 82 | let aws_access_key_id: String = secret 83 | .data 84 | .as_ref() 85 | .and_then( 86 | |f: &std::collections::BTreeMap<String, k8s_openapi::ByteString>| { 87 | f.get("AWS_ACCESS_KEY_ID") 88 | }, 89 | ) 90 | .ok_or_else(|| anyhow!("AWS_ACCESS_KEY_ID not found in secret")) 91 | // into String 92 | .and_then(|key| String::from_utf8(key.clone().0.to_vec()).map_err(|e| e.into()))?; 93 | 94 | let aws_secret_access_key: String = secret 95 | .data 96 | .as_ref() 97 | .and_then(|f| f.get("AWS_SECRET_ACCESS_KEY")) 98 | .ok_or_else(|| anyhow!("AWS_SECRET_ACCESS_KEY not found in secret")) 99 | .and_then(|key| String::from_utf8(key.clone().0.to_vec()).map_err(|e| e.into()))?; 100 | 101 | Ok(Self { 102 | access_key_id: aws_access_key_id, 103 | secret_access_key: aws_secret_access_key, 104 | region, 105 | }) 106 | } 107 | } 108 | 109 | #[async_trait] 110 | impl Provisioner for AWSProvisioner { 111 | async fn create_exit_node( 112 | &self, 113 | auth: Secret, 114 | exit_node: ExitNode, 115 | node_password: String, 116 | ) -> color_eyre::Result<ExitNodeStatus> { 117 | let provisioner = exit_node 118 | .metadata 119 | .annotations 120 | .as_ref() 121 | .and_then(|annotations| annotations.get(EXIT_NODE_PROVISIONER_LABEL)) 122 | .ok_or_else(|| { 123 | anyhow!( 124 | "No provisioner found in annotations for exit node {}", 125 | exit_node.metadata.name.as_ref().unwrap() 126 | ) 127 | })?; 128 | 129 | let cloud_init_config = generate_cloud_init_config(&node_password, CHISEL_PORT); 130 | let user_data = base64::engine::general_purpose::STANDARD.encode(cloud_init_config); 131 | 132 | let aws_api: aws_config::SdkConfig = AWSIdentity::from_secret(&auth, self.region.clone())?
133 | .generate_aws_config() 134 | .await?; 135 | 136 | let ssm_client = aws_sdk_ssm::Client::new(&aws_api); 137 | let parameter_response = ssm_client 138 | .get_parameter() 139 | .name(UBUNTU_AMI_SSM_KEY) 140 | .send() 141 | .await?; 142 | let ami = parameter_response.parameter.unwrap().value.unwrap(); 143 | 144 | let ec2_client = aws_sdk_ec2::Client::new(&aws_api); 145 | 146 | let current_namespace = exit_node.namespace().unwrap(); 147 | let (_provisioner_namespace, provsioner_name) = 148 | parse_provisioner_label_value(&current_namespace, provisioner); 149 | 150 | let name = format!( 151 | "{}-{}", 152 | provsioner_name, 153 | exit_node.metadata.name.as_ref().unwrap() 154 | ); 155 | 156 | let tag_specification = TagSpecification::builder() 157 | .resource_type("instance".into()) 158 | .tags(Tag::builder().key("Name").value(name.clone()).build()) 159 | .build(); 160 | 161 | let mut instance_builder = ec2_client 162 | .run_instances() 163 | .tag_specifications(tag_specification) 164 | .image_id(ami) 165 | .instance_type(self.size.as_str().into()) 166 | .min_count(1) 167 | .max_count(1) 168 | .user_data(&user_data); 169 | 170 | if let Some(security_group) = &self.security_group { 171 | instance_builder = instance_builder.security_group_ids(security_group); 172 | } 173 | 174 | let instance_response = instance_builder.send().await?; 175 | 176 | let instance = instance_response 177 | .instances 178 | .unwrap() 179 | .into_iter() 180 | .next() 181 | .unwrap(); 182 | 183 | // TODO: Refactor this to run on a reconcile update instead 184 | let public_ip = loop { 185 | let describe_response = ec2_client 186 | .describe_instances() 187 | .instance_ids(instance.instance_id.clone().unwrap()) 188 | .send() 189 | .await?; 190 | let reservation = describe_response 191 | .reservations 192 | .unwrap() 193 | .into_iter() 194 | .next() 195 | .unwrap(); 196 | let instance = reservation.instances.unwrap().into_iter().next().unwrap(); 197 | 198 | debug!(?instance, "Getting instance data"); 199 | 200 | if let Some(ip) = instance.public_ip_address { 201 | break ip; 202 | } else { 203 | warn!("Waiting for instance to get IP address"); 204 | tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; 205 | } 206 | }; 207 | 208 | // let exit_node = ExitNodeStatus { 209 | // name: name.clone(), 210 | // ip: public_ip, 211 | // id: Some(instance.instance_id.unwrap()), 212 | // provider: provisioner.clone(), 213 | // service_binding: vec![], 214 | // }; 215 | let exit_node = ExitNodeStatus::new( 216 | provisioner.clone(), 217 | name.clone(), 218 | public_ip, 219 | // needless conversion? 220 | // todo: Clean this up, minor performance hit 221 | instance.instance_id, 222 | ); 223 | 224 | Ok(exit_node) 225 | } 226 | 227 | async fn update_exit_node( 228 | &self, 229 | auth: Secret, 230 | exit_node: ExitNode, 231 | node_password: String, 232 | ) -> color_eyre::Result<ExitNodeStatus> { 233 | let aws_api: aws_config::SdkConfig = AWSIdentity::from_secret(&auth, self.region.clone())?
234 | .generate_aws_config() 235 | .await?; 236 | let ec2_client = aws_sdk_ec2::Client::new(&aws_api); 237 | 238 | let node = exit_node.clone(); 239 | 240 | if let Some(ref status) = &exit_node.status { 241 | let instance_id = status.id.as_ref().ok_or_else(|| { 242 | anyhow!( 243 | "No instance ID found in status for exit node {}", 244 | node.metadata.name.as_ref().unwrap() 245 | ) 246 | })?; 247 | 248 | let describe_response = ec2_client 249 | .describe_instances() 250 | .instance_ids(instance_id) 251 | .send() 252 | .await?; 253 | let reservation = describe_response 254 | .reservations 255 | .unwrap() 256 | .into_iter() 257 | .next() 258 | .unwrap(); 259 | let instance = reservation.instances.unwrap().into_iter().next().unwrap(); 260 | 261 | let mut status = status.clone(); 262 | 263 | if let Some(ip) = instance.public_ip_address { 264 | status.ip = ip; 265 | } 266 | 267 | Ok(status) 268 | } else { 269 | warn!("No status found for exit node, creating new instance"); 270 | // TODO: this should be handled by the controller logic 271 | return self.create_exit_node(auth, exit_node, node_password).await; 272 | } 273 | } 274 | 275 | async fn delete_exit_node(&self, auth: Secret, exit_node: ExitNode) -> color_eyre::Result<()> { 276 | let aws_api: aws_config::SdkConfig = AWSIdentity::from_secret(&auth, self.region.clone())? 277 | .generate_aws_config() 278 | .await?; 279 | let ec2_client = aws_sdk_ec2::Client::new(&aws_api); 280 | 281 | let instance_id = exit_node 282 | .status 283 | .as_ref() 284 | .and_then(|status| status.id.as_ref()); 285 | 286 | if let Some(instance_id) = instance_id { 287 | ec2_client 288 | .terminate_instances() 289 | .instance_ids(instance_id) 290 | .send() 291 | .await?; 292 | } 293 | 294 | Ok(()) 295 | } 296 | } 297 | -------------------------------------------------------------------------------- /src/cloud/cloud_init.rs: -------------------------------------------------------------------------------- 1 | pub fn generate_cloud_init_config(auth_string: &str, port: u16) -> String { 2 | let cloud_config = serde_json::json!({ 3 | "runcmd": ["curl https://i.jpillora.com/chisel! | bash", "systemctl enable --now chisel"], 4 | "write_files": [{ 5 | "path": "/etc/systemd/system/chisel.service", 6 | "content": format!(r#" 7 | [Unit] 8 | Description=Chisel Tunnel 9 | Wants=network-online.target 10 | After=network-online.target 11 | StartLimitIntervalSec=0 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | 16 | [Service] 17 | Restart=always 18 | RestartSec=1 19 | User=root 20 | # You can add any additional flags here 21 | # This example uses port 9090 for the tunnel socket. `--reverse` is required for our use case. 
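# --auth expects a user:password pair; the generated node password is filled in below.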
22 | ExecStart=/usr/local/bin/chisel server --port={port} --reverse --auth {auth_string} 23 | # Additional .env file for auth and secrets 24 | EnvironmentFile=-/etc/sysconfig/chisel 25 | PassEnvironment=AUTH 26 | "#) 27 | }, { 28 | "path": "/etc/sysconfig/chisel", 29 | "content": format!("AUTH={auth_string}\n") 30 | }] 31 | }); 32 | 33 | "#cloud-config\n".to_string() + &cloud_config.to_string() 34 | } 35 | 36 | #[test] 37 | fn test_generate_cloud_init_config() { 38 | let password = "chisel:test"; 39 | let config = generate_cloud_init_config(password, 9090); 40 | println!("{}", config); 41 | assert!(config.contains("AUTH=chisel:test")); 42 | assert!(config.contains( 43 | "ExecStart=/usr/local/bin/chisel server --port=9090 --reverse --auth chisel:test" 44 | )); 45 | } 46 | -------------------------------------------------------------------------------- /src/cloud/digitalocean.rs: -------------------------------------------------------------------------------- 1 | use super::{cloud_init::generate_cloud_init_config, Provisioner}; 2 | use crate::ops::{ 3 | parse_provisioner_label_value, ExitNode, ExitNodeStatus, EXIT_NODE_PROVISIONER_LABEL, 4 | }; 5 | use async_trait::async_trait; 6 | use color_eyre::eyre::{anyhow, Error}; 7 | use digitalocean_rs::DigitalOceanApi; 8 | use k8s_openapi::api::core::v1::Secret; 9 | use kube::ResourceExt; 10 | use schemars::JsonSchema; 11 | use serde::{Deserialize, Serialize}; 12 | use tracing::{debug, info, warn}; 13 | 14 | const DROPLET_SIZE: &str = "s-1vcpu-1gb"; 15 | const DROPLET_IMAGE: &str = "ubuntu-24-04-x64"; 16 | 17 | const TOKEN_KEY: &str = "DIGITALOCEAN_TOKEN"; 18 | 19 | #[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] 20 | pub struct DigitalOceanProvisioner { 21 | /// Region ID of the DigitalOcean datacenter to provision the exit node in 22 | /// If empty, DigitalOcean will randomly select a region for you, which might not be what you want 23 | /// See https://slugs.do-api.dev/ 24 | #[serde(default)] 25 | pub region: String, 26 | /// Reference to a secret containing the DigitalOcean API token, under the `DIGITALOCEAN_TOKEN` secret key 27 | pub auth: String, 28 | /// SSH key fingerprints to add to the exit node 29 | #[serde(default)] 30 | pub ssh_fingerprints: Vec<String>, 31 | /// Size for the DigitalOcean droplet 32 | /// See https://slugs.do-api.dev/ 33 | #[serde(default = "default_size")] 34 | pub size: String, 35 | } 36 | 37 | fn default_size() -> String { 38 | String::from(DROPLET_SIZE) 39 | } 40 | 41 | // each provider must support create, update, delete operations 42 | 43 | impl DigitalOceanProvisioner { 44 | // gets token from Secret 45 | pub async fn get_token(&self, secret: Secret) -> color_eyre::Result<String> { 46 | let data = secret 47 | .data 48 | .ok_or_else(|| Error::msg("No data found in secret"))?; 49 | let token = data 50 | .get(TOKEN_KEY) 51 | .ok_or_else(|| Error::msg("No token found in secret"))?; 52 | 53 | let token = String::from_utf8(token.clone().0)?; 54 | Ok(token) 55 | } 56 | } 57 | 58 | #[async_trait] 59 | impl Provisioner for DigitalOceanProvisioner { 60 | async fn create_exit_node( 61 | &self, 62 | auth: Secret, 63 | exit_node: ExitNode, 64 | node_password: String, 65 | ) -> color_eyre::Result<ExitNodeStatus> { 66 | let config = generate_cloud_init_config(&node_password, exit_node.spec.port); 67 | 68 | // TODO: Secret reference, not plaintext 69 | let api: DigitalOceanApi = DigitalOceanApi::new(self.get_token(auth).await?); 70 | 71 | // get exit node provisioner from label 72 | 73 | let provisioner = exit_node 74 | .metadata 75 | .annotations
76 | .as_ref() 77 | .and_then(|annotations| annotations.get(EXIT_NODE_PROVISIONER_LABEL)) 78 | .ok_or_else(|| { 79 | anyhow!( 80 | "No provisioner found in annotations for exit node {}", 81 | exit_node.metadata.name.as_ref().unwrap() 82 | ) 83 | })?; 84 | 85 | let current_namespace = exit_node.namespace().unwrap(); 86 | let (_provisioner_namespace, provsioner_name) = 87 | parse_provisioner_label_value(&current_namespace, provisioner); 88 | 89 | let name = format!( 90 | "{}-{}", 91 | provsioner_name, 92 | exit_node.metadata.name.as_ref().unwrap() 93 | ); 94 | 95 | let droplet = { 96 | let mut droplet = api 97 | .create_droplet(&name, &self.size, DROPLET_IMAGE) 98 | .user_data(&config) 99 | .ssh_keys(self.ssh_fingerprints.clone()) 100 | .tags(vec![format!("chisel-operator-provisioner:{}", provisioner)]); 101 | 102 | if !self.region.is_empty() { 103 | droplet = droplet.region(&self.region); 104 | } 105 | 106 | droplet.run_async().await? 107 | }; 108 | 109 | // now that we finally got the thing, keep polling until it has an IP address 110 | 111 | let droplet_id = droplet.id.to_string(); 112 | 113 | let droplet_ip = loop { 114 | let droplet = api.get_droplet_async(&droplet_id).await?; 115 | 116 | debug!(?droplet, "Getting droplet data"); 117 | 118 | if let Some(droplet_public_net) = 119 | droplet.networks.v4.iter().find(|net| net.ntype == "public") 120 | { 121 | let droplet_ip = droplet_public_net.ip_address.clone(); 122 | break droplet_ip; 123 | } else { 124 | warn!("Waiting for droplet to get IP address"); 125 | tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; 126 | } 127 | }; 128 | 129 | let exit_node = ExitNodeStatus::new( 130 | provisioner.clone(), 131 | name.clone(), 132 | droplet_ip.clone(), 133 | Some(droplet_id), 134 | ); 135 | 136 | debug!(?exit_node, "Created exit node!!"); 137 | 138 | Ok(exit_node) 139 | } 140 | 141 | async fn update_exit_node( 142 | &self, 143 | auth: Secret, 144 | exit_node: ExitNode, 145 | node_password: String, 146 | ) -> color_eyre::Result<ExitNodeStatus> { 147 | // check if droplet exists, then update it 148 | let api: DigitalOceanApi = DigitalOceanApi::new(self.get_token(auth.clone()).await?); 149 | let node = exit_node.clone(); 150 | 151 | if let Some(ref status) = &exit_node.status { 152 | let droplet_id = status.id.as_ref().ok_or_else(|| { 153 | anyhow!( 154 | "No droplet ID found in status for exit node {}", 155 | node.metadata.name.as_ref().unwrap() 156 | ) 157 | })?; 158 | 159 | let droplet = api.get_droplet_async(droplet_id).await?; 160 | 161 | let mut status = status.clone(); 162 | 163 | if let Some(ip) = droplet.networks.v4.iter().find(|net| net.ntype == "public") { 164 | status.ip.clone_from(&ip.ip_address) 165 | } 166 | 167 | Ok(status) 168 | } else { 169 | warn!("No status found for exit node, creating new droplet"); 170 | // TODO: this should be handled by the controller logic 171 | return self.create_exit_node(auth, exit_node, node_password).await; 172 | } 173 | } 174 | 175 | async fn delete_exit_node(&self, auth: Secret, exit_node: ExitNode) -> color_eyre::Result<()> { 176 | // do nothing if no status, or no id, or droplet doesn't exist 177 | let api: DigitalOceanApi = DigitalOceanApi::new(self.get_token(auth).await?); 178 | let droplet_id = exit_node 179 | .status 180 | .as_ref() 181 | .and_then(|status| status.id.as_ref()); 182 | 183 | if let Some(droplet_id) = droplet_id { 184 | info!("Deleting droplet with ID {}", droplet_id); 185 | api.delete_droplet_async(droplet_id).await?; 186 | } 187 | Ok(()) 188 | } 189 | } 190 |
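As a quick sanity check of the token parsing above, a minimal unit test sketch might look like this (hypothetical; it assumes `tokio`'s `macros` feature is available for `#[tokio::test]`, and builds the `Secret` by hand):

```rust
#[cfg(test)]
mod tests {
    use super::*;
    use k8s_openapi::ByteString;
    use std::collections::BTreeMap;

    #[tokio::test]
    async fn get_token_reads_the_digitalocean_token_key() {
        let provisioner = DigitalOceanProvisioner {
            region: String::new(),
            auth: "digitalocean-auth".to_string(),
            ssh_fingerprints: vec![],
            size: default_size(),
        };

        // Store the token as raw bytes under DIGITALOCEAN_TOKEN, the same way
        // the cluster would serve it back from the Secret created in the docs
        let mut data = BTreeMap::new();
        data.insert(
            "DIGITALOCEAN_TOKEN".to_string(),
            ByteString(b"xxxxx".to_vec()),
        );
        let secret = Secret {
            data: Some(data),
            ..Default::default()
        };

        assert_eq!(provisioner.get_token(secret).await.unwrap(), "xxxxx");
    }
}
```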
-------------------------------------------------------------------------------- /src/cloud/linode.rs: -------------------------------------------------------------------------------- 1 | use super::{cloud_init::generate_cloud_init_config, Provisioner}; 2 | use crate::ops::{ 3 | parse_provisioner_label_value, ExitNode, ExitNodeStatus, EXIT_NODE_PROVISIONER_LABEL, 4 | }; 5 | use async_trait::async_trait; 6 | use base64::Engine; 7 | use color_eyre::eyre::{anyhow, Error}; 8 | use k8s_openapi::api::core::v1::Secret; 9 | use kube::ResourceExt; 10 | use linode_rs::LinodeApi; 11 | use schemars::JsonSchema; 12 | use serde::{Deserialize, Serialize}; 13 | use tracing::{debug, info, warn}; 14 | 15 | const TOKEN_KEY: &str = "LINODE_TOKEN"; 16 | const INSTANCE_TYPE: &str = "g6-nanode-1"; 17 | const IMAGE_ID: &str = "linode/ubuntu24.04"; 18 | 19 | fn default_size() -> String { 20 | String::from(INSTANCE_TYPE) 21 | } 22 | #[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] 23 | pub struct LinodeProvisioner { 24 | /// Name of the secret containing the Linode API token, under the `LINODE_TOKEN` secret key 25 | pub auth: String, 26 | /// Region ID of the Linode datacenter to provision the exit node in 27 | /// See https://api.linode.com/v4/regions 28 | pub region: String, 29 | /// Size for the Linode instance 30 | /// See https://api.linode.com/v4/linode/ 31 | #[serde(default = "default_size")] 32 | pub size: String, 33 | } 34 | 35 | impl LinodeProvisioner { 36 | // gets token from Secret 37 | pub async fn get_token(&self, secret: &Secret) -> color_eyre::Result<String> { 38 | let data = secret 39 | .data 40 | .clone() 41 | .ok_or_else(|| Error::msg("No data found in secret"))?; 42 | let token = data 43 | .get(TOKEN_KEY) 44 | .ok_or_else(|| Error::msg("No token found in secret"))?; 45 | 46 | let token = String::from_utf8(token.clone().0)?; 47 | Ok(token) 48 | } 49 | } 50 | 51 | #[async_trait] 52 | impl Provisioner for LinodeProvisioner { 53 | async fn create_exit_node( 54 | &self, 55 | auth: Secret, 56 | exit_node: ExitNode, 57 | chisel_auth_string: String, 58 | ) -> color_eyre::Result<ExitNodeStatus> { 59 | let config = generate_cloud_init_config(&chisel_auth_string, exit_node.spec.port); 60 | 61 | // Okay, so apparently Linode uses base64 for user_data, so let's 62 | // base64 encode the config 63 | 64 | let user_data = base64::engine::general_purpose::STANDARD.encode(config); 65 | 66 | let api = LinodeApi::new(self.get_token(&auth).await?); 67 | 68 | let provisioner = exit_node 69 | .metadata 70 | .annotations 71 | .as_ref() 72 | .and_then(|annotations| annotations.get(EXIT_NODE_PROVISIONER_LABEL)) 73 | .unwrap(); 74 | 75 | let current_namespace = exit_node.namespace().unwrap(); 76 | let (_provisioner_namespace, provsioner_name) = 77 | parse_provisioner_label_value(&current_namespace, provisioner); 78 | 79 | let name: String = format!( 80 | "{}-{}", 81 | provsioner_name, 82 | exit_node.metadata.name.as_ref().unwrap() 83 | ); 84 | 85 | // Since we now directly pass in the chisel auth string with the `chisel:` prefix, let's remove the prefix 86 | let root_password = chisel_auth_string 87 | .strip_prefix("chisel:") 88 | .unwrap_or(&chisel_auth_string); 89 | 90 | let mut instance = api 91 | .create_instance(&self.region, &self.size) 92 | .root_pass(root_password) 93 | .label(&name) 94 | .user_data(&user_data) 95 | .tags(vec![format!("chisel-operator-provisioner:{}", provisioner)]) 96 | .image(IMAGE_ID) 97 | .booted(true) 98 | .run_async() 99 | .await?; 100 | 101 | info!("Created instance: {:?}", instance); 102 | 103 | let
instance_ip = loop { 104 | instance = api.get_instance_async(instance.id).await?; 105 | 106 | debug!("Instance status: {:?}", instance.status); 107 | 108 | if !instance.ipv4.is_empty() { 109 | break instance.ipv4[0].clone(); 110 | } else { 111 | warn!("Waiting for instance to get IP address"); 112 | tokio::time::sleep(tokio::time::Duration::from_secs(5)).await; 113 | } 114 | }; 115 | 116 | // let status = ExitNodeStatus { 117 | // ip: instance_ip, 118 | // name: instance.label, 119 | // provider: provisioner.to_string(), 120 | // id: Some(instance.id.to_string()), 121 | // service_binding: vec![], 122 | // }; 123 | 124 | let status = ExitNodeStatus::new( // argument order is (provider, name, ip, id), matching the other provisioners 125 | provisioner.to_string(), 126 | instance.label, 127 | instance_ip, 128 | Some(instance.id.to_string()), 129 | ); 130 | 131 | Ok(status) 132 | } 133 | 134 | async fn delete_exit_node(&self, auth: Secret, exit_node: ExitNode) -> color_eyre::Result<()> { 135 | let api = LinodeApi::new(self.get_token(&auth).await?); 136 | 137 | let instance_id = exit_node 138 | .status 139 | .as_ref() 140 | .and_then(|status| status.id.as_ref()) 141 | .and_then(|id| id.parse::<u64>().ok()); 142 | 143 | // okay, so Linode IDs will be u64, so let's parse it 144 | 145 | if let Some(instance_id) = instance_id { 146 | info!("Deleting Linode instance with ID {}", instance_id); 147 | api.delete_instance_async(instance_id).await?; 148 | } 149 | 150 | Ok(()) 151 | } 152 | 153 | async fn update_exit_node( 154 | &self, 155 | auth: Secret, 156 | exit_node: ExitNode, 157 | node_password: String, 158 | ) -> color_eyre::Result<ExitNodeStatus> { 159 | let api = LinodeApi::new(self.get_token(&auth).await?); 160 | 161 | if let Some(status) = exit_node.status { 162 | let instance_id = status 163 | .id 164 | .as_ref() 165 | .and_then(|id| id.parse::<u64>().ok()) 166 | .ok_or_else(|| { 167 | anyhow!( 168 | "No instance ID found in status for exit node {}", 169 | exit_node.metadata.name.as_ref().unwrap() 170 | ) 171 | })?; 172 | 173 | let instance = api.get_instance_async(instance_id).await; 174 | 175 | let mut status = status.clone(); 176 | 177 | if let Some(ip) = instance?.ipv4.first() { 178 | status.ip.clone_from(ip); 179 | } 180 | 181 | Ok(status) 182 | } else { 183 | warn!("No instance status found, creating new instance"); 184 | return self 185 | .create_exit_node(auth.clone(), exit_node, node_password) 186 | .await; 187 | } 188 | } 189 | } 190 | -------------------------------------------------------------------------------- /src/cloud/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::ops::ExitNode; 2 | use crate::ops::ExitNodeStatus; 3 | use async_trait::async_trait; 4 | use k8s_openapi::api::core::v1::Secret; 5 | use schemars::JsonSchema; 6 | use serde::{Deserialize, Serialize}; 7 | 8 | pub mod aws; 9 | mod cloud_init; 10 | pub mod digitalocean; 11 | pub mod linode; 12 | pub mod pwgen; 13 | 14 | #[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] 15 | pub enum CloudProvider { 16 | DigitalOcean, 17 | Linode, 18 | AWS, 19 | } 20 | 21 | // This code was actually used, weirdly enough 22 | pub const CHISEL_PORT: u16 = 9090; 23 | 24 | #[async_trait] 25 | pub trait Provisioner { 26 | async fn create_exit_node( 27 | &self, 28 | auth: Secret, 29 | exit_node: ExitNode, 30 | node_password: String, 31 | ) -> color_eyre::Result<ExitNodeStatus>; 32 | async fn update_exit_node( 33 | &self, 34 | auth: Secret, 35 | exit_node: ExitNode, 36 | node_password: String, 37 | ) -> color_eyre::Result<ExitNodeStatus>; 38 | async fn delete_exit_node(&self, auth: Secret, exit_node: ExitNode)
-> color_eyre::Result<()>; 39 | } 40 | 41 | // Each LB service binds to an exit node, which will be a many-to-one relationship 42 | // An LB can annotate a specific exit node to bind to, or it can specify a provider to automatically provision a new exit node 43 | // if no specific exit node is specified and a provider is not specified, then the first exit node returned by the K8S API will be used 44 | // but if provider is specified, then a new exit node will be provisioned on that provider 45 | // A provisioner can have many exit nodes that it manages 46 | // each exit node can be manually managed or automatically managed by a provisioner 47 | // you can request a new exit node from a provisioner by simply creating a LB service without specifying a specific exit node 48 | // or you can create a new managed exit node 49 | 50 | // Take LB1 which has annotation chisel-operator.io/cloud-provisioner: do 51 | // Take LB2 which has annotation chisel-operator.io/cloud-provisioner: do ON A DIFFERENT PORT 52 | // what if I want to use the same exit node for both LB1 and LB2? 53 | // maybe we can introduce a new annotation chisel-operator.io/cloud-exit-node: 54 | // if two LBs have the same cloud-exit-node annotation, then they will use the same exit node, WHEN THE PROVISIONER IS THE SAME 55 | -------------------------------------------------------------------------------- /src/cloud/pwgen.rs: -------------------------------------------------------------------------------- 1 | use rand::Rng; 2 | 3 | pub const DEFAULT_USERNAME: &str = "chisel"; 4 | 5 | const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\ 6 | abcdefghijklmnopqrstuvwxyz\ 7 | 0123456789-_/()*&#@"; 8 | /// Generates a random password of the specified length. 9 | /// 10 | /// # Arguments 11 | /// 12 | /// * `length` - The length of the password to generate. 13 | /// 14 | /// # Returns 15 | /// 16 | /// A randomly generated password as a `String`. 17 | pub fn generate_password(length: usize) -> String { 18 | let mut rng = rand::thread_rng(); 19 | 20 | let password: String = (0..length) 21 | .map(|_| { 22 | let idx = rng.gen_range(0..CHARSET.len()); 23 | CHARSET[idx] as char 24 | }) 25 | .collect(); 26 | 27 | password 28 | } 29 | -------------------------------------------------------------------------------- /src/daemon.rs: -------------------------------------------------------------------------------- 1 | // Daemon module 2 | // watch for changes in all LoadBalancer services and update the IP addresses 3 | 4 | /* 5 | notes: 6 | so the way this works is that the user deploys a ExitNodeProvisioner resource 7 | and then set an annotation on the service to use that provisioner 8 | the chisel operator will then watch for that annotation and then create a new exit node 9 | for that service 10 | the exit node will then be annotated with the name of the service 11 | if the service is deleted, the exit node will also be deleted, and the actual cloud resource will also be deleted 12 | 13 | honestly this whole logic is kinda confusing but I don't know how to make it less clunky 14 | 15 | 16 | There can also be a case where the user creates an exit node manually, 17 | with the provisioner annotation set, in that case chisel operator will 18 | create a cloud resource for that exit node and manages it. 
19 | 20 | todo: properly handle all this logic 21 | 22 | todo: use `tracing` and put every operation in a span to make debugging easier 23 | */ 24 | 25 | use color_eyre::Result; 26 | use futures::{FutureExt, StreamExt}; 27 | use k8s_openapi::api::{ 28 | apps::v1::Deployment, 29 | core::v1::{LoadBalancerIngress, LoadBalancerStatus, Secret, Service, ServiceStatus}, 30 | }; 31 | use kube::{ 32 | api::{Api, ListParams, Patch, PatchParams, ResourceExt}, 33 | core::ObjectMeta, 34 | error::ErrorResponse, 35 | runtime::{ 36 | controller::Action, 37 | finalizer::{self, Event}, 38 | reflector::ObjectRef, 39 | watcher::{self, Config}, 40 | Controller, 41 | }, 42 | Client, Resource, 43 | }; 44 | use std::{collections::BTreeMap, sync::Arc}; 45 | 46 | use std::time::Duration; 47 | use tracing::{debug, error, info, instrument, trace, warn}; 48 | 49 | use crate::{ 50 | cloud::{pwgen::generate_password, Provisioner}, 51 | ops::{ 52 | parse_provisioner_label_value, ExitNode, ExitNodeProvisioner, ExitNodeSpec, ExitNodeStatus, 53 | EXIT_NODE_NAME_LABEL, EXIT_NODE_PROVISIONER_LABEL, 54 | }, 55 | }; 56 | use crate::{deployment::create_owned_deployment, error::ReconcileError}; 57 | 58 | pub const EXIT_NODE_FINALIZER: &str = "exitnode.chisel-operator.io/finalizer"; 59 | pub const SVCS_FINALIZER: &str = "service.chisel-operator.io/finalizer"; 60 | 61 | // pub fn get_trace_id() -> opentelemetry::trace::TraceId { 62 | // // opentelemetry::Context -> opentelemetry::trace::Span 63 | // use opentelemetry::trace::TraceContextExt as _; 64 | // // tracing::Span -> opentelemetry::Context 65 | // use tracing_opentelemetry::OpenTelemetrySpanExt as _; 66 | 67 | // tracing::Span::current() 68 | // .context() 69 | // .span() 70 | // .span_context() 71 | // .trace_id() 72 | // } 73 | 74 | // this is actually used to pass clients around 75 | pub struct Context { 76 | pub client: Client, 77 | // Let's implement a lock here to prevent multiple reconciles assigning the same exit node 78 | // to multiple services implicitly (#143) 79 | pub exit_node_lock: Arc<tokio::sync::Mutex<Option<(std::time::Instant, String)>>>, 80 | } 81 | 82 | /// Parses the `query` string to extract the namespace and name. 83 | /// If the `query` contains a '/', it splits the `query` into two parts: 84 | /// the namespace and the name. Otherwise, it uses the `og_namespace` 85 | /// as the namespace and the entire `query` as the name. 86 | /// 87 | /// # Arguments 88 | /// 89 | /// * `query` - A string slice that holds the query to be parsed. 90 | /// * `og_namespace` - A string slice that holds the original namespace. 91 | /// 92 | /// # Returns 93 | /// 94 | /// A tuple containing the namespace and name as string slices.
95 | #[instrument(skip(ctx))] 96 | async fn find_exit_node_from_label( 97 | ctx: Arc<Context>, 98 | query: &str, 99 | og_namespace: &str, 100 | ) -> Option<ExitNode> { 101 | // parse the query to get the namespace and name 102 | let (namespace, name) = if let Some((ns, nm)) = query.split_once('/') { 103 | (ns, nm) 104 | } else { 105 | // if the query does not contain a '/', use the original namespace 106 | (og_namespace, query) 107 | }; 108 | 109 | let nodes: Api<ExitNode> = Api::namespaced(ctx.client.clone(), namespace); 110 | let node_list = nodes.list(&ListParams::default().timeout(30)).await.ok()?; 111 | node_list.items.into_iter().find(|node| { 112 | node.metadata 113 | .name 114 | .as_ref() 115 | .map(|n| n == name) 116 | .unwrap_or(false) 117 | }) 118 | } 119 | 120 | #[instrument(skip(ctx))] 121 | async fn find_exit_node_provisioner_from_label( 122 | ctx: Arc<Context>, 123 | default_namespace: &str, 124 | query: &str, 125 | ) -> Option<ExitNodeProvisioner> { 126 | let span = tracing::debug_span!("find_exit_node_provisioner_from_label", ?query); 127 | let _enter = span.enter(); 128 | 129 | let (namespace, name) = parse_provisioner_label_value(default_namespace, query); 130 | 131 | let nodes: Api<ExitNodeProvisioner> = Api::namespaced(ctx.client.clone(), namespace); 132 | let node_list = nodes.list(&ListParams::default().timeout(30)).await.ok()?; 133 | info!(node_list = ?node_list, "node list"); 134 | let result = node_list.items.into_iter().find(|node| { 135 | node.metadata 136 | .name 137 | .as_ref() 138 | .map(|n| n == name) 139 | .unwrap_or(false) 140 | }); 141 | debug!(name = ?name, ?result, "Query result"); 142 | 143 | result 144 | } 145 | /// Check whether the exit node was managed by a provisioner 146 | #[instrument] 147 | async fn check_exit_node_managed(node: &ExitNode) -> bool { 148 | // returns false if there's no annotation, true if annotation exists, simple logic 149 | node.metadata 150 | .annotations 151 | .as_ref() 152 | .map(|annotations| annotations.contains_key(EXIT_NODE_PROVISIONER_LABEL)) 153 | .unwrap_or(false) 154 | } 155 | #[instrument] 156 | async fn check_service_managed(service: &Service) -> bool { 157 | // returns false if there's no annotation, true if annotation exists, simple logic 158 | service 159 | .metadata 160 | .annotations 161 | .as_ref() 162 | .map(|annotations| annotations.contains_key(EXIT_NODE_PROVISIONER_LABEL)) 163 | .unwrap_or(false) 164 | } 165 | 166 | // Let's not use magic values, so we can change this later or if someone wants to fork this for something else 167 | 168 | const OPERATOR_CLASS: &str = "chisel-operator.io/chisel-operator-class"; 169 | const OPERATOR_MANAGER: &str = "chisel-operator"; 170 | 171 | const BACKOFF_TIME_SECS: u64 = 5; 172 | 173 | async fn find_free_exit_nodes(ctx: Arc<Context>) -> Result<Vec<ExitNode>, ReconcileError> { 174 | let svc_api: Api<Service> = Api::all(ctx.client.clone()); 175 | let exit_node_api: Api<ExitNode> = Api::all(ctx.client.clone()); 176 | 177 | let svc_list = svc_api.list(&ListParams::default().timeout(30)).await?; 178 | let exit_node_list = exit_node_api 179 | .list(&ListParams::default().timeout(30)) 180 | .await?; 181 | 182 | let svc_list_filtered = svc_list 183 | .items 184 | .into_iter() 185 | .flat_map(|svc| { 186 | svc.status 187 | .and_then(|status| status.load_balancer) 188 | .and_then(|lb| lb.ingress) 189 | .and_then(|ingress| ingress.first().cloned()) 190 | .and_then(|ingress| ingress.ip) 191 | // .map(|ip| ip) 192 | }) 193 | .collect::<Vec<String>>(); 194 | 195 | let exit_node_list_filtered = exit_node_list.items.into_iter().filter(|node| { 196 | let host = node.get_host(); 197 |
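// Keep only exit nodes whose host is not already in use as a LoadBalancer ingress IP by some service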
!svc_list_filtered.contains(&host) 198 | }); 199 | 200 | Ok(exit_node_list_filtered.collect()) 201 | } 202 | 203 | #[instrument(skip(ctx))] 204 | async fn select_exit_node_local( 205 | ctx: &Arc<Context>, 206 | service: &Service, 207 | ) -> Result<ExitNode, ReconcileError> { 208 | // Lock to prevent race conditions when assigning exit nodes to services 209 | let mut lock = match ctx.exit_node_lock.try_lock() { 210 | Ok(lock) => lock, 211 | Err(_) => { 212 | warn!("Exit node lock is already held, requeuing"); 213 | return Err(ReconcileError::NoAvailableExitNodes); 214 | } 215 | }; 216 | 217 | let already_bound_exit_node = 218 | crate::util::get_svc_bound_exit_node(ctx.clone(), service).await?; 219 | 220 | if let Some(node) = already_bound_exit_node { 221 | info!("Service already bound to an exit node, using that now"); 222 | *lock = Some((std::time::Instant::now(), node.get_host())); 223 | return Ok(node); 224 | } 225 | 226 | // if service has label with exit node name, use that and error if not found 227 | let exit_node_selection = { 228 | if let Some(exit_node_name) = service 229 | .metadata 230 | .labels 231 | .as_ref() 232 | .and_then(|labels| labels.get(EXIT_NODE_NAME_LABEL)) 233 | { 234 | info!( 235 | ?exit_node_name, 236 | "Service explicitly set to use a named exit node, using that" 237 | ); 238 | find_exit_node_from_label( 239 | ctx.clone(), 240 | exit_node_name, 241 | &service.namespace().expect("Service namespace not found"), 242 | ) 243 | .await 244 | .ok_or(ReconcileError::NoAvailableExitNodes) 245 | } else { 246 | // otherwise, use the first available exit node 247 | // (one to one mapping) 248 | // let nodes: Api<ExitNode> = Api::all(ctx.client.clone()); 249 | // let node_list: kube::core::ObjectList<ExitNode> = 250 | // nodes.list(&ListParams::default().timeout(30)).await?; 251 | let node_list = find_free_exit_nodes(ctx.clone()).await?; 252 | debug!(?node_list, "Exit node list"); 253 | node_list 254 | .into_iter() 255 | .filter(|node| { 256 | let is_cloud_provisioned = node 257 | .metadata 258 | .annotations 259 | .as_ref() 260 | .map(|annotations: &BTreeMap<String, String>| { 261 | annotations.contains_key(EXIT_NODE_PROVISIONER_LABEL) 262 | }) 263 | .unwrap_or(false); 264 | 265 | // Is the ExitNode not cloud provisioned or is the status set?
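// A cloud-provisioned node with no status has not finished provisioning (it has no IP yet),
// so only unmanaged nodes, or managed nodes that already report a status, are eligible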
266 | !is_cloud_provisioned || node.status.is_some() 267 | }) 268 | .filter(|node| { 269 | // debug!(?node, "Checking exit node"); 270 | let host = node.get_host(); 271 | if let Some((instant, ip_filter)) = lock.as_ref() { 272 | // Skip this exit node if it was recently assigned and the backoff period hasn't elapsed 273 | if instant.elapsed().as_secs() < BACKOFF_TIME_SECS { 274 | host != *ip_filter 275 | } else { 276 | true 277 | } 278 | } else { 279 | // No lock present, this exit node is available 280 | true 281 | } 282 | }) 283 | .collect::<Vec<ExitNode>>() 284 | .first() 285 | .ok_or(ReconcileError::NoAvailableExitNodes) 286 | .cloned() 287 | } 288 | }; 289 | // .inspect(|node| { 290 | // let exit_node_ip = node.get_host(); 291 | // debug!(?exit_node_ip, "Selected exit node"); 292 | // drop(lock); 293 | // }) 294 | 295 | // Add the selected exit node to the lock, with the current time and hostname 296 | // This will prevent other services within the backoff period from selecting the same exit node 297 | // Fixes #143 by filtering out exit nodes that were recently assigned 298 | // when applying multiple objects in parallel 299 | exit_node_selection.inspect(|node| { 300 | let exit_node_ip = node.get_host(); 301 | debug!(?exit_node_ip, "Selected exit node"); 302 | *lock = Some((std::time::Instant::now(), node.get_host())); 303 | }) 304 | } 305 | 306 | #[instrument(skip(ctx))] 307 | /// Generates or returns an ExitNode resource for a Service resource, either finding an existing one or creating a new one 308 | async fn exit_node_for_service( 309 | ctx: Arc<Context>, 310 | service: &Service, 311 | ) -> Result<ExitNode, ReconcileError> { 312 | let nodes: Api<ExitNode> = Api::namespaced(ctx.client.clone(), &service.namespace().unwrap()); 313 | 314 | // check if annotation was set 315 | let provisioner_name = service 316 | .metadata 317 | .annotations 318 | .as_ref() 319 | .and_then(|annotations| annotations.get(EXIT_NODE_PROVISIONER_LABEL)) 320 | .ok_or_else(|| ReconcileError::CloudProvisionerNotFound)?; 321 | 322 | let exit_node_name = service 323 | .metadata 324 | .annotations 325 | .as_ref() 326 | .and_then(|annotations| annotations.get(EXIT_NODE_NAME_LABEL)) 327 | .unwrap_or({ 328 | let service_name = service.metadata.name.as_ref().unwrap(); 329 | &format!("service-{}", service_name) 330 | }) 331 | .to_owned(); 332 | 333 | let oref = service.controller_owner_ref(&()).ok_or_else(|| { 334 | ReconcileError::KubeError(kube::Error::Api(ErrorResponse { 335 | code: 500, 336 | message: "Service is missing owner reference".to_string(), 337 | reason: "MissingOwnerReference".to_string(), 338 | status: "Failure".to_string(), 339 | })) 340 | })?; 341 | 342 | // try to find exit node from name within the service's namespace, and return if found 343 | if let Ok(exit_node) = nodes.get(&exit_node_name).await { 344 | return Ok(exit_node); 345 | } 346 | 347 | let mut exit_node_tmpl = ExitNode { 348 | metadata: ObjectMeta { 349 | name: Some(exit_node_name.clone()), 350 | namespace: service.namespace(), 351 | annotations: Some({ 352 | let mut map = BTreeMap::new(); 353 | map.insert( 354 | EXIT_NODE_PROVISIONER_LABEL.to_string(), 355 | format!("{}/{}", service.namespace().unwrap(), provisioner_name), // Fixes #38 356 | ); 357 | map 358 | }), 359 | owner_references: Some(vec![oref]), 360 | ..Default::default() 361 | }, 362 | spec: ExitNodeSpec { 363 | host: "".to_string(), 364 | port: crate::cloud::CHISEL_PORT, 365 | auth: None, 366 | external_host: None, 367 | default_route: true, 368 | fingerprint: None, 369 | chisel_image: None, 370 | }, 371 | status: None, 372 | };
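// Generate a dedicated auth Secret for this node before applying the manifest,
// so every provisioned exit node gets its own credential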
373 | 374 | let password = generate_password(32); 375 | let secret = exit_node_tmpl.generate_secret(password.clone()).await?; 376 | 377 | exit_node_tmpl.spec.auth = Some(secret.metadata.name.unwrap()); 378 | 379 | let serverside = PatchParams::apply(OPERATOR_MANAGER).validation_strict(); 380 | 381 | let exit_node = nodes 382 | .patch( 383 | &exit_node_tmpl.name_any(), 384 | &serverside, 385 | &Patch::Apply(exit_node_tmpl.clone()), 386 | ) 387 | .await?; 388 | 389 | Ok(exit_node) 390 | } 391 | // #[instrument(skip(ctx), fields(trace_id))] 392 | /// Reconcile cluster state 393 | #[instrument(skip(ctx, obj))] 394 | async fn reconcile_svcs(obj: Arc<Service>, ctx: Arc<Context>) -> Result<Action, ReconcileError> { 395 | // Return if service is not LoadBalancer or if the loadBalancerClass is not blank or set to $OPERATOR_CLASS 396 | 397 | // todo: does anything need to be done differently for OpenShift? We use vanilla k8s and k3s/rke2, so we don't know 398 | if obj 399 | .spec 400 | .as_ref() 401 | .filter(|spec| spec.type_ == Some("LoadBalancer".to_string())) 402 | .is_none() 403 | || obj 404 | .spec 405 | .as_ref() 406 | .filter(|spec| { 407 | spec.load_balancer_class.is_none() 408 | || spec.load_balancer_class == Some(OPERATOR_CLASS.to_string()) 409 | }) 410 | .is_none() 411 | { 412 | return Ok(Action::await_change()); 413 | } 414 | 415 | info!("reconcile request: {}", obj.name_any()); 416 | 417 | // We can unwrap safely since Service is namespaced scoped 418 | let services: Api<Service> = Api::namespaced(ctx.client.clone(), &obj.namespace().unwrap()); 419 | let nodes: Api<ExitNode> = Api::all(ctx.client.clone()); 420 | 421 | let mut svc = services.get_status(&obj.name_any()).await?; 422 | 423 | let obj = svc.clone(); 424 | 425 | let node_list = nodes.list(&ListParams::default().timeout(30)).await?; 426 | 427 | // Find service binding of svc name/namespace? 428 | let named_exit_node = node_list.iter().find(|node| { 429 | node.metadata 430 | .annotations 431 | .as_ref() 432 | .map(|annotations| annotations.contains_key(EXIT_NODE_NAME_LABEL)) 433 | .unwrap_or(false) 434 | }); 435 | 436 | // XXX: Exit node manifest generation starts here 437 | let node = { 438 | if let Some(node) = named_exit_node { 439 | info!("Service explicitly set to use a named exit node, using that now"); 440 | node.clone() 441 | } else if check_service_managed(&obj).await { 442 | info!("Service is managed by a cloud provider, resolving exit node..."); 443 | // The exit node created here is owned by the service, so it is cleaned up (along with its cloud resource) when the service is removed 444 | let mut exit_node = exit_node_for_service(ctx.clone(), &obj).await?; 445 | 446 | while exit_node.status.is_none() { 447 | warn!("Waiting for exit node to be provisioned"); 448 | tokio::time::sleep(Duration::from_secs(5)).await; 449 | exit_node = exit_node_for_service(ctx.clone(), &obj).await?; 450 | } 451 | 452 | exit_node 453 | } else { 454 | info!("Selecting an exit node for the service"); 455 | select_exit_node_local(&ctx, &obj).await?
456 | } 457 | }; 458 | 459 | let exit_node_ip = node.get_host(); 460 | 461 | // check if status is the same as the one we're about to patch 462 | 463 | let obj_ip = obj.clone().status; 464 | 465 | debug!(?exit_node_ip, ?obj_ip, "Exit node IP"); 466 | 467 | let serverside = PatchParams::apply(OPERATOR_MANAGER).validation_strict(); 468 | 469 | svc.status = Some(ServiceStatus { 470 | load_balancer: Some(LoadBalancerStatus { 471 | ingress: Some(vec![LoadBalancerIngress { 472 | ip: Some(exit_node_ip.clone()), 473 | // hostname: Some(node.get_external_host()), 474 | ..Default::default() 475 | }]), 476 | }), 477 | ..Default::default() 478 | }); 479 | 480 | // Update the status for the LoadBalancer service 481 | // The ExitNode IP will always be set, so it is safe to unwrap the host 482 | 483 | debug!(status = ? svc.status, "Service status"); 484 | 485 | // debug!("Patching status for {}", obj.name_any()); 486 | 487 | let _svcs = services 488 | .patch_status( 489 | // We can unwrap safely since Service is guaranteed to have a name 490 | obj.name_any().as_str(), 491 | &serverside.clone(), 492 | &Patch::Merge(&svc), 493 | ) 494 | .await?; 495 | 496 | info!(status = ?obj, "Patched status for service {}", obj.name_any()); 497 | 498 | // We can unwrap safely since ExitNode is namespaced scoped 499 | let deployments: Api<Deployment> = 500 | Api::namespaced(ctx.client.clone(), &node.namespace().unwrap()); 501 | 502 | // TODO: We should refactor this such that each deployment of Chisel corresponds to an exit node 503 | // Currently each deployment of Chisel corresponds to a service, which means duplicate deployments of Chisel 504 | // This also caused some issues, where we (intuitively) made the owner ref of the deployment the service 505 | // which breaks since a service can be in a separate namespace from the deployment (k8s disallows this) 506 | let deployment_data = create_owned_deployment(&obj, &node).await?; 507 | let _deployment = deployments 508 | .patch( 509 | &deployment_data.name_any(), 510 | &serverside, 511 | &Patch::Apply(deployment_data.clone()), 512 | ) 513 | .await?; 514 | 515 | tracing::trace!(?_deployment); 516 | 517 | finalizer::finalizer( 518 | &services, 519 | SVCS_FINALIZER, 520 | obj.clone().into(), 521 | |event| async move { 522 | let m: std::prelude::v1::Result<Action, ReconcileError> = match event { 523 | Event::Apply(_svc) => { 524 | info!(status = ?node, "Patched status for ExitNode {}", node.name_any()); 525 | Ok(Action::requeue(Duration::from_secs(3600))) 526 | } 527 | Event::Cleanup(svc) => { 528 | info!("Cleanup finalizer triggered for {}", svc.name_any()); 529 | 530 | // Clean up deployment when service is deleted 531 | let deployments: Api<Deployment> = 532 | Api::namespaced(ctx.client.clone(), &node.namespace().unwrap()); 533 | 534 | info!("Deleting deployment for {}", svc.name_any()); 535 | 536 | let _deployment = deployments 537 | .delete(deployment_data.name_any().as_str(), &Default::default()) 538 | .await?; 539 | Ok(Action::requeue(Duration::from_secs(3600))) 540 | } 541 | }; 542 | m 543 | }, 544 | ) 545 | .await 546 | .map_err(|e| { 547 | crate::error::ReconcileError::KubeError(kube::Error::Api(kube::error::ErrorResponse { 548 | code: 500, 549 | message: format!("Error applying finalizer for {}", obj.name_any()), 550 | reason: e.to_string(), 551 | status: "Failure".to_string(), 552 | })) 553 | }) 554 | } 555 | 556 | #[instrument(skip(_object, err, _ctx))] 557 | fn error_policy(_object: Arc<Service>, err: &ReconcileError, _ctx: Arc<Context>) -> Action { 558 | error!(err = ?err); 559 | Action::requeue(Duration::from_secs(5))
} 561 | 562 | #[instrument(skip(_object, err, _ctx))] 563 | fn error_policy_exit_node( 564 | _object: Arc<ExitNode>, 565 | err: &ReconcileError, 566 | _ctx: Arc<Context>, 567 | ) -> Action { 568 | error!(err = ?err); 569 | Action::requeue(Duration::from_secs(5)) 570 | } 571 | const UNMANAGED_PROVISIONER: &str = "unmanaged"; 572 | 573 | #[instrument(skip(ctx, obj))] 574 | async fn reconcile_nodes(obj: Arc<ExitNode>, ctx: Arc<Context>) -> Result<Action, ReconcileError> { 575 | info!("exit node reconcile request: {}", obj.name_any()); 576 | let is_managed = check_exit_node_managed(&obj).await; 577 | debug!(?is_managed, "exit node is managed by cloud provisioner?"); 578 | let exit_nodes: Api<ExitNode> = Api::namespaced(ctx.client.clone(), &obj.namespace().unwrap()); 579 | 580 | // finalizer for exit node 581 | let serverside = PatchParams::apply(OPERATOR_MANAGER).validation_strict(); 582 | 583 | if !is_managed && obj.status.is_none() { 584 | // add status to exit node if it's not managed 585 | // This is the case for self-hosted exit nodes (manually set up) 586 | 587 | let nodes: Api<ExitNode> = Api::namespaced(ctx.client.clone(), &obj.namespace().unwrap()); 588 | 589 | let mut exitnode_patchtmpl = nodes.get(&obj.name_any()).await?; 590 | 591 | // now we set the status, but the provisioner is unmanaged 592 | // so we just copy the IP from the exit node config to the status 593 | 594 | let exit_node_ip = obj.get_host(); 595 | 596 | exitnode_patchtmpl.status = Some(ExitNodeStatus { 597 | provider: UNMANAGED_PROVISIONER.to_string(), 598 | name: obj.name_any(), 599 | ip: exit_node_ip, 600 | id: None, 601 | }); 602 | 603 | let serverside = PatchParams::apply(OPERATOR_MANAGER).validation_strict(); 604 | 605 | let _node = nodes 606 | .patch_status( 607 | // We can unwrap safely since Service is guaranteed to have a name 608 | &obj.name_any(), 609 | &serverside.clone(), 610 | &Patch::Merge(exitnode_patchtmpl), 611 | ) 612 | .await?; 613 | 614 | return Ok(Action::await_change()); 615 | } else if is_managed { 616 | let provisioner = obj 617 | .metadata 618 | .annotations 619 | .as_ref() 620 | .and_then(|annotations| annotations.get(EXIT_NODE_PROVISIONER_LABEL)) 621 | .unwrap(); 622 | 623 | // We should assume that every managed exit node comes with an `auth` key, which is a reference to a Secret 624 | // that contains the password for the exit node.
625 | // If it doesn't exist, then it's probably bugged, and we should return an error 626 | let node_password = { 627 | let Some(ref node_password_secret_name) = obj.clone().spec.auth else { 628 | return Err(ReconcileError::ManagedExitNodeNoPasswordSet); 629 | }; 630 | let secrets_api = Api::namespaced(ctx.client.clone(), &obj.namespace().unwrap()); 631 | let secret: Secret = secrets_api.get(node_password_secret_name).await?; 632 | let Some(node_password) = secret.data.as_ref().unwrap().get("auth") else { 633 | return Err(ReconcileError::AuthFieldNotSet); 634 | }; 635 | String::from_utf8_lossy(&node_password.0).to_string() 636 | }; 637 | 638 | trace!(?provisioner, "Provisioner"); 639 | if let Some(status) = &obj.status { 640 | // Check for mismatch between annotation's provisioner and status' provisioner 641 | if &status.provider != provisioner { 642 | // Destroy cloud resource 643 | warn!("Cloud provisioner mismatch, destroying cloud resource found in status"); 644 | 645 | let old_provider = status.provider.clone(); 646 | 647 | let old_provisioner = find_exit_node_provisioner_from_label( 648 | ctx.clone(), 649 | &obj.namespace().unwrap(), 650 | &old_provider, 651 | ) 652 | .await 653 | .ok_or(ReconcileError::CloudProvisionerNotFound)?; 654 | 655 | let old_provisioner_api: Box<dyn Provisioner + Send + Sync> = 656 | old_provisioner.clone().spec.get_inner(); 657 | 658 | let secret = old_provisioner 659 | .find_secret() 660 | .await 661 | .map_err(|_| crate::error::ReconcileError::CloudProvisionerSecretNotFound)? 662 | .ok_or(ReconcileError::CloudProvisionerSecretNotFound)?; 663 | 664 | old_provisioner_api 665 | .delete_exit_node(secret, (*obj).clone()) 666 | .await?; 667 | 668 | // Now blank out the status 669 | 670 | let nodes: Api<ExitNode> = 671 | Api::namespaced(ctx.client.clone(), &obj.namespace().unwrap()); 672 | 673 | let exitnode_patch = serde_json::json!({ 674 | "status": None::<ExitNodeStatus> 675 | }); 676 | 677 | info!("Clearing status for exit node {}", obj.name_any()); 678 | 679 | let _node = nodes 680 | .patch_status( 681 | // We can unwrap safely since Service is guaranteed to have a name 682 | &obj.name_any(), 683 | &serverside.clone(), 684 | &Patch::Merge(exitnode_patch), 685 | ) 686 | .await?; 687 | } 688 | } 689 | 690 | let provisioner = find_exit_node_provisioner_from_label( 691 | ctx.clone(), 692 | &obj.namespace().unwrap(), 693 | provisioner, 694 | ) 695 | .await 696 | .ok_or(ReconcileError::CloudProvisionerNotFound)?; 697 | 698 | let provisioner_api = provisioner.clone().spec.get_inner(); 699 | 700 | // API key secret, do not use for node password 701 | let api_key_secret = provisioner 702 | .find_secret() 703 | .await 704 | .map_err(|_| crate::error::ReconcileError::CloudProvisionerSecretNotFound)? 705 | .ok_or(ReconcileError::CloudProvisionerSecretNotFound)?; 706 | 707 | finalizer::finalizer( 708 | &exit_nodes.clone(), 709 | EXIT_NODE_FINALIZER, 710 | obj.clone(), 711 | |event| async move { 712 | let m: Result<_, crate::error::ReconcileError> = match event { 713 | Event::Apply(node) => { 714 | let _ = { 715 | // XXX: We should get the value of the Secret and pass it in as node_password 716 | let cloud_resource = if let Some(_status) = node.status.as_ref() { 717 | info!("Updating cloud resource for {}", node.name_any()); 718 | provisioner_api 719 | .update_exit_node( 720 | api_key_secret.clone(), 721 | (*node).clone(), 722 | node_password, 723 | ) 724 | .await?
725 | } else { 726 | info!("Creating cloud resource for {}", node.name_any()); 727 | provisioner_api 728 | .create_exit_node( 729 | api_key_secret.clone(), 730 | (*node).clone(), 731 | node_password, 732 | ) 733 | .await? 734 | }; 735 | 736 | // unwrap should be safe here since in k8s it is infallible for a Secret to not have a name 737 | // TODO: Don't replace the entire status and object, sadly JSON is better here 738 | let exitnode_patch = serde_json::json!({ 739 | "status": cloud_resource, 740 | }); 741 | 742 | exit_nodes 743 | .patch_status( 744 | // We can unwrap safely since Service is guaranteed to have a name 745 | &node.name_any(), 746 | &serverside.clone(), 747 | &Patch::Merge(exitnode_patch), 748 | ) 749 | .await? 750 | }; 751 | 752 | Ok(Action::requeue(Duration::from_secs(3600))) 753 | } 754 | Event::Cleanup(node) => { 755 | info!("Cleanup finalizer triggered for {}", node.name_any()); 756 | 757 | if is_managed { 758 | info!("Deleting cloud resource for {}", node.name_any()); 759 | provisioner_api 760 | .delete_exit_node(api_key_secret, (*node).clone()) 761 | .await 762 | .unwrap_or_else(|e| { 763 | error!(?e, "Error deleting exit node {}", node.name_any()) 764 | }); 765 | } 766 | Ok(Action::requeue(Duration::from_secs(3600))) 767 | } 768 | }; 769 | m 770 | }, 771 | ) 772 | .await 773 | .map_err(|e| { 774 | crate::error::ReconcileError::KubeError(kube::Error::Api(kube::error::ErrorResponse { 775 | code: 500, 776 | message: format!("Error applying finalizer for {}", obj.name_any()), 777 | reason: e.to_string(), 778 | status: "Failure".to_string(), 779 | })) 780 | }) 781 | } else { 782 | Ok(Action::requeue(Duration::from_secs(3600))) 783 | } 784 | } 785 | 786 | /// watches for Kubernetes service resources and runs a controller to reconcile them. 787 | #[instrument] 788 | pub async fn run() -> color_eyre::Result<()> { 789 | let client = Client::try_default().await?; 790 | // watch for K8s service resources (default) 791 | let services: Api<Service> = Api::all(client.clone()); 792 | 793 | let exit_nodes: Api<ExitNode> = Api::all(client.clone()); 794 | 795 | let mut reconcilers = vec![]; 796 | 797 | let lock = Arc::new(tokio::sync::Mutex::new(None)); 798 | 799 | info!("Starting reconcilers..."); 800 | 801 | // TODO: figure out how to do this in a single controller because there is a potential race where the exit node reconciler runs at the same time as the service one 802 | // This is an issue because both of these functions patch the status of the exit node 803 | // or if we can figure out a way to atomically patch the status of the exit node, that could be fine too, since both ops are just updates anyways lmfao 804 | // NOTE: Maybe we could use a lock to prevent this. This will be implemented only for local exit nodes for now.
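// The shared `lock` created above is that mitigation: both controllers receive a handle
// to the same Mutex through Context, so local exit node selection is serialized between them.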
805 | 
806 |     reconcilers.push(
807 |         Controller::new(services, Config::default())
808 |             .watches(
809 |                 Api::<ExitNode>::all(client.clone()),
810 |                 watcher::Config::default(),
811 |                 |node: ExitNode| {
812 |                     node.metadata
813 |                         .annotations
814 |                         .as_ref()
815 |                         .unwrap_or(&BTreeMap::new())
816 |                         .get(EXIT_NODE_PROVISIONER_LABEL)
817 |                         .map(String::as_str)
818 |                         .map(ObjectRef::new)
819 |                 },
820 |             )
821 |             .run(
822 |                 reconcile_svcs,
823 |                 error_policy,
824 |                 Arc::new(Context {
825 |                     client: client.clone(),
826 |                     exit_node_lock: lock.clone(),
827 |                 }),
828 |             )
829 |             .for_each(|_| futures::future::ready(()))
830 |             .boxed(),
831 |     );
832 | 
833 |     // It is not clear which direction the watch mapping runs, so the mirror watch is registered here as well
834 |     reconcilers.push(
835 |         Controller::new(exit_nodes, Config::default())
836 |             .watches(
837 |                 Api::<Service>::all(client.clone()),
838 |                 watcher::Config::default(),
839 |                 |node: Service| {
840 |                     node.metadata
841 |                         .annotations
842 |                         .as_ref()
843 |                         .unwrap_or(&BTreeMap::new())
844 |                         .get(EXIT_NODE_PROVISIONER_LABEL)
845 |                         .map(String::as_str)
846 |                         .map(ObjectRef::new)
847 |                 },
848 |             )
849 |             .run(
850 |                 reconcile_nodes,
851 |                 error_policy_exit_node,
852 |                 Arc::new(Context {
853 |                     client,
854 |                     exit_node_lock: lock,
855 |                 }),
856 |             )
857 |             .for_each(|_| futures::future::ready(()))
858 |             .boxed(),
859 |     );
860 | 
861 |     futures::future::join_all(reconcilers).await;
862 | 
863 |     Ok(())
864 | }
865 | 
--------------------------------------------------------------------------------
/src/deployment.rs:
--------------------------------------------------------------------------------
1 | //! Chisel pod deployment
2 | 
3 | use crate::{
4 |     error::ReconcileError,
5 |     ops::{ExitNode, EXIT_NODE_PROXY_PROTOCOL_LABEL},
6 | };
7 | use color_eyre::Result;
8 | use k8s_openapi::{
9 |     api::{
10 |         apps::v1::{Deployment, DeploymentSpec},
11 |         core::v1::{
12 |             Container, EnvVar, EnvVarSource, PodSpec, PodTemplateSpec, SecretKeySelector, Service,
13 |             ServicePort,
14 |         },
15 |     },
16 |     apimachinery::pkg::apis::meta::v1::LabelSelector,
17 | };
18 | use kube::{api::ResourceExt, core::ObjectMeta, error::ErrorResponse, Resource};
19 | use tracing::{info, instrument, trace};
20 | 
21 | const CHISEL_IMAGE: &str = "jpillora/chisel";
22 | 
23 | /// Takes a ServicePort and returns a string representation of the port number and
24 | /// protocol (if specified).
25 | ///
26 | /// Arguments:
27 | ///
28 | /// * `svcport`: the `ServicePort` entry from the Kubernetes Service being tunneled.
29 | /// The function reads the port number and, if present, the protocol from it and
30 | /// renders them as a single string suitable for a chisel remote
31 | /// specification.
32 | ///
33 | /// Returns:
34 | ///
35 | /// a string that represents the service port. The string contains the port number and, if applicable,
36 | /// the protocol (TCP or UDP) in the format "port/protocol".
37 | fn convert_service_port(svcport: ServicePort) -> String {
38 |     let mut port = String::new();
39 | 
40 |     // get port number
41 |     port.push_str(&svcport.port.to_string());
42 | 
43 |     if let Some(protocol) = svcport.protocol {
44 |         match protocol.as_str() {
45 |             // todo: we probably want to imply none by default
46 |             "TCP" => port.push_str("/tcp"),
47 |             "UDP" => port.push_str("/udp"),
48 |             _ => (),
49 |         };
50 |     }
51 | 
52 |     port
53 | }
54 | 
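A quick illustration of that mapping (a hypothetical test, not part of the repository):

    #[cfg(test)]
    mod port_format_tests {
        use super::*;

        #[test]
        fn renders_port_and_protocol() {
            // TCP and UDP get a lowercase suffix in "port/protocol" form
            let tcp = ServicePort {
                port: 80,
                protocol: Some("TCP".to_string()),
                ..Default::default()
            };
            assert_eq!(convert_service_port(tcp), "80/tcp");

            // An unknown or missing protocol leaves the bare port number
            let bare = ServicePort { port: 53, ..Default::default() };
            assert_eq!(convert_service_port(bare), "53");
        }
    }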
55 | use std::net::IpAddr;
56 | 
57 | /// This function generates the remote argument string used by the chisel client to
58 | /// connect to an ExitNode, from the node's host and port information.
59 | ///
60 | /// Arguments:
61 | ///
62 | /// * `node`: a reference to an `ExitNode` struct, which contains information about a specific
63 | /// exit node in the network. The function `generate_remote_arg` takes this node as input and
64 | /// generates a remote argument that can be used to connect to the exit node.
65 | ///
66 | /// Returns:
67 | ///
68 | /// A `String` containing the formatted remote argument: the node's host (bracketed if it is
69 | /// an IPv6 address) combined with the control plane port from the node's spec.
70 | pub fn generate_remote_arg(node: &ExitNode) -> String {
71 |     // todo: what about ECDSA keys?
72 | 
73 |     let host = node.get_host();
74 | 
75 |     trace!(host = ?host, "Host");
76 | 
77 |     // Determine if the host is an IPv6 address and format accordingly
78 |     let formatted_host = match host.parse::<IpAddr>() {
79 |         Ok(IpAddr::V6(_)) => format!("[{}]", host),
80 |         _ => host.to_string(),
81 |     };
82 | 
83 |     let output = format!("{}:{}", formatted_host, node.spec.port);
84 |     trace!(output = ?output, "Output");
85 |     output
86 | }
87 | 
88 | /// This function generates the arguments for a tunnel based on a given service.
89 | ///
90 | /// Arguments:
91 | ///
92 | /// * `svc`: a reference to a `Service` object, which represents a set of pods that provide a
93 | /// common network service. The function `generate_tunnel_args` takes this `Service` object as input
94 | /// and generates the set of arguments used to create a tunnel to the service.
95 | ///
96 | /// Returns:
97 | ///
98 | /// a `Result` containing a `Vec` of `String`s. The `Vec` contains the arguments for the tunnel,
99 | /// generated based on the input `Service`.
100 | pub fn generate_tunnel_args(svc: &Service) -> Result<Vec<String>, ReconcileError> {
101 |     // We can unwrap safely since Service is guaranteed to have a name
102 |     let service_name = svc.metadata.name.clone().unwrap();
103 |     // We can unwrap safely since Service is namespace scoped
104 |     let service_namespace = svc.namespace().unwrap();
105 | 
106 |     // this feels kind of janky, will need to refactor this later
107 | 
108 |     // check if there's a custom IP set
109 |     // let target_ip = svc
110 |     //     .spec
111 |     //     .as_ref()
112 |     //     .map(|spec| spec.load_balancer_ip.clone())
113 |     //     .flatten()
114 |     //     .unwrap_or_else(|| "R".to_string());
115 | 
116 |     let proxy_protocol = svc.metadata.annotations.as_ref().and_then(|annotations| {
117 |         annotations
118 |             .get(EXIT_NODE_PROXY_PROTOCOL_LABEL)
119 |             .map(String::as_ref)
120 |     }) == Some("true");
121 |     let target_ip = if proxy_protocol { "RP" } else { "R" };
122 | 
123 |     // We can unwrap safely since Service is guaranteed to have a spec
124 |     let ports = svc
125 |         .spec
126 |         .as_ref()
127 |         .unwrap()
128 |         .ports
129 |         .as_ref()
130 |         .ok_or(ReconcileError::NoPortsSet)?
131 |         .iter()
132 |         .map(|p| {
133 |             format!(
134 |                 "{}:{}:{}.{}:{}",
135 |                 target_ip,
136 |                 p.port,
137 |                 service_name,
138 |                 service_namespace,
139 |                 convert_service_port(p.clone())
140 |             )
141 |         })
142 |         .collect();
143 | 
144 |     info!("Generated arguments: {:?}", ports);
145 |     trace!(svc = ?svc, "Source service");
146 |     Ok(ports)
147 | }
148 | 
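The remotes produced above follow chisel's reverse tunnel syntax, `R:<remote-port>:<host>:<port>[/protocol]`, with the code swapping in an `RP` prefix when PROXY protocol is requested via the annotation. A hypothetical test pinning down the shape of the output (service name and namespace are illustrative):

    #[cfg(test)]
    mod tunnel_arg_tests {
        use super::*;
        use k8s_openapi::api::core::v1::ServiceSpec;

        #[test]
        fn builds_reverse_remote_for_each_port() {
            let svc = Service {
                metadata: ObjectMeta {
                    name: Some("whoami".to_string()),
                    namespace: Some("default".to_string()),
                    ..Default::default()
                },
                spec: Some(ServiceSpec {
                    ports: Some(vec![ServicePort {
                        port: 80,
                        protocol: Some("TCP".to_string()),
                        ..Default::default()
                    }]),
                    ..Default::default()
                }),
                ..Default::default()
            };
            // One remote per port: R:<port>:<service>.<namespace>:<port>/<proto>
            assert_eq!(
                generate_tunnel_args(&svc).unwrap(),
                vec!["R:80:whoami.default:80/tcp".to_string()]
            );
        }
    }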
149 | /// This function generates Chisel flags from the options set on an ExitNode's spec.
150 | ///
151 | /// Arguments:
152 | ///
153 | /// * `node`: a reference to an `ExitNode` struct, which contains information about a specific
154 | /// exit node in the network. The function `generate_chisel_flags` takes this node as input and
155 | /// generates the chisel flags that are used when connecting to the exit node.
156 | ///
157 | /// Returns:
158 | ///
159 | /// A `Vec` of `String`s.
160 | /// The `Vec` contains chisel flags for the client, which are
161 | /// generated based on the input `ExitNode`'s spec.
162 | #[instrument]
163 | pub fn generate_chisel_flags(node: &ExitNode) -> Vec<String> {
164 |     let mut flags = vec!["-v".to_string()];
165 | 
166 |     if let Some(fingerprint) = node.spec.fingerprint.to_owned() {
167 |         flags.push("--fingerprint".to_string());
168 |         flags.push(fingerprint)
169 |     }
170 | 
171 |     flags
172 | }
173 | 
174 | /// This function creates a PodTemplateSpec for a chisel container to be used as a tunnel between a
175 | /// source service and an exit node.
176 | ///
177 | /// Arguments:
178 | ///
179 | /// * `source`: a reference to a `Service` object, which represents a set of
180 | /// pods that provide a single, stable network endpoint for accessing a Kubernetes service.
181 | /// * `exit_node`: a reference to an `ExitNode` struct, which contains information about
182 | /// the exit node that the pod will connect to. This includes the exit node's IP address, port, and
183 | /// authentication credentials. The `generate_remote_arg` function is used to generate the command line
184 | /// argument that points the chisel client at that exit node.
185 | ///
186 | /// Returns:
187 | ///
188 | /// a `PodTemplateSpec` object.
189 | #[instrument(skip(source, exit_node))]
190 | pub async fn create_pod_template(
191 |     source: &Service,
192 |     exit_node: &ExitNode,
193 | ) -> Result<PodTemplateSpec, ReconcileError> {
194 |     let service_name = source.metadata.name.as_ref().ok_or_else(|| {
195 |         ReconcileError::KubeError(kube::Error::Api(ErrorResponse {
196 |             code: 500,
197 |             message: "Service is missing name".to_string(),
198 |             reason: "MissingServiceName".to_string(),
199 |             status: "Failure".to_string(),
200 |         }))
201 |     })?;
202 | 
203 |     let mut args = vec!["client".to_string()];
204 |     args.extend(generate_chisel_flags(exit_node));
205 |     args.push(generate_remote_arg(exit_node));
206 |     args.extend(generate_tunnel_args(source)?.iter().map(|s| s.to_string()));
207 | 
208 |     let env = exit_node.spec.auth.clone().map(|secret_name| {
209 |         vec![EnvVar {
210 |             name: "AUTH".to_string(),
211 |             value_from: Some(EnvVarSource {
212 |                 secret_key_ref: Some(SecretKeySelector {
213 |                     name: secret_name,
214 |                     key: "auth".to_string(),
215 |                     optional: Some(false),
216 |                 }),
217 |                 ..Default::default()
218 |             }),
219 |             ..Default::default()
220 |         }]
221 |     });
222 | 
223 |     // Warn when auth is not set
224 |     if env.is_none() {
225 |         tracing::warn!("No auth secret set for exit node! The tunnel will not be secure; this is a security risk");
226 |     }
227 | 
228 |     Ok(PodTemplateSpec {
229 |         metadata: Some(ObjectMeta {
230 |             labels: Some([("tunnel".to_string(), service_name.to_owned())].into()),
231 |             ..Default::default()
232 |         }),
233 |         spec: Some(PodSpec {
234 |             containers: vec![Container {
235 |                 args: Some(args),
236 |                 image: Some(
237 |                     exit_node
238 |                         .spec
239 |                         .chisel_image
240 |                         .clone()
241 |                         .unwrap_or_else(|| CHISEL_IMAGE.to_string()),
242 |                 ),
243 |                 name: "chisel".to_string(),
244 |                 env,
245 |                 ..Default::default()
246 |             }],
247 |             ..Default::default()
248 |         }),
249 |     })
250 | }
251 | 
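Taken together, `create_pod_template` assembles the chisel client's argument vector from the three helpers above. For a hypothetical exit node at `203.0.113.10:9090` with a pinned fingerprint, tunneling the `whoami` Service from the earlier sketch, the result would look like this (all values illustrative):

    fn main() {
        // Hypothetical assembled argument vector for the chisel container
        let args: Vec<String> = [
            "client",                      // subcommand
            "-v",                          // from generate_chisel_flags
            "--fingerprint", "abc123",     // host-key pinning flag and value
            "203.0.113.10:9090",           // from generate_remote_arg
            "R:80:whoami.default:80/tcp",  // from generate_tunnel_args
        ]
        .map(str::to_string)
        .to_vec();
        println!("{args:?}");
    }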
252 | /// This function creates a Deployment object running the chisel client for a service and exit node.
253 | ///
254 | /// Arguments:
255 | ///
256 | /// * `source`: a reference to a `Service` object, which represents a set of
257 | /// pods that perform the same function and are exposed by a common IP address and port.
258 | /// * `exit_node`: a reference to the `ExitNode` that traffic will exit through to reach
259 | /// external clients; the deployment's chisel client connects to this
260 | /// node.
261 | ///
262 | /// Returns:
263 | ///
264 | /// a `Deployment` object.
265 | #[instrument(skip(source, exit_node))]
266 | pub async fn create_owned_deployment(
267 |     source: &Service,
268 |     exit_node: &ExitNode,
269 | ) -> Result<Deployment, ReconcileError> {
270 |     let oref = exit_node.controller_owner_ref(&()).ok_or_else(|| {
271 |         ReconcileError::KubeError(kube::Error::Api(ErrorResponse {
272 |             code: 500,
273 |             message: "ExitNode is missing name or uid for an owner reference".to_string(),
274 |             reason: "MissingOwnerReference".to_string(),
275 |             status: "Failure".to_string(),
276 |         }))
277 |     })?;
278 | 
279 |     // cross-namespace owner references are not allowed, so we link to the exit node as the owner
280 | 
281 |     let service_name = source.metadata.name.as_ref().ok_or_else(|| {
282 |         ReconcileError::KubeError(kube::Error::Api(ErrorResponse {
283 |             code: 500,
284 |             message: "Service is missing name".to_string(),
285 |             reason: "MissingServiceName".to_string(),
286 |             status: "Failure".to_string(),
287 |         }))
288 |     })?;
289 | 
290 |     Ok(Deployment {
291 |         metadata: ObjectMeta {
292 |             name: Some(format!("chisel-{}", service_name)),
293 |             owner_references: Some(vec![oref]),
294 |             // namespace: exit_node.metadata.namespace.clone(),
295 |             ..ObjectMeta::default()
296 |         },
297 |         spec: Some(DeploymentSpec {
298 |             template: create_pod_template(source, exit_node).await?,
299 |             selector: LabelSelector {
300 |                 match_labels: Some([("tunnel".to_string(), service_name.to_owned())].into()),
301 |                 ..Default::default()
302 |             },
303 |             ..Default::default()
304 |         }),
305 |         ..Default::default()
306 |     })
307 | }
308 | 
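For reference, the commented-out test below constructs an `ExitNode` whose own metadata has no name or uid, so `controller_owner_ref` returns `None` and the call fails, which matches its TODO note. A sketch of a version that could pass (hypothetical, assuming tokio's test macro and the `uuid` crate are available as dev-dependencies):

    #[cfg(test)]
    mod deployment_tests {
        use super::*;
        use crate::ops::{ExitNode, ExitNodeSpec};

        #[tokio::test]
        async fn owner_ref_points_back_at_exit_node() {
            let service = Service {
                metadata: ObjectMeta {
                    name: Some("test-service".to_string()),
                    ..Default::default()
                },
                ..Default::default()
            };
            // controller_owner_ref needs the node's own name and uid to be set
            let mut exit_node = ExitNode::new(
                "test-node",
                ExitNodeSpec {
                    host: "127.0.0.1".to_string(),
                    external_host: None,
                    port: 8080,
                    fingerprint: None,
                    auth: None,
                    chisel_image: None,
                    default_route: true,
                },
            );
            exit_node.metadata.uid = Some(uuid::Uuid::nil().to_string());

            let deployment = create_owned_deployment(&service, &exit_node).await.unwrap();
            assert_eq!(deployment.metadata.name.as_deref(), Some("chisel-test-service"));

            let orefs = deployment.metadata.owner_references.unwrap();
            assert_eq!(orefs[0].kind, "ExitNode");
            assert_eq!(orefs[0].name, "test-node");
        }
    }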
"chisel-test-service".to_string() 355 | // ); 356 | // let owner_ref = deployment.metadata.owner_references.unwrap().pop().unwrap(); 357 | // assert_eq!(owner_ref.kind, "ExitNode"); 358 | // assert_eq!(owner_ref.api_version, "v1"); 359 | // assert_eq!(owner_ref.name, ""); 360 | // assert_eq!(owner_ref.uid, uuid::Uuid::nil().to_string()); 361 | // } 362 | // } 363 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error; 2 | 3 | #[derive(Error, Debug)] 4 | pub enum ReconcileError { 5 | #[error("Kube Error: {0}")] 6 | KubeError(#[from] kube::Error), 7 | 8 | #[error("There are no exit nodes available to assign")] 9 | NoAvailableExitNodes, 10 | 11 | #[error("Exit Node being already assigned and its backoff time has not expired")] 12 | ExitNodeBackoff, 13 | 14 | #[error("There are no ports set on this LoadBalancer")] 15 | NoPortsSet, 16 | 17 | #[error("The provided cloud provisioner was not found in the cluster")] 18 | CloudProvisionerNotFound, 19 | #[error("The secret keys for the cloud provisioner were not found in the cluster")] 20 | CloudProvisionerSecretNotFound, 21 | 22 | #[error("The managed exit node spec does not have a password set")] 23 | ManagedExitNodeNoPasswordSet, 24 | 25 | #[error("The Secret could not be found in the resource's namespace")] 26 | SecretNotFound, 27 | 28 | #[error("The `auth` field is not set in the Secret intended for the password")] 29 | AuthFieldNotSet, 30 | 31 | #[error("The operator has encountered an unknown error, this is most likely a bug: {0}")] 32 | UnknownError(#[from] color_eyre::Report), 33 | } 34 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod cloud; 2 | pub mod daemon; 3 | pub mod deployment; 4 | pub mod error; 5 | pub mod ops; 6 | pub mod util; 7 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | 3 | use chisel_operator::daemon; 4 | use color_eyre::Result; 5 | use tracing::info; 6 | // use opentelemetry::sdk::export::metrics::StdoutExporterBuilder; 7 | // use opentelemetry_api::trace::{ 8 | // noop::{NoopTracer, NoopTracerProvider}, 9 | // TracerProvider, 10 | // }; 11 | // use tracing::info; 12 | use tracing_subscriber::{prelude::*, EnvFilter, Registry}; 13 | // Main entrypoint for operator 14 | 15 | // TODO: OpenTelemetry is broken 16 | 17 | // async fn init_tracer() -> opentelemetry::sdk::trace::Tracer { 18 | // let mut pipeline = opentelemetry_otlp::new_pipeline() 19 | // .tracing() 20 | // .with_trace_config(opentelemetry::sdk::trace::config().with_resource( 21 | // opentelemetry::sdk::Resource::new(vec![opentelemetry::KeyValue::new( 22 | // "service.name", 23 | // "chisel-operator", 24 | // )]), 25 | // )); 26 | 27 | // if let Ok(otlp_endpoint) = std::env::var("OPENTELEMETRY_ENDPOINT_URL") { 28 | // let channel = tonic::transport::Channel::from_shared(otlp_endpoint) 29 | // .unwrap() 30 | // .connect() 31 | // .await 32 | // .unwrap(); 33 | 34 | // pipeline = pipeline.with_exporter( 35 | // opentelemetry_otlp::new_exporter() 36 | // .tonic() 37 | // .with_channel(channel), 38 | // ) 39 | // } else { 40 | // pipeline = pipeline.with_exporter(opentelemetry_otlp::new_exporter().tonic()) 41 | // } 42 | 43 | // 
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod cloud;
2 | pub mod daemon;
3 | pub mod deployment;
4 | pub mod error;
5 | pub mod ops;
6 | pub mod util;
7 | 
--------------------------------------------------------------------------------
/src/main.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 | 
3 | use chisel_operator::daemon;
4 | use color_eyre::Result;
5 | use tracing::info;
6 | // use opentelemetry::sdk::export::metrics::StdoutExporterBuilder;
7 | // use opentelemetry_api::trace::{
8 | //     noop::{NoopTracer, NoopTracerProvider},
9 | //     TracerProvider,
10 | // };
11 | // use tracing::info;
12 | use tracing_subscriber::{prelude::*, EnvFilter, Registry};
13 | // Main entrypoint for the operator
14 | 
15 | // TODO: OpenTelemetry is broken
16 | 
17 | // async fn init_tracer() -> opentelemetry::sdk::trace::Tracer {
18 | //     let mut pipeline = opentelemetry_otlp::new_pipeline()
19 | //         .tracing()
20 | //         .with_trace_config(opentelemetry::sdk::trace::config().with_resource(
21 | //             opentelemetry::sdk::Resource::new(vec![opentelemetry::KeyValue::new(
22 | //                 "service.name",
23 | //                 "chisel-operator",
24 | //             )]),
25 | //         ));
26 | 
27 | //     if let Ok(otlp_endpoint) = std::env::var("OPENTELEMETRY_ENDPOINT_URL") {
28 | //         let channel = tonic::transport::Channel::from_shared(otlp_endpoint)
29 | //             .unwrap()
30 | //             .connect()
31 | //             .await
32 | //             .unwrap();
33 | 
34 | //         pipeline = pipeline.with_exporter(
35 | //             opentelemetry_otlp::new_exporter()
36 | //                 .tonic()
37 | //                 .with_channel(channel),
38 | //         )
39 | //     } else {
40 | //         pipeline = pipeline.with_exporter(opentelemetry_otlp::new_exporter().tonic())
41 | //     }
42 | 
43 | //     pipeline
44 | //         .install_batch(opentelemetry::runtime::Tokio)
45 | //         .unwrap()
46 | // }
47 | 
48 | #[tokio::main]
49 | async fn main() -> Result<()> {
50 |     color_eyre::install()?;
51 |     dotenvy::dotenv().ok();
52 | 
53 |     let logger_env = env::var("LOGGER").unwrap_or_else(|_| "logfmt".to_string());
54 | 
55 |     let logfmt_logger = tracing_logfmt::layer().boxed();
56 | 
57 |     let pretty_logger = tracing_subscriber::fmt::layer()
58 |         .pretty()
59 |         .with_thread_ids(true)
60 |         .with_thread_names(true)
61 |         .boxed();
62 | 
63 |     let json_logger = tracing_subscriber::fmt::layer().json().boxed();
64 | 
65 |     let compact_logger = tracing_subscriber::fmt::layer()
66 |         .compact()
67 |         .with_thread_ids(true)
68 |         .with_thread_names(true)
69 |         .boxed();
70 | 
71 |     let logger = match logger_env.as_str() {
72 |         "logfmt" => logfmt_logger,
73 |         "pretty" => pretty_logger,
74 |         "json" => json_logger,
75 |         "compact" => compact_logger,
76 |         _ => logfmt_logger,
77 |     };
78 | 
79 |     let env_filter = EnvFilter::try_from_default_env()
80 |         .or_else(|_| EnvFilter::try_new("info"))?
81 |         .add_directive("tower=off".parse().unwrap())
82 |         .add_directive("hyper=error".parse().unwrap())
83 |         .add_directive("kube_client=info".parse().unwrap())
84 |         .add_directive("h2=error".parse().unwrap())
85 |         .add_directive("tokio_util=error".parse().unwrap());
86 | 
87 |     // let telemetry = tracing_opentelemetry::layer().with_tracer(init_tracer().await);
88 |     let collector = Registry::default()
89 |         // .with(telemetry)
90 |         .with(logger)
91 |         .with(env_filter);
92 |     tracing::subscriber::set_global_default(collector)?;
93 | 
94 |     info!(
95 |         "Fyra Labs Chisel Operator, version {}",
96 |         env!("CARGO_PKG_VERSION")
97 |     );
98 |     info!("Starting up...");
99 | 
100 |     // Set up a handler for graceful pod termination
101 |     tokio::spawn(async move {
102 |         tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())
103 |             .unwrap()
104 |             .recv()
105 |             .await;
106 |         info!("Received termination signal, shutting down...");
107 |         std::process::exit(0);
108 |     });
109 | 
110 |     daemon::run().await
111 | }
--------------------------------------------------------------------------------
/src/ops.rs:
--------------------------------------------------------------------------------
1 | use std::collections::BTreeMap;
2 | 
3 | use crate::cloud::{
4 |     aws::AWSProvisioner, digitalocean::DigitalOceanProvisioner, linode::LinodeProvisioner,
5 |     Provisioner,
6 | };
7 | use color_eyre::Result;
8 | use itertools::Itertools;
9 | use k8s_openapi::api::core::v1::Secret;
10 | use kube::{core::ObjectMeta, Api, CustomResource};
11 | use schemars::JsonSchema;
12 | use serde::{Deserialize, Serialize};
13 | use tracing::debug;
14 | 
15 | pub const EXIT_NODE_NAME_LABEL: &str = "chisel-operator.io/exit-node-name";
16 | pub const EXIT_NODE_PROVISIONER_LABEL: &str = "chisel-operator.io/exit-node-provisioner";
17 | pub const EXIT_NODE_PROXY_PROTOCOL_LABEL: &str = "chisel-operator.io/proxy-protocol";
18 | 
19 | pub fn parse_provisioner_label_value<'a>(
20 |     default_namespace: &'a str,
21 |     value: &'a str,
22 | ) -> (&'a str, &'a str) {
23 |     if let Some(pair) = value.split('/').collect_tuple() {
24 |         pair
25 |     } else {
26 |         (default_namespace, value)
27 |     }
28 | }
29 | 
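`parse_provisioner_label_value` accepts either a bare provisioner name or a `namespace/name` pair. A quick check of both forms (hypothetical test, not in the repository):

    #[cfg(test)]
    mod label_tests {
        use super::parse_provisioner_label_value;

        #[test]
        fn splits_optional_namespace() {
            // Bare value: falls back to the default namespace
            assert_eq!(
                parse_provisioner_label_value("default", "my-provisioner"),
                ("default", "my-provisioner")
            );
            // "namespace/name" form: the embedded namespace wins
            assert_eq!(
                parse_provisioner_label_value("default", "infra/my-provisioner"),
                ("infra", "my-provisioner")
            );
        }
    }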
30 | #[derive(Serialize, Deserialize, Debug, CustomResource, Clone, JsonSchema)]
31 | #[kube(
32 |     group = "chisel-operator.io",
33 |     version = "v1",
34 |     kind = "ExitNode",
35 |     singular = "exitnode",
36 |     // struct = "ExitNode",
37 |     status = "ExitNodeStatus",
38 |     namespaced
39 | )]
40 | /// ExitNode is a custom resource that represents a Chisel exit node.
41 | /// It will be used as the reverse proxy for all services in the cluster.
42 | pub struct ExitNodeSpec {
43 |     /// Hostname or IP address of the chisel server
44 |     pub host: String,
45 |     /// Optional real external hostname/IP of the exit node
46 |     /// If not provided, the host field will be used
47 |     #[serde(default)]
48 |     pub external_host: Option<String>,
49 |     /// Control plane port of the chisel server
50 |     pub port: u16,
51 |     /// Optional but highly recommended fingerprint to perform host-key validation against the server's public key
52 |     pub fingerprint: Option<String>,
53 |     /// Optional authentication secret name to connect to the control plane
54 |     pub auth: Option<String>,
55 |     /// Optional value for the chisel client image used to connect to the chisel server
56 |     /// If not provided, jpillora/chisel:latest is used
57 |     pub chisel_image: Option<String>,
58 |     /// Optional boolean value for whether to make the exit node the default route for the cluster
59 |     /// If true, the exit node will be the default route for the cluster
60 |     /// default value is false
61 |     #[serde(default)]
62 |     pub default_route: bool,
63 | }
64 | 
65 | impl ExitNode {
66 |     /// For cloud provisioning: returns the name of the secret containing the cloud provider auth token.
67 |     ///
68 |     /// If it is not set, generates a new name using the ExitNode name.
69 |     pub fn get_secret_name(&self) -> String {
70 |         match &self.spec.auth {
71 |             Some(auth) => auth.clone(),
72 |             None => format!("{}-auth", self.metadata.name.as_ref().unwrap()),
73 |         }
74 |     }
75 | 
76 |     /// Returns the host: the status IP if one exists, otherwise the spec's host
77 |     pub fn get_host(&self) -> String {
78 |         // check if status.ip exists
79 |         // if it does, use that
80 |         // otherwise use self.host
81 |         debug!(status = ?self.status, "ExitNode status",);
82 |         match &self.status {
83 |             Some(status) => status.ip.clone(),
84 |             None => self.spec.host.clone(),
85 |         }
86 |     }
87 | 
88 |     /// For cloud provisioning:
89 |     ///
90 |     /// Generates a new secret with the `auth` key containing the auth string for chisel, in the same namespace as the ExitNode
91 |     pub async fn generate_secret(&self, password: String) -> Result<Secret> {
92 |         let secret_name = self.get_secret_name();
93 | 
94 |         let auth_tmpl = format!("{}:{}", crate::cloud::pwgen::DEFAULT_USERNAME, password);
95 | 
96 |         let mut map = BTreeMap::new();
97 |         map.insert(String::from("auth"), auth_tmpl);
98 | 
99 |         let secret = Secret {
100 |             metadata: ObjectMeta {
101 |                 name: Some(secret_name.clone()),
102 |                 namespace: self.metadata.namespace.clone(),
103 |                 ..Default::default()
104 |             },
105 |             string_data: Some(map),
106 |             ..Default::default()
107 |         };
108 | 
109 |         let client = kube::Client::try_default().await?;
110 | 
111 |         // add the secret to k8s
112 | 
113 |         let secret_api = Api::<Secret>::namespaced(
114 |             client.clone(),
115 |             &self.metadata.namespace.as_ref().unwrap().clone(),
116 |         );
117 | 
118 |         // force overwrite
119 | 
120 |         if let Ok(_existing_secret) = secret_api.get(&secret_name).await {
121 |             debug!("Secret already exists, deleting");
122 |             secret_api.delete(&secret_name, &Default::default()).await?;
123 |         }
124 | 
125 |         let secret = secret_api
126 |             .create(&kube::api::PostParams::default(), &secret)
127 |             .await?;
128 | 
129 |         Ok(secret)
130 |     }
131 | }
132 | #[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
133 | pub struct ExitNodeStatus {
134 |     pub provider: String,
135 |     pub name: String,
136 |     // pub password: String,
137 |     pub ip: String,
138 |     pub id: Option<String>,
139 |     // pub service_binding: Vec<ServiceBinding>,
140 | }
141 | 
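`get_host` prefers the provisioned IP in the status over the declared `spec.host`, which is what lets a cloud-managed node start out with a placeholder host. A hypothetical test using the `ExitNodeStatus::new` constructor defined just below (all values illustrative):

    #[cfg(test)]
    mod host_tests {
        use super::*;

        #[test]
        fn status_ip_overrides_spec_host() {
            let mut node = ExitNode::new(
                "node-a",
                ExitNodeSpec {
                    host: "placeholder.example.org".to_string(),
                    external_host: None,
                    port: 9090,
                    fingerprint: None,
                    auth: None,
                    chisel_image: None,
                    default_route: false,
                },
            );
            // No status yet: fall back to the spec's host
            assert_eq!(node.get_host(), "placeholder.example.org");

            // Once a provisioner fills in the status, its IP wins
            node.status = Some(ExitNodeStatus::new(
                "digitalocean".to_string(),
                "node-a".to_string(),
                "203.0.113.10".to_string(),
                None,
            ));
            assert_eq!(node.get_host(), "203.0.113.10");
        }
    }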
142 | impl ExitNodeStatus {
143 |     // pub fn find_svc_binding(&self, namespace: &str, name: &str) -> Option<ServiceBinding> {
144 |     //     self.service_binding
145 |     //         .iter()
146 |     //         .find(|svc| svc.namespace == namespace && svc.name == name)
147 |     //         .cloned()
148 |     // }
149 | 
150 |     // It is indeed being used in cloud/*
151 |     pub fn new(provider: String, name: String, ip: String, id: Option<String>) -> Self {
152 |         Self {
153 |             provider,
154 |             name,
155 |             ip,
156 |             id,
157 |             // service_binding: vec![],
158 |         }
159 |     }
160 | }
161 | 
162 | #[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
163 | 
164 | pub struct ServiceBinding {
165 |     pub namespace: String,
166 |     pub name: String,
167 | }
168 | 
169 | #[derive(Serialize, Deserialize, Debug, CustomResource, Clone, JsonSchema)]
170 | #[kube(
171 |     group = "chisel-operator.io",
172 |     version = "v1",
173 |     kind = "ExitNodeProvisioner",
174 |     singular = "exitnodeprovisioner",
175 |     // struct = "ExitNodeProvisioner",
176 |     namespaced
177 | )]
178 | /// ExitNodeProvisioner is a custom resource that represents a Chisel exit node provisioner on a cloud provider.
179 | pub enum ExitNodeProvisionerSpec {
180 |     DigitalOcean(DigitalOceanProvisioner),
181 |     Linode(LinodeProvisioner),
182 |     AWS(AWSProvisioner),
183 | }
184 | 
185 | impl ExitNodeProvisionerSpec {
186 |     pub fn get_inner(self) -> Box<(dyn Provisioner + Send + Sync)> {
187 |         // Can we somehow not have to match on this?
188 |         match self {
189 |             ExitNodeProvisionerSpec::DigitalOcean(a) => Box::new(a),
190 |             ExitNodeProvisionerSpec::Linode(a) => Box::new(a),
191 |             ExitNodeProvisionerSpec::AWS(a) => Box::new(a),
192 |         }
193 |     }
194 | }
195 | 
196 | pub trait ProvisionerSecret {
197 |     fn find_secret(&self) -> Result<Option<Secret>>;
198 | }
199 | 
200 | impl ExitNodeProvisioner {
201 |     pub async fn find_secret(&self) -> Result<Option<Secret>> {
202 |         let secret_name = match &self.spec {
203 |             ExitNodeProvisionerSpec::DigitalOcean(a) => a.auth.clone(),
204 |             ExitNodeProvisionerSpec::Linode(a) => a.auth.clone(),
205 |             ExitNodeProvisionerSpec::AWS(a) => a.auth.clone(),
206 |         };
207 | 
208 |         // Find a k8s secret with the name of the secret reference
209 | 
210 |         let client = kube::Client::try_default().await?;
211 | 
212 |         let secret = Api::<Secret>::namespaced(
213 |             client.clone(),
214 |             &self.metadata.namespace.as_ref().unwrap().clone(),
215 |         );
216 | 
217 |         let secret = secret.get(&secret_name).await?;
218 | 
219 |         Ok(Some(secret))
220 |     }
221 | }
--------------------------------------------------------------------------------
/src/util.rs:
--------------------------------------------------------------------------------
1 | use std::{collections::BTreeMap, sync::Arc};
2 | 
3 | use k8s_openapi::api::core::v1::Service;
4 | use kube::{api::ListParams, Api};
5 | 
6 | use crate::{daemon::Context, ops::ExitNode};
7 | use color_eyre::Result;
8 | 
9 | /// Fetch exit nodes from the Kubernetes API in a map keyed by IP address
10 | /// This is useful for quickly looking up exit nodes by IP address
11 | ///
12 | /// # Arguments
13 | ///
14 | /// * `ctx` - A shared context object
15 | /// * `namespace` - An optional namespace to filter exit nodes by. If None, all namespaces are looked up
16 | pub async fn get_exit_nodes_by_ip(
17 |     ctx: Arc<Context>,
18 |     namespace: Option<&str>,
19 | ) -> Result<BTreeMap<String, ExitNode>> {
20 |     let exit_node_api: Api<ExitNode> = {
21 |         if let Some(namespace) = namespace {
22 |             Api::namespaced(ctx.client.clone(), namespace)
23 |         } else {
24 |             Api::all(ctx.client.clone())
25 |         }
26 |     };
27 |     Ok(exit_node_api
28 |         .list(&ListParams::default().timeout(30))
29 |         .await?
30 |         .items
31 |         .into_iter()
32 |         .filter_map(|node| {
33 |             let host = node.get_host();
34 |             if let Some(_status) = &node.status {
35 |                 Some((host, node))
36 |             } else {
37 |                 None
38 |             }
39 |         })
40 |         .collect())
41 | }
42 | 
43 | pub fn get_svc_lb_ip(svc: &Service) -> Option<String> {
44 |     svc.status.as_ref().and_then(|status| {
45 |         status
46 |             .load_balancer
47 |             .as_ref()
48 |             .and_then(|lb| lb.ingress.as_ref())
49 |             .and_then(|ingress| ingress.first())
50 |             .and_then(|ingress| ingress.ip.as_ref())
51 |             .cloned()
52 |     })
53 | }
54 | 
55 | pub async fn get_svc_bound_exit_node(ctx: Arc<Context>, svc: &Service) -> Result<Option<ExitNode>> {
56 |     let exit_nodes = get_exit_nodes_by_ip(ctx, None).await?;
57 |     let svc_lb_ip = get_svc_lb_ip(svc);
58 |     Ok(svc_lb_ip.and_then(|ip| exit_nodes.get(&ip).cloned()))
59 | }
--------------------------------------------------------------------------------
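Finally, `get_svc_lb_ip` simply walks `status.loadBalancer.ingress[0].ip`. A hypothetical check of both the empty and populated cases (the IP is illustrative):

    #[cfg(test)]
    mod lb_ip_tests {
        use super::*;
        use k8s_openapi::api::core::v1::{LoadBalancerIngress, LoadBalancerStatus, ServiceStatus};

        #[test]
        fn reads_first_ingress_ip() {
            let mut svc = Service::default();
            // No status yet: nothing to read
            assert_eq!(get_svc_lb_ip(&svc), None);

            svc.status = Some(ServiceStatus {
                load_balancer: Some(LoadBalancerStatus {
                    ingress: Some(vec![LoadBalancerIngress {
                        ip: Some("203.0.113.10".to_string()),
                        ..Default::default()
                    }]),
                    ..Default::default()
                }),
                ..Default::default()
            });
            assert_eq!(get_svc_lb_ip(&svc), Some("203.0.113.10".to_string()));
        }
    }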