├── .github └── workflows │ ├── release.yml │ └── test.yml ├── .pre-commit-config.yaml ├── LICENSE ├── README.md ├── Torb.png ├── cli ├── .gitignore ├── Cargo.lock ├── Cargo.toml └── src │ ├── animation.rs │ ├── artifacts.rs │ ├── builder.rs │ ├── cli.rs │ ├── composer.rs │ ├── config.rs │ ├── deployer.rs │ ├── initializer.rs │ ├── main.rs │ ├── resolver.rs │ ├── resolver │ └── inputs.rs │ ├── utils.rs │ ├── vcs.rs │ └── watcher.rs └── license_header.txt /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Publish Torb CLI Release 2 | concurrency: 3 | group: tagged-release 4 | cancel-in-progress: true 5 | on: 6 | push: 7 | tags: 8 | - 'v[0-9]+.[0-9]+.[0-9]+-[0-9][0-9].[0-9][0-9]' 9 | jobs: 10 | build_nomac: 11 | strategy: 12 | matrix: 13 | include: 14 | - TARGET: aarch64-unknown-linux-gnu 15 | OS: ubuntu-latest 16 | - TARGET: x86_64-unknown-linux-gnu 17 | OS: ubuntu-latest 18 | name: Build Release Non Mac 19 | defaults: 20 | run: 21 | working-directory: ./cli 22 | runs-on: ${{matrix.OS}} 23 | steps: 24 | - name: Checkout 25 | uses: actions/checkout@v3 26 | - name: Build 27 | run: | 28 | cargo install -f cross 29 | cross build --release --target ${{matrix.TARGET}} 30 | - name: Zip Artifact 31 | run: | 32 | zip target/${{matrix.TARGET}}/release/torb_${{github.ref_name}}_${{matrix.TARGET}} target/${{matrix.TARGET}}/release/torb 33 | - name: Archive Release Artifact 34 | uses: actions/upload-artifact@v3 35 | with: 36 | name: torb_${{github.ref_name}}_${{matrix.TARGET}} 37 | path: cli/target/${{matrix.TARGET}}/release/torb_${{github.ref_name}}_${{matrix.TARGET}} 38 | 39 | build_mac: 40 | strategy: 41 | matrix: 42 | include: 43 | - TARGET: x86_64-apple-darwin 44 | OS: macos-12 45 | name: Build Release Mac 46 | defaults: 47 | run: 48 | working-directory: ./cli 49 | runs-on: ${{matrix.OS}} 50 | steps: 51 | - name: Checkout 52 | uses: actions/checkout@v3 53 | - name: Build 54 | run: | 55 | cargo build 
--release --target ${{matrix.TARGET}} 56 | - name: Zip Artifact 57 | run: | 58 | zip target/${{matrix.TARGET}}/release/torb_${{github.ref_name}}_${{matrix.TARGET}} target/${{matrix.TARGET}}/release/torb 59 | - name: Archive Release Artifact 60 | uses: actions/upload-artifact@v3 61 | with: 62 | name: torb_${{github.ref_name}}_${{matrix.TARGET}} 63 | path: cli/target/${{matrix.TARGET}}/release/torb_${{github.ref_name}}_${{matrix.TARGET}} 64 | 65 | publish: 66 | runs-on: ubuntu-latest 67 | name: Publish Release 68 | needs: [build_nomac, build_mac] 69 | steps: 70 | - name: Checkout 71 | uses: actions/checkout@v3 72 | - name: Download Build Artifacts 73 | uses: actions/download-artifact@v3 74 | with: 75 | path: build/releases 76 | - name: Publish release 77 | uses: eloquent/github-release-action@v3 78 | with: 79 | generateReleaseNotes: "true" 80 | reactions: +1,hooray,heart,rocket,eyes 81 | discussionCategory: Releases 82 | discussionReactions: +1,laugh,hooray,heart,rocket,eyes 83 | assets: | 84 | - path: build/releases/torb_${{github.ref_name}}_* 85 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Torb CLI Test Pipeline 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | defaults: 15 | run: 16 | working-directory: ./cli 17 | runs-on: ubuntu-latest 18 | 19 | steps: 20 | - name: Checkout 21 | uses: actions/checkout@v3 22 | - name: Build 23 | run: cargo build --verbose 24 | - name: Run tests 25 | run: cargo test --verbose 26 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/Lucas-C/pre-commit-hooks 3 | rev: v1.4.2 4 | hooks: 5 | - 
id: forbid-crlf 6 | - id: remove-crlf 7 | - id: forbid-tabs 8 | - id: remove-tabs 9 | args: [--whitespaces-count, '4'] 10 | - id: insert-license 11 | files: \.rs$ 12 | args: 13 | - --license-filepath 14 | - license_header.txt 15 | - --comment-style 16 | - // 17 | - --use-current-year 18 | - --detect-license-in-X-top-lines=10 19 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | TORB FOUNDRY BUSINESS SOURCE LICENSE AGREEMENT 2 | 3 | Business Source License 1.1 4 | Licensor: Torb Foundry 5 | Licensed Work: Torb v0.3.0-02.22 6 | The Licensed Work is © 2023-Present Torb Foundry 7 | 8 | 9 | Change License: GNU Affero General Public License Version 3 10 | Additional Use Grant: None 11 | Change Date: Feb 22, 2024 12 | 13 | License text copyright © 2023 MariaDB plc, All Rights Reserved. 14 | “Business Source License” is a trademark of MariaDB plc. 15 | 16 | Terms 17 | 18 | The Licensor hereby grants You the right to copy, modify, create derivative 19 | works, redistribute, and make non-production use of the Licensed Work. The 20 | Licensor may make an Additional Use Grant, above, permitting limited production 21 | use. 22 | 23 | Effective on the Change Date, or the fourth anniversary of the first publicly 24 | available distribution of a specific version of the Licensed Work under this 25 | License, whichever comes first, the Licensor hereby grants you rights under the 26 | terms of the Change License, and the rights granted in the paragraph above 27 | terminate. 28 | 29 | If your use of the Licensed Work does not comply with the requirements currently 30 | in effect as described in this License, you must purchase a commercial license 31 | from the Licensor, its affiliated entities, or authorized resellers, or you must 32 | refrain from using the Licensed Work. 
33 | 34 | All copies of the original and modified Licensed Work, and derivative works of 35 | the Licensed Work, are subject to this License. This License applies separately 36 | for each version of the Licensed Work and the Change Date may vary for each 37 | version of the Licensed Work released by Licensor. 38 | 39 | You must conspicuously display this License on each original or modified copy of 40 | the Licensed Work. If you receive the Licensed Work in original or modified form 41 | from a third party, the terms and conditions set forth in this License apply to 42 | your use of that work. 43 | 44 | Any use of the Licensed Work in violation of this License will automatically 45 | terminate your rights under this License for the current and all other versions 46 | of the Licensed Work. 47 | 48 | This License does not grant you any right in any trademark or logo of Licensor 49 | or its affiliates (provided that you may use a trademark or logo of Licensor as 50 | expressly required by this License). 51 | 52 | TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON AN 53 | "AS IS" BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, EXPRESS 54 | OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF MERCHANTABILITY, 55 | FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND TITLE. 56 | 57 | MariaDB hereby grants you permission to use this License's text to license your 58 | works, and to refer to it using the trademark "Business Source License", as long 59 | as you comply with the Covenants of Licensor below. 60 | 61 | Covenants of Licensor 62 | 63 | In consideration of the right to use this License's text and the "Business 64 | Source License" name and trademark, Licensor covenants to MariaDB, and to all 65 | other recipients of the licensed work to be provided by Licensor: 66 | 67 | 1. 
To specify as the Change License the GPL Version 2.0 or any later version, or 68 | a license that is compatible with GPL Version 2.0 or a later version, where 69 | "compatible" means that software provided under the Change License can be 70 | included in a program with software provided under GPL Version 2.0 or a later 71 | version. Licensor may specify additional Change Licenses without limitation. 72 | 73 | 2. To either: (a) specify an additional grant of rights to use that does not 74 | impose any additional restriction on the right granted in this License, as the 75 | Additional Use Grant; or (b) insert the text "None". 76 | 77 | 3. To specify a Change Date. 78 | 79 | 4. Not to modify this License in any other way. 80 | 81 | Notice 82 | 83 | The Business Source License (this document, or the "License") is not an Open 84 | Source license. However, the Licensed Work will eventually be made available 85 | under an Open Source License, as stated in this License. 86 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Torb 2 | 3 | Shoutout to my friend @SystemOverlord \[REAL NAME REDACTED\] for the anvil and sparks image for the org. This shoutout and the beer I have yet to get him satisfy our arrangement. 4 | 5 | ## What 6 | 7 | Torb is a tool for quickly setting up best practice development infrastructure along with development stacks that have reasonably sane defaults. Instead of taking a couple hours to get a project started and then a week to get your infrastructure correct, do all of that in a couple minutes. 8 | 9 | ## Mission 10 | 11 | Make it simple and easy for software engineers to create and deploy infrastructure with best practices in place. The ideal would be 10 minutes or less to do so and the point where I'd consider Torb a failure is if it takes more than an hour to have dev, staging and prod with best practices in place. 
12 | 13 | In addition to the above Torb needs to provide an easy way of adding additional infrastructure and requirements as a project scales. On day one you probably have logs and something like Sentry or Rollbar but you might not have great CI/CD or more complex distributed tracing or bill of materials or an artifact repository or whatever. It should be stupid simple to add these to an existing system. Infrastructure needs change as a project changes and we should be flexible enough to accomodate that. 14 | 15 | ## Getting Started 16 | 17 | First download the appropriate binary for your OS. 18 | 19 | After that's downloaded you can either run Torb from your preferred directory or copy it somewhere on your PATH. 20 | 21 | 1. Run `torb`. You'll see that the CLI is broken into nouns such as "repo" and "stack". Under each noun are the verbs that act upon it and let you do useful things. 22 | 2. Now run `torb init`. This will create a .torb folder located in your user's home directory. Inside of this we download a version of Terraform and pull our artifacts repo which contains community contributed Stacks, [Services](Torb#services) and [Projects](Torb#Projects). Finally this creates a `config.yaml` file which is where all of the CLI configuration is kept. 23 | 3. Now you're ready to begin setting up a project using Torb. 24 | 25 | ## Configuring Torb 26 | 27 | Earlier we mentioned a `config.yaml` file located in `~/.torb`, currently this file is pretty simple. It has two keys: 28 | 29 | - githubToken - a PAT with access to read, write and admin. 30 | - githubUser - The username of the user we are acting on behalf of. 31 | 32 | ## Repos 33 | 34 | ### Creating 35 | 36 | Torb can create both a local repo and a repo on a service such as GitHub automatically. This: 37 | 38 | - Creates the local folder 39 | - Initializes Git, 40 | - Creates a blank README 41 | - Creates the remote repository on GitHub. 
42 | - Links local and remote 43 | - Pushes first commit 44 | 45 | Currently this doesn't happen under an organization, but that is on the list of things to tackle as config. It may be sufficient to provid and Organization token in the above config, but as of now it has not been tested. 46 | 47 | **Note: Providing the full path for the local repo instead of just a name is currently required.** 48 | 49 | torb repo create ~/example/path/to/new_repo 50 | 51 | This will create a local repo `new_repo` at the path provided and handle everything listed above. 52 | 53 | ## Stacks 54 | 55 | ### Checking-out and Initializing 56 | 57 | #### Checking-out 58 | 59 | First change directory into the repo where you'd like the stack to live. 60 | 61 | Next list the available stacks with: 62 | 63 | torb stack list 64 | 65 | This will output something like: 66 | 67 | ``` 68 | Torb Stacks: 69 | 70 | - Flask App w/ React Frontend 71 | - Rook Ceph Cluster 72 | ``` 73 | 74 | For this example we're going to choose `flask-react` 75 | 76 | Run: 77 | 78 | torb stack checkout flask-react 79 | 80 | **Note: Depending on your shell you may need different quotes for names with spaces.** 81 | 82 | This will produce a new file `stack.yaml` in your current directory, if you cat the file you can see the structure of a stack: 83 | 84 | ``` 85 | ➜ test_repo git:(main) ✗ ls 86 | stack.yaml 87 | 88 | ➜ test_repo git:(main) ✗ cat stack.yaml 89 | version: v1.0.0 90 | kind: stack 91 | name: "Flask App w/ React Frontend" 92 | description: "Production ready flask web app." 
93 | services: 94 | postgres_1: 95 | service: postgresql 96 | values: {} 97 | inputs: 98 | port: "5432" 99 | user: postgres 100 | password: postgres 101 | database: postgres 102 | num_replicas: "1" 103 | nginx_ingress_controller_1: 104 | service: nginx-ingress-controller 105 | values: 106 | controller: 107 | admissionWebhooks: 108 | enabled: false 109 | inputs: {} 110 | projects: 111 | flaskapp_1: 112 | project: flaskapp 113 | inputs: 114 | name: flaskapp 115 | db_host: self.service.postgres_1.output.host 116 | db_port: self.service.postgres_1.output.port 117 | db_user: self.service.postgres_1.output.user 118 | db_pass: self.service.postgres_1.output.password 119 | db_database: self.service.postgres_1.output.database 120 | values: {} 121 | build: 122 | tag: latest 123 | registry: "" 124 | createreactapp_1: 125 | project: createreactapp 126 | inputs: 127 | name: createreactapp 128 | ingress: "true" 129 | values: 130 | ingress: 131 | hosts: 132 | - name: localhost 133 | path: / 134 | servicePort: "8000" 135 | extraEnvVars: 136 | - name: API_HOST 137 | value: self.project.flaskapp_1.output.host 138 | - name: API_PORT 139 | value: "5000" 140 | build: 141 | tag: latest 142 | registry: "" 143 | deps: 144 | services: 145 | - nginx_ingress_controller_1 146 | ``` 147 | 148 | A stack is comprised of `services` and `projects` as the basic units. We won't go into their structure deeply here, but approximately a service is something a user would just configure and deploy and a project is anything where a user is modifying source and building. 149 | 150 | Each stack is a DAG and dependencies can either be explicitly listed as they are above or Torb can figure them out implicitly based on references in the inputs sections and the values overrides in any of the units. Each unit in a stack is referenced internally by it's fully qualified name comprised of .. 
151 | 152 | When a stack is initialized, built or deployed the dependency chain is walked to the end and executed, this is then unwound all the way to the initial starting unit(s). 153 | 154 | #### Initializing 155 | 156 | After you've checked out a stack you need to initialize it before you can proceed to build and deploy the stack. Each unit can in it's definition include an initialization step to help set it up in your project. Most of the time for `projects` this means creating the folder, running a generator of somekind to create default code and copying over any config or build files it will need. If you need to examine a particular unit to see what it does you can check it out in [Torb Artifacts](https://github.com/TorbFoundry/torb-artifacts) 157 | 158 | To initialize your stack run: 159 | 160 | torb stack init stack.yaml 161 | 162 | With the stack that we're using your repo will look something like this: 163 | 164 | ``` 165 | ➜ test_repo git:(main) ✗ ls 166 | createreactapp flaskapp flaskapp_venv stack.yaml 167 | ``` 168 | 169 | You'll need to install npm and node for this tutorial, if you don't already have recent versions you can follow their [guide](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm) 170 | 171 | Each folder is the name of the unit in [Torb Artifacts](https://github.com/TorbFoundry/torb-artifacts) 172 | 173 | Depending on the unit, you'll need to build the artifact like `npm build` before you are able to deploy. Go ahead and change directory into `createreactapp` and run `npm run build`. Torb will not install programming languages, libraries or anything else for working with projects so make sure you have these things installed. 174 | 175 | 176 | Next we'll look at building and deploying. 177 | 178 | ### Building and Deploying 179 | 180 | #### Building 181 | 182 | Currently Torb supports building an image with Docker, or a build script, but not both. The resulting artifact must be a docker image regardless. 
Using a build script will let you do additional steps such as have torb automatically run the `npm run build` step from above in addition to building the image. 183 | 184 | Building your stack will recurse through the graph and run whatever is configured in the `build` section for the individual unit defined in the `stack.yaml` 185 | 186 | ``` 187 | flaskapp_1: 188 | project: flaskapp 189 | inputs: 190 | name: flaskapp 191 | db_host: self.service.postgres_1.output.host 192 | db_port: self.service.postgres_1.output.port 193 | db_user: self.service.postgres_1.output.user 194 | db_pass: self.service.postgres_1.output.password 195 | db_database: self.service.postgres_1.output.database 196 | values: {} 197 | build: 198 | tag: latest 199 | registry: "" 200 | deps: 201 | services: 202 | - postgres_1 203 | ``` 204 | 205 | **Note: To use a script instead, set script_path instead of tag and registry.** 206 | 207 | You can see in the above unit that build is configured to tag the docker image with `latest` and since the registry is empty it will push the image to the default docker hub repository you are currently signed in to. 208 | 209 | If you just want to have the image locally and skip pushing to a registry you can change registry to `local`. This is useful is you're running a kubernetes cluster that can read your local docker images like the cluster that can be enabled with Docker Destkop on mac and wsl. 210 | 211 | If you're running a kubernetes cluster on a remote server you will need to make sure the appropriate registry is configured here and that you are authenticated to it as this will also be used to pull the image on your cluster later on. 
212 | 213 | **Note: If you're using Minikube you will either need to use remote registry or load the local image with `minikube image load `** 214 | 215 | To build your stack run 216 | 217 | torb stack build stack.yaml 218 | 219 | **Note: If your image registry is a separate locally hosted service like the one found in our quickstart stack you will need to pass `--local-hosted-registry` 220 | 221 | Expect the first build to take some time as this will be building the docker images from scratch. 222 | 223 | If all goes well you should see output for the main IAC (Terraform) file torb generates for it's internal build state. 224 | 225 | **Note: All build state is kept in a hidden folder .torb_buildstate in your repo. Currently this isn't intended to be exposed to users, but that may change in the future. We want to add eject functionality if people choose to opt out of using Torb and at that time this will be more up front.*** 226 | 227 | 228 | #### Deploying 229 | 230 | ##### Foreword 231 | 232 | Torb currently deploys only to Kubernetes, we use Terraform to handle the deploy and bundle a version of terraform in the .torb folder in your home directory. This is so we can control what version Torb is using and keep it separate from any other version of Terraform you might already be using on your system. 233 | 234 | Torb respects the `KUBECONFIG` env var and if that is not set we default to `~/.kube/config` whatever your active context is will be used so make sure you're set to the right cluster. This also makes us fairly cluster agnostic as we rely on you to have access and connectivity to the cluster you want to deploy to. 235 | 236 | Deploys respect the dependency ordering set in the `stack.yaml` we use the same method for detecing implicit and explicit depencies in Torb. 237 | 238 | There are some tricky aspects of the deploy, we rely on the `helm provider` in Terraform and Helm in general to deploy to Kube. 
Helm itself is a good tool and handles a lot of complexities of putting together a set of artifacts in a convenient bundle, but is fairly limited and opaque when it comes to handling errors, timeouts, dealing with data persistance etc during a deploy. In that case it really is only a vehicle for applying a chart. This means we are limited by Helm AND by the respective chart maintainers for our artifacts. 239 | 240 | As an example, if the chart being applied isn't useing StatefulSets and includes PersistentVolumeClaims your PVC will be deleted when the chart is cleaned up. In a lot of ways it may be better to create a separate PVC under a StatefulSet in addition to the existing Deployment based chart and see if the chart supports passing a reference to that claim, versus relying on them to do the correct thing for your usecase. 241 | 242 | Torb does not at this moment have a way to enforce these practices but as we grow can put requirements in place for our artifacts that will help here. Hopefully this isn't too often exposed to you as end users, but is a concern for anyone who is creating stacks and units under [Torb Artifacts](https://github.com/TorbFoundry/torb-artifacts) 243 | 244 | Longer term we may work on something to replace using Helm while trying to support the chart format itself but for now it's the best we have. As an example we've looked into Kustomize and handling releases ourselves but need to further evaluate how much we will lose out on from the Helm ecosystem. 245 | 246 | ##### Deploy 247 | 248 | To deploy with Torb run 249 | 250 | torb stack deploy stack.yaml 251 | 252 | You should see Terraform initialize a workspace and begin to apply a plan. 253 | 254 | At this point you can wait until things finish or use Kubectl to check the status of the deployment. The namespace being deployed to can be configured at the stack level and a per unit level in the `stack.yaml`. 
255 | 256 | Currently we are using a local backend for Terraform but do plan to support popular cloud providers, and our own cloud solution. 257 | 258 | If all is good you will eventually see a success message from Terraform with a list of new infrastructure created, changed or removed. 259 | 260 | In the event of an issue the default timeout is 5 minutes and you can safely clean up releases in Helm without impacting Torb. 261 | 262 | #### Watcher 263 | 264 | Torb supports quick iteration with our filesystem watcher. Our watcher aggregates change events to files based on configured paths, and on a set interval, also configurable in your stack.yaml, will redeploy the services and projects if changes are found. Watcher configuration at the top level in the stack.yaml looks like: 265 | 266 | ``` 267 | watcher: 268 | paths: 269 | - "./emojee" 270 | interval: 8000 271 | patch: true 272 | ``` 273 | 274 | Paths are provided as a list and watched recursively, interval is in miliseconds and patch when true will change the imagePullPolicy to Always for all projects and services in your stack.yaml. All build files and general output like IaC files are kept separate from your main buildstate. However, Terraform's buildstate *is* copied between environments and the change to image pull policies is also reflected back in your main terraform buildstate. Doing all of this ensures when you go to build and deploy your stack normally any changes are properly reverted for the cluster you're using. 275 | 276 | You can run the watcher with 277 | 278 | torb stack watch stack.yaml 279 | 280 | **Note: If your image registry is a separate locally hosted service like the one found in our quickstart stack you will need to pass `--local-hosted-registry`** 281 | 282 | The watcher will initialize it's environment and redeploy any services with changes if patch is true. This may take a few moments as resource states are reconciled. 
283 | -------------------------------------------------------------------------------- /Torb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TorbFoundry/torb/6af1177a05a91d8675794b7b2269a3648c7a080f/Torb.png -------------------------------------------------------------------------------- /cli/.gitignore: -------------------------------------------------------------------------------- 1 | target/* 2 | -------------------------------------------------------------------------------- /cli/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "torb" 3 | version = "0.1.0" 4 | edition = "2021" 5 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 6 | 7 | [dependencies] 8 | tempfile = "3.3.0" 9 | dirs = "1.0.4" 10 | clap = { version = "3.1.6", features = ["derive"] } 11 | serde = { version = "1.0", features = ["derive"] } 12 | serde_yaml = "0.8" 13 | thiserror = "1.0" 14 | sha2 = "0.10.2" 15 | base64ct = { version = "1.5.1", features = ["alloc"] } 16 | serde_json = "1.0.85" 17 | hcl-rs = "0.10.0" 18 | indexmap = "1.9.1" 19 | memorable-wordlist = "0.1.7" 20 | ureq = { version = "2.5.0", features = ["json"] } 21 | once_cell = "1.15.0" 22 | chrono = "0.4.22" 23 | data-encoding = { version = "2.3.2", features = ["alloc"] } 24 | rayon = "1.6.1" 25 | notify = "5.1.0" 26 | tokio = { version = "1.26.0", features = ["full"] } 27 | colored = "2.0.0" 28 | rust-embed = "6.6.0" 29 | gif = "0.12.0" 30 | drawille = "0.3.0" 31 | image = "0.24.5" 32 | crossterm = "0.26.1" -------------------------------------------------------------------------------- /cli/src/animation.rs: -------------------------------------------------------------------------------- 1 | // Business Source License 1.1 2 | // Licensor: Torb Foundry 3 | // Licensed Work: Torb v0.3.7-03.23 4 | // The Licensed Work is © 2023-Present Torb 
Foundry 5 | // 6 | // Change License: GNU Affero General Public License Version 3 7 | // Additional Use Grant: None 8 | // Change Date: Feb 22, 2023 9 | // 10 | // See LICENSE file at https://github.com/TorbFoundry/torb/blob/main/LICENSE for details. 11 | 12 | use core::fmt::Display; 13 | use image::imageops::resize; 14 | use std::fmt::Debug; 15 | 16 | use crossterm::{cursor, terminal, ExecutableCommand, QueueableCommand}; 17 | use drawille::{Canvas, PixelColor}; 18 | use image::codecs::gif::GifDecoder; 19 | use image::{AnimationDecoder, ImageDecoder}; 20 | use std::io::{stdout, Write}; 21 | use std::sync::{ 22 | atomic::{AtomicBool, Ordering}, 23 | Arc, 24 | }; 25 | use std::{thread, time}; 26 | 27 | use crate::utils::{PrettyContext, PrettyExit}; 28 | 29 | const FRAME_HEIGHT: u16 = 16; 30 | 31 | pub struct BuilderAnimation {} 32 | 33 | pub trait Animation { 34 | fn do_with_animation(&self, f: Box Result>) -> Result 35 | where 36 | E: Debug + Display; 37 | 38 | fn start_animation( 39 | &self, 40 | animation: std::fs::File, 41 | kill_flag: Arc, 42 | ) -> std::thread::JoinHandle<()>; 43 | } 44 | 45 | impl BuilderAnimation { 46 | pub fn new() -> Self { 47 | BuilderAnimation {} 48 | } 49 | } 50 | impl Animation for BuilderAnimation 51 | where 52 | E: Debug + Display, 53 | { 54 | fn start_animation( 55 | &self, 56 | animation: std::fs::File, 57 | kill_flag: Arc, 58 | ) -> std::thread::JoinHandle<()> { 59 | let decoder = GifDecoder::new(animation).unwrap(); 60 | 61 | let (mut width, mut height) = decoder.dimensions(); 62 | 63 | let scale = 0.3; 64 | 65 | width = f32::floor(width as f32 * scale) as u32; 66 | height = f32::floor(height as f32 * scale) as u32; 67 | 68 | let mut canvas = Canvas::new(width, height); 69 | let frames = decoder.into_frames(); 70 | let frames_opt = frames.collect_frames().use_or_pretty_warn( 71 | PrettyContext::default() 72 | .warn("Warning! 
Unable to decode frames for animation GIF.") 73 | .pretty(), 74 | ); 75 | 76 | let kill_flag_clone = kill_flag.clone(); 77 | 78 | thread::spawn(move || { 79 | let mut thread_stdout = stdout(); 80 | 81 | loop { 82 | let kill_flag = kill_flag_clone.clone(); 83 | 84 | if kill_flag.load(Ordering::SeqCst) == true { 85 | thread_stdout.write("\r".as_bytes()).unwrap(); 86 | thread_stdout.flush().unwrap(); 87 | break; 88 | }; 89 | 90 | let frames = frames_opt.clone().unwrap_or(vec![]); 91 | 92 | for frame in frames.iter().cloned() { 93 | let mut img = frame.into_buffer(); 94 | img = resize(&img, width, height, image::imageops::FilterType::Gaussian); 95 | for x in 0..width { 96 | for y in 0..height { 97 | let pixel = img.get_pixel(x, y); 98 | let color = PixelColor::TrueColor { 99 | r: pixel[0], 100 | g: pixel[1], 101 | b: pixel[2], 102 | }; 103 | canvas.set_colored(x, y, color); 104 | } 105 | } 106 | 107 | let frame = canvas.frame(); 108 | 109 | thread_stdout.write_all(frame.as_bytes()).unwrap(); 110 | thread_stdout.flush().unwrap(); 111 | thread::sleep(time::Duration::from_millis(60)); 112 | canvas.clear(); 113 | 114 | // Move up the height of the frame in the terminal 115 | thread_stdout.queue(cursor::MoveUp(FRAME_HEIGHT)).unwrap(); 116 | } 117 | } 118 | }) 119 | } 120 | 121 | fn do_with_animation(&self, mut f: Box Result>) -> Result 122 | where 123 | E: Debug + Display, 124 | { 125 | let home_dir = dirs::home_dir().unwrap(); 126 | let torb_path = home_dir.join(".torb"); 127 | let repository_path = torb_path.join("repositories"); 128 | let repo = "torb-artifacts"; 129 | let gif_path = "torb_dwarf_animation.gif"; 130 | 131 | let artifacts_path = repository_path.join(repo); 132 | let animation_path = artifacts_path.join(gif_path); 133 | 134 | let animation_opt = std::fs::File::open(animation_path).use_or_pretty_warn( 135 | PrettyContext::default() 136 | .warn("Warning! 
Unable to open animation GIF.") 137 | .pretty(), 138 | ); 139 | 140 | let mut new_stdout = stdout(); 141 | new_stdout.execute(cursor::Hide).use_or_pretty_warn_send( 142 | PrettyContext::default() 143 | .warn("Warning! Unable to hide cursor for animation.") 144 | .pretty(), 145 | ); 146 | 147 | let kill_flag = Arc::new(AtomicBool::new(false)); 148 | let animation_thread_handle_opt = if animation_opt.is_some() { 149 | let animation = animation_opt.unwrap(); 150 | 151 | Some(>::start_animation( 152 | self, 153 | animation, 154 | kill_flag.clone(), 155 | )) 156 | } else { 157 | None 158 | }; 159 | 160 | let res = f(); 161 | kill_flag.store(true, Ordering::SeqCst); 162 | 163 | if animation_thread_handle_opt.is_some() { 164 | let handle = animation_thread_handle_opt.unwrap(); 165 | handle.join().use_or_pretty_warn_send( 166 | PrettyContext::default() 167 | .warn("Warning! Animation thread in an errored state when joining.") 168 | .pretty(), 169 | ); 170 | }; 171 | 172 | new_stdout.execute(cursor::Show).unwrap(); 173 | new_stdout 174 | .execute(terminal::Clear(terminal::ClearType::FromCursorDown)) 175 | .unwrap(); 176 | res 177 | } 178 | } 179 | -------------------------------------------------------------------------------- /cli/src/artifacts.rs: -------------------------------------------------------------------------------- 1 | // Business Source License 1.1 2 | // Licensor: Torb Foundry 3 | // Licensed Work: Torb v0.3.7-03.23 4 | // The Licensed Work is © 2023-Present Torb Foundry 5 | // 6 | // Change License: GNU Affero General Public License Version 3 7 | // Additional Use Grant: None 8 | // Change Date: Feb 22, 2023 9 | // 10 | // See LICENSE file at https://github.com/TorbFoundry/torb/blob/main/LICENSE for details. 
//! Build-artifact representation for Torb stacks: typed unit inputs and
//! their (de)serialization, the resolved artifact/node graph, and
//! build-file read/write helpers.
//!
//! NOTE(review): this block was reconstructed from a dump in which generic
//! arguments were partially stripped; reconstructed type parameters (e.g.
//! `Vec<InitStep>`, `Result<InputAddress, String>`) should be confirmed
//! against version control.

use crate::composer::InputAddress;
use crate::resolver::inputs::{InputResolver, NO_INITS_FN};
use crate::resolver::{resolve_stack, NodeDependencies, StackGraph};
use crate::utils::{buildstate_path_or_create, checksum, kebab_to_snake_case, snake_case_to_kebab};
use crate::watcher::{WatcherConfig};

use data_encoding::BASE32;
use indexmap::{IndexMap, IndexSet};
use memorable_wordlist;
use once_cell::sync::Lazy;
use serde::ser::SerializeSeq;
use serde::{de, de::SeqAccess, de::Visitor, Deserialize, Deserializer, Serialize};
use serde_yaml::{self};
use sha2::{Digest, Sha256};
use std::fs;
use std::io::Write;
use thiserror::Error;

/// Errors surfaced while loading or verifying build artifacts.
#[derive(Error, Debug)]
pub enum TorbArtifactErrors {
    #[error("Hash of loaded build file does not match hash of file on disk.")]
    LoadChecksumFailed,
}

/// A group of init commands run for a node before deployment.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct InitStep {
    pub steps: Vec<String>,
}

/// How a node is built. All fields default to empty strings so unit
/// authors only specify what they need; `build_node` (in builder.rs)
/// treats a non-empty `dockerfile` as taking precedence over `script_path`.
#[derive(Serialize, Deserialize, Clone, Debug, Default)]
pub struct BuildStep {
    #[serde(default = "String::new")]
    pub script_path: String,
    #[serde(default = "String::new")]
    pub dockerfile: String,
    #[serde(default = "String::new")]
    pub tag: String,
    #[serde(default = "String::new")]
    pub registry: String,
}

/// The set of type names a unit author may declare in an input spec.
fn get_types() -> IndexSet<&'static str> {
    IndexSet::from(["bool", "array", "string", "numeric"])
}

/// Lazily-built global of the valid input-spec type names.
pub static TYPES: Lazy<IndexSet<&'static str>> = Lazy::new(get_types);

/// Numeric wrapper distinguishing unsigned ints, negative ints and floats,
/// mirroring how serde_yaml classifies YAML numbers (`is_u64`/`is_i64`/`is_f64`).
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum TorbNumeric {
    Int(u64),
    NegInt(i64),
    Float(f64),
}

/// A single user-supplied input value. Arrays are flat; nesting is
/// rejected during (de)serialization with a panic.
#[derive(Debug, Clone)]
pub enum TorbInput {
    Bool(bool),
    Array(Vec<TorbInput>),
    String(String),
    Numeric(TorbNumeric),
}

impl From<bool> for TorbInput {
    fn from(value: bool) -> Self {
        TorbInput::Bool(value)
    }
}

impl From<u64> for TorbInput {
    fn from(value: u64) -> Self {
        let wrapper = TorbNumeric::Int(value);

        TorbInput::Numeric(wrapper)
    }
}

impl From<i64> for TorbInput {
    fn from(value: i64) -> Self {
        let wrapper = TorbNumeric::NegInt(value);

        TorbInput::Numeric(wrapper)
    }
}

impl From<f64> for TorbInput {
    fn from(value: f64) -> Self {
        let wrapper = TorbNumeric::Float(value);

        TorbInput::Numeric(wrapper)
    }
}

impl From<String> for TorbInput {
    fn from(value: String) -> Self {
        TorbInput::String(value)
    }
}

impl From<&str> for TorbInput {
    fn from(value: &str) -> Self {
        TorbInput::String(value.to_string())
    }
}

/// Lifts a vector of convertible elements into `TorbInput::Array`.
impl<T> From<Vec<T>> for TorbInput
where
    TorbInput: From<T>,
    T: Clone,
{
    fn from(value: Vec<T>) -> Self {
        let mut new_vec = Vec::<TorbInput>::new();

        for item in value.iter().cloned() {
            new_vec.push(Into::<TorbInput>::into(item));
        }

        TorbInput::Array(new_vec)
    }
}

impl TorbInput {
    /// Serializes the input for use in init steps: the value is JSON-encoded
    /// and then encoded *again* as a JSON string literal, producing a quoted,
    /// shell-safe representation of the JSON payload.
    pub fn serialize_for_init(&self) -> String {

        let serde_val = serde_json::to_string(self).unwrap();

        serde_json::to_string(&serde_val).expect("Unable to serialize TorbInput to JSON, this is a bug and should be reported to the project maintainer(s).")
    }

}

/// Declared shape of one unit input: its type name (`typing`, one of
/// [`TYPES`]), the default value used when the caller omits it, and the
/// mapping string the resolved value is substituted into.
#[derive(Debug, Clone)]
pub struct TorbInputSpec {
    typing: String,
    default: TorbInput,
    mapping: String,
}

/// One node of the resolved stack graph: identity, build/deploy
/// configuration, resolved inputs and (implicit) dependency edges.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ArtifactNodeRepr {
    // Fully-qualified name, "<stack>.<kind>.<name>".
    #[serde(default = "String::new")]
    pub fqn: String,
    pub name: String,
    pub version: String,
    pub kind: String,
    pub lang: Option<String>,
    #[serde(alias = "init")]
    pub init_step: Option<Vec<InitStep>>,
    #[serde(alias = "build")]
    pub build_step: Option<BuildStep>,
    #[serde(alias = "deploy")]
    pub deploy_steps: IndexMap<String, Option<IndexMap<String, String>>>,
    // key -> (mapping template, resolved value); see `map_inputs`.
    #[serde(default = "IndexMap::new")]
    pub mapped_inputs: IndexMap<String, (String, TorbInput)>,
    #[serde(alias = "inputs", default = "IndexMap::new")]
    pub input_spec: IndexMap<String, TorbInputSpec>,
    #[serde(default = "Vec::new")]
    pub outputs: Vec<String>,
    #[serde(default = "Vec::new")]
    pub dependencies: Vec<ArtifactNodeRepr>,
    // FQNs referenced through input/value addresses rather than declared
    // dependency lists; populated by `discover_and_set_implicit_dependencies`.
    #[serde(default = "IndexSet::new")]
    pub implicit_dependency_fqns: IndexSet<String>,
    #[serde(skip)]
    pub dependency_names: NodeDependencies,
    #[serde(default = "String::new")]
    pub file_path: String,
    #[serde(skip)]
    pub stack_graph: Option<StackGraph>,
    pub files: Option<Vec<String>>,
    #[serde(default = "String::new")]
    pub values: String,
    pub namespace: Option<String>,
    pub source: Option<String>,
    #[serde(default="bool::default")]
    pub expedient: bool
}

/// serde visitor turning YAML scalars/sequences into [`TorbInput`].
///
/// NOTE(review): the sequence and signed-integer paths `panic!` instead of
/// returning `de::Error`, so malformed input aborts the CLI rather than
/// producing a recoverable deserialization error.
struct TorbInputDeserializer;
impl<'de> Visitor<'de> for TorbInputDeserializer {
    type Value = TorbInput;

    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("a numeric value.")
    }

    /// Builds `TorbInput::Array` from a YAML sequence of scalars; nulls,
    /// nested sequences and mappings are rejected (by panic, see above).
    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
    where
        A: SeqAccess<'de>, {
        let mut container = Vec::<TorbInput>::new();

        loop {
            let val_opt: Option<serde_yaml::Value> = seq.next_element()?;

            if val_opt.is_some() {
                let value = val_opt.unwrap();

                let input = match value {
                    serde_yaml::Value::String(val) => {
                        TorbInput::String(val)
                    }
                    serde_yaml::Value::Bool(val) => {
                        TorbInput::Bool(val)
                    },
                    serde_yaml::Value::Number(val) => {
                        // Classify exactly as serde_yaml does: float first,
                        // then unsigned, then (necessarily negative) signed.
                        if val.is_f64() {
                            TorbInput::Numeric(TorbNumeric::Float(val.as_f64().unwrap()))
                        } else if val.is_u64() {
                            TorbInput::Numeric(TorbNumeric::Int(val.as_u64().unwrap()))
                        } else {
                            TorbInput::Numeric(TorbNumeric::NegInt(val.as_i64().unwrap()))
                        }
                    },
                    serde_yaml::Value::Null => {
                        panic!("Null values not acceptable as element in type Array.")
                    },
                    serde_yaml::Value::Sequence(_) => {
                        panic!("Nested Array types are not currently supported.")
                    }
                    serde_yaml::Value::Mapping(_val) => {
                        panic!("Map types are not currently supported as array elements. (Or at all.)")
                    }
                };

                container.push(input);
            } else {
                break;
            }
        }

        let input = TorbInput::Array(container);

        Ok(input)
    }

    fn visit_f32<E>(self, v: f32) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        Ok(TorbInput::Numeric(TorbNumeric::Float(v.into())))
    }

    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        Ok(TorbInput::String(v.to_string()))
    }

    fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        Ok(TorbInput::String(v))
    }

    fn visit_bool<E>(self, v: bool) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        Ok(TorbInput::Bool(v))
    }

    fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        Ok(TorbInput::Numeric(TorbNumeric::Float(v.into())))
    }

    fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        Ok(TorbInput::Numeric(TorbNumeric::Int(v.into())))
    }
    fn visit_u32<E>(self, v: u32) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        Ok(TorbInput::Numeric(TorbNumeric::Int(v.into())))
    }
    fn visit_u16<E>(self, v: u16) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        Ok(TorbInput::Numeric(TorbNumeric::Int(v.into())))
    }

    fn visit_u8<E>(self, v: u8) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        Ok(TorbInput::Numeric(TorbNumeric::Int(v.into())))
    }

    // The signed visitors only accept negative values; positive signed
    // integers are expected to arrive through the unsigned visitors.
    fn visit_i8<E>(self, v: i8) -> Result<Self::Value, E>
    where
        E: de::Error, {
        if v > 0 {
            panic!("Only for negatives.")
        }
        Ok(TorbInput::Numeric(TorbNumeric::NegInt(v.into())))
    }
    fn visit_i16<E>(self, v: i16) -> Result<Self::Value, E>
    where
        E: de::Error, {
        if v > 0 {
            panic!("Only for negatives.")
        }
        Ok(TorbInput::Numeric(TorbNumeric::NegInt(v.into())))
    }
    fn visit_i32<E>(self, v: i32) -> Result<Self::Value, E>
    where
        E: de::Error, {
        if v > 0 {
            panic!("Only for negatives.")
        }
        Ok(TorbInput::Numeric(TorbNumeric::NegInt(v.into())))
    }
    fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
    where
        E: de::Error, {
        if v > 0 {
            panic!("Only for negatives.")
        }
        Ok(TorbInput::Numeric(TorbNumeric::NegInt(v.into())))
    }
}

impl<'de> Deserialize<'de> for TorbInput {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        deserializer.deserialize_any(TorbInputDeserializer)
    }
}

/// serde visitor turning either a bare mapping string or a
/// `[type, default, mapping]` 3-element sequence into [`TorbInputSpec`].
struct TorbInputSpecDeserializer;
impl<'de> Visitor<'de> for TorbInputSpecDeserializer {
    type Value = TorbInputSpec;

    fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        formatter.write_str("a list.")
    }

    /// A bare string is shorthand for a string-typed spec with an empty
    /// default whose mapping is the string itself.
    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        let default = TorbInput::String(String::new());
        let mapping = v.to_string();
        let typing = "string".to_string();

        Ok(TorbInputSpec {
            typing,
            default,
            mapping,
        })
    }

    /// Expects exactly `[type, default, mapping]`, where `default` is parsed
    /// according to the declared `type`.
    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
    where
        A: SeqAccess<'de>,
    {
        let mut count = 0;
        let mut typing = String::new();
        let mut mapping = String::new();
        let mut default = TorbInput::String(String::new());

        // NOTE(review): `SeqAccess::size_hint` is the number of *remaining*
        // elements and may legitimately be Some(3) here even for longer
        // inputs mid-stream; compare with the serde docs before relying on
        // this as a length check.
        if seq.size_hint().is_some() && seq.size_hint() != Some(3) {
            return Err(de::Error::custom(format!(
                "Didn't find the right sequence of values to create a TorbInputSpec."
            )));
        }

        while count < 3 {
            match count {
                // Element 0: the type name, validated against TYPES.
                0 => {
                    let value_opt = seq.next_element::<String>()?;

                    let value = if !value_opt.is_some() {
                        return Err(de::Error::custom(format!(
                            "Didn't find the right sequence of values to create a TorbInputSpec."
                        )));
                    } else {
                        value_opt.unwrap()
                    };

                    if !TYPES.contains(value.as_str()) {
                        return Err(de::Error::custom(format!(
                            "Please set a valid type for your input spec. Valid types are {:#?}. \n If you see this as a regular user, a unit author has included a broken spec.",
                            TYPES
                        )));
                    }

                    typing = value;
                    count += 1;
                }
                // Element 1: the default value, decoded per the type name.
                1 => {
                    match typing.as_str() {
                        "bool" => {
                            let value_opt = seq.next_element::<bool>()?;

                            let value = if !value_opt.is_some() {
                                return Err(de::Error::custom(format!(
                                    "Didn't find the right sequence of values to create a TorbInputSpec."
                                )));
                            } else {
                                value_opt.unwrap()
                            };

                            default = TorbInput::Bool(value);
                        }
                        "string" => {
                            let value_opt = seq.next_element::<String>()?;

                            let value = if !value_opt.is_some() {
                                return Err(de::Error::custom(format!(
                                    "Didn't find the right sequence of values to create a TorbInputSpec."
                                )));
                            } else {
                                value_opt.unwrap()
                            };

                            default = TorbInput::String(value);
                        }
                        "array" => {
                            let value = seq.next_element::<serde_yaml::Sequence>()?.unwrap();

                            let mut new_vec = Vec::<TorbInput>::new();

                            // Flat scalar elements only; anything else panics.
                            for ele in value.iter() {
                                match ele {
                                    serde_yaml::Value::Bool(val) => {
                                        new_vec.push(TorbInput::Bool(val.clone()))
                                    }
                                    serde_yaml::Value::Number(val) => {
                                        let numeric = if val.is_f64() {
                                            TorbNumeric::Float(val.as_f64().unwrap())
                                        } else if val.is_u64() {
                                            TorbNumeric::Int(val.as_u64().unwrap())
                                        } else {
                                            TorbNumeric::NegInt(val.as_i64().unwrap())
                                        };

                                        new_vec.push(TorbInput::Numeric(numeric))
                                    }
                                    serde_yaml::Value::String(val) => {
                                        new_vec.push(TorbInput::String(val.clone()))
                                    }
                                    _ => panic!("Typing was array, array elements are not a supported type. Supported array types are bool, numeric and string. Nesting is not supported.")
                                }
                            }

                            default = TorbInput::Array(new_vec);
                        }
                        "numeric" => {
                            let value = seq.next_element::<serde_yaml::Value>()?.unwrap();
                            if let serde_yaml::Value::Number(val) = value {
                                let numeric = if val.is_f64() {
                                    TorbNumeric::Float(val.as_f64().unwrap())
                                } else if val.is_u64() {
                                    TorbNumeric::Int(val.as_u64().unwrap())
                                } else {
                                    TorbNumeric::NegInt(val.as_i64().unwrap())
                                };
                                default = TorbInput::Numeric(numeric);
                            } else {
                                panic!("Typing was numeric, default value was not numeric.")
                            }

                        }
                        _ => {
                            panic!("Type not supported by Torb! Supported types are String, Numeric, Array, Bool.")
                        }
                    }
                    count += 1;
                }
                // Element 2: the mapping string.
                2 => {
                    let value_opt = seq.next_element::<String>()?;

                    let value = if !value_opt.is_some() {
                        return Err(de::Error::custom(format!(
                            "Didn't find the right sequence of values to create a TorbInputSpec."
                        )));
                    } else {
                        value_opt.unwrap()
                    };

                    mapping = value;
                    count += 1;
                }
                _ => {
                    return Err(de::Error::custom(format!(
                        "Didn't find the right sequence of values to create a TorbInputSpec."
                    )));
                }
            }
        }

        let new_obj = TorbInputSpec {
            typing,
            mapping,
            default,
        };

        Ok(new_obj)
    }
}

impl<'de> Deserialize<'de> for TorbInputSpec {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        deserializer.deserialize_any(TorbInputSpecDeserializer)
    }
}

/// Serializes a [`TorbInput`] back to the scalar/sequence shape the
/// deserializer accepts. Nested arrays panic (mirroring deserialization).
impl Serialize for TorbInput {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer {

        match self {
            TorbInput::Numeric(val) => {
                match val {
                    TorbNumeric::Float(val) => {
                        serializer.serialize_f64(val.clone())
                    },
                    TorbNumeric::Int(val) => {
                        serializer.serialize_u64(val.clone())
                    },
                    TorbNumeric::NegInt(val) => {
                        serializer.serialize_i64(val.clone())
                    }
                }
            },
            TorbInput::Array(val) => {
                let len = val.len();
                let mut seq = serializer.serialize_seq(Some(len))?;

                // Convert each element to a serde_yaml::Value scalar first.
                for input in val.iter().cloned() {
                    let expr = match input {
                        TorbInput::String(val) => serde_yaml::Value::String(val),
                        TorbInput::Bool(val) => serde_yaml::Value::Bool(val),
                        TorbInput::Numeric(val) => {
                            match val {
                                TorbNumeric::Float(val) => serde_yaml::Value::Number(serde_yaml::Number::from(val)),
                                TorbNumeric::Int(val) => serde_yaml::Value::Number(serde_yaml::Number::from(val)),
                                TorbNumeric::NegInt(val) => serde_yaml::Value::Number(serde_yaml::Number::from(val))
                            }
                        }
                        TorbInput::Array(_val) => {
                            panic!("Nested array types are not supported.")
                        }
                    };

                    seq.serialize_element(&expr)?;
                }
                seq.end()
            },
            TorbInput::String(val) => {
                serializer.serialize_str(val)
            },
            TorbInput::Bool(val) => {
                serializer.serialize_bool(val.clone())
            }
        }

    }
}

/// Serializes a spec as the canonical `[type, default, mapping]` triple.
impl Serialize for TorbInputSpec {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer {
        let mut seq = serializer.serialize_seq(Some(3))?;

        let typing = self.typing.clone();
        let default = self.default.clone();
        let mapping = self.mapping.clone();

        seq.serialize_element(&typing)?;
        seq.serialize_element(&default)?;
        seq.serialize_element(&mapping)?;
        seq.end()

    }
}

impl ArtifactNodeRepr {
    /// Returns the node's display name: the mapped "name" input when it is a
    /// string, otherwise the node's own name; converted between kebab and
    /// snake case as requested.
    pub fn display_name(&self, kebab: bool) -> String {
        let name = self.mapped_inputs.get("name").map(|(_, input)| {
            if let crate::artifacts::TorbInput::String(val) = input.clone() {
                val
            }
            else {
                self.name.clone()
            }
        }).or(Some(self.name.clone())).unwrap();

        if kebab {
            snake_case_to_kebab(&name)
        } else {
            kebab_to_snake_case(&name)
        }
    }

    /// Plain constructor; dependency fields start empty and are filled in
    /// later by graph resolution.
    #[allow(dead_code)]
    pub fn new(
        fqn: String,
        name: String,
        version: String,
        kind: String,
        lang: Option<String>,
        init_step: Option<Vec<InitStep>>,
        build_step: Option<BuildStep>,
        deploy_steps: IndexMap<String, Option<IndexMap<String, String>>>,
        inputs: IndexMap<String, (String, TorbInput)>,
        input_spec: IndexMap<String, TorbInputSpec>,
        outputs: Vec<String>,
        file_path: String,
        stack_graph: Option<StackGraph>,
        files: Option<Vec<String>>,
        values: String,
        namespace: Option<String>,
        source: Option<String>,
        expedient: bool
    ) -> ArtifactNodeRepr {
        ArtifactNodeRepr {
            fqn: fqn,
            name: name,
            version: version,
            kind: kind,
            lang: lang,
            init_step: init_step,
            build_step: build_step,
            deploy_steps: deploy_steps,
            mapped_inputs: inputs,
            input_spec: input_spec,
            outputs: outputs,
            implicit_dependency_fqns: IndexSet::new(),
            dependencies: Vec::new(),
            dependency_names: NodeDependencies {
                services: None,
                projects: None,
                stacks: None,
            },
            file_path,
            stack_graph,
            files,
            values,
            namespace,
            source,
            expedient
        }
    }

    /// Converts a successfully parsed input address into the FQN
    /// "<graph>.<type>.<name>"; unparseable addresses yield None.
    fn address_to_fqn(
        graph_name: &String,
        addr_result: Result<InputAddress, String>,
    ) -> Option<String> {
        match addr_result {
            Ok(addr) => {
                let fqn = format!(
                    "{}.{}.{}",
                    graph_name,
                    addr.node_type.clone(),
                    addr.node_name.clone()
                );

                Some(fqn)
            }
            Err(_s) => None,
        }
    }

    /// Scans this node's inputs and values (via [`InputResolver`]) for
    /// addresses referencing other nodes, recording every distinct foreign
    /// FQN in `implicit_dependency_fqns`. Self-references are ignored.
    pub fn discover_and_set_implicit_dependencies(
        &mut self,
        graph_name: &String,
    ) -> Result<(), Box<dyn std::error::Error>> {
        let mut implicit_deps_inputs = IndexSet::new();

        // Collects FQNs referenced from input addresses; the resolver only
        // cares about the side effect, so the closure returns "".
        let inputs_fn = |_spec: &String, val: Result<InputAddress, String>| -> String {
            let fqn_option = ArtifactNodeRepr::address_to_fqn(graph_name, val);

            if fqn_option.is_some() {
                let fqn = fqn_option.unwrap();

                if fqn != self.fqn {
                    implicit_deps_inputs.insert(fqn);
                }
            };

            "".to_string()
        };

        let mut implicit_deps_values = IndexSet::new();

        // Same collection pass over the node's helm values.
        let values_fn = |addr: Result<InputAddress, String>| -> String {
            let fqn_option = ArtifactNodeRepr::address_to_fqn(graph_name, addr);

            if fqn_option.is_some() {
                let fqn = fqn_option.unwrap();
                if fqn != self.fqn {
                    implicit_deps_values.insert(fqn);
                }
            };

            "".to_string()
        };

        let (_, _, _) =
            InputResolver::resolve(&self, Some(values_fn), Some(inputs_fn), NO_INITS_FN)?;

        let unioned_deps = implicit_deps_inputs.union(&mut implicit_deps_values);

        self.implicit_dependency_fqns = unioned_deps.cloned().collect();

        Ok(())
    }

    /// Validates `inputs` against this node's input spec and stores the
    /// mapped result. Panics with the list of valid keys on failure; nodes
    /// without a spec get empty mapped inputs (with a warning if inputs
    /// were supplied anyway).
    pub fn validate_map_and_set_inputs(&mut self, inputs: IndexMap<String, TorbInput>) {
        if !self.input_spec.is_empty() {
            let input_spec = &self.input_spec.clone();

            match ArtifactNodeRepr::validate_inputs(&inputs, &input_spec) {
                Ok(_) => {
                    self.mapped_inputs = ArtifactNodeRepr::map_inputs(&inputs, &input_spec);
                }
                Err(e) => panic!(
                    "Input validation failed: {} is not a valid key. Valid Keys: {}",
                    e,
                    input_spec
                        .keys()
                        .into_iter()
                        .map(AsRef::as_ref)
                        .collect::<Vec<&str>>()
                        .join(", ")
                ),
            }
        } else {
            if !inputs.is_empty() {
                println!(
                    "Warning: {} has inputs but no input spec, passing empty values.",
                    &self.fqn
                );
            }

            self.mapped_inputs = IndexMap::<String, (String, TorbInput)>::new();
        }
    }

    /// Checks each supplied input has a spec entry and a matching type.
    /// Strings that parse as input addresses skip the type check since
    /// their resolved type is unknown here. The Err payload is the message
    /// interpolated into the `validate_map_and_set_inputs` panic.
    fn validate_inputs(
        inputs: &IndexMap<String, TorbInput>,
        spec: &IndexMap<String, TorbInputSpec>,
    ) -> Result<(), String> {
        for (key, val) in inputs.iter() {
            if !spec.contains_key(key) {
                return Err(key.clone());
            }

            let input_spec = spec.get(key).unwrap();

            let val_type = match val {
                TorbInput::String(val) => match InputAddress::try_from(val.as_str()) {
                    Ok(_) => "input_address",
                    _ => "string",
                },
                TorbInput::Bool(_val) => "bool",
                TorbInput::Numeric(_val) => "numeric",
                TorbInput::Array(_val) => "array",
            };

            if val_type != "input_address" && input_spec.typing != val_type {
                return Err(format!(
                    "{key} is type {val_type} but is supposed to be {}",
                    input_spec.typing
                ));
            }
        }

        Ok(())
    }

    /// Pairs every spec key with its mapping and either the supplied value
    /// or the spec's default. Iterates the spec, so unspecced inputs are
    /// dropped and every spec key is always present.
    fn map_inputs(
        inputs: &IndexMap<String, TorbInput>,
        spec: &IndexMap<String, TorbInputSpec>,
    ) -> IndexMap<String, (String, TorbInput)> {
        let mut mapped_inputs = IndexMap::<String, (String, TorbInput)>::new();

        for (key, value) in spec.iter() {
            let input = inputs.get(key).unwrap_or(&value.default);
            mapped_inputs.insert(key.to_string(), (value.mapping.clone(), input.clone()));
        }

        mapped_inputs
    }
}

/// Fully resolved build artifact for one stack: tool versions, source
/// commits, the node map and the ordered deploy roots.
#[derive(Serialize, Deserialize, Clone)]
pub struct ArtifactRepr {
    pub torb_version: String,
    pub helm_version: String,
    pub terraform_version: String,
    pub commits: IndexMap<String, String>,
    pub stack_name: String,
    // Artifact of the meta/parent stack, if any (boxed to break recursion).
    pub meta: Box<Option<ArtifactRepr>>,
    pub deploys: Vec<ArtifactNodeRepr>,
    pub nodes: IndexMap<String, ArtifactNodeRepr>,
    pub namespace: Option<String>,
    pub release: Option<String>,
    pub repositories: Option<Vec<String>>,
    pub watcher: WatcherConfig
}

impl ArtifactRepr {
    fn new(
        torb_version: String,
        helm_version: String,
        terraform_version: String,
        commits: IndexMap<String, String>,
        stack_name: String,
        meta: Box<Option<ArtifactRepr>>,
        namespace: Option<String>,
        release: Option<String>,
        repositories: Option<Vec<String>>,
        watcher: WatcherConfig,
    ) -> ArtifactRepr {
        ArtifactRepr {
            torb_version,
            helm_version,
            terraform_version,
            commits,
            stack_name,
            meta,
            deploys: Vec::new(),
            nodes: IndexMap::new(),
            namespace: namespace,
            release: release,
            repositories,
            watcher: watcher
        }
    }

    /// Namespace precedence: node override > artifact override > the
    /// stack-name prefix of the node FQN (underscores become dashes).
    pub fn namespace(&self, node: &ArtifactNodeRepr) -> String {
        let mut namespace = node
            .fqn
            .split(".")
            .next()
            .unwrap()
            .to_string()
            .replace("_", "-");

        if self.namespace.is_some() {
            namespace = self.namespace.clone().unwrap();
        }

        if node.namespace.is_some() {
            namespace = node.namespace.clone().unwrap();
        }

        namespace
    }

    /// Returns the configured release name, or a freshly generated
    /// memorable kebab-case name when none was set.
    pub fn release(&self) -> String {
        if self.release.is_some() {
            self.release.clone().unwrap()
        } else {
            memorable_wordlist::kebab_case(16)
        }
    }
}

/// Nodes with no incoming edges are the roots of the deploy walk; sorted
/// descending by FQN for deterministic ordering.
fn get_start_nodes(graph: &StackGraph) -> Vec<&ArtifactNodeRepr> {
    let mut start_nodes = Vec::<&ArtifactNodeRepr>::new();

    for (fqn, list) in graph.incoming_edges.iter() {
        // FQN format is "<stack>.<kind>.<name>".
        let kind = fqn.split(".").collect::<Vec<&str>>()[1];
        let node = match kind {
            "project" => graph.projects.get(fqn).unwrap(),
            "service" => graph.services.get(fqn).unwrap(),
            "stack" => graph.stacks.get(fqn).unwrap(),
            _ => panic!("Build artifact generation, unknown kind: {}", kind),
        };

        if list.len() == 0 {
            start_nodes.push(node);
        }
    }

    start_nodes.sort_by(|a, b| b.fqn.cmp(&a.fqn));
    start_nodes
}

/// Turns a resolved [`StackGraph`] into an [`ArtifactRepr`] by walking from
/// each start node and recording every visited node.
fn walk_graph(graph: &StackGraph) -> Result<ArtifactRepr, Box<dyn std::error::Error>> {
    let start_nodes = get_start_nodes(graph);

    let meta = stack_into_artifact(&graph.meta)?;

    let mut artifact = ArtifactRepr::new(
        graph.version.clone(),
        graph.helm_version.clone(),
        graph.tf_version.clone(),
        graph.commits.clone(),
        graph.name.clone(),
        meta,
        graph.namespace.clone(),
        graph.release.clone(),
        graph.repositories.clone(),
        graph.watcher.clone()
    );

    let mut node_map: IndexMap<String, ArtifactNodeRepr> = IndexMap::new();

    for node in start_nodes {
        let artifact_node_repr = walk_nodes(node, graph, &mut node_map);
        artifact.deploys.push(artifact_node_repr);
    }

    artifact.nodes = node_map;

    Ok(artifact)
}

/// Builds the artifact for a meta (parent) stack node, if one exists.
pub fn stack_into_artifact(
    meta: &Box<Option<ArtifactNodeRepr>>,
) -> Result<Box<Option<ArtifactRepr>>, Box<dyn std::error::Error>> {
    let unboxed_meta = meta.as_ref();
    match unboxed_meta {
        Some(meta) => {
            let artifact = walk_graph(&meta.stack_graph.clone().unwrap())?;
            Ok(Box::new(Some(artifact)))
        }
        None => Ok(Box::new(None)),
    }
}

/// Recursively clones `node`, attaching implicit dependencies first and
/// then any declared project/service dependencies not already covered by
/// the implicit set. Every visited node is recorded in `node_map`.
fn walk_nodes(
    node: &ArtifactNodeRepr,
    graph: &StackGraph,
    node_map: &mut IndexMap<String, ArtifactNodeRepr>,
) -> ArtifactNodeRepr {
    let mut new_node = node.clone();

    for fqn in new_node.implicit_dependency_fqns.iter() {
        let kind = fqn.split(".").collect::<Vec<&str>>()[1];
        let node = match kind {
            "project" => graph.projects.get(fqn).unwrap(),
            "service" => graph.services.get(fqn).unwrap(),
            "stack" => graph.stacks.get(fqn).unwrap(),
            _ => panic!("Build artifact generation, unknown kind: {}", kind),
        };

        let node_repr = walk_nodes(node, graph, node_map);

        new_node.dependencies.push(node_repr)
    }

    new_node
        .dependency_names
        .projects
        .as_ref()
        .map_or((), |projects| {
            for project in projects {
                let p_fqn = format!("{}.project.{}", graph.name.clone(), project.clone());

                if !new_node.implicit_dependency_fqns.contains(&p_fqn) {
                    let p_node = graph.projects.get(&p_fqn).unwrap();
                    let p_node_repr = walk_nodes(p_node, graph, node_map);

                    new_node.dependencies.push(p_node_repr);
                }
            }
        });

    new_node
        .dependency_names
        .services
        .as_ref()
        .map_or((), |services| {
            for service in services {
                let s_fqn = format!("{}.service.{}", graph.name.clone(), service.clone());

                if !new_node.implicit_dependency_fqns.contains(&s_fqn) {
                    let s_node = graph.services.get(&s_fqn).unwrap();
                    let s_node_repr = walk_nodes(s_node, graph, node_map);

                    new_node.dependencies.push(s_node_repr);
                }
            }
        });

    node_map.insert(node.fqn.clone(), new_node.clone());

    return new_node;
}

/// Loads a buildfile from `.torb_buildstate/buildfiles/<filename>` and
/// verifies its content hash (the filename's leading `_`-separated segment)
/// before returning `(hash, filename, artifact)`.
pub fn load_build_file(
    filename: String,
) -> Result<(String, String, ArtifactRepr), Box<dyn std::error::Error>> {
    let buildstate_path = buildstate_path_or_create();
    let buildfiles_path = buildstate_path.join("buildfiles");
    let path = buildfiles_path.join(filename.clone());

    let file = std::fs::File::open(path)?;

    let hash = filename.clone().split("_").collect::<Vec<&str>>()[0].to_string();

    let reader = std::io::BufReader::new(file);

    let artifact: ArtifactRepr = serde_yaml::from_reader(reader)?;
    let string_rep = serde_yaml::to_string(&artifact).unwrap();

    if checksum(string_rep, hash.clone()) {
        Ok((hash, filename, artifact))
    } else {
        Err(Box::new(TorbArtifactErrors::LoadChecksumFailed))
    }
}

/// Resolves a stack YAML definition into its full artifact representation.
pub fn deserialize_stack_yaml_into_artifact(
    stack_yaml: &String,
) -> Result<ArtifactRepr, Box<dyn std::error::Error>> {
    let graph: StackGraph = resolve_stack(stack_yaml)?;
    let artifact = walk_graph(&graph)?;
    Ok(artifact)
}

/// Computes the artifact's content-addressed identity:
/// `(base32(sha256(yaml)), "<hash>_outfile.yaml", yaml)`.
pub fn get_build_file_info(
    artifact: &ArtifactRepr,
) -> Result<(String, String, String), Box<dyn std::error::Error>> {
    let string_rep = serde_yaml::to_string(&artifact).unwrap();
    let hash = Sha256::digest(string_rep.as_bytes());
    let hash_base32 = BASE32.encode(&hash);
    let filename = format!("{}_{}.yaml", hash_base32, "outfile");

    Ok((hash_base32, filename, string_rep))
}

/// Resolves `stack_yaml` and writes its buildfile (to `location` or the
/// default `.torb_buildstate/buildfiles`), skipping the write when a file
/// with the same content hash already exists.
pub fn write_build_file(stack_yaml: String, location: Option<&std::path::PathBuf>) -> (String, String, ArtifactRepr) {
    let artifact = deserialize_stack_yaml_into_artifact(&stack_yaml).unwrap();
    let current_dir = std::env::current_dir().unwrap();
    let current_dir_state_dir = current_dir.join(".torb_buildstate");
    let outfile_dir_path = current_dir_state_dir.join("buildfiles");

    let (hash_base32, filename, artifact_as_string) = get_build_file_info(&artifact).unwrap();
    let outfile_path = match location {
        Some(loc) => {
            loc.join(&filename)
        },
        None => outfile_dir_path.join(&filename)
    };

    if !outfile_dir_path.is_dir() {
        fs::create_dir(&outfile_dir_path).expect("Failed to create buildfile directory.");
    };

    if outfile_path.exists() {
        println!("Build file already exists with same hash, skipping write.");
    } else {
        println!("Writing buildfile to {}", outfile_path.display());
        fs::File::create(outfile_path)
            .and_then(|mut f| f.write(&artifact_as_string.as_bytes()))
            .expect("Failed to create buildfile.");
    }

    (hash_base32, filename, artifact)
}
-------------------------------------------------------------------------------- /cli/src/builder.rs: -------------------------------------------------------------------------------- 1 | // Business Source License 1.1 2 | // Licensor: Torb Foundry 3 | // Licensed Work: Torb v0.3.7-03.23 4 | // The Licensed Work is © 2023-Present Torb Foundry 5 | // 6 | // Change License: GNU Affero General Public License Version 3 7 | // Additional Use Grant: None 8 | // Change Date: Feb 22, 2023 9 | // 10 | // See LICENSE file at https://github.com/TorbFoundry/torb/blob/main/LICENSE for details. 11 | 12 | use crate::artifacts::{ArtifactNodeRepr, ArtifactRepr}; 13 | use crate::utils::{run_command_in_user_shell, CommandConfig, CommandPipeline}; 14 | use indexmap::{IndexSet}; 15 | use std::fs; 16 | use std::process::{Command, Output}; 17 | use thiserror::Error; 18 | 19 | #[derive(Error, Debug)] 20 | pub enum TorbBuilderErrors { 21 | #[error("Unable to build from dockerfile, reason: {response}")] 22 | UnableToBuildDockerfile { response: String }, 23 | #[error("Unable to build from build script, reason: {response}")] 24 | UnableToBuildBuildScript { response: String }, 25 | #[error("Either dockerfile or script_path must be provided.")] 26 | MustDefineDockerfileOrBuildScript, 27 | #[error("The node has already been built. 
This theoretically should never be hit, so please ping the maintainers.")] 28 | NodeAlreadyBuilt, 29 | } 30 | 31 | pub struct StackBuilder<'a> { 32 | artifact: &'a ArtifactRepr, 33 | built: IndexSet, 34 | dryrun: bool, 35 | build_platforms: String, 36 | separate_local_registry: bool, 37 | exempt: std::collections::HashSet, 38 | } 39 | 40 | impl<'a> StackBuilder<'a> { 41 | pub fn new( 42 | artifact: &'a ArtifactRepr, 43 | build_platforms: String, 44 | dryrun: bool, 45 | separate_local_registry: bool, 46 | ) -> StackBuilder<'a> { 47 | StackBuilder { 48 | artifact: artifact, 49 | built: IndexSet::new(), 50 | dryrun: dryrun, 51 | build_platforms: build_platforms, 52 | separate_local_registry, 53 | exempt: std::collections::HashSet::new(), 54 | } 55 | } 56 | 57 | pub fn new_with_exempt_list( 58 | artifact: &'a ArtifactRepr, 59 | build_platforms: String, 60 | dryrun: bool, 61 | separate_local_registry: bool, 62 | exempt: Vec 63 | ) -> StackBuilder<'a> { 64 | StackBuilder { 65 | artifact: artifact, 66 | built: IndexSet::new(), 67 | dryrun: dryrun, 68 | build_platforms: build_platforms, 69 | separate_local_registry, 70 | exempt: std::collections::HashSet::from_iter(exempt.iter().cloned()), 71 | } 72 | } 73 | 74 | pub fn build(&mut self) -> Result<(), Box> { 75 | for node in self.artifact.deploys.iter() { 76 | if self.exempt.get(&node.fqn).is_none() { 77 | self.walk_artifact(node)?; 78 | } 79 | } 80 | 81 | Ok(()) 82 | } 83 | 84 | fn build_node(&self, node: &ArtifactNodeRepr) -> Result<(), TorbBuilderErrors> { 85 | if let Some(step) = node.build_step.clone() { 86 | if step.dockerfile != "" { 87 | let name = node.display_name(false); 88 | 89 | self.build_docker(&name, step.dockerfile, step.tag, step.registry) 90 | .and_then(|_| Ok(())) 91 | } else if step.script_path != "" { 92 | self.build_script(step.script_path).and_then(|_| Ok(())) 93 | } else { 94 | Err(TorbBuilderErrors::MustDefineDockerfileOrBuildScript) 95 | } 96 | } else { 97 | Ok(()) 98 | } 99 | } 100 | 101 | fn 
build_docker( 102 | &self, 103 | name: &str, 104 | dockerfile: String, 105 | tag: String, 106 | registry: String, 107 | ) -> Result, TorbBuilderErrors> { 108 | let current_dir = std::env::current_dir().unwrap(); 109 | let dockerfile_dir = current_dir.join(name); 110 | 111 | let label = if registry != "local" && registry != "" { 112 | format!("{}/{}:{}", registry, name, tag) 113 | } else { 114 | format!("{}:{}", name, tag) 115 | }; 116 | // Todo(Ian): Refactor this to not be so ugly when you feel like dealing with the lifetimes. 117 | let commands = if registry != "local" { 118 | if self.separate_local_registry { 119 | vec![ 120 | CommandConfig::new( 121 | "docker", 122 | vec![ 123 | "buildx", 124 | "--builder", 125 | "default", 126 | "build", 127 | "-t", 128 | &label, 129 | ".", 130 | "-f", 131 | &dockerfile, 132 | "--push" 133 | ], 134 | Some(&dockerfile_dir.to_str().unwrap()), 135 | ), 136 | ] 137 | } else { 138 | vec![ 139 | CommandConfig::new( 140 | "docker", 141 | vec![ 142 | "buildx", 143 | "--builder", 144 | "torb_builder", 145 | "build", 146 | "--platform", 147 | &self.build_platforms, 148 | "-t", 149 | &label, 150 | ".", 151 | "-f", 152 | &dockerfile, 153 | "--push" 154 | ], 155 | Some(&dockerfile_dir.to_str().unwrap()), 156 | ), 157 | ] 158 | } 159 | } else { 160 | vec![CommandConfig::new( 161 | "docker", 162 | vec![ 163 | "buildx", 164 | "--builder", 165 | "torb_builder", 166 | "build", 167 | "-t", 168 | &label, 169 | ".", 170 | "-f", 171 | &dockerfile, 172 | "--load", 173 | ], 174 | Some(&dockerfile_dir.to_str().unwrap()), 175 | )] 176 | }; 177 | 178 | if self.dryrun { 179 | println!("{:?}", commands); 180 | 181 | Ok(vec![]) 182 | } else { 183 | let mut pipeline = CommandPipeline::new(Some(commands)); 184 | 185 | let out = pipeline 186 | .execute() 187 | .map_err(|err| TorbBuilderErrors::UnableToBuildDockerfile { 188 | response: err.to_string(), 189 | }); 190 | 191 | out 192 | } 193 | } 194 | 195 | fn build_script(&self, script_path: String) -> Result { 
196 | let contents = fs::read_to_string(script_path).unwrap(); 197 | 198 | if self.dryrun { 199 | println!("{:?}", contents); 200 | 201 | let out = Command::new("") 202 | .output() 203 | .expect("Failed to run nop command for build script dryrun."); 204 | 205 | Ok(out) 206 | } else { 207 | let lines: Vec<&str> = contents.split("\n").collect(); 208 | 209 | let script_string = lines.join("&&"); 210 | 211 | run_command_in_user_shell(script_string, None).map_err(|err| { 212 | TorbBuilderErrors::UnableToBuildBuildScript { 213 | response: err.to_string(), 214 | } 215 | }) 216 | } 217 | } 218 | 219 | fn walk_artifact(&mut self, node: &ArtifactNodeRepr) -> Result<(), Box> { 220 | // We want to walk to the end of the dependencies before we build. 221 | // This is because duplicate dependencies can exist, and we want to avoid building the same thing twice. 222 | // By walking to the end we ensure that whichever copy is built first will be in the set of seen nodes. 223 | // This let me avoid worrying about how to handle duplicate dependencies in the dependency tree data structure. 224 | // -Ian 225 | for child in node.dependencies.iter() { 226 | if self.exempt.get(&child.fqn).is_none() { 227 | self.walk_artifact(child)? 
228 | } 229 | } 230 | 231 | if !self.built.contains(&node.fqn) { 232 | self.build_node(&node).and_then(|_out| { 233 | if self.built.insert(node.fqn.clone()) { 234 | Ok(()) 235 | } else { 236 | Err(TorbBuilderErrors::NodeAlreadyBuilt) 237 | } 238 | })?; 239 | } 240 | 241 | Ok(()) 242 | } 243 | } 244 | -------------------------------------------------------------------------------- /cli/src/cli.rs: -------------------------------------------------------------------------------- 1 | // Business Source License 1.1 2 | // Licensor: Torb Foundry 3 | // Licensed Work: Torb v0.3.7-03.23 4 | // The Licensed Work is © 2023-Present Torb Foundry 5 | // 6 | // Change License: GNU Affero General Public License Version 3 7 | // Additional Use Grant: None 8 | // Change Date: Feb 22, 2023 9 | // 10 | // See LICENSE file at https://github.com/TorbFoundry/torb/blob/main/LICENSE for details. 11 | 12 | use clap::{AppSettings, Arg, Command, SubCommand}; 13 | 14 | pub fn cli() -> Command<'static> { 15 | Command::new("torb") 16 | .version("1.0.0") 17 | .author("Torb Foundry") 18 | .setting(AppSettings::ArgRequiredElseHelp) 19 | .subcommand(SubCommand::with_name("version").about("Get the version of this torb.")) 20 | .subcommand( 21 | SubCommand::with_name("init").about("Initialize Torb, download artifacts and tools."), 22 | ) 23 | .subcommand( 24 | SubCommand::with_name("repo") 25 | .about("Verbs for interacting with project repos.") 26 | .setting(AppSettings::ArgRequiredElseHelp) 27 | .subcommand( 28 | SubCommand::with_name("create") 29 | .about("Create a new repository for a Torb stack.") 30 | .arg( 31 | Arg::with_name("path") 32 | .takes_value(true) 33 | .required(true) 34 | .index(1) 35 | .help("Path of the repo to create."), 36 | ) 37 | .arg( 38 | Arg::new("--local-only") 39 | .short('l') 40 | .required(false) 41 | .takes_value(false) 42 | .help("Only create the repo locally."), 43 | ), 44 | ), 45 | ) 46 | .subcommand( 47 | SubCommand::with_name("artifacts") 48 | .about("Verbs for 
interacting with artifact repositories.") 49 | .setting(AppSettings::ArgRequiredElseHelp) 50 | .subcommand( 51 | SubCommand::with_name("clone") 52 | .about("Iterate through `repositories` config option and clone all that don't exist.") 53 | ) 54 | .subcommand( 55 | SubCommand::with_name("refresh") 56 | .about("Iterate through the .torb/repositories entries and pull --rebase to latest commit. Can be configured to act on specific repos, see help for details.") 57 | .arg( 58 | Arg::new("name") 59 | .long("name") 60 | .takes_value(true) 61 | .required(false) 62 | .default_value("") 63 | .short('n') 64 | ) 65 | ) 66 | ) 67 | .subcommand( 68 | SubCommand::with_name("stack") 69 | .about("Verbs for interacting with Torb stacks.") 70 | .setting(AppSettings::ArgRequiredElseHelp) 71 | .subcommand( 72 | SubCommand::with_name("checkout") 73 | .about("Add a stack definition template to your current directory.") 74 | .arg( 75 | Arg::with_name("name") 76 | .takes_value(true) 77 | .required(false) 78 | .index(1) 79 | .help("Name of the stack definition template to checkout."), 80 | ), 81 | ) 82 | .subcommand( 83 | SubCommand::with_name("init") 84 | .about("Run any init steps for a stack's dependencies.") 85 | .arg( 86 | Arg::with_name("file") 87 | .takes_value(true) 88 | .required(true) 89 | .index(1) 90 | .help("File path of the stack definition file."), 91 | ), 92 | ) 93 | .subcommand( 94 | SubCommand::with_name("new") 95 | .about("Create a new stack.yaml template.") 96 | ) 97 | .subcommand( 98 | SubCommand::with_name("build") 99 | .about("Build a stack from a stack definition file.") 100 | .arg( 101 | Arg::with_name("file") 102 | .takes_value(true) 103 | .required(true) 104 | .index(1) 105 | .help("File path of the stack definition file."), 106 | ) 107 | .arg( 108 | Arg::new("--dryrun") 109 | .short('d') 110 | .long("dryrun") 111 | .takes_value(false) 112 | .help("Dry run. 
Don't actually build the stack."), 113 | ) 114 | .arg( 115 | Arg::new("--platforms") 116 | .short('p') 117 | .default_values(&["linux/amd64", "linux/arm64"]) 118 | .help( 119 | "Comma separated list of platforms to build docker images for.", 120 | ), 121 | ) 122 | .arg( 123 | Arg::new("--local-hosted-registry") 124 | .short('l') 125 | .long("local-hosted-registry") 126 | .takes_value(false) 127 | .help("Runs the builder with the docker driver to push to a separate registry hosted on localhost (or an address pointing to localhost)"), 128 | ), 129 | ) 130 | .subcommand( 131 | SubCommand::with_name("deploy") 132 | .about("Deploy a stack from a stack definition file.") 133 | .arg( 134 | Arg::with_name("file") 135 | .takes_value(true) 136 | .required(true) 137 | .index(1) 138 | .help("File path of the stack definition file."), 139 | ) 140 | .arg( 141 | Arg::new("--dryrun") 142 | .short('d') 143 | .long("dryrun") 144 | .takes_value(false) 145 | .help("Dry run. Don't actually deploy the stack."), 146 | ), 147 | ) 148 | .subcommand( 149 | SubCommand::with_name("watch") 150 | .about("Watch files for changes and re-build and redeploy to cluster.") 151 | .arg( 152 | Arg::with_name("file") 153 | .takes_value(true) 154 | .required(true) 155 | .index(1) 156 | .help("File path of the stack definition file."), 157 | ) 158 | .arg( 159 | Arg::new("--local-hosted-registry") 160 | .short('l') 161 | .long("local-hosted-registry") 162 | .takes_value(false) 163 | .help("Runs the builder with the docker driver to push to a separate registry hosted on localhost (or an address pointing to localhost)"), 164 | ), 165 | ) 166 | .subcommand(SubCommand::with_name("list").about("List all available stacks.")), 167 | ) 168 | } 169 | -------------------------------------------------------------------------------- /cli/src/composer.rs: -------------------------------------------------------------------------------- 1 | // Business Source License 1.1 2 | // Licensor: Torb Foundry 3 | // Licensed Work: 
Torb v0.3.7-03.23 4 | // The Licensed Work is © 2023-Present Torb Foundry 5 | // 6 | // Change License: GNU Affero General Public License Version 3 7 | // Additional Use Grant: None 8 | // Change Date: Feb 22, 2023 9 | // 10 | // See LICENSE file at https://github.com/TorbFoundry/torb/blob/main/LICENSE for details. 11 | 12 | use crate::artifacts::{ArtifactNodeRepr, ArtifactRepr, TorbInput, TorbNumeric}; 13 | use crate::resolver::inputs::{InputResolver, NO_INPUTS_FN, NO_VALUES_FN, NO_INITS_FN}; 14 | use crate::utils::{buildstate_path_or_create, for_each_artifact_repository, torb_path, kebab_to_snake_case, snake_case_to_kebab}; 15 | 16 | use hcl::{Block, Body, Expression, Object, ObjectKey, RawExpression, Number}; 17 | use serde::{Deserialize, Serialize}; 18 | use serde_yaml::{Mapping, Value}; 19 | use std::collections::{HashMap, HashSet}; 20 | use std::fs; 21 | use std::path::Path; 22 | use thiserror::Error; 23 | use indexmap::{IndexSet, IndexMap}; 24 | 25 | #[derive(Error, Debug)] 26 | pub enum TorbComposerErrors {} 27 | 28 | fn reserved_outputs() -> HashMap<&'static str, &'static str> { 29 | let reserved = vec![("host", "")]; 30 | 31 | let mut reserved_hash = HashMap::new(); 32 | 33 | for (k, v) in reserved { 34 | reserved_hash.insert(k, v); 35 | } 36 | 37 | reserved_hash 38 | } 39 | 40 | #[derive(Debug, Clone, Serialize, Deserialize)] 41 | pub struct InputAddress { 42 | pub locality: String, 43 | pub node_type: String, 44 | pub node_name: String, 45 | pub node_property: String, 46 | pub property_specifier: String, 47 | } 48 | 49 | impl<'a> InputAddress { 50 | fn new( 51 | locality: String, 52 | node_type: String, 53 | node_name: String, 54 | node_property: String, 55 | property_specifier: String, 56 | ) -> InputAddress { 57 | InputAddress { 58 | locality, 59 | node_type, 60 | node_name, 61 | node_property, 62 | property_specifier, 63 | } 64 | } 65 | 66 | fn is_init_address(vals: &Vec<&str>) -> Option { 67 | if vals.len() == 3 && vals[0] == "TORB" { 68 | let 
locality = vals[0].to_string(); 69 | let node_type = "".to_string(); 70 | let node_name = "".to_string(); 71 | let node_property = vals[1].to_string(); 72 | let property_specifier = vals[2].to_string(); 73 | 74 | return Some(InputAddress::new( 75 | locality, 76 | node_type, 77 | node_name, 78 | node_property, 79 | property_specifier 80 | )) 81 | } 82 | 83 | None 84 | } 85 | 86 | fn is_input_address(vals: &Vec<&str>) -> Option { 87 | if vals.len() == 5 && vals[0] == "self" { 88 | let locality = vals[0].to_string(); 89 | let node_type = vals[1].to_string(); 90 | let node_name = vals[2].to_string(); 91 | let node_property = vals[3].to_string(); 92 | let property_specifier = vals[4].to_string(); 93 | 94 | return Some(InputAddress::new( 95 | locality, 96 | node_type, 97 | node_name, 98 | node_property, 99 | property_specifier, 100 | )) 101 | } 102 | 103 | None 104 | } 105 | 106 | fn supported_localities() -> HashSet<&'a str> { 107 | let set = vec!["self", "TORB"]; 108 | 109 | set.into_iter().collect::>() 110 | } 111 | 112 | } 113 | 114 | impl TryFrom<&str> for InputAddress { 115 | type Error = TorbInput; 116 | 117 | fn try_from(input: &str) -> Result { 118 | let vals = input.split(".").collect::>(); 119 | 120 | if !InputAddress::supported_localities().contains(vals[0]) { 121 | return Err(TorbInput::String(input.to_string())) 122 | } 123 | 124 | let init_addr_opt = InputAddress::is_init_address(&vals); 125 | 126 | if init_addr_opt.is_some() { 127 | return Ok(init_addr_opt.unwrap()) 128 | } 129 | 130 | let input_addr_opt = InputAddress::is_input_address(&vals); 131 | 132 | if input_addr_opt.is_some() { 133 | return Ok(input_addr_opt.unwrap()) 134 | } 135 | 136 | Err(TorbInput::String(input.to_string())) 137 | } 138 | } 139 | 140 | impl TryFrom<&TorbInput> for InputAddress { 141 | type Error = TorbInput; 142 | 143 | fn try_from(input: &TorbInput) -> Result { 144 | if let TorbInput::String(str_input) = input { 145 | let vals = str_input.split(".").collect::>(); 146 | 147 | 
if !InputAddress::supported_localities().contains(vals[0]) { 148 | return Err(TorbInput::String(str_input.to_string())) 149 | } 150 | 151 | let init_addr_opt = InputAddress::is_init_address(&vals); 152 | 153 | if init_addr_opt.is_some() { 154 | return Ok(init_addr_opt.unwrap()) 155 | } 156 | 157 | let input_addr_opt = InputAddress::is_input_address(&vals); 158 | 159 | if input_addr_opt.is_some() { 160 | return Ok(input_addr_opt.unwrap()) 161 | } 162 | 163 | Err(TorbInput::String(str_input.to_string())) 164 | } else { 165 | Err(input.clone()) 166 | } 167 | } 168 | } 169 | 170 | pub struct Composer<'a> { 171 | hash: String, 172 | build_files_seen: IndexSet, 173 | fqn_seen: IndexSet, 174 | release_name: String, 175 | main_struct: hcl::BodyBuilder, 176 | artifact_repr: &'a ArtifactRepr, 177 | watcher_patch: bool, 178 | dev_mounts: IndexMap> 179 | } 180 | 181 | impl<'a> Composer<'a> { 182 | pub fn new(hash: String, artifact_repr: &ArtifactRepr, watcher_patch: bool) -> Composer { 183 | Composer { 184 | hash: hash, 185 | build_files_seen: IndexSet::new(), 186 | fqn_seen: IndexSet::new(), 187 | release_name: artifact_repr.release(), 188 | main_struct: Body::builder(), 189 | artifact_repr: artifact_repr, 190 | watcher_patch: watcher_patch, 191 | dev_mounts: IndexMap::new() 192 | } 193 | } 194 | 195 | pub fn new_with_dev_mounts(hash: String, artifact_repr: &ArtifactRepr, watcher_patch: bool, dev_mounts: IndexMap>) -> Composer { 196 | Composer { 197 | hash: hash, 198 | build_files_seen: IndexSet::new(), 199 | fqn_seen: IndexSet::new(), 200 | release_name: artifact_repr.release(), 201 | main_struct: Body::builder(), 202 | artifact_repr: artifact_repr, 203 | watcher_patch: watcher_patch, 204 | dev_mounts: dev_mounts 205 | } 206 | } 207 | 208 | fn get_node_for_output_value(&self, torb_input_address: &InputAddress) -> &ArtifactNodeRepr { 209 | let stack_name = &self.artifact_repr.stack_name; 210 | let output_node_fqn = format!( 211 | "{}.{}.{}", 212 | stack_name, 
&torb_input_address.node_type, &torb_input_address.node_name 213 | ); 214 | 215 | self.artifact_repr 216 | .nodes 217 | .get(&output_node_fqn) 218 | .expect("Unable to map input address to node, make sure your mapping is correct.") 219 | } 220 | 221 | fn interpolate_inputs_into_helm_values( 222 | &self, 223 | torb_input_address: Result, 224 | ) -> String { 225 | let output_value = self.input_values_from_input_address(torb_input_address.clone()); 226 | let string_value = hcl::format::to_string(&output_value).unwrap(); 227 | match torb_input_address { 228 | Ok(input_address) => { 229 | 230 | if reserved_outputs().contains_key(input_address.property_specifier.as_str()) { 231 | string_value.replace("\"", "") 232 | } else { 233 | format!("${{{}}}", string_value.replace("\"", "")) 234 | } 235 | } 236 | Err(_s) => string_value, 237 | } 238 | } 239 | 240 | fn k8s_value_from_reserved_input(&self, torb_input_address: InputAddress) -> Expression { 241 | let output_node = self.get_node_for_output_value(&torb_input_address); 242 | 243 | match torb_input_address.property_specifier.as_str() { 244 | "host" => { 245 | let name = format!("{}-{}", self.release_name, output_node.display_name(true)); 246 | 247 | let namespace = self.artifact_repr.namespace(output_node); 248 | 249 | Expression::String(format!("{}.{}.svc.cluster.local", name, namespace)) 250 | } 251 | _ => { 252 | panic!("Unable to map reserved value.") 253 | } 254 | } 255 | } 256 | 257 | fn k8s_status_values_path_from_torb_input(&self, torb_input_address: InputAddress) -> String { 258 | let output_node = self.get_node_for_output_value(&torb_input_address); 259 | 260 | let kube_value = if torb_input_address.node_property == "output" || torb_input_address.node_property == "inputs" { 261 | let (kube_val, _) = output_node 262 | .mapped_inputs 263 | .get(&torb_input_address.property_specifier) 264 | .expect("Unable to map input from output node. 
Key does not exist."); 265 | 266 | kube_val 267 | } else { 268 | panic!("Unable to map node property to output attribute please check your inputs, ex: 'a.b.output.c or a.b.input.c"); 269 | }; 270 | 271 | let formatted_name = kebab_to_snake_case(&self.release_name); 272 | let block_name = format!("{}_{}", formatted_name, &output_node.display_name(false)); 273 | 274 | format!( 275 | "jsondecode(data.torb_helm_release.{}.values)[\"{}\"]", 276 | block_name, kube_value 277 | ) 278 | } 279 | 280 | fn iac_environment_path(&self) -> std::path::PathBuf { 281 | let buildstate_path = buildstate_path_or_create(); 282 | if self.watcher_patch { 283 | buildstate_path.join("watcher_iac_environment") 284 | } else { 285 | buildstate_path.join("iac_environment") 286 | } 287 | } 288 | 289 | pub fn compose(&mut self) -> Result<(), Box> { 290 | println!("Composing build environment..."); 291 | let environment_path = self.iac_environment_path(); 292 | 293 | if !environment_path.exists() { 294 | std::fs::create_dir(environment_path)?; 295 | } 296 | 297 | self.add_required_providers_to_main_struct(); 298 | 299 | for node in self.artifact_repr.deploys.iter() { 300 | self.walk_artifact(node)?; 301 | } 302 | 303 | self.copy_supporting_build_files() 304 | .expect("Failed to write supporting buildfiles to new environment."); 305 | 306 | self.write_main_buildfile() 307 | .expect("Failed to write main buildfile to new environment."); 308 | 309 | Ok(()) 310 | } 311 | 312 | fn copy_supporting_build_files(&self) -> Result<(), Box> { 313 | for_each_artifact_repository(Box::new(|repos_path, repo| { 314 | let repo_path = repos_path.join(repo.file_name()); 315 | let source_path = repo_path.join("common"); 316 | let new_environment_path = self.iac_environment_path(); 317 | 318 | let repo_name = repo.file_name().into_string().unwrap(); 319 | let namespace_dir = kebab_to_snake_case(&repo_name); 320 | let dest = new_environment_path 321 | .join(namespace_dir) 322 | 
.join(source_path.as_path().file_name().unwrap()); 323 | 324 | if !dest.exists() { 325 | fs::create_dir_all(dest.clone()).expect("Unable to create supporting buildfile directory at destination, please check torb has been initialized properly."); 326 | } 327 | 328 | self._copy_files_recursively(source_path, dest); 329 | 330 | let provider_path = repo_path.join("common/providers"); 331 | let dest = new_environment_path.clone(); 332 | 333 | self._copy_files_recursively(provider_path, dest); 334 | }))?; 335 | 336 | Ok(()) 337 | } 338 | 339 | fn _copy_files_recursively(&self, path: std::path::PathBuf, dest: std::path::PathBuf) -> () { 340 | let error_string = format!("Failed reading dir: {}. Please check that torb is correctly initialized and that any additional artifact repos have been pulled with `torb artifacts refresh`.", path.to_str().unwrap()); 341 | for entry in path.read_dir().expect(&error_string) { 342 | let error_string = format!("Failed reading entry in dir: {}. Please check that torb is correctly initialized and that any additional artifacts repos have been pulled with `torb artifacts refresh`.", path.to_str().unwrap()); 343 | let entry = entry.expect(&error_string); 344 | if entry.path().is_dir() { 345 | let new_dest = dest.join(entry.path().file_name().unwrap()); 346 | if !new_dest.exists() { 347 | fs::create_dir(new_dest.clone()).expect("Unable to create supporting buildfile directory at destination, please check torb has been initialized properly."); 348 | } 349 | 350 | self._copy_files_recursively(entry.path(), new_dest.clone()) 351 | } else { 352 | let path = entry.path(); 353 | let new_path = dest.join(path.file_name().unwrap()); 354 | 355 | fs::copy(path, new_path).expect("Failed to copy supporting build file."); 356 | } 357 | } 358 | } 359 | 360 | fn write_main_buildfile(&mut self) -> Result> { 361 | let builder = std::mem::take(&mut self.main_struct); 362 | let environment_path = self.iac_environment_path(); 363 | 364 | let main_tf_path = 
environment_path.join("main.tf"); 365 | 366 | let built_content = builder.build(); 367 | 368 | let main_tf_content_hcl_string = hcl::to_string(&built_content)?; 369 | 370 | if std::env::var("TORB_DEBUG").is_ok() { 371 | println!("{}", main_tf_content_hcl_string); 372 | } 373 | 374 | fs::write(&main_tf_path, main_tf_content_hcl_string).expect("Failed to write main.tf"); 375 | 376 | Ok(main_tf_path) 377 | } 378 | 379 | fn walk_artifact(&mut self, node: &ArtifactNodeRepr) -> Result<(), Box> { 380 | // We want to walk to the end of the dependencies before we build. 381 | // This is because duplicate dependencies can exist, and we want to avoid building the same thing twice. 382 | // By walking to the end we ensure that whichever copy is built first will be in the set of seen nodes. 383 | // This let me avoid worrying about how to handle duplicate dependencies in the dependency tree data structure. 384 | // -Ian 385 | for child in node.dependencies.iter() { 386 | self.walk_artifact(child)? 387 | } 388 | 389 | if !self.build_files_seen.contains(&node.display_name(false)) { 390 | self.copy_build_files_for_node(&node).and_then(|_out| { 391 | if self.build_files_seen.insert(node.display_name(false).clone()) { 392 | Ok(()) 393 | } else { 394 | Err(Box::new(std::io::Error::new( 395 | std::io::ErrorKind::Other, 396 | "Node build files already seen.", 397 | ))) 398 | } 399 | })?; 400 | } 401 | 402 | if !self.fqn_seen.contains(&node.fqn) { 403 | self.add_stack_node_to_main_struct(node).and_then(|_out| { 404 | if self.fqn_seen.insert(node.fqn.clone()) { 405 | Ok(()) 406 | } else { 407 | Err(Box::new(std::io::Error::new( 408 | std::io::ErrorKind::Other, 409 | "Node already seen.", 410 | ))) 411 | } 412 | })?; 413 | } 414 | 415 | Ok(()) 416 | } 417 | 418 | fn create_output_data_block( 419 | &mut self, 420 | node: &ArtifactNodeRepr, 421 | ) -> Result> { 422 | let snake_case_release_name = self.release_name.clone().replace("-", "_"); 423 | let namespace = 
self.artifact_repr.namespace(node); 424 | 425 | let name = node.fqn.clone().replace(".", "_"); 426 | 427 | let data_block = Block::builder("data") 428 | .add_label("torb_helm_release") 429 | .add_label(format!("{}_{}", &snake_case_release_name, &node.display_name(false))) 430 | .add_attribute(( 431 | "release_name", 432 | format!("{}-{}", self.release_name.clone(), snake_case_to_kebab(&node.display_name(false))), 433 | )) 434 | .add_attribute(("namespace", namespace)) 435 | .add_attribute(( 436 | "depends_on", 437 | Expression::from(vec![RawExpression::from(format!("module.{}", name))]), 438 | )) 439 | .build(); 440 | 441 | Ok(data_block) 442 | } 443 | 444 | fn copy_build_files_for_node( 445 | &mut self, 446 | node: &ArtifactNodeRepr, 447 | ) -> Result> { 448 | let environment_path = self.iac_environment_path(); 449 | let node_source = node.source.clone().unwrap(); 450 | let namespace_dir = kebab_to_snake_case(&node_source); 451 | let repo_path = environment_path.join(namespace_dir); 452 | 453 | if !repo_path.exists() { 454 | let error = format!( 455 | "Failed to create new repository namespace directory in environment for revision {}.", 456 | &self.hash 457 | ); 458 | fs::create_dir(&repo_path).expect(&error); 459 | } 460 | 461 | let env_node_path = repo_path.join(format!("{}_module", &node.display_name(false))); 462 | 463 | if !env_node_path.exists() { 464 | let error = format!( 465 | "Failed to create new module directory in environment for revision {}.", 466 | &self.hash 467 | ); 468 | fs::create_dir(&env_node_path).expect(&error); 469 | } 470 | 471 | let tf_path = Path::new(&node.file_path) 472 | .parent() 473 | .unwrap() 474 | .join("terraform/"); 475 | 476 | if tf_path.exists() && tf_path.is_dir() { 477 | for f in fs::read_dir(tf_path)? 
{ 478 | let f = f?; 479 | let path = f.path(); 480 | let file_name = path.file_name().unwrap().to_str().unwrap(); 481 | let new_path = env_node_path.join(file_name); 482 | fs::copy(path, new_path)?; 483 | } 484 | } 485 | 486 | Ok(true) 487 | } 488 | 489 | fn create_input_values(&self, node: &ArtifactNodeRepr) -> Vec> { 490 | let mut input_vals = Vec::>::new(); 491 | 492 | let resolver_fn = |spec: &String, input_address_result| { 493 | let mut input: Object = Object::new(); 494 | 495 | input.insert( 496 | ObjectKey::Expression(Expression::String("name".to_string())), 497 | Expression::String(spec.clone()), 498 | ); 499 | 500 | let mapped_expression = self.input_values_from_input_address(input_address_result); 501 | 502 | input.insert( 503 | ObjectKey::Expression(Expression::String("value".to_string())), 504 | mapped_expression.clone(), 505 | ); 506 | 507 | if spec != "" { 508 | input_vals.push(input); 509 | } 510 | 511 | 512 | mapped_expression.clone().to_string() 513 | }; 514 | 515 | let (_, _, _) = InputResolver::resolve(node, NO_VALUES_FN, Some(resolver_fn), NO_INITS_FN) 516 | .expect("Unable to resolve listed inputs."); 517 | 518 | input_vals 519 | } 520 | 521 | fn input_values_from_input_address( 522 | &self, 523 | input_address: Result, 524 | ) -> Expression { 525 | match input_address { 526 | Ok(input_address) => { 527 | if reserved_outputs().contains_key(input_address.property_specifier.as_str()) { 528 | let val = self.k8s_value_from_reserved_input(input_address); 529 | val.clone() 530 | } else { 531 | let val = self.k8s_status_values_path_from_torb_input(input_address); 532 | 533 | Expression::Raw(RawExpression::new(val.clone())) 534 | } 535 | } 536 | Err(input_result) => { 537 | match input_result { 538 | TorbInput::String(val) => Expression::String(val), 539 | TorbInput::Bool(val) => Expression::String(val.to_string()), 540 | TorbInput::Numeric(val) => { 541 | match val { 542 | TorbNumeric::Float(val) => 
Expression::String(Number::from_f64(val).unwrap().to_string()), 543 | TorbNumeric::Int(val) => Expression::String(Number::from(val).to_string()), 544 | TorbNumeric::NegInt(val) => Expression::String(Number::from(val).to_string()) 545 | } 546 | } 547 | TorbInput::Array(val) => { 548 | Expression::String(self.torb_array_to_hcl_helm_array(val)) 549 | } 550 | } 551 | 552 | } 553 | } 554 | } 555 | 556 | fn torb_array_to_hcl_helm_array(&self, arr: Vec) -> String { 557 | let mut new = Vec::::new(); 558 | for input in arr.iter().cloned() { 559 | let expr = match input { 560 | TorbInput::String(val) => Expression::String(val).to_string(), 561 | TorbInput::Bool(val) => Expression::Bool(val).to_string(), 562 | TorbInput::Numeric(val) => { 563 | match val { 564 | TorbNumeric::Float(val) => Expression::Number(Number::from_f64(val).unwrap()).to_string(), 565 | TorbNumeric::Int(val) => Expression::Number(Number::from(val)).to_string(), 566 | TorbNumeric::NegInt(val) => Expression::Number(Number::from(val)).to_string() 567 | } 568 | } 569 | TorbInput::Array(_val) => { 570 | panic!("Nested array types are not supported.") 571 | } 572 | }; 573 | 574 | new.push(expr) 575 | } 576 | 577 | "{".to_owned() + &new.join(",") + "}" 578 | } 579 | 580 | fn add_required_providers_to_main_struct(&mut self) { 581 | let required_providers = Block::builder("terraform") 582 | .add_block( 583 | Block::builder("required_providers") 584 | .add_attribute(( 585 | "torb", 586 | Expression::from_iter(vec![ 587 | ("source", "TorbFoundry/torb"), 588 | ("version", "0.1.2"), 589 | ]), 590 | )) 591 | .build(), 592 | ) 593 | .build(); 594 | 595 | let torb_provider = Block::builder("provider").add_label("torb").build(); 596 | 597 | let mut builder = std::mem::take(&mut self.main_struct); 598 | 599 | builder = builder.add_block(required_providers); 600 | builder = builder.add_block(torb_provider); 601 | 602 | self.main_struct = builder; 603 | } 604 | 605 | fn add_stack_node_to_main_struct( 606 | &mut self, 607 | 
node: &ArtifactNodeRepr, 608 | ) -> Result<(), Box> { 609 | let node_source = node.source.clone().unwrap(); 610 | let namespace_dir = kebab_to_snake_case(&node_source); 611 | 612 | let source = format!("./{namespace_dir}/{}_module", node.display_name(false)); 613 | let name = node.fqn.clone().replace(".", "_"); 614 | 615 | let namespace = self.artifact_repr.namespace(node); 616 | 617 | let mut values = vec![]; 618 | let mut attributes = vec![ 619 | ("source", source), 620 | ( 621 | "release_name", 622 | format!("{}-{}", self.release_name.clone(), snake_case_to_kebab(&node.display_name(false))), 623 | ), 624 | ("namespace", namespace), 625 | ]; 626 | 627 | if node.build_step.is_some() { 628 | let build_step = node.build_step.clone().unwrap(); 629 | let mut map: HashMap> = HashMap::new(); 630 | let mut image_key_map: HashMap = HashMap::new(); 631 | 632 | if build_step.tag != "" { 633 | image_key_map.insert("tag".to_string(), build_step.tag); 634 | } else { 635 | image_key_map.insert("tag".to_string(), "latest".to_string()); 636 | } 637 | 638 | if build_step.registry != "local" { 639 | let registry = format!("{}/{}", build_step.registry, node.display_name(false)); 640 | image_key_map.insert("repository".to_string(), registry); 641 | } else { 642 | image_key_map.insert("repository".to_string(), node.display_name(false).clone()); 643 | } 644 | 645 | map.insert("image".to_string(), image_key_map); 646 | 647 | values.push(serde_yaml::to_string(&map)?) 648 | } 649 | 650 | if node.deploy_steps["helm"].clone().unwrap()["repository"].clone() != "" { 651 | attributes.push(( 652 | "repository", 653 | node.deploy_steps["helm"].clone().unwrap()["repository"].clone(), 654 | )); 655 | attributes.push(( 656 | "chart_name", 657 | node.deploy_steps["helm"].clone().unwrap()["chart"].clone(), 658 | )); 659 | } else { 660 | // If repository is not specified, we assume that the chart is local. 
661 | let local_path = 662 | torb_path().join(node.deploy_steps["helm"].clone().unwrap()["chart"].clone()); 663 | attributes.push(("chart_name", local_path.to_str().unwrap().to_string())); 664 | } 665 | 666 | let mut depends_on_exprs = vec![]; 667 | 668 | for dep in node.dependencies.iter() { 669 | let dep_fqn = &dep.fqn; 670 | 671 | if node.implicit_dependency_fqns.get(dep_fqn).is_none() { 672 | let dep_fqn_name = dep_fqn.clone().replace(".", "_"); 673 | depends_on_exprs.push(RawExpression::from(format!("module.{dep_fqn_name}"))) 674 | } 675 | } 676 | 677 | let module_version = node.deploy_steps["helm"] 678 | .clone() 679 | .unwrap() 680 | .get("version") 681 | .unwrap_or(&"".to_string()) 682 | .clone(); 683 | 684 | if module_version != "" { 685 | attributes.push(("version", module_version)); 686 | } 687 | 688 | let output_block = self.create_output_data_block(node)?; 689 | 690 | let inputs = self.create_input_values(node); 691 | 692 | let resolver_fn = &mut |address: Result| -> String { 693 | self.interpolate_inputs_into_helm_values(address) 694 | }; 695 | 696 | let (mapped_values, _, _) = InputResolver::resolve(node, Some(resolver_fn), NO_INPUTS_FN, NO_INITS_FN)?; 697 | 698 | 699 | if mapped_values.clone().unwrap() != "---\n~\n" { 700 | values.push(mapped_values.expect("Unable to resolve values field.")); 701 | } 702 | 703 | if self.watcher_patch { 704 | let mut image_pull_policy_map = Mapping::new(); 705 | let mut nested_image_pull_policy_map = Mapping::new(); 706 | nested_image_pull_policy_map.insert(Value::String("pullPolicy".into()), Value::String("Always".into())); 707 | image_pull_policy_map.insert(Value::String("image".into()), Value::Mapping(nested_image_pull_policy_map)); 708 | 709 | let patch_value = Value::Mapping(image_pull_policy_map); 710 | let patch_yaml = serde_yaml::to_string(&patch_value)?; 711 | 712 | values.push(patch_yaml); 713 | } 714 | 715 | let mut builder = std::mem::take(&mut self.main_struct); 716 | 717 | let mut block = 
Block::builder("module") 718 | .add_label(&name) 719 | .add_attributes(attributes) 720 | .add_attribute(("inputs", inputs)); 721 | 722 | if !values.is_empty() { 723 | block = block.add_attribute(("values", values)); 724 | } 725 | 726 | let postrender_conf_opt = self.dev_mounts.get(&node.fqn); 727 | if postrender_conf_opt.is_some() { 728 | let postrender_conf = postrender_conf_opt.unwrap(); 729 | 730 | block = block.add_attribute( 731 | ("postrender_path", "./torb_artifacts/common/dev/volume_and_mount/kustomize.sh".to_string()) 732 | ); 733 | 734 | block = block.add_attribute(( 735 | "postrender_args", 736 | Expression::Array(vec![ 737 | Expression::String(node.display_name(false)), 738 | Expression::String(postrender_conf.get("container_mount").unwrap().to_string()), 739 | Expression::String(postrender_conf.get("local_mount").unwrap().to_string()) 740 | ]) 741 | )) 742 | 743 | } 744 | 745 | 746 | if !depends_on_exprs.is_empty() { 747 | let depends_on = Expression::from(depends_on_exprs); 748 | 749 | block = block.add_attribute(("depends_on", depends_on)); 750 | } 751 | 752 | builder = builder.add_block( 753 | block.build() 754 | ); 755 | 756 | builder = builder.add_block(output_block); 757 | 758 | self.main_struct = builder; 759 | 760 | Ok(()) 761 | } 762 | } 763 | -------------------------------------------------------------------------------- /cli/src/config.rs: -------------------------------------------------------------------------------- 1 | // Business Source License 1.1 2 | // Licensor: Torb Foundry 3 | // Licensed Work: Torb v0.3.7-03.23 4 | // The Licensed Work is © 2023-Present Torb Foundry 5 | // 6 | // Change License: GNU Affero General Public License Version 3 7 | // Additional Use Grant: None 8 | // Change Date: Feb 22, 2023 9 | // 10 | // See LICENSE file at https://github.com/TorbFoundry/torb/blob/main/LICENSE for details. 
11 | 12 | use serde::{Serialize, Deserialize}; 13 | use serde_yaml::{self}; 14 | use once_cell::sync::Lazy; 15 | use std::fs; 16 | use indexmap::IndexMap; 17 | 18 | use crate::utils::{torb_path}; 19 | 20 | #[derive(Serialize, Deserialize)] 21 | #[allow(non_snake_case)] 22 | pub struct Config { 23 | pub githubToken: String, 24 | pub githubUser: String, 25 | pub repositories: Option> 26 | } 27 | 28 | impl Config { 29 | fn new() -> Config { 30 | let torb_path = torb_path(); 31 | let config_path = torb_path.join("config.yaml"); 32 | 33 | let conf_str = fs::read_to_string(config_path).expect("Failed to read config.yaml"); 34 | 35 | serde_yaml::from_str(conf_str.as_str()).expect("Failed to parse config.yaml") 36 | } 37 | } 38 | 39 | pub static TORB_CONFIG: Lazy = Lazy::new(Config::new); -------------------------------------------------------------------------------- /cli/src/deployer.rs: -------------------------------------------------------------------------------- 1 | // Business Source License 1.1 2 | // Licensor: Torb Foundry 3 | // Licensed Work: Torb v0.3.7-03.23 4 | // The Licensed Work is © 2023-Present Torb Foundry 5 | // 6 | // Change License: GNU Affero General Public License Version 3 7 | // Additional Use Grant: None 8 | // Change Date: Feb 22, 2023 9 | // 10 | // See LICENSE file at https://github.com/TorbFoundry/torb/blob/main/LICENSE for details. 
11 | 12 | use crate::{artifacts::{ArtifactRepr}, utils::{CommandConfig, CommandPipeline}}; 13 | use std::process::Command; 14 | use crate::utils::{torb_path, buildstate_path_or_create}; 15 | use thiserror::Error; 16 | 17 | #[derive(Error, Debug)] 18 | pub enum TorbDeployErrors { 19 | #[error("Failed to deploy stack with reason: {reason}")] 20 | FailedDeployment { 21 | reason: String 22 | } 23 | } 24 | 25 | pub struct StackDeployer { 26 | watcher_patch: bool 27 | } 28 | 29 | impl StackDeployer { 30 | pub fn new(watcher_patch: bool) -> StackDeployer { 31 | StackDeployer { 32 | watcher_patch 33 | } 34 | } 35 | 36 | pub fn deploy( 37 | &mut self, 38 | artifact: &ArtifactRepr, 39 | dryrun: bool, 40 | ) -> Result<(), Box> { 41 | println!("Deploying {} stack...", artifact.stack_name.as_str()); 42 | 43 | self.init_tf()?; 44 | 45 | self.deploy_tf(dryrun)?; 46 | 47 | Ok(()) 48 | } 49 | 50 | fn init_tf(&self) -> Result> { 51 | println!("Initalizing terraform..."); 52 | let torb_path = torb_path(); 53 | let iac_env_path = self.iac_environment_path(); 54 | let mut cmd = Command::new("./terraform"); 55 | cmd.arg(format!("-chdir={}", iac_env_path.to_str().unwrap())); 56 | cmd.arg("init"); 57 | cmd.arg("-upgrade"); 58 | cmd.current_dir(torb_path); 59 | 60 | println!("Running command: {:?}", cmd); 61 | Ok(cmd.output()?) 
62 | } 63 | 64 | fn iac_environment_path(&self) -> std::path::PathBuf { 65 | let buildstate_path = buildstate_path_or_create(); 66 | if self.watcher_patch { 67 | buildstate_path.join("watcher_iac_environment") 68 | } else { 69 | buildstate_path.join("iac_environment") 70 | } 71 | } 72 | 73 | fn deploy_tf( 74 | &self, 75 | dryrun: bool, 76 | ) -> Result> { 77 | let torb_path = torb_path(); 78 | let iac_env_path = self.iac_environment_path(); 79 | 80 | if self.watcher_patch { 81 | let buildstate_path = buildstate_path_or_create(); 82 | let non_watcher_iac = buildstate_path.join("iac_environment"); 83 | let tf_state_path = non_watcher_iac.join("terraform.tfstate"); 84 | 85 | if tf_state_path.exists() { 86 | let new_path = iac_env_path.join("terraform.tfstate"); 87 | std::fs::copy(tf_state_path, new_path).expect("Failed to copy supporting build file."); 88 | }; 89 | }; 90 | 91 | let iac_env_str = iac_env_path.to_str().unwrap(); 92 | let chdir_arg = format!("-chdir={}", iac_env_str); 93 | let cmd_conf = CommandConfig::new( 94 | "./terraform", 95 | vec![ 96 | chdir_arg.as_str(), 97 | "plan", 98 | "-out=./tfplan" 99 | ], 100 | torb_path.to_str() 101 | ); 102 | 103 | let out = CommandPipeline::execute_single(cmd_conf)?; 104 | 105 | if dryrun { 106 | Ok(out) 107 | } else { 108 | let mut cmd = Command::new("./terraform"); 109 | cmd.arg(format!("-chdir={}", iac_env_path.to_str().unwrap())) 110 | .arg("apply") 111 | .arg("./tfplan") 112 | .current_dir(&torb_path); 113 | 114 | let output = cmd.output()?; 115 | 116 | if output.status.success() { 117 | Ok(cmd.output()?) 
118 | } else { 119 | Err(Box::new(TorbDeployErrors::FailedDeployment { reason: String::from_utf8(output.stderr).unwrap() })) 120 | } 121 | } 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /cli/src/initializer.rs: -------------------------------------------------------------------------------- 1 | // Business Source License 1.1 2 | // Licensor: Torb Foundry 3 | // Licensed Work: Torb v0.3.7-03.23 4 | // The Licensed Work is © 2023-Present Torb Foundry 5 | // 6 | // Change License: GNU Affero General Public License Version 3 7 | // Additional Use Grant: None 8 | // Change Date: Feb 22, 2023 9 | // 10 | // See LICENSE file at https://github.com/TorbFoundry/torb/blob/main/LICENSE for details. 11 | 12 | use crate::{artifacts::{ArtifactRepr, ArtifactNodeRepr}, resolver::inputs::{InputResolver, NO_INPUTS_FN, NO_VALUES_FN}}; 13 | use std::{env::current_dir}; 14 | use crate::utils::{run_command_in_user_shell, buildstate_path_or_create}; 15 | use indexmap::IndexSet; 16 | 17 | pub struct StackInitializer<'a> { 18 | artifact: &'a ArtifactRepr, 19 | initialized: IndexSet, 20 | } 21 | 22 | impl<'a> StackInitializer<'a> { 23 | pub fn new(artifact: &'a ArtifactRepr) -> StackInitializer { 24 | StackInitializer { 25 | artifact: artifact, 26 | initialized: IndexSet::new(), 27 | } 28 | } 29 | 30 | pub fn run_node_init_steps(&mut self) -> Result<(), Box> { 31 | let buildstate_path = buildstate_path_or_create(); 32 | let init_canary_path = buildstate_path.join(".stack_initialized"); 33 | 34 | if !init_canary_path.exists() { 35 | for node in self.artifact.deploys.iter() { 36 | self.walk_artifact(node)?; 37 | } 38 | 39 | std::fs::write(init_canary_path, "")?; 40 | } else { 41 | println!("Stack has already been initialized, skipping.") 42 | } 43 | 44 | Ok(()) 45 | } 46 | 47 | fn copy_required_files(&self, node: &ArtifactNodeRepr) -> Result<(), Box> { 48 | let node_file_path = std::path::Path::new(&node.file_path); 49 | let node_dir = 
node_file_path.parent().unwrap();

        let files = node.files.clone().unwrap_or_default();

        for file in files {
            let file_path = node_dir.join(file);

            // Only copy files that actually resolve relative to the CWD.
            if current_dir()?.join(file_path.clone()).exists() {
                let file_name = file_path.file_name().unwrap();
                let dest_path = current_dir()?.join(file_name);

                std::fs::copy(file_path, dest_path)?;
            }
        }

        Ok(())
    }

    /// Copies required files, then resolves the node's init steps and runs
    /// them (joined with `&&`) in the user's shell, if any are defined.
    fn initalize_node(&self, node: &ArtifactNodeRepr) -> Result<(), Box<dyn std::error::Error>> {
        self.copy_required_files(node)?;

        if node.init_step.is_some() {
            let (_, _, resolved_steps) = InputResolver::resolve(node, NO_VALUES_FN, NO_INPUTS_FN, Some(true))?;

            let script = resolved_steps.unwrap().join("&&");

            run_command_in_user_shell(script, Some("/bin/bash".to_string()))?;
        };

        Ok(())
    }

    /// Depth-first walk of the node's dependency tree, initializing leaves
    /// first. The `initialized` set guards against duplicate dependencies
    /// appearing more than once in the tree.
    fn walk_artifact(
        &mut self,
        node: &ArtifactNodeRepr,
    ) -> Result<(), Box<dyn std::error::Error>> {
        // We want to walk to the end of the dependencies before we build.
        // This is because duplicate dependencies can exist, and we want to avoid building the same thing twice.
        // By walking to the end we ensure that whichever copy is built first will be in the set of seen nodes.
        // This let me avoid worrying about how to handle duplicate dependencies in the dependency tree data structure.
        // -Ian
        for child in node.dependencies.iter() {
            self.walk_artifact(child)?
        }

        if !self.initialized.contains(&node.fqn) {
            self.initalize_node(&node).and_then(|_out| {
                if self.initialized.insert(node.fqn.clone()) {
                    Ok(())
                } else {
                    // Unreachable in practice given the `contains` guard above;
                    // kept as a defensive invariant check.
                    Err(Box::new(std::io::Error::new(
                        std::io::ErrorKind::Other,
                        "Step already initialized.",
                    )))
                }
            })?;
        }

        Ok(())
    }
}
--------------------------------------------------------------------------------
/cli/src/main.rs:
--------------------------------------------------------------------------------
// Business Source License 1.1
// Licensor: Torb Foundry
// Licensed Work: Torb v0.3.7-03.23
// The Licensed Work is © 2023-Present Torb Foundry
//
// Change License: GNU Affero General Public License Version 3
// Additional Use Grant: None
// Change Date: Feb 22, 2023
//
// See LICENSE file at https://github.com/TorbFoundry/torb/blob/main/LICENSE for details.

mod artifacts;
mod builder;
mod cli;
mod composer;
mod config;
mod deployer;
mod initializer;
mod resolver;
mod utils;
mod vcs;
mod watcher;
mod animation;

use indexmap::IndexMap;
use rayon::prelude::*;
use std::fs;
use std::fs::File;
use std::io::{self};
use std::process::Command;
use thiserror::Error;
use ureq;
use utils::{buildstate_path_or_create, torb_path, PrettyExit};
use animation::{BuilderAnimation, Animation};

use crate::artifacts::{
    deserialize_stack_yaml_into_artifact, get_build_file_info, load_build_file, write_build_file,
    ArtifactRepr,
};
use crate::builder::StackBuilder;
use crate::cli::cli;
use crate::composer::Composer;
use crate::config::TORB_CONFIG;
use crate::deployer::StackDeployer;
use crate::initializer::StackInitializer;
use crate::utils::{CommandConfig, CommandPipeline, PrettyContext};
use crate::vcs::{GitVersionControl,
GithubVCS};
use crate::watcher::Watcher;

// CLI version string, taken from Cargo.toml at compile time.
const VERSION: &'static str = env!("CARGO_PKG_VERSION");

// Top-level CLI errors surfaced to the user.
#[derive(Error, Debug)]
pub enum TorbCliErrors {
    #[error("Stack manifest missing or invalid. Please run `torb init`")]
    ManifestInvalid,
    #[error("Stack meta template missing or invalid. Please run `torb init`")]
    StackMetaNotFound,
    #[error("The stack name was found in multiple repository manifests please prefix the stack name with the repository you wish to use. i.e. torb-artifacts:flask-app-with-react-frontend")]
    StackAmbiguous,
}

// One-time setup of ~/.torb: clones the torb-artifacts repo, copies the
// config template, downloads a pinned Terraform (1.2.5) binary for the host
// OS, and creates a docker buildx builder named "torb_builder".
fn init() {
    println!("Initializing...");
    let torb_path_buf = torb_path();
    let torb_path = torb_path_buf.as_path();
    let artifacts_path = &torb_path.join("repositories");
    if !torb_path.is_dir() {
        println!("Creating {}...", torb_path.display());

        fs::create_dir(&torb_path).unwrap();
    }

    if !artifacts_path.is_dir() {
        println!("Cloning build artifacts...");
        fs::create_dir(artifacts_path).unwrap();
        // Default artifact repo, cloned over SSH.
        let _clone_cmd_out = Command::new("git")
            .arg("clone")
            .arg("git@github.com:TorbFoundry/torb-artifacts.git")
            .current_dir(&artifacts_path)
            .output()
            .expect("Failed to clone torb-artifacts");
    };

    let torb_config_path = torb_path.join("config.yaml");
    let torb_config_template = torb_path.join("repositories/torb-artifacts/config.template.yaml");

    // Seed config.yaml from the template only if the user doesn't have one yet.
    if !torb_config_path.exists() {
        let err_msg = format!("Unable to copy config template file from {}. Please check that Torb has been initialized properly.", torb_config_template.to_str().unwrap());
        fs::copy(torb_config_template, torb_config_path).expect(&err_msg);
    }

    let tf_path = torb_path.join("terraform.zip");
    let tf_bin_path = torb_path.join("terraform");
    if !tf_bin_path.is_file() {
        println!("Downloading terraform...");
        // Terraform version is pinned; only linux and macos are supported.
        let tf_url = match std::env::consts::OS {
            "linux" => {
                "https://releases.hashicorp.com/terraform/1.2.5/terraform_1.2.5_linux_amd64.zip"
            }
            "macos" => {
                "https://releases.hashicorp.com/terraform/1.2.5/terraform_1.2.5_darwin_amd64.zip"
            }
            _ => panic!("Unsupported OS"),
        };
        let resp = ureq::get(tf_url).call().unwrap();

        let mut out = File::create(&tf_path).unwrap();
        io::copy(&mut resp.into_reader(), &mut out).expect("Failed to write terraform zip file.");

        // Relies on a system `unzip` binary being on PATH.
        let mut unzip_cmd = Command::new("unzip");

        unzip_cmd.arg(&tf_path).current_dir(&torb_path);

        let _unzip_cmd_out = unzip_cmd.output().expect("Failed to unzip terraform.");
    }

    // Buildx builder used by the stack builder for multi-platform images.
    let buildx_cmd_conf = CommandConfig::new(
        "docker",
        vec![
            "buildx",
            "create",
            "--name",
            "torb_builder",
            "--driver-opt",
            "network=host",
        ],
        None,
    );

    let res = CommandPipeline::execute_single(buildx_cmd_conf);

    match res {
        Ok(_) => println!("Created docker build kit builder, torb_builder."),
        Err(err) => panic!("{}", err),
    }

    println!("Finished!")
}

// Creates a new repository at `path` via the configured GitHub VCS backend,
// unless a directory already exists there. `local_only` skips the remote.
fn create_repo(path: String, local_only: bool) {
    if !std::path::Path::new(&path).exists() {
        let mut vcs = GithubVCS::new(
            TORB_CONFIG.githubToken.clone(),
            TORB_CONFIG.githubUser.clone(),
        );

        let mut buf = std::path::PathBuf::new();
        buf.push(path);

        vcs.set_cwd(buf);

        vcs.create_repo(local_only).expect("Failed to create repo.");
} else {
        println!("Repo already exists locally. Skipping creation.");
    }
}

/// Writes `./stack.yaml`: pulled from a repository when `name` is given,
/// otherwise created empty.
fn checkout_stack(name: Option<&str>) {
    match name {
        Some(name) => {
            let stack_yaml: String =
                pull_stack(name, false).expect("Failed to pull stack from any repository. Check that the source is configured correctly and that the stack exists.");

            fs::write("./stack.yaml", stack_yaml).expect("Failed to write stack.yaml.");
        }
        None => {
            fs::write("./stack.yaml", "").expect("Failed to write stack.yaml");
        }
    }
}

/// Copies `stack.template.yaml` from the torb-artifacts repo into the CWD.
fn new_stack() {
    let torb_path = torb_path();
    let repositories_path = torb_path.join("repositories");
    let torb_artifacts = repositories_path.join("torb-artifacts");
    let template_path = torb_artifacts.join("stack.template.yaml");

    let dest = std::env::current_dir().unwrap().join("stack.template.yaml");

    let source_string = template_path.to_str().unwrap();
    let err_msg = format!("Unable to copy config template file from {source_string}. Please check that Torb has been initialized properly.");

    fs::copy(template_path, dest).expect(&err_msg);
}

/// Reads a stack file and runs its one-time node init steps.
fn init_stack(file_path: String) {
    println!("Attempting to read or create buildstate folder...");
    buildstate_path_or_create();

    println!("Attempting to read stack file...");
    let stack_yaml = fs::read_to_string(&file_path).expect("Failed to read stack.yaml.");

    println!("Reading stack into internal representation...");
    let artifact = deserialize_stack_yaml_into_artifact(&stack_yaml)
        .expect("Failed to read stack into internal representation.");

    let mut stack_initializer = StackInitializer::new(&artifact);

    stack_initializer
        .run_node_init_steps().use_or_pretty_exit(
            PrettyContext::default()
                .error("Oh no, we failed to initialize the stack!")
                .context("Failures here are typically because of missing dependencies for parts of the stack you're looking to initialize.")
                .suggestions(vec![
                    "Check that all dependencies are installed.",
                    "Check to make sure you're on a compatible operating system."
                ])
                .success("Success! Stack initialized!")
                .pretty()
        )
}

/// Generates the Terraform IaC build environment for a built stack.
fn compose_build_environment(build_hash: String, build_artifact: &ArtifactRepr) {
    let mut composer = Composer::new(build_hash, build_artifact, false);
    composer.compose().use_or_pretty_exit(
        PrettyContext::default()
            .error("Oh no, we failed to generate the IaC build environment!")
            .success("Success! IaC build environment generated!")
            .context("This typically happens due to failures parsing the stack into HCL for Terraform.")
            .suggestions(vec![
                "Check that your inputs are escaped correctly.",
                "Check that Torb has been initialized correctly, at ~/.torb you should see a Terraform binary appropriate to your system."
            ])
            .pretty()
    );
}

/// Builds all dependency images/artifacts for the stack.
fn run_dependency_build_steps(
    _build_hash: String,
    build_artifact: &ArtifactRepr,
    build_platform_string: String,
    dryrun: bool,
    separate_local_registry: bool,
) -> Result<(), Box<dyn std::error::Error>> {
    let mut builder = StackBuilder::new(
        build_artifact,
        build_platform_string,
        dryrun,
        separate_local_registry,
    );

    builder.build()
}

/// Deploys a previously built stack (non-watcher environment).
fn run_deploy_steps(
    _build_hash: String,
    build_artifact: &ArtifactRepr,
    dryrun: bool,
) -> Result<(), Box<dyn std::error::Error>> {
    let mut deployer = StackDeployer::new(false);

    deployer.deploy(build_artifact, dryrun)
}

/// Starts the file watcher; the stack file defaults to `stack.yaml`.
fn watch(fp_opt: Option<&str>, local_registry: bool) {
    let watcher = Watcher::configure(fp_opt.unwrap_or("stack.yaml").to_string(), local_registry);

    watcher.start();
}

/// Clones every configured artifact repository in parallel. An empty alias
/// clones under the repo's own name; otherwise the repo is cloned into the
/// aliased directory.
fn clone_artifacts() {
    if TORB_CONFIG.repositories.is_some() {
        let repos_to_aliases = TORB_CONFIG.repositories.clone().unwrap();
        let torb_path = torb_path();
        let artifacts_path = torb_path.join("repositories");
        repos_to_aliases
            .iter()
            .par_bridge()
            .for_each(|(repo, alias)| {
                if alias == "" {
                    let err_msg = format!("Failed to clone {}.", &repo);

                    let _clone_cmd_out = Command::new("git")
                        .arg("clone")
                        .arg(repo)
                        .current_dir(&artifacts_path)
                        .output()
                        .expect(&err_msg);
                } else {
                    let alias_path = artifacts_path.join(&alias);
                    std::fs::create_dir_all(&alias_path)
                        .expect("Unable to create aliased dir for artifact repo.");

                    let err_msg = format!("Failed to clone {} into {}.", &repo, &alias);

                    let _clone_cmd_out = Command::new("git")
                        .arg("clone")
                        .arg(repo)
                        .arg(".")
                        .current_dir(&alias_path)
                        .output()
                        .expect(&err_msg);
                }
            })
    }
}

fn
update_artifacts(name: Option<&str>) {
    // BUG FIX: `pull_stack` calls `update_artifacts(None)` on its not-found
    // retry path, so the previous `name.unwrap()` was a guaranteed panic.
    // An absent name behaves like the existing "" sentinel: refresh all.
    let filter_name = name.unwrap_or("");
    let torb_path = torb_path();
    let repo_path = torb_path.join("repositories");

    let repos = fs::read_dir(&repo_path).unwrap().par_bridge();

    repos.for_each(|repo_result| {
        let repo = repo_result.unwrap();

        // Empty filter means "refresh every repository".
        if filter_name == "" || repo.file_name() == filter_name {
            let repo_name = repo.file_name()
                .into_string()
                .expect("Failed to convert OsString to String.");

            println!(
                "Refreshing '{}' artifact repository...",
                repo_name
            );

            let err_msg = format!("Failed to pull {:?}", repo.file_name());
            let artifacts_path = repo_path.join(repo.file_name());
            let pull_cmd_out = Command::new("git")
                .arg("pull")
                .arg("--rebase")
                .current_dir(&artifacts_path)
                .output();

            let success_msg = format!("{repo_name} done refreshing!");
            pull_cmd_out.use_or_pretty_exit(
                PrettyContext::default()
                    .error(&err_msg)
                    .context("This type of error is usually an access or connection issue.")
                    .suggestions(vec![
                        "Check that you have the ability to access the artifact repo you're refreshing.",
                        "Check that you have an active internet connection."
                    ])
                    .success(&success_msg)
                    .pretty()
            );

        }
    })
}

/// Loads `stacks/manifest.yaml` from every cloned artifact repository,
/// returning `repo name -> "stacks" mapping` for lookup by `pull_stack`.
fn load_stack_manifests() -> IndexMap<String, serde_yaml::Value> {
    let torb_path = torb_path();
    let artifacts_path = torb_path.join("repositories");

    let repository_paths = fs::read_dir(&artifacts_path)
        .expect("Unable to read list of repositories. Please re-initialize Torb.");

    let mut manifests = IndexMap::<String, serde_yaml::Value>::new();

    for artifact_path_result in repository_paths {
        let artifact_path =
            artifact_path_result.expect("Unable to read entry in repositories, try again.");
        let stack_manifest_path = artifact_path.path().join("stacks").join("manifest.yaml");
        let stack_manifest_contents = fs::read_to_string(&stack_manifest_path).unwrap();
        let stack_manifest_yaml: serde_yaml::Value =
            serde_yaml::from_str(&stack_manifest_contents).unwrap();

        let manifest_name = artifact_path.file_name().to_str().unwrap().to_string();

        manifests.insert(
            manifest_name,
            stack_manifest_yaml.get("stacks").unwrap().clone(),
        );
    }

    manifests
}

/// Resolves `stack_name` (optionally `repo:stack`) to the stack file's
/// contents. On a miss, refreshes all artifact repos and retries once;
/// `fail_not_found` stops that recursion on the second pass.
fn pull_stack(
    stack_name: &str,
    fail_not_found: bool,
) -> Result<String, Box<dyn std::error::Error>> {
    let mut repo = "";
    let mut stack = stack_name;

    // "repo:stack" syntax disambiguates stacks present in several repos.
    if stack_name.find(":").is_some() {
        let stack_parts: Vec<&str> = stack_name.split(":").collect();
        repo = stack_parts[0];
        stack = stack_parts[1];
    }

    let manifests = load_stack_manifests();

    // Count how many repositories define this stack name.
    let mut count = 0;

    for (_name, manifest) in manifests.iter() {
        let stack_entry = manifest.get(stack);
        if stack_entry.is_some() {
            count += 1;
        }
    }

    if count > 1 && repo == "" {
        return Err(Box::new(TorbCliErrors::StackAmbiguous));
    } else if repo == "" {
        repo = "torb-artifacts"
    }

    let err_msg = format!("Unable to find manifest for {repo}. Make sure it was added in config.yaml and pulled with `torb artifacts refresh`");
    let repo_manifest = manifests.get(repo).expect(&err_msg);

    let stack_entry = repo_manifest.get(stack);

    if stack_entry.is_none() {
        if fail_not_found {
            return Err(Box::new(TorbCliErrors::ManifestInvalid));
        }

        // Refresh every repo, then retry once with fail_not_found = true.
        update_artifacts(None);
        return pull_stack(stack_name, true);
    } else {
        let torb_path = torb_path();
        let repo_path = torb_path.join("repositories");
        let artifacts_path = repo_path.join(repo);
        let stack_entry_str = stack_entry.unwrap().as_str().unwrap();
        let stack_contents = fs::read(artifacts_path.join("stacks").join(stack_entry_str))
            .map(|s| String::from_utf8(s).unwrap())?;

        return Ok(stack_contents);
    }
}

/// CLI entry point: dispatches on the matched subcommand.
fn main() {
    let cli_app = cli();

    let cli_matches = cli_app.get_matches();

    match cli_matches.subcommand_name() {
        Some("init") => {
            init();
        }
        Some("repo") => {
            let mut subcommand = cli_matches.subcommand_matches("repo").unwrap();
            match subcommand.subcommand_name() {
                Some("create") => {
                    subcommand = subcommand.subcommand_matches("create").unwrap();
                    let path_option = subcommand.value_of("path");
                    // NOTE(review): `value_of` on what looks like a boolean
                    // flag; `is_present` may be intended — confirm against
                    // the arg definition in cli.rs.
                    let local_option = subcommand.value_of("--local-only");

                    create_repo(path_option.unwrap().to_string(), local_option.is_some());
                }
                _ => {
                    println!("No subcommand specified.");
                }
            }
        }
        Some("artifacts") => {
            let mut subcommand = cli_matches.subcommand_matches("artifacts").unwrap();
            match subcommand.subcommand_name() {
                Some("refresh") => {
                    subcommand = subcommand.subcommand_matches("refresh").unwrap();
                    let name_option = subcommand.value_of("name");
                    update_artifacts(name_option);
                }
                Some("clone") => {
                    clone_artifacts();
                }
                _ => {}
            }
        }
Some("stack") => {
            let mut subcommand = cli_matches.subcommand_matches("stack").unwrap();
            match subcommand.subcommand_name() {
                Some("checkout") => {
                    let name_option = subcommand
                        .subcommand_matches("checkout")
                        .unwrap()
                        .value_of("name");

                    checkout_stack(name_option);
                }
                Some("new") => new_stack(),
                Some("init") => {
                    let file_path_option = subcommand
                        .subcommand_matches("init")
                        .unwrap()
                        .value_of("file");

                    init_stack(file_path_option.unwrap().to_string())
                }
                Some("build") => {
                    subcommand = subcommand.subcommand_matches("build").unwrap();
                    let file_path_option = subcommand.value_of("file");
                    let dryrun = subcommand.is_present("--dryrun");
                    let local_registry = subcommand.is_present("--local-hosted-registry");

                    // Comma-joined platform list passed through to buildx.
                    let build_platforms_string = subcommand
                        .values_of("--platforms")
                        .unwrap()
                        .collect::<Vec<&str>>()
                        .join(",");

                    if let Some(file_path) = file_path_option {
                        println!("Attempting to read or create buildstate folder...");
                        buildstate_path_or_create();
                        println!("Attempting to read and build stack: {}", file_path);
                        let contents = fs::read_to_string(file_path)
                            .expect("Something went wrong reading the stack file.");

                        let (build_hash, build_filename, _) = write_build_file(contents, None);

                        let (_, _, build_artifact) =
                            load_build_file(build_filename).expect("Unable to load build file.");

                        // The build runs inside the spinner animation; clone
                        // what the closure needs since it's moved into it.
                        let animator = BuilderAnimation::new();

                        let build_hash_clone = build_hash.clone();
                        let build_artifact_clone = build_artifact.clone();

                        animator.do_with_animation(Box::new(
                            move || {
                                run_dependency_build_steps(
                                    build_hash_clone.clone(),
                                    &build_artifact_clone,
                                    build_platforms_string.clone(),
                                    dryrun,
                                    local_registry
                                )
                            }
                        )).use_or_pretty_exit(
                            PrettyContext::default()
                                .error("Oh no, we were unable to build the stack!")
                                .success("Success! Stack has been built!")
                                .context("Errors here are typically because of a failed docker build, syntax issue in the dockerfile or a connectivity issue with the docker registry.")
                                .suggestions(vec![
                                    "Check that your dockerfile has no syntax errors and is otherwise correct.",
                                    "If you're building with an image registry that is hosted on the same machine, but as a separate service and not the default docker registry, try passing --local-hosted-registry as a flag."
                                ])
                                .pretty()
                        );

                        compose_build_environment(build_hash.clone(), &build_artifact);
                    }
                }
                Some("deploy") => {
                    subcommand = subcommand.subcommand_matches("deploy").unwrap();
                    let file_path_option = subcommand.value_of("file");
                    let dryrun = subcommand.is_present("--dryrun");

                    if let Some(file_path) = file_path_option {
                        println!("Attempting to read and deploy stack: {}", file_path);
                        let contents = fs::read_to_string(file_path)
                            .expect("Something went wrong reading the stack file.");

                        let artifact = deserialize_stack_yaml_into_artifact(&contents)
                            .expect("Unable to read stack file into internal representation.");

                        let (build_hash, build_filename, _) = get_build_file_info(&artifact)
                            .expect("Unable to get build file info for stack.");
                        println!("build_filename: {}", build_filename);
                        let (_, _, build_artifact) =
                            load_build_file(build_filename).expect("Unable to load build file.");

                        run_deploy_steps(build_hash.clone(), &build_artifact, dryrun)
                            .use_or_pretty_exit(
                                PrettyContext::default()
                                    .error("Oh no, we were unable to deploy the stack!")
                                    .success("Success! Stack has been deployed!")
                                    .context("Errors here are typically because of failed Terraform deployments or Helm failures.")
                                    .suggestions(vec![
                                        "Check that your Terraform IaC environment was generated correctly. \nThis can be found in your project folder at, .torb_buildstate/iac_environment, or .torb_buildstate/watcher_iac_environment if you're using the watcher.",
                                        "To see if your Helm deployment failed you can do `helm ls --namespace <namespace>` where the namespace is the one you're deploying to.",
                                        "After seeing if the deployment has failed in Helm, you can use kubectl to debug further. Take a look at https://kubernetes.io/docs/reference/kubectl/cheatsheet/ if you're less familiar with kubectl."
                                    ])
                                    .pretty()
                            )
                    }
                }
                Some("watch") => {
                    subcommand = subcommand.subcommand_matches("watch").unwrap();
                    let file_path_option = subcommand.value_of("file");
                    let has_local_registry = subcommand.is_present("--local-hosted-registry");
                    watch(file_path_option, has_local_registry);
                }
                Some("list") => {
                    println!("\nTorb Stacks:\n");
                    let stack_manifests = load_stack_manifests();

                    for (repo, manifest) in stack_manifests.iter() {
                        println!("{repo}:");

                        for (key, _) in manifest.as_mapping().unwrap().iter() {
                            println!("- {}", key.as_str().unwrap());
                        }
                    }
                }
                _ => {
                    println!("No subcommand specified.");
                }
            }
        }
        Some("version") => {
            println!("Torb Version: {}", VERSION);
        }
        _ => {
            println!("No subcommand specified.");
        }
    }
}
--------------------------------------------------------------------------------
/cli/src/resolver.rs:
--------------------------------------------------------------------------------
// Business Source License 1.1
// Licensor: Torb Foundry
// Licensed Work: Torb v0.3.7-03.23
// The Licensed Work is © 2023-Present Torb Foundry
// 6 | // Change License: GNU Affero General Public License Version 3 7 | // Additional Use Grant: None 8 | // Change Date: Feb 22, 2023 9 | // 10 | // See LICENSE file at https://github.com/TorbFoundry/torb/blob/main/LICENSE for details. 11 | 12 | pub mod inputs; 13 | 14 | use crate::artifacts::{ArtifactNodeRepr, BuildStep, TorbInput, TorbInputSpec}; 15 | use crate::utils::{for_each_artifact_repository, normalize_name, torb_path}; 16 | use crate::watcher::{WatcherConfig}; 17 | 18 | use indexmap::IndexMap; 19 | use serde::{Deserialize, Serialize}; 20 | use serde_yaml::{self, Value}; 21 | use std::collections::HashMap; 22 | use std::process::Command; 23 | use std::{error::Error, path::PathBuf}; 24 | use thiserror::Error; 25 | 26 | // const VERSION: &'static str = env!("CARGO_PKG_VERSION"); 27 | pub fn resolve_stack(stack_yaml: &String) -> Result> { 28 | let stack_def_yaml: serde_yaml::Value = serde_yaml::from_str(stack_yaml).unwrap(); 29 | let stack_name = stack_def_yaml.get("name").unwrap().as_str().unwrap(); 30 | // let stack_description = stack_def_yaml.get("description").unwrap().as_str().unwrap(); 31 | let resolver_conf = ResolverConfig::new( 32 | // false, 33 | normalize_name(stack_name), 34 | // stack_description.to_string(), 35 | stack_def_yaml.clone(), 36 | // VERSION.to_string(), 37 | ); 38 | 39 | let resolver = Resolver::new(&resolver_conf); 40 | 41 | resolver.resolve() 42 | } 43 | 44 | #[derive(Error, Debug)] 45 | pub enum TorbResolverErrors { 46 | #[error( 47 | "Unable to parse stack manifest, please check that it is a valid Torb stack manifest." 
48 | )] 49 | CannotParseStackManifest, 50 | } 51 | 52 | #[derive(Clone)] 53 | pub struct ResolverConfig { 54 | // autoaccept: bool, 55 | stack_name: String, 56 | // stack_description: String, 57 | stack_contents: serde_yaml::Value, 58 | // torb_version: String, 59 | } 60 | 61 | impl ResolverConfig { 62 | pub fn new( 63 | // autoaccept: bool, 64 | stack_name: String, 65 | // stack_description: String, 66 | stack_contents: serde_yaml::Value, 67 | // torb_version: String, 68 | ) -> ResolverConfig { 69 | ResolverConfig { 70 | // autoaccept, 71 | stack_name, 72 | // stack_description, 73 | stack_contents, 74 | // torb_version, 75 | } 76 | } 77 | } 78 | 79 | // #[derive(Serialize, Deserialize, Clone)] 80 | // pub struct DeployStep { 81 | // name: String, 82 | // tool_version: String, 83 | // tool_name: String, 84 | // tool_config: IndexMap, 85 | // } 86 | 87 | #[derive(Serialize, Deserialize, Default, Clone, Debug)] 88 | pub struct NodeDependencies { 89 | pub services: Option>, 90 | pub projects: Option>, 91 | pub stacks: Option>, 92 | } 93 | 94 | impl NodeDependencies {} 95 | 96 | #[derive(Clone, Debug)] 97 | pub struct StackGraph { 98 | pub services: HashMap, 99 | pub projects: HashMap, 100 | pub stacks: HashMap, 101 | pub name: String, 102 | pub version: String, 103 | pub kind: String, 104 | pub commits: IndexMap, 105 | pub tf_version: String, 106 | pub helm_version: String, 107 | pub meta: Box>, 108 | pub incoming_edges: HashMap>, 109 | pub namespace: Option, 110 | pub release: Option, 111 | pub repositories: Option>, 112 | pub watcher: WatcherConfig 113 | } 114 | 115 | impl StackGraph { 116 | pub fn new( 117 | name: String, 118 | kind: String, 119 | version: String, 120 | commits: IndexMap, 121 | tf_version: String, 122 | helm_version: String, 123 | meta: Box>, 124 | namespace: Option, 125 | release: Option, 126 | repositories: Option>, 127 | watcher: WatcherConfig 128 | ) -> StackGraph { 129 | StackGraph { 130 | services: HashMap::::new(), 131 | projects: 
HashMap::::new(), 132 | stacks: HashMap::::new(), 133 | name, 134 | version, 135 | kind, 136 | tf_version, 137 | helm_version, 138 | commits, 139 | meta, 140 | incoming_edges: HashMap::>::new(), 141 | namespace, 142 | release, 143 | repositories, 144 | watcher: watcher 145 | } 146 | } 147 | 148 | pub fn add_service(&mut self, node: &ArtifactNodeRepr) { 149 | self.services.insert(node.fqn.clone(), node.clone()); 150 | } 151 | pub fn add_project(&mut self, node: &ArtifactNodeRepr) { 152 | self.projects.insert(node.fqn.clone(), node.clone()); 153 | } 154 | // pub fn add_stack(&mut self, node: &ArtifactNodeRepr) { 155 | // self.stacks.insert(node.fqn.clone(), node.clone()); 156 | // } 157 | pub fn add_all_incoming_edges_downstream( 158 | &mut self, 159 | stack_name: String, 160 | node: &ArtifactNodeRepr, 161 | ) { 162 | self.incoming_edges 163 | .entry(node.fqn.clone()) 164 | .or_insert(Vec::::new()); 165 | 166 | node.dependency_names 167 | .projects 168 | .as_ref() 169 | .map_or((), |projects| { 170 | projects.iter().for_each(|project| { 171 | let p_fqn = format!("{}.{}.{}", stack_name, "project".to_string(), project); 172 | match self.incoming_edges.get_mut(p_fqn.as_str()) { 173 | Some(edges) => { 174 | edges.push(node.fqn.clone()); 175 | } 176 | None => { 177 | let mut edges = Vec::new(); 178 | edges.push(node.fqn.clone()); 179 | self.incoming_edges.insert(p_fqn.clone(), edges); 180 | } 181 | } 182 | }); 183 | }); 184 | 185 | node.dependency_names 186 | .services 187 | .as_ref() 188 | .map_or((), |projects| { 189 | projects.iter().for_each(|project| { 190 | let s_fqn = format!("{}.{}.{}", stack_name, "service".to_string(), project); 191 | match self.incoming_edges.get_mut(project) { 192 | Some(edges) => { 193 | edges.push(node.fqn.clone()); 194 | } 195 | None => { 196 | let mut edges = Vec::new(); 197 | edges.push(node.fqn.clone()); 198 | self.incoming_edges.insert(s_fqn.clone(), edges); 199 | } 200 | } 201 | }); 202 | }); 203 | 204 | node.dependency_names 205 | 
.stacks 206 | .as_ref() 207 | .map_or((), |projects| { 208 | projects.iter().for_each(|project| { 209 | let s_fqn = format!("{}.{}.{}", stack_name, "stack".to_string(), project); 210 | match self.incoming_edges.get_mut(project) { 211 | Some(edges) => { 212 | edges.push(node.fqn.clone()); 213 | } 214 | None => { 215 | let mut edges = Vec::new(); 216 | edges.push(node.fqn.clone()); 217 | self.incoming_edges.insert(s_fqn.clone(), edges); 218 | } 219 | } 220 | }); 221 | }); 222 | } 223 | } 224 | 225 | pub struct Resolver { 226 | config: ResolverConfig, 227 | stack: Value, 228 | } 229 | 230 | impl Resolver { 231 | pub fn new(config: &ResolverConfig) -> Resolver { 232 | Resolver { 233 | config: config.clone(), 234 | stack: config.stack_contents.clone(), 235 | } 236 | } 237 | 238 | pub fn resolve(&self) -> Result> { 239 | println!("Resolving stack graph..."); 240 | let yaml = self.stack.clone(); 241 | let graph = self.build_graph(yaml)?; 242 | 243 | Ok(graph) 244 | } 245 | 246 | fn build_graph( 247 | &self, 248 | yaml: serde_yaml::Value, 249 | ) -> Result> { 250 | let meta = Box::new(None); 251 | let mut name = yaml["name"].as_str().unwrap().to_string(); 252 | name = normalize_name(&name); 253 | 254 | let version = yaml["version"].as_str().unwrap().to_string(); 255 | let kind = yaml["kind"].as_str().unwrap().to_string(); 256 | let tf_version = self.get_tf_version(); 257 | let helm_version = self.get_helm_version(); 258 | let mut commits = IndexMap::new(); 259 | 260 | for_each_artifact_repository(Box::new(|_repo_path, repo| { 261 | let repo_string = &repo.file_name().into_string().unwrap(); 262 | let sha = self.get_commit_sha(repo_string); 263 | 264 | commits.insert(repo_string.clone(), sha); 265 | }))?; 266 | 267 | let namespace = yaml["namespace"].as_str().map(|ns| ns.to_string()); 268 | let release = yaml["release"].as_str().map(|ns| ns.to_string()); 269 | let repositories: Option> = 270 | serde_yaml::from_value(yaml["repositories"].clone())?; 271 | 272 | 273 | let 
watcher: WatcherConfig = match yaml["watcher"] { 274 | Value::Null => WatcherConfig::default(), 275 | _ => serde_yaml::from_value(yaml["watcher"].clone())? 276 | }; 277 | 278 | let mut graph = StackGraph::new( 279 | name, 280 | kind, 281 | version, 282 | commits, 283 | tf_version, 284 | helm_version, 285 | meta, 286 | namespace, 287 | release, 288 | repositories, 289 | watcher 290 | ); 291 | 292 | self.walk_yaml(&mut graph, &yaml); 293 | 294 | Ok(graph) 295 | } 296 | 297 | fn get_helm_version(&self) -> String { 298 | let cmd_out = Command::new("helm") 299 | .arg("version") 300 | .output() 301 | .expect("Failed to get helm version, please make sure helm3 is installed and that the helm alias is in your path."); 302 | 303 | String::from_utf8(cmd_out.stdout).unwrap() 304 | } 305 | 306 | fn get_tf_version(&self) -> String { 307 | let torb_path = torb_path(); 308 | let cmd_out = Command::new("./terraform") 309 | .arg("version") 310 | .arg("-json") 311 | .current_dir(torb_path) 312 | .output() 313 | .expect("Failed to get terraform version, please make sure Torb has been initialized properly."); 314 | 315 | String::from_utf8(cmd_out.stdout).unwrap() 316 | } 317 | 318 | fn get_commit_sha(&self, repo: &String) -> String { 319 | let torb_path = torb_path(); 320 | let artifacts_path = torb_path.join("repositories").join(repo); 321 | let cmd_out = Command::new("git") 322 | .arg("rev-parse") 323 | .arg("HEAD") 324 | .current_dir(artifacts_path) 325 | .output() 326 | .expect("Failed to get current commit SHA for an artifact repo, please make sure git is installed and that Torb has been initialized."); 327 | 328 | let mut sha = String::from_utf8(cmd_out.stdout).unwrap(); 329 | 330 | // Removes newline 331 | sha.pop(); 332 | 333 | sha 334 | } 335 | 336 | fn resolve_service( 337 | &self, 338 | stack_name: &str, 339 | stack_kind_name: &str, 340 | node_name: &str, 341 | service_name: &str, 342 | artifact_path: PathBuf, 343 | inputs: IndexMap, 344 | values: serde_yaml::Value, 345 | 
source: &str, 346 | namespace: Option, 347 | expedient: bool, 348 | yaml: Value 349 | ) -> Result> { 350 | let mut node: ArtifactNodeRepr = if expedient { 351 | let mut deploy_steps = IndexMap::>>::new(); 352 | 353 | let repo = yaml.get("repository").ok_or("Could not find helm repository for expedient service.")?.as_str().unwrap().to_string(); 354 | let chart = yaml.get("chart").ok_or("Could not find helm chart for expedient service.")?.as_str().unwrap().to_string(); 355 | 356 | let mut helm = IndexMap::::new(); 357 | 358 | helm.insert("repository".to_string(), repo); 359 | helm.insert("chart".to_string(), chart); 360 | helm.insert("custom".to_string(), "false".to_string()); 361 | 362 | deploy_steps.insert("helm".to_string(), Some(helm)); 363 | 364 | 365 | let services_path = artifact_path.join("services"); 366 | let service_path = services_path.join("torb-expedient"); 367 | let torb_yaml_path = service_path.join("torb.yaml"); 368 | let node_fp = torb_yaml_path 369 | .to_str() 370 | .ok_or("Could not convert path to string.")? 371 | .to_string(); 372 | 373 | ArtifactNodeRepr::new( 374 | "".to_string(), 375 | node_name.to_string(), 376 | "".to_string(), 377 | "service".to_string(), 378 | None, 379 | None, 380 | None, 381 | deploy_steps, 382 | IndexMap::::new(), 383 | IndexMap::::new(), 384 | Vec::::new(), 385 | node_fp, 386 | None, 387 | None, 388 | "".to_string(), 389 | None, 390 | None, 391 | true 392 | ) 393 | } else { 394 | let services_path = artifact_path.join("services"); 395 | let service_path = services_path.join(service_name); 396 | let torb_yaml_path = service_path.join("torb.yaml"); 397 | let torb_yaml = std::fs::read_to_string(&torb_yaml_path)?; 398 | let mut deser_node: ArtifactNodeRepr = serde_yaml::from_str(torb_yaml.as_str())?; 399 | 400 | let node_fp = torb_yaml_path 401 | .to_str() 402 | .ok_or("Could not convert path to string.")? 
403 | .to_string(); 404 | deser_node.file_path = node_fp; 405 | 406 | deser_node 407 | }; 408 | 409 | node.fqn = format!("{}.{}.{}", stack_name, stack_kind_name, node_name); 410 | 411 | node.source = Some(source.to_string()); 412 | node.namespace = namespace; 413 | 414 | node.values = 415 | serde_yaml::to_string(&values).expect("Unable to convert values yaml to string."); 416 | node.validate_map_and_set_inputs(inputs); 417 | node.discover_and_set_implicit_dependencies(&stack_name.to_string())?; 418 | 419 | Ok(node) 420 | } 421 | 422 | fn reconcile_build_step(&self, build_step: BuildStep, new_build_step: BuildStep) -> BuildStep { 423 | let registry = if new_build_step.registry != "" { 424 | new_build_step.registry 425 | } else { 426 | build_step.registry 427 | }; 428 | 429 | let dockerfile = if new_build_step.dockerfile != "" { 430 | new_build_step.dockerfile 431 | } else { 432 | build_step.dockerfile 433 | }; 434 | 435 | let script_path = if new_build_step.script_path != "" { 436 | new_build_step.script_path 437 | } else { 438 | build_step.script_path 439 | }; 440 | 441 | let tag = if new_build_step.tag != "" { 442 | new_build_step.tag 443 | } else { 444 | build_step.tag 445 | }; 446 | 447 | BuildStep { 448 | registry, 449 | tag, 450 | dockerfile, 451 | script_path, 452 | } 453 | } 454 | 455 | fn resolve_project( 456 | &self, 457 | stack_name: &str, 458 | stack_kind_name: &str, 459 | node_name: &str, 460 | project_name: &str, 461 | artifact_path: PathBuf, 462 | inputs: IndexMap, 463 | build_config: Option<&Value>, 464 | values: serde_yaml::Value, 465 | source: &str, 466 | namespace: Option 467 | ) -> Result> { 468 | let projects_path = artifact_path.join("projects"); 469 | let project_path = projects_path.join(project_name); 470 | let torb_yaml_path = project_path.join("torb.yaml"); 471 | let torb_yaml = std::fs::read_to_string(&torb_yaml_path)?; 472 | let mut node: ArtifactNodeRepr = serde_yaml::from_str(torb_yaml.as_str())?; 473 | let node_fp = torb_yaml_path 474 
| .to_str() 475 | .ok_or("Could not convert path to string.")? 476 | .to_string(); 477 | 478 | node.source = Some(source.to_string()); 479 | node.namespace = namespace; 480 | 481 | let build_step = node.build_step.or(Some(BuildStep::default())).unwrap(); 482 | let new_build_step: BuildStep = match build_config { 483 | Some(build) => { 484 | let temp = serde_yaml::from_value(build.clone())?; 485 | self.reconcile_build_step(build_step, temp) 486 | } 487 | None => { 488 | let temp = BuildStep { 489 | registry: "".to_string(), 490 | dockerfile: "".to_string(), 491 | script_path: "".to_string(), 492 | tag: "".to_string(), 493 | }; 494 | 495 | self.reconcile_build_step(build_step, temp) 496 | } 497 | }; 498 | 499 | node.build_step = Some(new_build_step); 500 | node.fqn = format!("{}.{}.{}", stack_name, stack_kind_name, node_name); 501 | node.file_path = node_fp; 502 | node.validate_map_and_set_inputs(inputs); 503 | node.values = 504 | serde_yaml::to_string(&values).expect("Unable to convert values yaml to string."); 505 | node.discover_and_set_implicit_dependencies(&stack_name.to_string())?; 506 | 507 | Ok(node) 508 | } 509 | 510 | fn deserialize_params( 511 | params: Option<&serde_yaml::Value>, 512 | ) -> Result, Box> { 513 | match params { 514 | Some(params) => { 515 | let deserialized_params: IndexMap = 516 | serde_yaml::from_value(params.clone())?; 517 | 518 | Ok(deserialized_params) 519 | } 520 | None => Ok(IndexMap::new()), 521 | } 522 | } 523 | 524 | fn resolve_node( 525 | &self, 526 | stack_name: &str, 527 | stack_kind_name: &str, 528 | node_name: &str, 529 | yaml: serde_yaml::Value, 530 | ) -> Result> { 531 | println!("Resolving node: {}", node_name); 532 | let err = TorbResolverErrors::CannotParseStackManifest; 533 | let home_dir = dirs::home_dir().unwrap(); 534 | let torb_path = home_dir.join(".torb"); 535 | let repository_path = torb_path.join("repositories"); 536 | 537 | let repo = match yaml.get("source") { 538 | Some(source) => source.as_str().unwrap(), 
539 | None => "torb-artifacts", 540 | }; 541 | 542 | let artifacts_path = repository_path.join(repo); 543 | 544 | let inputs = Resolver::deserialize_params(yaml.get("inputs")) 545 | .expect("Unable to deserialize inputs."); 546 | 547 | let config_values = yaml.get("values").unwrap_or(&serde_yaml::Value::Null); 548 | 549 | let mut node = match stack_kind_name { 550 | "service" => { 551 | let service_name = yaml 552 | .get("service") 553 | .ok_or(err)? 554 | .as_str() 555 | .expect("Unable to parse service name."); 556 | 557 | let service_namespace = yaml.get("namespace").map(|x| { 558 | x.as_str().unwrap().to_string() 559 | }); 560 | 561 | let expedient: bool = yaml.get("expedient").is_some(); 562 | 563 | self.resolve_service( 564 | stack_name, 565 | stack_kind_name, 566 | node_name, 567 | service_name, 568 | artifacts_path, 569 | inputs, 570 | config_values.clone(), 571 | repo, 572 | service_namespace, 573 | expedient, 574 | yaml.clone() 575 | ) 576 | } 577 | "project" => { 578 | let project_name = yaml 579 | .get("project") 580 | .ok_or(err)? 
581 | .as_str() 582 | .expect("Unable to parse project name."); 583 | let build_config = yaml.get("build"); 584 | 585 | let project_namespace = yaml.get("namespace").map(|x| { 586 | x.as_str().unwrap().to_string() 587 | }); 588 | 589 | self.resolve_project( 590 | stack_name, 591 | stack_kind_name, 592 | node_name, 593 | project_name, 594 | artifacts_path, 595 | inputs, 596 | build_config, 597 | config_values.clone(), 598 | repo, 599 | project_namespace 600 | ) 601 | } 602 | 603 | _ => return Err(Box::new(err)), 604 | }?; 605 | 606 | let dep_values = yaml.get("deps"); 607 | match dep_values { 608 | Some(deps) => { 609 | let yaml_str = serde_yaml::to_string(deps)?; 610 | let deps: NodeDependencies = serde_yaml::from_str(yaml_str.as_str()).unwrap(); 611 | node.dependency_names = deps; 612 | 613 | Ok(node) 614 | } 615 | None => return Ok(node), 616 | } 617 | } 618 | 619 | fn walk_yaml(&self, graph: &mut StackGraph, yaml: &serde_yaml::Value) { 620 | // Walk yaml and add nodes to graph 621 | for (key, value) in yaml.as_mapping().unwrap().iter() { 622 | let key_string = key.as_str().unwrap(); 623 | match key_string { 624 | "services" => { 625 | value.as_mapping().and_then(|mapping| { 626 | for (service_name, service_value) in mapping.iter() { 627 | let stack_service_name = service_name.as_str().unwrap(); 628 | let stack_name = self.config.stack_name.clone(); 629 | let service_value = service_value.clone(); 630 | let service_node = self 631 | .resolve_node( 632 | stack_name.as_str(), 633 | "service", 634 | stack_service_name, 635 | service_value, 636 | ) 637 | .unwrap(); 638 | 639 | graph.add_service(&service_node); 640 | graph.add_all_incoming_edges_downstream( 641 | stack_name.clone(), 642 | &service_node, 643 | ); 644 | } 645 | 646 | Some(()) 647 | }); 648 | } 649 | "projects" => { 650 | value.as_mapping().and_then(|mapping| { 651 | for (project_name, project_value) in mapping.iter() { 652 | let project_name = project_name.as_str().unwrap(); 653 | let stack_name = 
self.config.stack_name.clone(); 654 | let project_value = project_value.clone(); 655 | let project_node = self 656 | .resolve_node( 657 | stack_name.as_str(), 658 | "project", 659 | project_name, 660 | project_value, 661 | ) 662 | .expect("Failed to resolve project node."); 663 | graph.add_project(&project_node); 664 | graph.add_all_incoming_edges_downstream( 665 | stack_name.clone(), 666 | &project_node, 667 | ); 668 | } 669 | 670 | Some(()) 671 | }); 672 | } 673 | _ => (), 674 | } 675 | } 676 | } 677 | } 678 | -------------------------------------------------------------------------------- /cli/src/resolver/inputs.rs: -------------------------------------------------------------------------------- 1 | // Business Source License 1.1 2 | // Licensor: Torb Foundry 3 | // Licensed Work: Torb v0.3.7-03.23 4 | // The Licensed Work is © 2023-Present Torb Foundry 5 | // 6 | // Change License: GNU Affero General Public License Version 3 7 | // Additional Use Grant: None 8 | // Change Date: Feb 22, 2023 9 | // 10 | // See LICENSE file at https://github.com/TorbFoundry/torb/blob/main/LICENSE for details. 
11 | 12 | use crate::artifacts::{ArtifactNodeRepr, TorbInput}; 13 | use crate::composer::InputAddress; 14 | use serde_yaml::Value; 15 | 16 | use thiserror::Error; 17 | 18 | const INIT_TOKEN: &str = "TORB"; 19 | 20 | #[derive(Error, Debug)] 21 | pub enum TorbInputResolverErrors {} 22 | 23 | pub const NO_INPUTS_FN: Option) -> String>> = 24 | None::) -> String>>; 25 | 26 | pub const NO_VALUES_FN: Option) -> String>> = 27 | None::) -> String>>; 28 | 29 | pub const NO_INITS_FN: Option = None; 30 | 31 | pub struct InputResolver<'a, F, U> { 32 | node: &'a ArtifactNodeRepr, 33 | values_fn: Option, 34 | inputs_fn: Option, 35 | inits_fn: Option 36 | } 37 | 38 | impl<'a, F, U> InputResolver<'a, F, U> { 39 | pub fn resolve( 40 | node: &'a ArtifactNodeRepr, 41 | values_fn: Option, 42 | inputs_fn: Option, 43 | inits_fn: Option, 44 | ) -> Result<(Option, Option>, Option>), Box> 45 | where 46 | F: FnMut(Result) -> String, 47 | U: FnMut(&String, Result) -> String, 48 | { 49 | let mut resolver = InputResolver { 50 | node: node, 51 | values_fn, 52 | inputs_fn, 53 | inits_fn 54 | }; 55 | 56 | let values_fn_out = if resolver.values_fn.is_some() { 57 | Some(resolver.resolve_inputs_in_values()) 58 | } else { 59 | None 60 | }; 61 | 62 | let inputs_fn_out = if resolver.inputs_fn.is_some() { 63 | Some(resolver.resolve_inputs_in_mapped_inputs()) 64 | } else { 65 | None 66 | }; 67 | 68 | let inits_fn_out = if resolver.inits_fn.is_some() { 69 | Some(resolver.resolve_node_init_script_inputs()) 70 | } else { 71 | None 72 | }; 73 | 74 | Ok((values_fn_out, inputs_fn_out, inits_fn_out)) 75 | } 76 | 77 | fn resolve_inputs_in_mapped_inputs(&mut self) -> Vec<(String, String)> 78 | where 79 | U: FnMut(&String, Result) -> String, 80 | { 81 | let f = self.inputs_fn.as_mut().unwrap(); 82 | 83 | let mut out: Vec<(String, String)> = vec![]; 84 | 85 | for (_, (spec, value)) in self.node.mapped_inputs.iter() { 86 | let input_address_result = InputAddress::try_from(value); 87 | 88 | let res = f(&spec.clone(), 
input_address_result.clone()); 89 | 90 | out.push((spec.clone(), res)); 91 | } 92 | 93 | out 94 | } 95 | 96 | 97 | pub fn resolve_node_init_script_inputs(&mut self) -> Vec { 98 | let steps = self.node.init_step.clone().unwrap(); 99 | steps.iter().map(|step| { 100 | self.resolve_torb_value_interpolation(step) 101 | }).collect::>() 102 | } 103 | /* 104 | Case 1: Token at start 105 | Remaining = anything after token 106 | Case 2: Token in middle 107 | Remaining = anything before or after token 108 | Case 3: Token at end 109 | Remaining = anything before token 110 | */ 111 | fn resolve_torb_value_interpolation(&mut self, script_step: &String) -> String { 112 | let start_option: Option = script_step.find(INIT_TOKEN); 113 | match start_option { 114 | Some(start) => { 115 | let mut end = script_step.split_at(start).1.find(" ").unwrap_or(script_step.len()); 116 | end = script_step.split_at(start).1.find("/").unwrap_or(end); 117 | 118 | let remaining = if start == 0 && end == script_step.len() { 119 | let resolved_token = self.resolve_inputs_in_init_step(script_step.to_string()); 120 | let serialized_token = resolved_token.serialize_for_init(); 121 | 122 | serialized_token 123 | } else if end == script_step.len() { 124 | let parts = script_step.split_at(start); 125 | let resolved_token = self.resolve_inputs_in_init_step(parts.1.to_string()); 126 | let remaining = parts.0.to_string(); 127 | let serialized_token = resolved_token.serialize_for_init(); 128 | 129 | format!("{}{}", remaining, serialized_token) 130 | } else if start == 0 { 131 | let parts = script_step.split_at(end); 132 | let resolved_token = self.resolve_inputs_in_init_step(parts.0.to_string()); 133 | let serialized_token = resolved_token.serialize_for_init(); 134 | let remaining = parts.1.to_string(); 135 | format!("{}{}", serialized_token, remaining) 136 | } else { 137 | let parts = script_step.split_at(start); 138 | let remaining_1 = parts.0.to_string(); 139 | let parts = parts.1.split_at(end); 140 | let 
token = parts.0.to_string(); 141 | let remaining_2 = parts.1.to_string(); 142 | 143 | let resolved_token = self.resolve_inputs_in_init_step(token); 144 | 145 | let serialized_token = resolved_token.serialize_for_init(); 146 | format!("{}{}{}", remaining_1, serialized_token, remaining_2) 147 | }; 148 | 149 | self.resolve_torb_value_interpolation(&remaining.to_string()) 150 | }, 151 | None => { 152 | script_step.clone() 153 | } 154 | } 155 | } 156 | 157 | pub fn resolve_inputs_in_init_step(&mut self, token: String) -> TorbInput 158 | { 159 | let input = token.split("TORB.inputs.").collect::>()[1]; 160 | 161 | let (_, val) = self.node.mapped_inputs.get(input).unwrap(); 162 | 163 | val.clone() 164 | } 165 | 166 | pub fn resolve_inputs_in_values(&mut self) -> String 167 | where 168 | F: FnMut(Result) -> String, 169 | { 170 | let yaml_str = self.node.values.as_str(); 171 | let serde_value: Value = serde_yaml::from_str(yaml_str).unwrap_or(Value::Null); 172 | let resolved_values = self.resolve_inputs_in_helm_values(&serde_value); 173 | 174 | serde_yaml::to_string(&resolved_values).expect("Unable to convert value to string in resolver.") 175 | } 176 | 177 | fn resolve_inputs_in_helm_values(&mut self, value: &Value) -> Value 178 | where 179 | F: FnMut(Result) -> String, 180 | { 181 | let f = self.values_fn.as_mut().unwrap(); 182 | 183 | match value { 184 | Value::String(s) => { 185 | if s.starts_with("self.") { 186 | let torb_input_address = InputAddress::try_from(s.as_str()); 187 | 188 | let string_value = f(torb_input_address); 189 | 190 | Value::String(string_value) 191 | } else { 192 | Value::String(s.to_string()) 193 | } 194 | } 195 | Value::Mapping(m) => { 196 | let mut new_mapping = serde_yaml::Mapping::new(); 197 | for (k, v) in m { 198 | new_mapping.insert(k.clone(), self.resolve_inputs_in_helm_values(v)); 199 | } 200 | 201 | Value::Mapping(new_mapping) 202 | } 203 | Value::Sequence(s) => { 204 | let mut new_seq = serde_yaml::Sequence::new(); 205 | for v in s { 206 
| new_seq.push(self.resolve_inputs_in_helm_values(v).to_owned()); 207 | } 208 | 209 | Value::Sequence(new_seq) 210 | } 211 | Value::Number(n) => Value::Number(n.to_owned()), 212 | Value::Bool(b) => Value::Bool(b.to_owned()), 213 | _ => Value::Null, 214 | } 215 | } 216 | } 217 | -------------------------------------------------------------------------------- /cli/src/utils.rs: -------------------------------------------------------------------------------- 1 | // Business Source License 1.1 2 | // Licensor: Torb Foundry 3 | // Licensed Work: Torb v0.3.7-03.23 4 | // The Licensed Work is © 2023-Present Torb Foundry 5 | // 6 | // Change License: GNU Affero General Public License Version 3 7 | // Additional Use Grant: None 8 | // Change Date: Feb 22, 2023 9 | // 10 | // See LICENSE file at https://github.com/TorbFoundry/torb/blob/main/LICENSE for details. 11 | 12 | use colored::Colorize; 13 | 14 | use core::fmt::Display; 15 | use data_encoding::BASE32; 16 | use sha2::{Digest, Sha256}; 17 | use std::error::Error; 18 | use std::{ 19 | fmt::Debug, 20 | fs::DirEntry, 21 | process::{Command, Output}, 22 | }; 23 | use thiserror::Error; 24 | 25 | #[derive(Error, Debug)] 26 | pub enum TorbUtilityErrors { 27 | #[error( 28 | "Unable to run this command:\n\n{command}, \n\nShell: {shell}, \n\nReason:\n\n{reason}" 29 | )] 30 | UnableToRunCommandInShell { 31 | command: String, 32 | shell: String, 33 | reason: String, 34 | }, 35 | 36 | #[error("Unable to run this command:\n\n{command}, \n\nbecause of this reason: \n\n{reason}")] 37 | UnableToRunCommand { command: String, reason: String }, 38 | 39 | #[error( 40 | "Resource did not match Torb supported Kind, supported: StatefulSet, Deployment, DaemonSet" 41 | )] 42 | UnsupportedKind, 43 | 44 | #[error("Resource not found.")] 45 | ResourceNotFound, 46 | } 47 | 48 | const TORB_PATH: &str = ".torb"; 49 | 50 | pub fn kebab_to_snake_case(input: &str) -> String { 51 | input.replace("-", "_") 52 | } 53 | 54 | #[allow(dead_code)] 55 | pub fn 
snake_case_to_kebab(input: &str) -> String { 56 | input.replace("_", "-") 57 | } 58 | 59 | pub fn normalize_name(name: &str) -> String { 60 | name.to_lowercase() 61 | .replace("-", "_") 62 | .replace("/", "") 63 | .replace(".", "_") 64 | .replace(" ", "_") 65 | } 66 | 67 | pub fn torb_path() -> std::path::PathBuf { 68 | let home_dir = dirs::home_dir().unwrap(); 69 | home_dir.join(TORB_PATH) 70 | } 71 | 72 | pub fn buildstate_path_or_create() -> std::path::PathBuf { 73 | let current_dir = std::env::current_dir().unwrap(); 74 | let current_dir_state_dir = current_dir.join(".torb_buildstate"); 75 | 76 | if current_dir_state_dir.exists() { 77 | current_dir_state_dir 78 | } else { 79 | std::fs::create_dir_all(¤t_dir_state_dir).unwrap(); 80 | current_dir_state_dir 81 | } 82 | } 83 | 84 | pub fn for_each_artifact_repository( 85 | mut closure: Box () + '_>, 86 | ) -> Result<(), Box> { 87 | let path = torb_path(); 88 | let repo_path = path.join("repositories"); 89 | 90 | let repos = std::fs::read_dir(&repo_path)?; 91 | 92 | for repo_res in repos { 93 | let repo = repo_res?; 94 | 95 | closure(repo_path.clone(), repo); 96 | } 97 | 98 | Ok(()) 99 | } 100 | 101 | pub fn run_command_in_user_shell( 102 | command_str: String, 103 | shell_override: Option, 104 | ) -> Result> { 105 | let shell = match shell_override { 106 | Some(sh) => sh, 107 | None => std::env::var("SHELL").unwrap(), 108 | }; 109 | 110 | let shell_args = vec!["-c".to_string(), command_str.to_string()]; 111 | 112 | let mut command = std::process::Command::new(shell.clone()); 113 | command.args(shell_args); 114 | 115 | let output = command.output()?; 116 | 117 | if output.status.success() { 118 | Ok(output) 119 | } else { 120 | Err(Box::new(TorbUtilityErrors::UnableToRunCommandInShell { 121 | command: command_str.to_string(), 122 | shell: shell, 123 | reason: String::from_utf8(output.stderr).unwrap(), 124 | })) 125 | } 126 | } 127 | 128 | pub fn checksum(data: String, original_hash: String) -> bool { 129 | let hash 
= Sha256::digest(data.as_bytes());
    let hash_base32 = BASE32.encode(&hash);

    println!("hash: {}", hash_base32);
    println!("original_hash: {}", original_hash);

    hash_base32 == original_hash
}

/// An ordered list of external commands that can be executed in sequence.
///
/// NOTE(review): generic parameters in this region were stripped by the
/// extraction that produced this dump; they are reconstructed here from the
/// surrounding usage (`Command`, `Output`, `Box<dyn Error>`) — confirm against
/// the repository source.
pub struct CommandPipeline {
    commands: Vec<Command>,
}

/// Borrowed description of a single external command: program, arguments and
/// an optional working directory.
#[derive(Debug, Clone)]
pub struct CommandConfig<'a> {
    command: &'a str,
    args: Vec<&'a str>,
    working_dir: Option<&'a str>,
}

impl<'a> CommandConfig<'a> {
    pub fn new(
        command: &'a str,
        args: Vec<&'a str>,
        working_dir: Option<&'a str>,
    ) -> CommandConfig<'a> {
        // Field-init shorthand instead of `command: command`, etc.
        CommandConfig {
            command,
            args,
            working_dir,
        }
    }
}

impl CommandPipeline {
    pub fn new(commands: Option<Vec<CommandConfig>>) -> Self {
        let new_commands = commands
            .unwrap_or_default()
            .iter()
            .map(Self::build_command)
            .collect();

        CommandPipeline {
            commands: new_commands,
        }
    }

    /// Translates a `CommandConfig` into a ready-to-run `std::process::Command`.
    /// Shared by `new` and `execute_single`, which previously duplicated this
    /// logic inline.
    fn build_command(conf: &CommandConfig) -> Command {
        let mut command = Command::new(conf.command);
        command.args(&conf.args);

        if let Some(dir) = conf.working_dir {
            command.current_dir(dir);
        }

        command
    }

    /// Builds and runs one command, returning its captured `Output`.
    pub fn execute_single(conf: CommandConfig) -> Result<Output, Box<dyn Error>> {
        let mut command = Self::build_command(&conf);

        CommandPipeline::run_command(&mut command)
    }

    /// Runs every command in order, failing fast on the first error.
    pub fn execute(&mut self) -> Result<Vec<Output>, Box<dyn Error>> {
        self.commands
            .iter_mut()
            .map(CommandPipeline::run_command)
            .collect()
    }

    /// Runs a single command; a non-zero exit status becomes
    /// `TorbUtilityErrors::UnableToRunCommand` carrying the stderr text.
    fn run_command(command: &mut Command) -> Result<Output, Box<dyn Error>> {
        let output = command.output()?;

        if output.status.success() {
            Ok(output)
        } else {
            Err(Box::new(TorbUtilityErrors::UnableToRunCommand {
                command: format!("{:?}", command),
                reason: String::from_utf8(output.stderr).unwrap(),
            }))
        }
    }
}

/// The Kubernetes workload kinds Torb knows how to restart.
pub enum ResourceKind {
    StatefulSet,
    DaemonSet,
    Deployment,
}

/// Looks `name` up among the deployments, statefulsets and daemonsets in
/// `namespace` (via `kubectl get ... -o=json`) and reports which kind it is.
///
/// Returns `ResourceNotFound` when nothing matches and `UnsupportedKind` when
/// the matching item is not one of the three supported kinds.
///
/// The parameter is `&str` rather than `&String` (existing `&String` callers
/// still work through deref coercion).
pub fn get_resource_kind(
    name: &str,
    namespace: &str,
) -> Result<ResourceKind, Box<dyn Error>> {
    let conf = CommandConfig::new(
        "kubectl",
        vec![
            "get",
            "deploy,statefulset,daemonset",
            "-n",
            namespace,
            "-o=json",
        ],
        None,
    );

    let mut cmd = CommandPipeline::new(Some(vec![conf]));

    let out = cmd.execute()?;

    let stdout = String::from_utf8(out[0].stdout.clone())?;

    let value: serde_json::Value = serde_json::from_str(&stdout)?;

    let json = value.as_object().unwrap();

    let items = json.get("items").unwrap().as_array().unwrap();

    let mut res: Result<ResourceKind, Box<dyn Error>> =
        Err(Box::new(TorbUtilityErrors::ResourceNotFound {}));

    // Last match wins, mirroring the original scan: no early break, so a later
    // item with the same name overrides an earlier one.
    for item in items.iter() {
        let item_name = item["metadata"]["name"].as_str().unwrap();
        let kind = item["kind"].as_str().unwrap();

        if name == item_name {
            res = match kind {
                "Deployment" => Ok(ResourceKind::Deployment),
                "DaemonSet" => Ok(ResourceKind::DaemonSet),
                "StatefulSet" => Ok(ResourceKind::StatefulSet),
                _ => Err(Box::new(TorbUtilityErrors::UnsupportedKind {})),
            };
        }
    }

    res
}

/// Messages shown around a `Result` when it is rendered by `PrettyExit`.
#[derive(Clone)]
pub struct PrettyContext<'a> {
    success_marquee_msg: Option<&'a str>,
    error_marquee_msg: Option<&'a str>,
    warning: Option<&'a str>,
    error_context: &'a str,
    suggestions: Vec<&'a str>,
}

impl<'a> Default for PrettyContext<'a> {
    fn default() -> PrettyContext<'a> {
        PrettyContext {
            success_marquee_msg: None,
            error_marquee_msg: None,
            warning: None,
            error_context: "",
            suggestions: Vec::new(),
        }
    }
}

impl<'a> PrettyContext<'a> {
    // Builder-style setters: each returns `&mut Self` so calls chain and the
    // chain is finished with `.pretty()`.
    pub fn success(&mut self, msg: &'a str) -> &mut Self {
        self.success_marquee_msg = Some(msg);

        self
    }

    pub fn error(&mut self, msg: &'a str) -> &mut Self {
        self.error_marquee_msg = Some(msg);

        self
    }

    pub fn context(&mut self, msg: &'a str) -> &mut Self {
        self.error_context = msg;

        self
    }

    pub fn suggestions(&mut self, msgs: Vec<&'a str>) -> &mut Self {
        self.suggestions = msgs;

        self
    }

    pub fn warn(&mut self, msg: &'a str) -> &mut Self {
        self.warning = Some(msg);

        self
    }

    /// Finishes a builder chain by cloning the accumulated context into an
    /// owned value.
    pub fn pretty(&mut self) -> Self {
        self.clone()
    }
}

/// Pretty-printing helpers for `Result`: render the outcome with colored
/// context/suggestions and either exit, warn, or hand the value through.
///
/// NOTE(review): the trait's `<T, E>` parameters were stripped in this dump
/// and are reconstructed from the `Result<T, E>` impl below.
pub trait PrettyExit<T, E> {
    /// Unwraps the value on success; on error prints everything and exits(1).
    fn use_or_pretty_exit(self, context: PrettyContext<'_>) -> T
    where
        E: Debug + Display;

    /// Prints error output; exits when `exit` is true, otherwise returns None.
    fn use_or_pretty_error(self, exit: bool, context: PrettyContext<'_>) -> Option<T>
    where
        E: Debug + Display;

    /// Warn-level variant for errors that are only `Send + Debug`.
    fn use_or_pretty_warn_send(self, context: PrettyContext<'_>) -> Option<T>
    where
        E: Send + Debug;

    /// Warn-level variant: prints the warning block and returns None on error.
    fn use_or_pretty_warn(self, context: PrettyContext<'_>) -> Option<T>
    where
        E: Debug + Display;

    fn display_success(&self, context: &PrettyContext<'_>);
    fn display_warning(&self, context: &PrettyContext<'_>);
    fn display_error(&self, context: &PrettyContext<'_>);
    fn display_context(&self, context: &PrettyContext<'_>);
    fn display_suggestions(&self, context: &PrettyContext<'_>);
    fn display_error_call_to_action(&self, context: &PrettyContext<'_>);
}

impl<T, E> PrettyExit<T, E> for Result<T, E> {
    fn use_or_pretty_warn_send(self, context: PrettyContext<'_>) -> Option<T>
    where
        E: Send + Debug,
    {
        match self.as_ref().err() {
            Some(err) => {
                self.display_warning(&context);
                let err_msg = format!("{:?}", err);
                println!("{}", err_msg.yellow());
                self.display_context(&context);
                self.display_suggestions(&context);
                self.display_error_call_to_action(&context);
                None
            }
            None => {
                self.display_success(&context);
                Some(self.unwrap())
            }
        }
    }

    fn use_or_pretty_warn(self, context: PrettyContext<'_>) -> Option<T>
    where
        E: Debug + Display,
    {
        match self.as_ref().err() {
            Some(err) => {
                self.display_warning(&context);
                let err_msg = format!("{}", err);
                println!("{}", err_msg.yellow());
                self.display_context(&context);
                self.display_suggestions(&context);
                self.display_error_call_to_action(&context);
                None
            }
            None => {
                self.display_success(&context);
                Some(self.unwrap())
            }
        }
    }

    fn use_or_pretty_exit(self, context: PrettyContext<'_>) -> T
    where
        E: Debug + Display,
    {
        self.use_or_pretty_error(true, context).unwrap()
    }

    fn use_or_pretty_error(self, exit: bool, context: PrettyContext<'_>) -> Option<T>
    where
        E: Debug + Display,
    {
        match self.as_ref().err() {
            Some(err) => {
                self.display_error(&context);
                let err_msg = format!("{}", err);
                println!("{}", err_msg.red());
                self.display_context(&context);
                self.display_suggestions(&context);
                self.display_error_call_to_action(&context);

                if exit {
                    std::process::exit(1);
                } else {
                    None
                }
            }
            None => {
                self.display_success(&context);
                Some(self.unwrap())
            }
        }
    }

    fn display_success(&self, context: &PrettyContext<'_>) {
        if let Some(msg) = context.success_marquee_msg {
            println!("{}\n", msg.bold().green());
        }
    }

    fn display_error(&self, context: &PrettyContext<'_>) {
        if let Some(msg) = context.error_marquee_msg {
            println!("{}\n", msg.bold().red());
        }
    }

    fn display_warning(&self, context: &PrettyContext<'_>) {
        // Bug fix: the original unconditionally unwrapped `context.warning`,
        // panicking when no warning message was set. Guard like the success
        // and error variants.
        if let Some(warning) = context.warning {
            println!("{}\n", warning.bold().yellow());
        }
    }

    fn display_context(&self, context: &PrettyContext<'_>) {
        println!("{}\n", context.error_context.bold().yellow());
    }

    fn display_suggestions(&self, context: &PrettyContext<'_>) {
        println!("{}", "What can you do?".bold().yellow());
        for suggestion in context.suggestions.iter() {
            println!("- {}", suggestion.bold().yellow());
        }
    }

    fn display_error_call_to_action(&self, _context: &PrettyContext<'_>) {
        println!("\n{}", "After trying our suggestions, If this looks like something that should be reported to the maintainers\n\nYou can do so here:".bold());
        println!("\n https://github.com/TorbFoundry/torb/issues/new \n");
    }
}

--------------------------------------------------------------------------------
/cli/src/vcs.rs:
--------------------------------------------------------------------------------
// Business Source License 1.1
// Licensor: Torb Foundry
// Licensed Work: Torb v0.3.7-03.23
// The Licensed Work is © 2023-Present Torb Foundry
//
// Change License: GNU Affero General Public License Version 3
// Additional Use Grant: None
// Change Date: Feb 22, 2023
//
// See LICENSE file at https://github.com/TorbFoundry/torb/blob/main/LICENSE for details.
11 | 12 | use std::error::Error; 13 | use std::fs; 14 | use std::path::PathBuf; 15 | use std::process::Command; 16 | use thiserror::Error; 17 | use ureq::{AgentBuilder}; 18 | 19 | #[derive(Error, Debug)] 20 | pub enum TorbVCSErrors { 21 | #[error("Cannot create repo directory at: {path:?}, reason: {response:?}")] 22 | UnableToCreateLocalRepoDir { path: PathBuf, response: String }, 23 | #[error("Unable to init local git repo, reason: {response:?}")] 24 | UnableToInitLocalGitRepo { response: String }, 25 | #[error("Unable to sync remote repo, reason: {response:?}")] 26 | UnableToSyncRemoteRepo { response: String }, 27 | #[error("Unable to push to remote repo, reason: {response:?}")] 28 | UnableToPushToRemoteRepo { response: String }, 29 | #[error("Unable to push to init readme, reason: {response:?}")] 30 | UnableToInitReadme { response: String }, 31 | } 32 | trait Or: Sized { 33 | fn or(self, other: Self) -> Self; 34 | } 35 | 36 | impl<'a> Or for &'a str { 37 | fn or(self, other: &'a str) -> &'a str { 38 | if self.is_empty() { other } else { self } 39 | } 40 | } 41 | mod private { 42 | use super::GithubVCS; 43 | 44 | pub trait Sealed {} 45 | impl Sealed for GithubVCS {} 46 | } 47 | 48 | pub trait GitVersionControlHelpers: private::Sealed { 49 | fn init_readme(&self) -> Result<(), TorbVCSErrors> { 50 | let repo_name = self.get_repo_name().unwrap().to_string(); 51 | let error_msg_ga_readme = "Failed to git add README.md"; 52 | let error_msg_commit_readme = "Failed to git commit README.md"; 53 | let cwd = self.get_cwd(); 54 | let readme_path = cwd.join("README.md"); 55 | let contents = format!("# {}", repo_name); 56 | 57 | fs::File::create(&readme_path).unwrap(); 58 | fs::write(&readme_path, contents).unwrap(); 59 | 60 | let git_add_readme = Command::new("git") 61 | .arg("add") 62 | .arg("./README.md") 63 | .current_dir(self.get_cwd()) 64 | .output() 65 | .expect(error_msg_ga_readme); 66 | 67 | Ok(git_add_readme).map(|output| { 68 | if !output.status.success() { 69 | 
Err(output) 70 | } else { 71 | Ok(()) 72 | } 73 | }).and_then(|_output| { 74 | let git_commit_readme = Command::new("git") 75 | .arg("commit") 76 | .arg("-m") 77 | .arg("Add README.md") 78 | .current_dir(self.get_cwd()) 79 | .output() 80 | .expect(error_msg_commit_readme); 81 | 82 | if !git_commit_readme.status.success() { 83 | Err(git_commit_readme.stderr) 84 | } else { 85 | Ok(()) 86 | } 87 | }).map_err(|err| { 88 | TorbVCSErrors::UnableToInitReadme { 89 | response: String::from_utf8(err).unwrap() 90 | } 91 | }) 92 | } 93 | 94 | fn add_remote_origin(&self) -> Result<(), TorbVCSErrors> { 95 | let repo_name = self.get_repo_name().unwrap().to_string(); 96 | let error_msg_remote = format!("Failed to add remote: {:?}", repo_name); 97 | let remote_repo = format!("{}:{}/{}", self.get_address(), self.get_user(), repo_name); 98 | println!("remote: {:?}", remote_repo.clone()); 99 | 100 | let git_remote_command = Command::new("git") 101 | .arg("remote") 102 | .arg("add") 103 | .arg("origin") 104 | .arg(remote_repo) 105 | .current_dir(self.get_cwd()) 106 | .output() 107 | .expect(&error_msg_remote); 108 | 109 | if !git_remote_command.status.success() { 110 | Err(TorbVCSErrors::UnableToInitLocalGitRepo { 111 | response: String::from_utf8(git_remote_command.stderr).unwrap(), 112 | }) 113 | } else { 114 | Ok(()) 115 | } 116 | } 117 | 118 | fn create_main_branch(&self) -> Result<(), TorbVCSErrors> { 119 | let error_msg_main = "Failed to sync main branch.".to_string(); 120 | let git_main_branch = Command::new("git") 121 | .arg("branch") 122 | .arg("-M") 123 | .arg("main") 124 | .current_dir(self.get_cwd()) 125 | .output() 126 | .expect(&error_msg_main); 127 | 128 | if !git_main_branch.status.success() { 129 | Err(TorbVCSErrors::UnableToSyncRemoteRepo { 130 | response: String::from_utf8(git_main_branch.stderr).unwrap(), 131 | }) 132 | } else { 133 | Ok(()) 134 | } 135 | } 136 | 137 | fn push_new_main(&self) -> Result<(), TorbVCSErrors> { 138 | let error_msg_push = "Failed to push 
to remote.".to_string(); 139 | let mut git_push_main = Command::new("git"); 140 | 141 | git_push_main 142 | .arg("push") 143 | .arg("-u") 144 | .arg("origin") 145 | .arg("main") 146 | .current_dir(self.get_cwd()); 147 | 148 | let res = git_push_main 149 | .output() 150 | .expect(&error_msg_push); 151 | 152 | if !res.status.success() { 153 | Err(TorbVCSErrors::UnableToPushToRemoteRepo { 154 | response: String::from_utf8(res.stderr).unwrap(), 155 | }) 156 | } else { 157 | Ok(()) 158 | } 159 | } 160 | 161 | fn get_cwd(&self) -> PathBuf; 162 | fn get_address(&self) -> String; 163 | fn get_user(&self) -> String; 164 | 165 | fn get_repo_name(&self) -> Option { 166 | let cwd = self.get_cwd(); 167 | 168 | let repo_name = cwd.file_name().unwrap().to_str(); 169 | 170 | match repo_name { 171 | Some(repo_name) => { 172 | Some(repo_name.to_string()) 173 | } 174 | None => { 175 | None 176 | } 177 | } 178 | } 179 | } 180 | 181 | pub trait GitVersionControl: GitVersionControlHelpers { 182 | fn create_remote_repo(&self) -> Result>; 183 | 184 | fn create_local_repo( 185 | &self 186 | ) -> Result> { 187 | let mkdir = Command::new("mkdir") 188 | .arg(self.get_cwd()) 189 | .output() 190 | .expect("Failed to create directory."); 191 | 192 | if mkdir.status.success() { 193 | let error_msg = format!("Failed to init git repo at path: {:?}", self.get_cwd()); 194 | let git_command = Command::new("git") 195 | .arg("init") 196 | .current_dir(self.get_cwd()) 197 | .output() 198 | .expect(&error_msg); 199 | 200 | if git_command.status.success() { 201 | if let Some(_remote) = self.get_repo_name() { 202 | self.init_readme() 203 | .and_then(|_arg| { 204 | self.add_remote_origin() 205 | }) 206 | .and_then(|_arg| { self.create_main_branch() }) 207 | .and_then(|_arg| { self.push_new_main() } )?; 208 | 209 | Ok(self.get_cwd().clone()) 210 | } else { 211 | Ok(self.get_cwd().clone()) 212 | } 213 | } else { 214 | Err(Box::new(TorbVCSErrors::UnableToCreateLocalRepoDir { 215 | path: self.get_cwd(), 216 | 
response: String::from_utf8(git_command.stderr).unwrap(), 217 | })) 218 | } 219 | } else { 220 | let err = TorbVCSErrors::UnableToInitLocalGitRepo { 221 | response: std::str::from_utf8(&mkdir.stderr)?.to_string(), 222 | }; 223 | 224 | Err(Box::new(err)) 225 | } 226 | } 227 | 228 | fn create_repo( 229 | &self, 230 | local_only: bool, 231 | ) -> Result<(PathBuf, String), Box> { 232 | if local_only { 233 | Ok((self.create_local_repo()?, "".to_string())) 234 | } else { 235 | let remote = self.create_remote_repo()?; 236 | 237 | Ok(( 238 | self.create_local_repo()?, 239 | remote, 240 | )) 241 | } 242 | } 243 | 244 | /* 245 | Ian: Generally setters and getters in Rust are non idiomatic and a bit of a smell, 246 | however traits don't allow us to enforce struct members, or reference them directly. 247 | 248 | The hack for this is to create methods that enforce the members you want. 249 | */ 250 | fn _get_api_token(&self) -> String; 251 | fn get_api_token(&self) -> String { 252 | self._get_api_token() 253 | } 254 | 255 | fn _get_user(&self) -> String; 256 | 257 | fn _get_address(&self) -> String; 258 | 259 | fn _get_cwd(&self) -> PathBuf; 260 | 261 | fn _set_cwd(&mut self, directory: PathBuf) -> PathBuf; 262 | fn set_cwd(&mut self, directory: PathBuf) -> PathBuf { 263 | self._set_cwd(directory) 264 | } 265 | } 266 | 267 | pub struct GithubVCS { 268 | api_token: String, 269 | user: String, 270 | agent: ureq::Agent, 271 | remote_address: String, 272 | cwd: PathBuf, 273 | } 274 | 275 | impl GitVersionControlHelpers for GithubVCS { 276 | fn get_user(&self) -> String { 277 | self._get_user() 278 | } 279 | 280 | fn get_address(&self) -> String { 281 | self._get_address() 282 | } 283 | 284 | fn get_cwd(&self) -> PathBuf { 285 | self._get_cwd() 286 | } 287 | } 288 | 289 | impl GitVersionControl for GithubVCS { 290 | fn create_remote_repo(&self) -> Result> { 291 | let name = self.get_repo_name().unwrap(); 292 | 293 | let token = self.get_api_token(); 294 | /* 295 | The amount of 
HTTP requests at the cli level should be fairly low and not take much time. 296 | With that consideration taking on the overhead of an async runtime which is a heavy dependency, 297 | and an async client with the changes to a rust project needed to typically support async does not 298 | seem like the right move to me. - Ian 299 | */ 300 | let req_string = format!("https://api.github.com/user/repos"); 301 | let req = self 302 | .agent 303 | .post(&req_string) 304 | .set("Authorization", &format!("Bearer {}", token)); 305 | 306 | let resp = req 307 | .send_json(ureq::json!({ 308 | "name": name, 309 | "private": true, 310 | "auto_init": false 311 | }))? 312 | .into_string()?; 313 | 314 | Ok(resp) 315 | } 316 | 317 | fn _get_api_token(&self) -> String { 318 | self.api_token.clone() 319 | } 320 | 321 | fn _get_user(&self) -> String { 322 | self.user.clone() 323 | } 324 | 325 | fn _get_address(&self) -> String { 326 | self.remote_address.clone() 327 | } 328 | 329 | fn _get_cwd(&self) -> PathBuf { 330 | self.cwd.clone() 331 | } 332 | 333 | fn _set_cwd(&mut self, directory: PathBuf) -> PathBuf { 334 | self.cwd = directory; 335 | 336 | self.cwd.clone() 337 | } 338 | } 339 | 340 | impl GithubVCS { 341 | pub fn new(api_token: String, user: String) -> GithubVCS { 342 | let agent = AgentBuilder::new().build(); 343 | 344 | GithubVCS { 345 | api_token: api_token, 346 | user: user, 347 | agent: agent, 348 | remote_address: "git@github.com".to_string(), 349 | cwd: PathBuf::new(), 350 | } 351 | } 352 | } 353 | -------------------------------------------------------------------------------- /cli/src/watcher.rs: -------------------------------------------------------------------------------- 1 | // Business Source License 1.1 2 | // Licensor: Torb Foundry 3 | // Licensed Work: Torb v0.3.7-03.23 4 | // The Licensed Work is © 2023-Present Torb Foundry 5 | // 6 | // Change License: GNU Affero General Public License Version 3 7 | // Additional Use Grant: None 8 | // Change Date: Feb 22, 
2023 9 | // 10 | // See LICENSE file at https://github.com/TorbFoundry/torb/blob/main/LICENSE for details. 11 | 12 | use crate::artifacts::{write_build_file, ArtifactRepr}; 13 | use crate::builder::StackBuilder; 14 | // use crate::deployer::StackDeployer; 15 | use crate::composer::Composer; 16 | use crate::deployer::StackDeployer; 17 | use crate::utils::buildstate_path_or_create; 18 | use crate::utils::{ 19 | get_resource_kind, CommandConfig, CommandPipeline, PrettyContext, PrettyExit, ResourceKind, 20 | }; 21 | 22 | use std::collections::HashSet; 23 | use std::sync::{Arc, Mutex, MutexGuard}; 24 | use std::{sync::PoisonError, time::Duration}; 25 | use indexmap::IndexMap; 26 | use tokio::{ 27 | runtime::Runtime, 28 | sync::mpsc::{channel, Receiver}, 29 | time, 30 | }; 31 | 32 | use notify::{Config, Event, RecommendedWatcher, RecursiveMode, Watcher as NotifyWatcher}; 33 | use serde::{Deserialize, Serialize}; 34 | use std::path::PathBuf; 35 | 36 | #[derive(Serialize, Deserialize, Clone, Debug)] 37 | pub struct WatcherConfig { 38 | paths: Vec, 39 | interval: u64, 40 | patch: bool, 41 | exempt: Vec, 42 | dev_mounts: IndexMap> 43 | } 44 | 45 | impl Default for WatcherConfig { 46 | fn default() -> WatcherConfig { 47 | WatcherConfig { 48 | paths: vec!["./".to_string()], 49 | interval: 3000, 50 | patch: true, 51 | exempt: vec![], 52 | dev_mounts: IndexMap::new() 53 | } 54 | } 55 | } 56 | 57 | pub struct Watcher { 58 | pub paths: Vec, 59 | pub interval: u64, 60 | pub patch: bool, 61 | pub artifact: Arc, 62 | pub build_hash: String, 63 | pub build_filename: String, 64 | pub dev_mounts: IndexMap>, 65 | internal: Arc, 66 | } 67 | 68 | struct WatcherInternal { 69 | pub queue: Mutex>, 70 | pub separate_local_registry: bool, 71 | pub exempt: Vec, 72 | pub exempt_set: HashSet, 73 | } 74 | 75 | impl WatcherInternal { 76 | fn new(separate_local_registry: bool, exempt: Vec) -> Self { 77 | WatcherInternal { 78 | queue: Mutex::new(Vec::::new()), 79 | separate_local_registry, 80 | 
exempt_set: HashSet::from_iter(exempt.iter().cloned()), 81 | exempt: exempt, 82 | } 83 | } 84 | fn redeploy( 85 | &self, 86 | artifact: Arc, 87 | ) -> Result<(), PoisonError>>> { 88 | self.queue.lock().map(|mut queue| { 89 | if !queue.is_empty() { 90 | println!("Changes found during watcher interval, redeploying!"); 91 | 92 | queue.clear(); 93 | queue.shrink_to(10); 94 | 95 | let build_platforms = "".to_string(); 96 | 97 | let mut builder = StackBuilder::new_with_exempt_list(&artifact, build_platforms, false, self.separate_local_registry.clone(), self.exempt.clone()); 98 | 99 | builder.build().use_or_pretty_error( 100 | false, 101 | PrettyContext::default() 102 | .success("Success! Watcher rebuilt stack.") 103 | .error("Oh no! The Watcher failed to rebuild the stack. Continuing to watch, please fix your errors.") 104 | .pretty() 105 | ); 106 | 107 | for (_, node) in artifact.nodes.iter() { 108 | if self.exempt_set.get(&node.fqn).is_some() { 109 | continue 110 | }; 111 | 112 | let resource_name = format!("{}-{}", artifact.release(), node.display_name(true)); 113 | 114 | let namespace = artifact.namespace(node); 115 | let kind_res = get_resource_kind(&resource_name, &namespace); 116 | 117 | let kind = match kind_res { 118 | Err(err) => { 119 | panic!("{}", err) 120 | } 121 | Ok(_enum) => { 122 | match _enum { 123 | ResourceKind::DaemonSet => "daemonset", 124 | ResourceKind::Deployment => "deployment", 125 | ResourceKind::StatefulSet => "statefulset" 126 | } 127 | } 128 | }; 129 | 130 | let cmd = CommandConfig::new("kubectl", 131 | vec![ 132 | "rollout", 133 | "restart", 134 | kind, 135 | resource_name.as_str(), 136 | "--namespace", 137 | &namespace 138 | ], 139 | None 140 | ); 141 | let err_msg = format!("Unable to execute rollout redeploy for {} {}", kind, resource_name); 142 | CommandPipeline::execute_single(cmd).expect(&err_msg); 143 | } 144 | 145 | } 146 | }) 147 | } 148 | } 149 | 150 | impl Watcher { 151 | pub fn configure(file_path: String, local_registry: 
bool) -> Self { 152 | let contents = std::fs::read_to_string(file_path) 153 | .expect("Something went wrong reading the stack file."); 154 | 155 | let location = std::path::Path::new("/tmp").to_path_buf(); 156 | 157 | let (build_hash, build_filename, artifact) = write_build_file(contents, Some(&location)); 158 | let watcher = artifact.watcher.clone(); 159 | 160 | Watcher::new( 161 | watcher.paths, 162 | artifact, 163 | Some(watcher.interval), 164 | Some(watcher.patch), 165 | local_registry, 166 | build_hash, 167 | build_filename, 168 | watcher.exempt, 169 | watcher.dev_mounts 170 | ) 171 | } 172 | 173 | fn new( 174 | paths: Vec, 175 | artifact: ArtifactRepr, 176 | interval: Option, 177 | patch: Option, 178 | local_registry: bool, 179 | build_hash: String, 180 | build_filename: String, 181 | exempt: Vec, 182 | mounts: IndexMap> 183 | ) -> Self { 184 | let interval = interval.unwrap_or(3000); 185 | let patch = patch.unwrap_or(true); 186 | let mut bufs = Vec::new(); 187 | 188 | for str in paths.iter() { 189 | let p = PathBuf::from(str); 190 | bufs.push(p); 191 | } 192 | 193 | let internal = Arc::new(WatcherInternal::new(local_registry, exempt)); 194 | 195 | Watcher { 196 | paths: bufs, 197 | interval, 198 | patch, 199 | artifact: Arc::new(artifact), 200 | build_hash, 201 | build_filename, 202 | dev_mounts: mounts, 203 | internal, 204 | } 205 | } 206 | 207 | fn setup_stack(&mut self) { 208 | let build_platforms = "".to_string(); 209 | 210 | let mut builder = StackBuilder::new( 211 | &self.artifact, 212 | build_platforms, 213 | false, 214 | self.internal.separate_local_registry.clone(), 215 | ); 216 | 217 | builder.build().use_or_pretty_exit( 218 | PrettyContext::default() 219 | .error("Oh no, we were unable to build the stack when starting the watcher!") 220 | .success("Success! 
Stack has been built!") 221 | .context("Errors here are typically because of a failed docker build, syntax issue in the dockerfile or a connectivity issue with the docker registry.") 222 | .suggestions(vec![ 223 | "Check that your dockerfile has no syntax errors and is otherwise correct.", 224 | "If you're building with an image registry that is hosted on the same machine, but as a separate service and not the default docker registry, try passing --local-hosted-registry as a flag." 225 | ]) 226 | .pretty() 227 | ); 228 | 229 | let mut composer = 230 | Composer::new_with_dev_mounts(self.build_hash.clone(), &self.artifact, self.patch.clone(), self.dev_mounts.clone()); 231 | composer.compose().unwrap(); 232 | 233 | let mut deployer = StackDeployer::new(self.patch.clone()); 234 | 235 | deployer 236 | .deploy(&self.artifact, false) 237 | .use_or_pretty_exit( 238 | PrettyContext::default() 239 | .error("Oh no, we were unable to deploy the stack when starting the watcher!") 240 | .success("Success! Stack has been deployed!") 241 | .context("Errors here are typically because of failed Terraform deployments or Helm failures.") 242 | .suggestions(vec![ 243 | "Check that your Terraform IaC environment was generated correctly. \nThis can be found in your project folder at, .torb_buildstate/iac_environment, or .torb_buildstate/watcher_iac_environment if you're using the watcher.", 244 | "To see if your Helm deployment failed you can do `helm ls --namespace ` where the namespace is the one you're deploying to.", 245 | "After seeing if the deployment has failed in Helm, you can use kubectl to debug further. Take a look at https://kubernetes.io/docs/reference/kubectl/cheatsheet/ if you're less familiar with kubectl." 
246 | ]) 247 | .pretty() 248 | ); 249 | 250 | let buildstate_path = buildstate_path_or_create(); 251 | let non_watcher_iac = buildstate_path.join("iac_environment"); 252 | let watcher_iac = buildstate_path.join("watcher_iac_environment"); 253 | let tf_state_path = watcher_iac.join("terraform.tfstate"); 254 | 255 | if tf_state_path.exists() { 256 | let new_path = non_watcher_iac.join("terraform.tfstate"); 257 | std::fs::copy(tf_state_path, new_path).expect("Failed to copy supporting build file."); 258 | }; 259 | } 260 | 261 | pub fn start(mut self) { 262 | self.setup_stack(); 263 | 264 | let rt = Runtime::new().unwrap(); 265 | let interval = self.interval.clone(); 266 | 267 | let internal_ref = self.internal.clone(); 268 | let artifact_ref = self.artifact.clone(); 269 | rt.spawn(async move { 270 | let mut interval = time::interval(Duration::from_millis(interval.to_owned())); 271 | loop { 272 | interval.tick().await; 273 | internal_ref 274 | .redeploy(artifact_ref.clone()) 275 | .expect("Unable to complete redeploy!"); 276 | } 277 | }); 278 | 279 | rt.block_on(async { 280 | if let Err(e) = self.watch().await { 281 | println!("error: {:?}", e) 282 | } 283 | }); 284 | 285 | rt.shutdown_timeout(Duration::from_millis(2000)) 286 | } 287 | 288 | async fn watch(&mut self) -> notify::Result<()> { 289 | let (mut watcher, mut rx) = self.async_watcher()?; 290 | 291 | for path in self.paths.iter() { 292 | println!("Watching: {}", path.to_str().unwrap()); 293 | watcher.watch(&path, RecursiveMode::Recursive)?; 294 | } 295 | 296 | while let Some(res) = rx.recv().await { 297 | match res { 298 | Ok(event) => self.internal.queue.lock()?.push(event), 299 | Err(e) => panic!("{}", e), 300 | } 301 | } 302 | 303 | Ok(()) 304 | } 305 | 306 | fn async_watcher( 307 | &self, 308 | ) -> notify::Result<(RecommendedWatcher, Receiver>)> { 309 | let (tx, rx) = channel(1); 310 | 311 | let watcher = RecommendedWatcher::new( 312 | move |res| { 313 | let rt = Runtime::new().unwrap(); 314 | 315 | 
rt.block_on(async { 316 | tx.send(res).await.unwrap(); 317 | }) 318 | }, 319 | Config::default(), 320 | )?; 321 | 322 | Ok((watcher, rx)) 323 | } 324 | } 325 | -------------------------------------------------------------------------------- /license_header.txt: -------------------------------------------------------------------------------- 1 | Business Source License 1.1 2 | Licensor: Torb Foundry 3 | Licensed Work: Torb v0.3.7-03.23 4 | The Licensed Work is © 2023-Present Torb Foundry 5 | 6 | Change License: GNU Affero General Public License Version 3 7 | Additional Use Grant: None 8 | Change Date: Feb 22, 2024 9 | 10 | See LICENSE file at https://github.com/TorbFoundry/torb/blob/main/LICENSE for details. 11 | --------------------------------------------------------------------------------