├── .github
├── workflows-in-construction
│ └── release.yml
└── workflows
│ └── documentation.yml
├── .gitignore
├── LICENSE
├── README.md
├── SECURITY.md
├── docs
├── assets
│ ├── diagrams
│ │ └── Orthweb.drawio
│ └── images
│ │ ├── AppTraffic0.png
│ │ ├── AppTraffic1.png
│ │ ├── AppTraffic2.png
│ │ ├── Orthweb.png
│ │ ├── Overview.png
│ │ ├── dicom-proxy.png
│ │ ├── orthanc_logo.png
│ │ ├── private-connection.png
│ │ └── yorku-logo.jpg
├── deployment
│ ├── configuration.md
│ ├── infrastructure.md
│ └── preparation.md
├── design
│ ├── configmgmt.md
│ ├── deviceconnectivity.md
│ ├── infrastructure.md
│ └── ingress.md
├── index.md
├── introduction
│ └── index.md
├── support
│ └── index.md
└── validation
│ ├── additional.md
│ ├── advanced.md
│ └── basic.md
├── mkdocs.yml
└── terraform
├── .gitignore
├── README.md
├── main.tf
├── modules
├── client-vpn
│ ├── main.tf
│ └── variables.tf
├── database
│ ├── main.tf
│ ├── output.tf
│ └── variables.tf
├── ec2
│ ├── main.tf
│ ├── output.tf
│ ├── userdata1.sh
│ ├── userdata2.tpl
│ └── variables.tf
├── key
│ ├── main.tf
│ ├── output.tf
│ └── variables.tf
├── network
│ ├── main.tf
│ ├── output.tf
│ └── variables.tf
└── storage
│ ├── main.tf
│ ├── output.tf
│ └── variables.tf
├── out
└── .gitkeep
├── output.tf
├── provider.tf
├── terraform.tfvars
└── variables.tf
/.github/workflows-in-construction/release.yml:
--------------------------------------------------------------------------------
1 | name: Release
2 |
3 | on:
4 | push:
5 | tags:
6 | - 'v*' # Triggers only on tags starting with 'v' (e.g., v1.2.3)
7 |
8 | jobs:
9 | release:
10 | name: Create GitHub Release
11 | runs-on: ubuntu-latest
12 |
13 | steps:
14 | - name: Checkout repository
15 | uses: actions/checkout@v4
16 |
17 | - name: Extract version from tag
18 | id: get_version
19 | run: echo "VERSION=${GITHUB_REF#refs/tags/v}" >> $GITHUB_ENV
20 |
21 | - name: Find previous tag
22 | id: get_previous_tag
23 | run: |
24 | PREV_TAG=$(git describe --tags --abbrev=0 $(git rev-list --tags --skip=1 --max-count=1) 2>/dev/null || echo "None")
25 | echo "PREVIOUS_TAG=$PREV_TAG" >> $GITHUB_ENV
26 |
27 | - name: Generate changelog
28 | run: |
29 | echo "## Changes in v${VERSION}" > release_notes.txt
30 |
31 | if [ "$PREVIOUS_TAG" = "None" ]; then
32 | echo "First release - showing full commit history:" >> release_notes.txt
33 | git log --pretty=format:"- %h %s" >> release_notes.txt
34 | else
35 | echo "Changes since $PREVIOUS_TAG:" >> release_notes.txt
36 | git log $PREVIOUS_TAG..HEAD --pretty=format:"- %h %s" >> release_notes.txt
37 | fi
38 |
39 | - name: Display changelog (for debugging)
40 | run: cat release_notes.txt
41 |
42 | - name: Create GitHub Release
43 | uses: softprops/action-gh-release@v2
44 | with:
45 | tag_name: v${{ env.VERSION }}
46 | name: Release v${{ env.VERSION }}
47 | body_path: release_notes.txt # ✅ Uses generated commit history
48 | draft: false
49 | prerelease: false
--------------------------------------------------------------------------------
/.github/workflows/documentation.yml:
--------------------------------------------------------------------------------
1 | name: ci
2 | on:
3 | push:
4 | branches:
5 | - main
6 | permissions:
7 | contents: write
8 | jobs:
9 | deploy:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@v3
13 | - uses: actions/setup-python@v4
14 | with:
15 | python-version: 3.x
16 | - uses: actions/cache@v2
17 | with:
18 | key: ${{ github.ref }}
19 | path: .cache
20 | - run: |
21 | pip install mkdocs-material
22 | pip install mkdocs-include-markdown-plugin
23 | - run: mkdocs gh-deploy --force
24 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | terraform.tfstate.backup
3 | terraform.tfstate.*
4 | terraform.tfstate
5 | *.drawio.bkp
6 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Orthweb - Orthanc Solution on AWS
2 |
3 |
4 |
5 | [](https://aws.amazon.com/amazon-linux-2)
6 | [](https://www.docker.com/)
7 | [](https://www.postgresql.org/)
8 | [](https://nginx.org/en/index.html)
9 | [](https://www.keycloak.org/)
10 | [](https://opensource.org/licenses/Apache-2.0)
11 |
12 | [](https://www.terraform.io/)
13 | [](https://aws.amazon.com/ec2/)
14 | [](https://aws.amazon.com/s3/)
15 | [](https://aws.amazon.com/rds/postgresql/)
16 |
17 | ## Overview
18 |
19 | The **[Orthweb](https://github.com/digihunch/orthweb)** project automates the creation of a cloud-based mini-[PACS](https://en.wikipedia.org/wiki/Picture_archiving_and_communication_system) based on **[Orthanc](https://www.orthanc-server.com/)** and Amazon Web Services (AWS). The project artifact addresses the cloud foundation and configuration management, and enables adopters to host the Orthanc software as a service ([SaaS](https://en.wikipedia.org/wiki/Software_as_a_service)). To get started, follow the [documentation](https://digihunch.github.io/orthweb/). 💪 Let's automate medical imaging!
20 |
21 | Imaging systems handling sensitive data must operate on secure platforms. Typically, large organizations dedicate specialized IT resources to build their enterprise-scale cloud foundations. This cloud foundation, also known as a [landing zone](https://www.digihunch.com/2022/12/landing-zone-in-aws/), addresses security and scalability. Each line of business of the large organization is allocated a segment (e.g. a VPC) in the landing zone, to deploy their own applications.
22 |
23 | However, many Orthanc adopters are small teams without overarching cloud strategies from their parent organizations. They are startups, research groups, independent clinics, and so on. To leverage Orthanc capabilities, they need simple cloud foundations that are equally secure and scalable. To close this gap, we proposed and implemented a cloud-based Orthanc solution: the [**Orthweb** project](https://www.digihunch.com/2020/11/medical-imaging-web-server-deployment-pipeline/).
24 |
25 | 
26 |
27 | To build the foundation fast, the **Orthweb** project uses a **Terraform** template (an [infrastructure-as-code](https://en.wikipedia.org/wiki/Infrastructure_as_code) technology) to provision a self-contained infrastructure stack in a single AWS account, without relying upon established network infrastructure. The infrastructure layer provisioned in this project contains a single VPC with multiple subnets, along with optional VPC endpoints. The infrastructure layer also contains encryption keys, managed database service and S3 storage.
28 |
29 | The **Orthweb** project also streamlines the configuration of the Orthanc solution, by proposing a paradigm for Orthanc configuration management. The project leverages cloud-init [user data](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) and `makefile` to configure the servers during the server's initialization process. The artifact to install **Orthanc** is stored in a separate repository for adopters to fork and customize. The [orthanc-config](https://github.com/digihunchinc/orthanc-config) repository is a good example.
30 |
31 |
32 | The project orchestrates the application containers with Docker daemon on EC2 instances. Technical users can expect to build a cloud-based mini-PACS in one hour with rich feature, scalability and security. For those considering hosting Orthanc on Kubernetes, check out the sister project [Korthweb](https://github.com/digihunch/korthweb).
33 |
34 | ## Partners
35 |
36 |
37 | **[York MRI Facility](https://mri.info.yorku.ca/)**
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | ## Disclaimer
2 |
3 | Despite the small infrastructure footprint, the maintainer of the **Orthweb** project strives to ensure that the configuration is aligned with common compliance frameworks, such as:
4 |
5 | - Health Insurance Portability and Accountability Act (HIPAA)
6 | - International Organization for Standardization (ISO) 27001 and ISO 27002
7 |
8 | However, it is important to understand that regulatory auditing is the responsibility of the project adopter. AWS Config and Security Hub are very helpful services for compliance review. You may directly contact [Digi Hunch](mailto:digihunch@gmail.com) for security issues.
9 |
--------------------------------------------------------------------------------
/docs/assets/images/AppTraffic0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digihunch/orthweb/0700f5635def6fd85366e6f22e08ebcc8f126c2a/docs/assets/images/AppTraffic0.png
--------------------------------------------------------------------------------
/docs/assets/images/AppTraffic1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digihunch/orthweb/0700f5635def6fd85366e6f22e08ebcc8f126c2a/docs/assets/images/AppTraffic1.png
--------------------------------------------------------------------------------
/docs/assets/images/AppTraffic2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digihunch/orthweb/0700f5635def6fd85366e6f22e08ebcc8f126c2a/docs/assets/images/AppTraffic2.png
--------------------------------------------------------------------------------
/docs/assets/images/Orthweb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digihunch/orthweb/0700f5635def6fd85366e6f22e08ebcc8f126c2a/docs/assets/images/Orthweb.png
--------------------------------------------------------------------------------
/docs/assets/images/Overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digihunch/orthweb/0700f5635def6fd85366e6f22e08ebcc8f126c2a/docs/assets/images/Overview.png
--------------------------------------------------------------------------------
/docs/assets/images/dicom-proxy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digihunch/orthweb/0700f5635def6fd85366e6f22e08ebcc8f126c2a/docs/assets/images/dicom-proxy.png
--------------------------------------------------------------------------------
/docs/assets/images/orthanc_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digihunch/orthweb/0700f5635def6fd85366e6f22e08ebcc8f126c2a/docs/assets/images/orthanc_logo.png
--------------------------------------------------------------------------------
/docs/assets/images/private-connection.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digihunch/orthweb/0700f5635def6fd85366e6f22e08ebcc8f126c2a/docs/assets/images/private-connection.png
--------------------------------------------------------------------------------
/docs/assets/images/yorku-logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digihunch/orthweb/0700f5635def6fd85366e6f22e08ebcc8f126c2a/docs/assets/images/yorku-logo.jpg
--------------------------------------------------------------------------------
/docs/deployment/configuration.md:
--------------------------------------------------------------------------------
1 | When an EC2 instance is launched, the cloud-init process executes a script as defined in user-data on the first boot. This initializes the configuration management.
2 |
3 | ## Pattern
4 |
5 | The main steps of the user data script are illustrated below.
6 |
7 |
8 | ```mermaid
9 | graph LR
10 | subgraph OuterRectangle["EC2 Instance"]
11 | subgraph MiddleRectangle["Cloud Init"]
12 | InnerRectangle1["1 git clone"]
13 | InnerRectangle2["2 update parameter"]
14 | InnerRectangle3["3 execute command"]
15 | end
16 | end
17 | InnerRectangle1 --> |deployment_options
ConfigRepo| Repo["orthanc-config repository"]
18 | InnerRectangle3 --> |deployment_options
InitCommand| Command["cd orthanc-config
&& make aws"]
19 | ```
20 |
21 |
22 | First, the script pulls the configuration repo as specified to a local directory. Then it updates the parameter file in the repo. Last, the script executes the command given in the variable, from the directory. They are configured in the Terraform variable `deployment_options`.
23 |
24 |
25 | ## Bootstrapping
26 |
27 | The configuration management repo is [orthanc-config](https://github.com/digihunchinc/orthanc-config). Review the `README.md` file in the [repo](https://github.com/digihunchinc/orthanc-config) for details. In short, the repo suits multiple environments. For example, on AWS EC2, the initial command is to `make aws`. Under the hood, the `makefile` is the key to orchestrate the configuration activities in shell commands. Such activities include:
28 |
29 | * Updating configuration file for Orthanc (e.g. S3 and database connectivity string)
30 | * Checking dependencies on EC2 instance
31 | * Building self-signed certificates
32 | * Initializing database in RDS for Keycloak
33 |
34 | The `makefile` takes environment variables set from `.env` file, which looks like this:
35 |
36 | ```sh
37 | {% include "https://raw.githubusercontent.com/digihunchinc/orthanc-config/refs/heads/main/.env" %}
38 | ```
39 | Some entries in this file are configured based on the Terraform templates input variable.
40 |
41 | ## Makefile
42 | Here is what the `makefile` looks like to implement those activities:
43 |
44 | ```sh
45 | {% include "https://raw.githubusercontent.com/digihunchinc/orthanc-config/refs/heads/main/makefile" %}
46 | ```
47 |
48 | By default, we use the cloud-init script to invoke the make command. To watch for log output and errors during cloud-init, check the cloud-init log file (/var/log/cloud-init-output.log) on the EC2 instance. For convenience, the output from the command also prints the next steps to finish the configuration.
49 |
50 | A critical output of the `makefile` driven automation is the `docker-compose.yaml` file from the template, which allows the user to start the system with the docker compose command from the working directory.
--------------------------------------------------------------------------------
/docs/deployment/infrastructure.md:
--------------------------------------------------------------------------------
1 |
2 | ## Overview
3 | Since we execute Terraform commands locally to drive the deployment, we also store Terraform states locally. Advanced Terraform users may choose a managed Terraform platform such as HCP Terraform (previously known as Terraform Cloud), which is beyond the scope of this document.
4 |
5 | Now we can start deploying Orthanc. From your command terminal, go to the [`terraform`](https://github.com/digihunch/orthweb/tree/main/terraform) directory, and run `terraform` commands from this directory.
6 |
7 | ## Terraform Init
8 | First, initialize terraform template with this command:
9 | ```sh
10 | terraform init
11 | ```
12 | The command initializes the Terraform template, including pulling external modules and downloading providers. Successful initialization should report the following line:
13 |
14 | ```
15 | Terraform has been successfully initialized!
16 | ```
17 |
18 | After initialization, terraform creates `.terraform` directory to store the pulled modules and providers.
19 |
20 | ## Adjust Variables
21 |
22 | There are several ways to declare [input variable](https://developer.hashicorp.com/terraform/language/values/variables) in Terraform. In this solution, we use `terraform.tfvars` file in the terraform working directory. The [file](https://github.com/digihunch/orthweb/blob/main/terraform/terraform.tfvars) is loaded with functional input variables. Users should review the variables and adjust accordingly. Here is a sample of the `terraform.tfvars` file:
23 |
24 | ```
25 | {% include "https://raw.githubusercontent.com/digihunch/orthweb/refs/heads/main/terraform/terraform.tfvars" %}
26 | ```
27 |
28 | To determine the variable values, some decisions to make are:
29 |
30 | - Value of provider tag and site name
31 | - Size of EC2 instance and public key
32 | - Number of availability zones, CIDR for the VPC and subnet sizing
33 | - CIDR blocks of the web and dicom client to whitelist
34 | - Whether or not to ship docker log to Cloud Watch and retention period
35 |
36 | In most cases, users at least need to update the provider tag and site name. Read the full document for input variables [here](https://github.com/digihunch/orthweb/blob/main/terraform/README.md).
37 |
38 | ## Terraform Plan
39 | We plan the deployment with command:
40 |
41 | ```sh
42 | terraform plan
43 | ```
44 |
45 | This command projects the changes that will be applied to AWS. It will print out the resources and what changes Terraform will make.
46 |
47 | If you're running this command for the first time, Terraform will flag all resources as to be created. If you're running the command with a change of Terraform template, it will only mark the prospective changes.
48 |
49 | At the end of the result, it will summarize the actions to take, for example:
50 | ```
51 | Plan: 54 to add, 0 to change, 0 to destroy.
52 | ```
53 | If the plan fails, check the code and state file. The number of resources to add depends on the specific input variables and whether the solution has been previously deployed.
54 |
55 | ## Terraform Apply
56 |
57 | If the plan looks good, we can apply the deployment plan:
58 | ```
59 | terraform apply
60 | ```
61 | Then, you need to say `yes` to the prompt. Terraform kicks off the deployment.
62 |
63 | During deployment, the Terraform provider interacts with your AWS account to provision the resources. Some critical resources take much longer than others due to their sheer size. For example, the database alone takes 15 minutes. The EC2 instances also take a few minutes because of the bootstrapping process that configures the Orthanc application. The entire deployment process can take as long as 30 minutes. To fast-track the progress, you can parallelize the deployment with flags such as `-parallelism=3`.
64 |
65 | ## Review Output
66 |
67 | Upon successful deployment, the screen should print out four entries. They are explained in the table below:
68 |
69 | |key|example value|protocol|purpose|
70 | |--|--|--|--|
71 | |**server_dns**|ec2-15-156-192-145.ca-central-1.compute.amazonaws.com, ec2-99-79-73-88.ca-central-1.compute.amazonaws.com (HTTPS and DICOM TLS)|HTTPS/DICOM-TLS|Business traffic: HTTPS on port 443 and DICOM-TLS on port 11112. Reachable from the Internet.|
72 | |**host_info**|Primary:i-02d92d2c1c046ea62 Secondary:i-076b93808575da71e|SSH|For management traffic. |
73 | |**s3_bucket**|wealthy-lemur-orthbucket.s3.amazonaws.com|HTTPS-S3| For orthanc to store and fetch images. Access is restricted.|
74 | |**db_endpoint**|wealthy-lemur-orthancpostgres.cqfpmkrutlau.us-east-1.rds.amazonaws.com:5432|TLS-POSTGRESQL| For orthanc to index data. Access is restricted.|
75 |
76 | Once the screen prints the output, the EC2 instances may still take a couple of extra minutes in the background to finish configuring Orthanc. We can start validation as per the steps outlined in the next section.
77 |
78 | If applicable, deploy the custom application traffic management.
79 |
80 | ## Terraform State
81 | Terraform keeps a local file `terraform.tfstate` for the last known state of the deployed resources, known as the state file. This file is critical for the ongoing maintenance of the deployed resources.
82 |
83 | Ad hoc changes to the resources created by Terraform are not registered in the state file. These changes, often referred to as configuration drift, are very likely to cause issues when the Terraform managed resources are updated or deleted. In general, manual changes to Terraform managed resources should be avoided. Changes should be first registered in the Terraform template and applied via the `terraform apply` command.
84 |
85 | ## Cost Estimate
86 |
87 | Below is a per-day estimate of cost (in USD) of the infrastructure based on default configuration.
88 |
89 | | AWS Service | Standing Cost |
90 | | :---------------- | :------: |
91 | | Relational Database | $3.6 |
92 | | EC2-Instances | $2.2 |
93 | | VPC | $0.6 |
94 | | Key Management Service | $0.13 |
95 | | EC2-Other | $0.12 |
96 | | Secrets Manager | $0.12 |
97 | | S3 | $0.13 |
98 | | Total daily cost | $7 |
99 |
100 | Note, the numbers do not include data processing charges such as images stored to and retrieved from S3, or data moved in and out of the Internet Gateway, etc. The numbers also have free-tier usage factored in. Users are advised to run the solution with everyday usage to get a more realistic ballpark of daily cost. AWS has a comprehensive [pricing calculator](https://calculator.aws/#/) and [savings plans](https://aws.amazon.com/savingsplans/) available.
101 |
102 | ## Logs
103 | To view container logs from an EC2 instance, use the docker compose logs command:
104 | ```sh
105 | docker compose logs -f
106 | ```
107 | If CloudWatch logging is enabled, the Docker daemon configuration file on the EC2 instances is automatically configured to ship logs to CloudWatch log groups with the configured retention window.
108 |
109 |
110 | ## Clean up
111 | After the validation is completed, it is important to remember this step to stop incurring on-going cost.
112 |
113 | You can delete all the resources with `destroy` command:
114 | ```
115 | terraform destroy
116 | ```
117 | The command should report that all resources are deleted.
118 |
--------------------------------------------------------------------------------
/docs/deployment/preparation.md:
--------------------------------------------------------------------------------
1 | The solution was tested on Mac and Linux. The instructions are based on Mac or Linux. Use the solution on Windows at your own risk. There are also several ways to adjust the steps to work with managed Terraform environment (e.g. Terraform Cloud). For simplicity, this documentation assumes that you work from a command terminal.
2 |
3 | ## Prerequisite
4 |
5 | In your command terminal, install the required packages:
6 |
7 | * Make sure **[awscli](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html)** is installed and [configured](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure) so you can connect to your AWS account as your IAM user (using `Access Key ID` and `Secret Access Key` with administrator privilege). If you will need to SSH to the EC2 instance, you also need to install [session manager plugin](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html);
8 | * Make sure **Terraform CLI** is [installed](https://learn.hashicorp.com/tutorials/terraform/install-cli). In the Orthweb template, Terraform also uses your IAM credential to [authenticate into AWS](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#shared-credentials-file).
9 |
10 | Then, use Git to pull the repository:
11 | ```sh
12 | git clone https://github.com/digihunch/orthweb.git
13 | ```
14 | Before running any terraform command, enter the `orthweb` directory as your current working directory.
15 |
16 |
17 | ## Additional Steps
18 |
19 | Take the preparatory steps below if you need to inspect or troubleshoot the Orthanc deployment. Otherwise, skip to the next section to start Installation.
20 |
21 | ### Secure SSH access
22 | There are two ways to SSH to the EC2 instances. To use your own choice of command terminal, you must configure your [RSA key pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) on the EC2 instances. Alternatively, without your own RSA key pair, you may use web-based command terminal provided by Session Manager in AWS console.
23 |
24 | ### Use your own command terminal
25 |
26 | You need to [create your RSA key pair](https://help.dreamhost.com/hc/en-us/articles/115001736671-Creating-a-new-Key-pair-in-Mac-OS-X-or-Linux). Your public key will be stored as file `~/.ssh/id_rsa.pub` on MacOS or Linux by default. Here is how the template determines what to send to EC2 as authorized public key:
27 |
28 | 1. If you specify public key data in the input variable `pubkey_data`, then it will be added as the authorized public key when the EC2 instances are created.
29 | 2. If `pubkey_data` is not specified, then it looks for the file path specified in input variable `pubkey_path` for public key
30 | 3. If `pubkey_path` is not specified, then it uses default public key path `~/.ssh/id_rsa.pub` and pass the public key
31 | 4. If no file is found at the default public key path, then the template will not send a public key. The EC2 instances to be provisioned will not have an authorized public key. Your only option to SSH to the instance is using AWS web console.
32 |
33 | The Terraform template picks up environment variables prefixed with `TF_VAR_` and passes them in as Terraform's [input variables](https://developer.hashicorp.com/terraform/language/values/variables#environment-variables) without the prefix in the name. For example, if you set the environment as below before running `terraform init`, then Terraform will pick up the values for input variables `pubkey_data` and `pubkey_path`:
34 | ```sh
35 | export
36 | TF_VAR_pubkey_data="mockpublickeydatawhichissuperlongdonotputyourprivatekeyherepleaseabcxyzpubkklsss"
37 | TF_VAR_pubkey_path="/tmp/mykey.pub"
38 | ```
39 |
40 | Your SSH client works in tandem with session-manager-plugin. You can add the following section to your local SSH configuration file (i.e. `~/.ssh/config`) so that the session manager proxies the SSH session for hostnames matching `i-*` and `mi-*`.
41 |
42 | ```
43 | host i-* mi-*
44 | ProxyCommand sh -c "aws ssm start-session --target %h --document-name AWS-StartSSHSession --parameters 'portNumber=%p'"
45 | IdentityFile ~/.ssh/id_rsa
46 | User ec2-user
47 | ```
48 | Then you will be able to directly ssh to an instance by its instance ID, even if the instance does not have a public IP. It will use the Linux user `ec2-user`, whose public key has been pre-loaded as the authorized public key.
49 |
50 | ### Use web-based terminal
51 |
52 | Orthweb automatically configures the permission required for EC2 instances to connect to AWS Systems Manager.
53 |
54 | Log on to the AWS console, from `AWS Systems Manager` in your region, on the left-hand panel, under `Node Management`, select `Fleet Manager`. You should see your instances listed. Select the node by name, select `Node actions` and then `Start terminal session` (under `Connect`). It will take you to a web-based command console, logged in as `ssm-user`. You can switch to the `ec2-user` with sudo commands:
55 | ```bash
56 | sh-4.2$ sudo -s
57 | [root@ip-172-27-3-138 bin]# su - ec2-user
58 | Last login: Wed Nov 23 22:02:57 UTC 2022 from localhost on pts/0
59 | [ec2-user@ip-172-27-3-138 ~]$
60 | ```
61 |
62 | Both mechanisms are enabled by default in the Terraform template.
63 |
64 | ## Custom deployment options
65 | This project comes with working default but you can customize it in certain ways, by modifying the variable file `terraform.tfvars`. The variables are self-explanatory and defined in `variables.tf` file.
66 |
67 | |variable|description|
68 | |--|--|
69 | |**network_config**| Adjust the networking configuration (e.g. CIDRs, sizing) and specify interface endpoints to enable if required.|
70 | |**provider_tags**| Adjust the resource tags to apply to every resource deployed through the Terraform template |
71 | |**deployment_options**| Adjust the deployment specification. For example, use a different instance size, configuration repo, and have your own site name|
72 |
73 | If you use a BYO DNS name, make sure to set the **SiteName** correctly. The value of the site name, if set, is used in several configuration files for Orthanc. If it is set incorrectly, you will not be able to browse the Orthanc site correctly.
74 |
--------------------------------------------------------------------------------
/docs/design/configmgmt.md:
--------------------------------------------------------------------------------
1 | Upon launching the EC2 instances, the Orthweb solution can pull from a public Git repo and automatically completes the configuration of Orthanc. Orthweb completes a baseline configuration for Orthanc. The configuration was based on the reference configuration repo for [authorization service](https://github.com/orthanc-team/orthanc-auth-service), with some simplification efforts.
2 |
3 | ## Automation
4 |
5 | The configuration files live in a separate repo called [orthanc-config](https://github.com/digihunchinc/orthanc-config), unless otherwise specified. If you have custom configuration, you may fork this repository and reference it when deploying Orthweb.
6 |
7 | The repo directory can be placed in user directory `~` and it consists of a `makefile`. The file contains the steps and commands to automate the configuration for Orthanc. The `README.md` file in the repo has the instruction but the idea is to complete the installation with a single command. For example, on an EC2 instance, you may run `make aws`. This will call the required steps needed to configure Orthanc on the specific EC2 instance. On the other hand, if you simply need a generic development environment (e.g. on MacBook), you can run the `make dev` command. This command can even be supplied as a variable to the Terraform template such that it gets automatically executed.
8 |
9 | The repo consists of several directories. The key directory is `config`, where each subfolder is mapped to a directory in the file systems of running containers.
10 |
11 | ## Containers
12 | The Orthanc application consists of the following container-based services. The containers either use pre-built images by orthanc-team, or images built as needed in the Docker compose file. How these containers interact with each other is illustrated below:
13 |
14 |
15 | ```mermaid
16 | graph TD;
17 | B[Web Browser] --> |HTTPS
TCP 443|C{Nginx Proxy
Container};
18 | X[DICOM Device] -->|DICOM TLS
TCP 11112| C;
19 | C -->|HTTP
TCP 8042| D[Orthanc
Container];
20 | C -. DICOM
TCP 4242 .-> D;
21 | D -->|via Nginx| F;
22 | C -->|HTTP
TCP 8080| F[KeyCloak
Container];
23 | D -->|HTTP
TCP 8000| G[Custom Authorization
Service Container];
24 | F -->|via Nginx| G;
25 | ```
26 |
27 |
28 | The Nginx proxy is configured to direct traffic to different containers based on path. For example, if the request path is `/keycloak/*`, then the HTTP request is directed to the Keycloak container. If the request path is `/orthanc/*`, then the request is directed to the Orthanc container. The Orthanc container is able to connect to S3 and the PostgreSQL database.
29 |
30 | ## Plugins
31 |
32 | One of the key configuration files is `orthanc.json`, which defines the required configurations. Available configuration options can be found in the [Orthanc book](https://orthanc.uclouvain.be/book/). The [official plugins](https://orthanc.uclouvain.be/book/plugins.html#index-of-the-official-plugins) in the baseline configuration include:
33 |
34 | - AWS S3 Storage Plugin (for deployment on EC2 only)
35 | - PostgreSQL Index Plugin
36 | - Orthanc Explorer 2 Plugin
37 | - Advanced Authorization Plugin
38 | - DicomWeb Plugin
39 | - Stone Web Viewer Plugin
40 |
41 | The Authorization plugin requires use of the Orthanc Explorer 2 plugin, and integrates with the external custom authorization service. The PostgreSQL database is to index the study data. The imaging pixel data are stored to S3 bucket using the S3 Storage Plugin.
42 |
43 |
44 | ## Application logs
45 |
46 | Containers produce logs through the Docker daemon. To persist the log data, the solution provides a mechanism to tell the Docker daemon to push container logs to CloudWatch, with a configurable retention window. Even if an EC2 instance dies or containers fail, the log data are stored outside of the EC2 instance.
--------------------------------------------------------------------------------
/docs/design/deviceconnectivity.md:
--------------------------------------------------------------------------------
1 | Device connectivity is another area of customization depending on the organization's network setup.
2 |
3 | Images with sensitive information are transferred in [DICOM protocol](https://dicom.nema.org/medical/dicom/current/output/chtml/part08/PS3.8.html). If they are transferred across the Internet, they must be encrypted with Transfer Layer Security (TLS). To configure secure DICOM transfer, both sides must support TLS. While this is configured by default in the orthanc solution, the modality side does not always have proper support for TLS.
4 |
5 | As a result, users should not send DICOM images from modalities over the Internet to Orthanc without TLS configuration. If the modality does not support DICOM TLS configuration, consider the following ways to secure the transfer.
6 |
7 | ## Private Network Connection
8 | At the network infrastructure level, the organization may build an AWS Direct Connect connection with AWS. Requirements for such a network connection should be reviewed with the network team of the organization, and require collaboration of multiple teams.
9 |
10 | 
11 |
12 | Instead of private physical connection, user may build a virtual private connection over the Internet using VPN.
13 |
14 | ## Virtual Private Network (VPN)
15 | Compared with Direct Connect, VPN involves less effort and cost. The solution can work with two models of VPN:
16 |
17 | * Site-to-site VPN: requiring either a physical device or software application to act as a customer gateway.
18 | * Client VPN: requiring an OpenVPN-based client on one or more workstations. Enable split-tunnel so only relevant traffic is routed to the VPC.
19 |
20 | If you're sending imaging data from a handful of workstations, Client VPN is a good approach, and it is implemented in Orthweb as a module. The configuration is based on [this instruction](https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/cvpn-getting-started.html), which automates the following otherwise manual activities:
21 |
22 | 1. the VPN client and the VPN endpoint use certificate based mutual authentication. Many use OpenSSL to create certificates but AWS instruction uses "easyrsa3" to create them. The template addon uses Terraform tls provider to create the certificates.
23 | 2. When creating the VPN endpoint, specify a separate CIDR range for client IPs, e.g. `192.168.0.0/22` In this context, the client IP is the workstation's IP once it connects to the VPC via client VPN.
24 | 3. create a new security group (e.g. vpn-ep-sg) with outbound rule allowing all types of traffic to destination CIDR 0.0.0.0/0
25 | 4. When creating the VPN endpoint, associate it with the two private subnets as target network (which adds the required routes under the hood). Set vpn-ep-sg as the security group. Create the authorization rules as instructed.
26 | 5. Enable [split-tunnel](https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/split-tunnel-vpn.html) for the VPN endpoint so other network features on the workstation are not impacted.
27 |
28 | Once the VPC client software ([OpenVPN](https://openvpn.net/client/) or [AWS VPN client](https://aws.amazon.com/vpn/client-vpn-download/)) is configured and connected from the workstation, the connection between the EC2 instance and the DICOM client will become secured at the IP layer. The application on the workstation connects to the server by private IP.
29 |
30 | ## Use a DICOM proxy
31 | The organization may consider running a local DICOM proxy. The proxy receives images from modality in the clear, and forwards the images over the Internet to Orthanc. Unlike the modality application, such proxy applications usually come with full support of TLS. There are not many open-source options. An on-prem instance of Orthanc can be configured to act as a DICOM proxy.
32 |
33 | 
34 |
35 | In this configuration the DICOM port should also be open. Use a security group to restrict where the port can receive traffic from.
--------------------------------------------------------------------------------
/docs/design/infrastructure.md:
--------------------------------------------------------------------------------
1 |
2 | The Orthweb proposes a reference architecture to host Orthanc on AWS. The reference architecture does not include application traffic management. This section discusses how the components work in this architecture. The [next page](./ingress.md), will focus on the options for ingress traffic management.
3 |
4 | ## The Reference Architecture
5 |
6 | The architecture includes a VPC that spans across two availability zones (AZs). Each AZ has a public and a private subnet. Each public subnet hosts one EC2 instance, with a public IP address routable from the Internet. The reference architecture is illustrated in the diagram below:
7 |
8 | 
9 |
10 | The two EC2 instances operate active-active, each with its own public IP. There are more options to manage application traffic and they are discussed separately in this section. The instances listen for DICOM (over TLS) and HTTPS traffic on TCP ports `11112` and `443`.
11 |
12 | Within each EC2 instance, Docker daemon runs the required containers for Orthanc application. Orthweb uses [Docker Compose](https://docs.docker.com/compose/) to instruct Docker daemon how to orchestrate several containers of different purposes. The EC2 instances are launched using launch template.
13 |
14 | The private subnet contains the network interface for RDS PostgreSQL database. For the Orthanc container to connect to S3, an S3 gateway endpoint is created in each AZ. Optionally and at additional cost, users may enable interface endpoints in each AZ to route AWS management traffic privately.
15 |
16 | ## Redundancy
17 |
18 | The Orthweb solution provisions a pair of EC2 instances by default for redundancy. If you only need one of the two EC2 instance, feel free to stop the other one to avoid incurring charges. To stop and start an instance using AWS CLI, identify the instance ID and run:
19 | ```sh
20 | aws ec2 stop-instances --instance-ids i-12345678 # instance billing stops
21 | aws ec2 start-instances --instance-ids i-12345678 # instance billing restarts
22 | ```
23 | Bear in mind that when there is only one EC2 instance running, it becomes a single point of failure.
24 |
25 | The pair of EC2 instances, in conjunction with additional cloud resources, can bring high availability to the solution. However, the Orthweb solution does not intend to address high availability with its out-of-the-box configuration. For high availability, options are discussed under [application traffic management](ingress.md) and usually require additional customization efforts.
26 |
27 | ## Certificate
28 |
29 | TLS certificate (either self-signed or BYO certificates) must be applied to both HTTP and DICOM ports for traffic encryption, because the data contains patient information and travels across the Internet. Provisioning third-party certificate is the responsibility of the user, and the process to provision TLS certificate for an organization varies largely depending on the Public Key Infrastructure (PKI).
30 |
31 | The Orthweb solution provisions a self-signed certificate in the configuration. It automatically configures the Nginx proxy using the self-signed certificate for both TCP ports (443 and 11112). The limitation with self-signed certificates is that they are not broadly trusted. However, they can still encrypt the traffic. The self-signed certificate is issued to the public DNS name of the EC2 instance as the certificate Common Name, in the format of `ec2-pub-lic-ip-addr.<region>.compute.amazonaws.com`.
32 |
33 |
34 | ## Network Paths
35 | The EC2 instances handle network traffic for both business and management purposes. Assuming the application traffic coming from the Internet arrives at the network interface of the two EC2 instances, we consider the following traffic paths:
36 |
37 | * DICOM and web traffic: connection from client browser or DICOM AE travels across Internet and arrives at EC2's network interface via the Internet Gateway of VPC. Returning taffic directed to the client goes through Internet Gateway. Both types of traffic are by default protected with TLS (transport layer security).
38 | * Database traffic: only the Orthanc container in each EC2 instance makes a connection to the RDS instance. The endpoint of the RDS instance is deployed in the private subnets of the VPC. The database traffic does not leave the VPC. The traffic is protected with TLS.
39 | * AWS Management traffic: by default, AWS management traffic such as secret manager, KMS, are routed through the Internet, and are encrypted with TLS. Optionally, users may introduce their interface endpoints in the VPC in order to route such traffic privately.
40 |
41 | | Choice of interface endpoints in VPC | Routing Pattern | Tradeoff
42 | | -------- | ------- | ------- |
43 | | `[]` (none by default) | Without any interface endpoints, all types of AWS management traffic are routed through Internet. | Most cost-efficient configuration. |
44 | | `["kms","secretsmanager"]` | Traffic for critical management traffic (secrets and keys) is routed privately | A balanced configration between security risk and cost |
45 | | `["kms","secretsmanager","ec2","ssm","ec2messages","ssmmessages"]` | All types of AWS management traffic are routed privately. | Most secure configuration but each interface endpoint incurs its own cost. |
46 |
47 |
48 | * AWS Data Traffic: the EC2 instances makes connection to S3 to store imaging data. The terraform template creates S3 Gateway Endpoint such that the traffic from EC2 instance to S3 is routed privately. The traffic to S3 is protected with TLS encryption.
49 |
50 | ## Compliance
51 |
52 | Below are some other concerns regarding the configurations for security compliance:
53 |
54 | 1. Both DICOM and web traffic are encrypted in TLS. This requires peer DICOM AE to support DICOM TLS as well.
55 | 2. PostgreSQL data is encrypted at rest, and the database traffic between Orthanc application and database is encrypted in SSL. The database endpoint is not open to public.
56 | 3. The S3 bucket has server side encryption. The traffic in transit between S3 bucket and Orthanc application is encrypted as well.
57 | 4. The password for database are generated dynamically and stored in AWS Secret Manager in AWS. The EC2 instance is granted access to the secret, which allows the configuration script to fetch the secret and launch container with it.
58 | 5. The self-signed X509 certificate is dynamically generated using openssl11 during bootstrapping, in compliance with [Mac requirement](https://support.apple.com/en-us/HT210176).
59 | 6. Secret Manager, S3 and the KMS key used to encrypt objects and secrets all use resource-based IAM role to restrict access.
60 | 7. VPC flow log and S3 access log are sent to a separate S3 bucket. However, the S3 access log usually takes 10 minutes to be delivered.
61 |
62 |
63 | ## Limitations
64 | Currently there are also some limitations with secure configuration:
65 |
66 | 1. Database secret rotation isn't implemented. Instead, Database password is generated at Terraform client and then sent to deployment server to create PostgreSQL. The generated password is also stored in state file of Terraform. To overcome this, the application would need to automatically receive secret update.
67 | 2. Secret management with Docker container: secret are presented to container process as environment variables, instead of file content. This is generally secure enough but not the best practice, as per [this article](https://techbeacon.com/devops/how-keep-your-container-secrets-secure).
68 | 3. Self-signed certificates are often flagged by the Browser as insecure. So users are advised to use their own certificates.
--------------------------------------------------------------------------------
/docs/design/ingress.md:
--------------------------------------------------------------------------------
1 | Ingress traffic management concerns how external web traffic (and possibly DICOM traffic) reaches the network interface of the Orthanc EC2 instances.
2 |
3 | The Orthweb solution does not provide a prescriptive design pattern or implementation for ingress traffic management. This is because the requirements in this area often vary so significantly that no two organizations share the same design.
4 |
5 | This section discusses some possible customization options for ingress traffic management. Note, these patterns are not the only viable options. Please discuss the most suitable design with us.
6 |
7 | ## Out-of-box Configuration
8 | The out-of-box configuration functions without an ingress traffic management service. However, it comes with two different DNS names, one for each EC2 instance, as illustrated below:
9 |
10 | 
11 |
12 | In this configuration, each EC2 instance lives in a separate availability zone. Both are connected to the same database (RDS) instance and storage (S3). In the event of an EC2 instance failure, the other instance is available. User may also choose to stop one of the instances for lower cost.
13 |
14 | On each EC2 instance, the Nginx container listens to port 443 and proxies web request according to its configuration.
15 |
16 | ## Limitations
17 | In most production scenarios, the out-of-box configuration is not convenient. First, the two EC2 instances have two separate DNS names. In the event that one instance becomes unavailable, users and modalities have to use the alternative site DNS name. Second, the DNS name is automatically created based on the public IP address. Users do not have control of the DNS names. Third, the DNS names end with `amazonaws.com`, which is not owned by the users, and therefore users are not able to create trusted certificates.
18 |
19 | To bring the solution to production, it is recommended to introduce additional cloud resources to manage ingress traffic. The rest of the section discusses some options at a very high level.
20 |
21 | ## Use Domain Naming Service (DNS)
22 | Consider introducing a DNS service to point to both EC2 instances. The DNS resolution result determines which EC2 instance the client connects to. So each EC2 instance must still open the 443 and 11112 ports. This pattern is illustrated below:
23 |
24 | 
25 |
26 | In this pattern, the DNS can resolve to the public DNS name of either EC2 instance. The result of DNS resolution can rotate, round robin or be based on availability. In this option you will bring your own DNS name, and manage your own TLS certificate, instead of using the self-signed certificate provisioned during automation.
27 |
28 | It is also possible to integrate with Content Delivery Network (CDN, such as CloudFlare, CloudFront) for advanced features such as application firewall.
29 |
30 | ## Use Load Balancer (NLB or ALB)
31 | As cost allows, consider placing a network load balancer in front of the EC2 instances. We would be able to configure the network load balancer so it automatically sends the traffic to a functional EC2 instance, thereby eliminating the manual fail over procedure. This pattern is illustrated as below:
32 |
33 | 
34 |
35 | This configuration has several advantages. The security group of the EC2 instances can be narrowed down to only open to the load balancer. You can use an Application Load Balancer or a Network Load Balancer in AWS. The former supports integration with Web Application Firewall but only for HTTPS traffic. The latter supports both DICOM and HTTPS traffic. Both options support integration with AWS Certificate Manager to automatically manage TLS certificates.
36 |
37 | ## Secure Application Traffic
38 | With appropriate Ingress Configuration the solution encrypts web traffic end-to-end. The DICOM image traffic technically can follow the same ingress traffic pattern. In some clinical settings, this is not always realistic, because many modalities do not support DICOM TLS sufficiently. The next section discusses some options to transfer DICOM images through different pathways.
39 |
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | # Orthweb - Orthanc Solution on AWS
2 |
3 |
4 |
5 | [](https://aws.amazon.com/amazon-linux-2)
6 | [](https://www.docker.com/)
7 | [](https://www.postgresql.org/)
8 | [](https://nginx.org/en/index.html)
9 | [](https://www.keycloak.org/)
10 | [](https://opensource.org/licenses/Apache-2.0)
11 |
12 | [](https://www.terraform.io/)
13 | [](https://aws.amazon.com/ec2/)
14 | [](https://aws.amazon.com/s3/)
15 | [](https://aws.amazon.com/rds/postgresql/)
16 |
17 |
18 | ## Overview
19 |
20 | The **[Orthweb](https://github.com/digihunch/orthweb)** project automates the creation of a cloud-based mini-[PACS](https://en.wikipedia.org/wiki/Picture_archiving_and_communication_system) based on **[Orthanc](https://www.orthanc-server.com/)** and Amazon Web Services (AWS). The project artifact addresses the cloud foundation and configuration management, and enables adopters to host the Orthanc software as a service ([SaaS](https://en.wikipedia.org/wiki/Software_as_a_service)). To get started, follow the [documentation](https://digihunch.github.io/orthweb/). 💪 Let's automate medical imaging!
21 |
22 | Imaging systems handling sensitive data must operate on secure platforms. Typically, large organizations dedicate specialized IT resources to build their enterprise-scale cloud foundations. This cloud foundation, also known as a [landing zone](https://www.digihunch.com/2022/12/landing-zone-in-aws/), addresses security and scalability. Each line of business of the large organization is allocated a segment (e.g. a VPC) in the landing zone, to deploy their own applications.
23 |
24 | However, many Orthanc adopters are small teams without overarching cloud strategies from their parent organizations. They are startups, research groups, independent clinics, and so on. To leverage Orthanc capabilities, they need simple cloud foundations that are equally secure and scalable. To close this gap, we proposed and implemented a cloud-based Orthanc solution: the [**Orthweb** project](https://www.digihunch.com/2020/11/medical-imaging-web-server-deployment-pipeline/).
25 |
26 | 
27 |
28 | To build the foundation fast, the **Orthweb** project uses a **Terraform** template (an [infrastructure-as-code](https://en.wikipedia.org/wiki/Infrastructure_as_code) technology) to provision a self-contained infrastructure stack in a single AWS account, without relying upon established network infrastructure. The infrastructure layer provisioned in this project contains a single VPC with multiple subnets, along with optional VPC endpoints. The infrastructure layer also contains encryption keys, managed database service and S3 storage.
29 |
30 | The **Orthweb** project also streamlines the configuration of the Orthanc solution, by proposing a paradigm for Orthanc configuration management. The project leverages cloud-init [user data](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) and `makefile` to configure the servers during the server's initialization process. The artifact to install **Orthanc** is stored in a separate repository for adopters to fork and customize. The [orthanc-config](https://github.com/digihunchinc/orthanc-config) repository is a good example.
31 |
32 |
33 | The project orchestrates the application containers with the Docker daemon on EC2 instances. Technical users can expect to build a cloud-based mini-PACS in one hour with rich features, scalability and security. For those considering hosting Orthanc on Kubernetes, check out the sister project [Korthweb](https://github.com/digihunch/korthweb).
--------------------------------------------------------------------------------
/docs/introduction/index.md:
--------------------------------------------------------------------------------
1 | ## Overview
2 |
3 | Orthanc handles sensitive data and must be hosted on secure platforms. The motive of **Orthweb** project is to accelerate Orthanc deployment on Amazon Web Services (AWS) platform. The Orthweb project includes:
4 |
5 | 1. A prescriptive architecture optimized for hosting Orthanc. The architecture is opinionated but suitable for common scenarios. The architecture design is discussed in the [Infrastructure](../design/infrastructure.md) section.
6 |
7 | 2. The implementation artifact for the prescriptive architecture using [Terraform](https://www.terraform.io/) to manage infrastructure as code. The **Orthanc Terraform template** is available in the [orthweb](https://github.com/digihunch/orthweb) GitHub repository.
8 |
9 | 3. A baseline Orthanc configuration with key plugins such as advanced authorization and auxiliary services such as KeyCloak. The artifact for configuration management is available in the [orthanc-config](https://github.com/digihunchinc/orthanc-config) GitHub repository. To customize configuration, create a fork of this repo.
10 |
11 | While **Orthweb** provisions a fully functional Orthanc solution, there are some areas it does not intend to address. One example is ingress traffic management, whose design must account for integration with the current infrastructure and security setup, which is vastly different from organization to organization. To drive this initiative in your organization, contact professional services at [Digi Hunch](https://www.digihunch.com)💡.
12 |
13 | ## Use case
14 |
15 | If you have an AWS account without networking foundation for Orthanc, the template in [Orthweb](https://github.com/digihunch/orthweb) suits exactly to your needs. Then you may use the automation artifact in [orthanc-config](https://github.com/digihunchinc/orthanc-config), or your own repository, to configure Orthanc.
16 |
17 | If you have a pre-created networking layer (e.g. VPC, subnets), then you only need to create a virtual machine with relevant dependencies before installing Orthanc. You can use **Orthweb** as a reference implementation to examine how the application interacts with underlying cloud resources, and potentially reuse some Terraform [modules](https://github.com/digihunch/orthweb/tree/main/terraform/modules) in the repo.
18 |
19 | To allow Terraform to create resources in AWS, it needs sufficient permissions for deployment. Such permission for deployment usually requires administrator-level access.
20 |
21 | ## Choice of Tools
22 |
23 | Orthweb is based on numerous open-source tools and commercial cloud services. Here are the rationales behind the choice:
24 |
25 | **Terraform** is a widely used infrastructure-as-code utility. The templates are written in Hashicorp Configuration Language (HCL), which strikes a good balance between declarativeness and level of abstraction. However, you do need to securely store [Terraform state](https://developer.hashicorp.com/terraform/language/state), and be wary of its [workflow nuances](https://itnext.io/pains-in-terraform-collaboration-249a56b4534e).
26 |
27 | **Docker** is a simple way to host container workload. The **Orthweb** solution uses `Docker Compose` to orchestrate several containers of different purposes which is widely used by application developers. [Amazon ECS](https://aws.amazon.com/ecs/) is an alternative with some [limitations](https://github.com/digihunch/orthweb/issues/1#issuecomment-852669561) and concerns on platform lock-in.
28 |
29 | **PostgreSQL** is the choice of database amongst all the database engines that Orthanc supports. It is feature rich and supports analytical workloads. AWS has two flavours of managed PostgreSQL: `RDS for PostgreSQL` and `Aurora PostgreSQL`. The Orthweb solution works with the former.
30 |
31 | **Nginx Proxy** is a widely adopted reverse proxy to handle incoming requests. There are several alternative reverse proxy technologies. From 2022 to 2024, **Orthweb** uses Envoy Proxy. Since 2024, Orthweb switched back to Nginx as the default reverse proxy because it is the popular choice in the Orthanc user community.
32 |
33 | **Amazon S3** is a scalable, feature-rich object storage platform well integrated with other AWS cloud services. S3 is the de-facto standard for object storage and Orthanc can store DICOM objects using S3 plugin. The pre-compiled binary for S3 plugin has been included the Orthanc release since 2022.
34 |
35 |
36 | ## Getting Started
37 |
38 | If you just want to start deploying Orthanc, skip right to the [Deployment](../deployment/preparation.md) section. Otherwise, in the next section, we will discuss the architecture design, how to use and what to expect from the **Orthweb** solution.
--------------------------------------------------------------------------------
/docs/support/index.md:
--------------------------------------------------------------------------------
1 | For feature requests or bugs, please open an [Issue](https://github.com/digihunch/orthweb/issues) on GitHub.
2 |
3 | Orthweb is dedicated to streamlining the deployment experience. The idealistic intent of 1-click install often conflicts with the reality of having to integrate with a diverse range of incumbent configurations. To strike a balance, we share our best practices with a prescriptive architecture, and leave some aspects open to further customization. Please discuss with [Digi Hunch](https://www.digihunch.com/)💡 about support and professional services. Some of our areas of expertise include:
4 |
5 | * Custom Orthanc Configurations
6 | * Custom Networking infrastructure
7 | * Deployment of Orthanc in custom [cloud platform](https://www.digihunch.com/cloud-platform/)
8 | * Integration with existing services, such as security and IAM
9 | * Design and build of custom [cloud landing zone](https://www.digihunch.com/2023/09/orchestrate-landing-zone-with-landing-zone-accelerator-on-aws/)
10 | * Improvement of [Terraform workflow](https://medium.com/itnext/pains-in-terraform-collaboration-249a56b4534e)
11 |
12 | ❤️ Thank you again for using Orthweb. 💪 Let's automate medical imaging!
--------------------------------------------------------------------------------
/docs/validation/additional.md:
--------------------------------------------------------------------------------
1 | ## Overview
2 | In this section we connect to database and S3 storage for additional points of validation.
3 |
4 | ## Database validation
5 |
6 | The PostgreSQL RDS instance is accessible only from the EC2 instance on port 5432. You can get the database URL and credential from file `/home/ec2-user/.orthanc.env`. To validate by psql client, run:
7 |
8 | ```sh
9 | sudo amazon-linux-extras enable postgresql14
10 | sudo yum install postgresql
11 | psql --host=postgresdbinstance.us-east-1.rds.amazonaws.com --port 5432 --username=myuser --dbname=orthancdb
12 | ```
13 |
14 | Then you are in the PostgreSQL command console and can check the tables using SQL, for example:
15 |
16 | ```sh
17 | orthancdb=> \dt;
18 | List of relations
19 | Schema | Name | Type | Owner
20 | --------+-----------------------+-------+--------
21 | public | attachedfiles | table | myuser
22 | public | changes | table | myuser
23 | public | deletedfiles | table | myuser
24 | public | deletedresources | table | myuser
25 | public | dicomidentifiers | table | myuser
26 | public | exportedresources | table | myuser
27 | public | globalintegers | table | myuser
28 | public | globalproperties | table | myuser
29 | public | maindicomtags | table | myuser
30 | public | metadata | table | myuser
31 | public | patientrecyclingorder | table | myuser
32 | public | remainingancestor | table | myuser
33 | public | resources | table | myuser
34 | public | serverproperties | table | myuser
35 | (14 rows)
36 |
37 | orthancdb=> select * from attachedfiles;
38 | id | filetype | uuid | compressedsize | uncompressedsize | compressiontype | uncompressedhash | compressedhash | revision
39 | ----+----------+--------------------------------------+----------------+------------------+-----------------+----------------------------------+----------------------------------+----------
40 | 4 | 1 | 87719ef0-cbb1-4249-a0ac-e68356d97a7a | 525848 | 525848 | 1 | bd07bf5f2f1287da0f0038638002e9b1 | bd07bf5f2f1287da0f0038638002e9b1 | 0
41 | (1 row)
42 | ```
43 |
44 | This is as far as we can go in terms of validating database. Without the schema document, we are not able to interpret the content. It is also not recommended to tamper with the tables directly bypassing the application.
45 |
46 | ## Storage Validation
47 |
48 | Storage validation can be performed simply by examining the content of S3 bucket. Once studies are sent to Orthanc, the corresponding DICOM file should appear in the S3 bucket. For example, we can run the following AWS CLI command from the EC2 instance:
49 |
50 |
51 | ```sh
52 | aws s3 ls s3://bucket-name
53 | 2021-12-02 18:54:41 525848 87719ef0-cbb1-4249-a0ac-e68356d97a7a.dcm
54 | ```
55 |
56 | The bucket is not publicly accessible and is protected by a bucket policy configured during resource provisioning.
--------------------------------------------------------------------------------
/docs/validation/advanced.md:
--------------------------------------------------------------------------------
1 | ## Overview
2 | In this section, we go through a few checkpoints through a system administrator's lens, to ensure the system is functional and correctly configured.
3 |
4 | ## Server Validation
5 |
6 | Now we SSH to the server as `ec2-user`, as instructed above. Once connected, we can check cloud init log:
7 | ```sh
8 | sudo tail -F /var/log/cloud-init-output.log
9 | ```
10 | In the log, each container should say `Orthanc has started`. The configuration files related to Orthanc deployment are in directory `/home/ec2-user/orthanc-config`. Refer to the [orthanc-config](https://github.com/digihunchinc/orthanc-config) repository for how the configuration automation works.
11 |
12 | ## DICOM communication (TLS)
13 |
14 | To emulate DICOM activity, we use [dcmtk](https://dicom.offis.de/dcmtk.php.en), with TLS options. We use the `echoscu` executable to issue `C-ECHO` DIMSE command, and the `storescu` executable to issue `C-STORE` commands. For example:
15 |
16 | ```sh
17 | echoscu -aet TESTER -aec ORTHANC -d +tls client.key client.crt -rc +cf ca.crt ec2-35-183-66-248.ca-central-1.compute.amazonaws.com 11112
18 | ```
19 | The files `client.key`, `client.crt` and `ca.crt` can all be obtained from the /tmp/ directory on the server.
20 |
21 |
22 | The output should read status code 0 in the `C-ECHO-RSP` that follows the `C-ECHO-RQ`. Here is an example of the output from `echoscu`:
23 |
24 | ```
25 | I: Association Accepted (Max Send PDV: 16372)
26 | I: Sending Echo Request (MsgID 1)
27 | D: DcmDataset::read() TransferSyntax="Little Endian Implicit"
28 | I: Received Echo Response (Success)
29 | I: Releasing Association
30 | ```
31 |
32 | Further, we can store some DICOM part 10 file (usually .dcm extension containing images) to Orthanc server, using `storescu` executable:
33 |
34 | ```sh
35 | storescu -aet TESTER -aec ORTHANC -d +tls client.key client.crt -rc +cf ca.crt ec2-35-183-66-248.ca-central-1.compute.amazonaws.com 11112 DICOM_Images/COVID/56364823.dcm
36 | ```
37 |
38 | Below is an example of what the output from `storescu` should look like:
39 |
40 | ```
41 | D: ===================== OUTGOING DIMSE MESSAGE ====================
42 | D: Message Type : C-STORE RQ
43 | D: Message ID : 427
44 | D: Affected SOP Class UID : CTImageStorage
45 | D: Affected SOP Instance UID : 1.3.6.1.4.1.9590.100.1.2.227776817313443872620744441692571990763
46 | D: Data Set : present
47 | D: Priority : medium
48 | D: ======================= END DIMSE MESSAGE =======================
49 | D: DcmDataset::read() TransferSyntax="Little Endian Implicit"
50 | I: Received Store Response
51 | D: ===================== INCOMING DIMSE MESSAGE ====================
52 | D: Message Type : C-STORE RSP
53 | D: Presentation Context ID : 41
54 | D: Message ID Being Responded To : 427
55 | D: Affected SOP Class UID : CTImageStorage
56 | D: Affected SOP Instance UID : 1.3.6.1.4.1.9590.100.1.2.227776817313443872620744441692571990763
57 | D: Data Set : none
58 | D: DIMSE Status : 0x0000: Success
59 | D: ======================= END DIMSE MESSAGE =======================
60 | I: Releasing Association
61 | ```
62 |
63 | C-STORE-RSP status 0 indicates successful image transfer, and the image should be viewable from the Orthanc site address.
64 |
65 | ## DICOM communication (without TLS)
66 |
67 | Caution: turn off TLS only if the images are transferred over private connection or encrypted connection. Refer to [device connectivity](../design/deviceconnectivity.md) for how to set up.
68 |
69 | To turn off TLS, locate the server configuration in the nginx configuration file for DICOM port, and remove the SSL options. For example, here is what the snippet looks like with TLS encryption:
70 | ```
71 | stream {
72 | server {
73 | listen 11112 ssl;
74 | proxy_pass orthanc-service:4242;
75 | ssl_certificate /usr/local/nginx/conf/site.pem;
76 | ssl_certificate_key /usr/local/nginx/conf/site.pem;
77 | ssl_protocols SSLv3 TLSv1 TLSv1.2 TLSv1.3;
78 | ssl_ciphers HIGH:!aNULL:!MD5:ECDH+AESGCM;
79 | ssl_session_cache shared:SSL:20m;
80 | ssl_session_timeout 4h;
81 | ssl_handshake_timeout 30s;
82 | }
83 | }
84 | ```
85 | Here is what it looks like after removing TLS encryption:
86 |
87 | ```
88 | stream {
89 | server {
90 | listen 11112;
91 | proxy_pass orthanc-service:4242;
92 | }
93 | }
94 | ```
95 |
96 | When using dcmtk utility for DICOM Ping or C-STORE, also remove the arguments related to tls.
--------------------------------------------------------------------------------
/docs/validation/basic.md:
--------------------------------------------------------------------------------
1 | ## Overview
2 | We first perform a basic level of validation as an end user. Then we'll dive into technical validation with certain components. In all the validation steps, it is important to know the correct service address. If your environment comes with customized ingress configuration, such as using your domain name, content delivery network or load balancer, the service address used for testing will be different.
3 |
4 | The steps given are based on out-of-box configurations. So the service address looks like `ec2-35-183-66-248.ca-central-1.compute.amazonaws.com`.
5 |
6 | ## DICOM ping
7 |
8 | To validate DICOM capability, we can test with C-ECHO and C-STORE. We can use any DICOM compliant application. For example, [Horos](https://horosproject.org/) on MacOS is a UI-based application. In Preference->Locations, configure a new DICOM node with:
9 |
10 | * Address: the site address as given above
11 | * AE title: ORTHANC
12 | * Port: 11112 (or otherwise configured)
13 |
14 | Remember to enable TLS. Then you will be able to verify the node (i.e. C-ECHO) and send existing studies from Horos to Orthanc (C-STORE).
15 |
16 | ## Web Browser
17 | To validate the web service, simply visit the site address (with `https://` scheme) and put in the [default credential](https://github.com/digihunch/orthweb/blob/main/app/orthanc.json#L6) at the prompt. Note that your web browser may flag the site as insecure because the server certificate's CA is self-signed and not trusted.
18 |
19 | Alternatively, you may use `curl` command to fetch the health check URI:
20 |
21 | ```sh
22 | curl -HHost:web.orthweb.com -k -X GET https://ec2-35-183-66-248.ca-central-1.compute.amazonaws.com/nginx_health --cacert ca.crt
23 | ```
24 | The curl command should return 200 code.
25 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | site_name: Orthweb - Orthanc Solution on AWS
2 | repo_name: digihunch/orthweb
3 | repo_url: https://github.com/digihunch/orthweb
4 | strict: true
5 |
6 | markdown_extensions:
7 | - pymdownx.superfences:
8 | custom_fences:
9 | - name: mermaid
10 | class: mermaid
11 | format: !!python/name:pymdownx.superfences.fence_code_format
12 | - pymdownx.snippets:
13 | auto_append: ["../LICENSE"]
14 | base_path: ["docs"]
15 |
16 | theme:
17 | name: material
18 | language: en
19 | palette:
20 | - scheme: default
21 | primary: indigo
22 | accent: indigo
23 | toggle:
24 | icon: material/brightness-7
25 | name: Switch to dark mode
26 | - scheme: slate
27 | primary: indigo
28 | accent: indigo
29 | toggle:
30 | icon: material/brightness-4
31 | name: Switch to light mode
32 | nav:
33 | - Home: 'index.md'
34 | - Introduction: 'introduction/index.md'
35 | - Architecture:
36 | - 'Infrastructure': 'design/infrastructure.md'
37 | - 'Configuration Management': 'design/configmgmt.md'
38 | - 'Ingress Traffic': 'design/ingress.md'
39 | - 'Device Connectivity': 'design/deviceconnectivity.md'
40 | - Deployment:
41 | - 'Preparation': 'deployment/preparation.md'
42 | - 'Infrastructure': 'deployment/infrastructure.md'
43 | - 'Configuration': 'deployment/configuration.md'
44 | - Validation:
45 | - 'Basic Validation': 'validation/basic.md'
46 | - 'Advanced Validation': 'validation/advanced.md'
47 | - 'Additional Validation': 'validation/additional.md'
48 |
49 | - Support: 'support/index.md'
50 | plugins:
51 | - search
52 | - include-markdown
53 |
--------------------------------------------------------------------------------
/terraform/.gitignore:
--------------------------------------------------------------------------------
1 | terraform.tfstate.backup
2 | terraform.tfstate
3 | .terraform/**
4 | .terraform.tfstate.lock.info
5 | .terraform.lock.hcl
6 | *.swp
7 | *.ovpn
8 | *.ovpn.base
9 |
--------------------------------------------------------------------------------
/terraform/README.md:
--------------------------------------------------------------------------------
1 | ## Requirements
2 |
3 | | Name | Version |
4 | |------|---------|
5 | | [terraform](#requirement\_terraform) | >= 1.10 |
6 | | [aws](#requirement\_aws) | >= 5.84.0 |
7 |
8 | ## Providers
9 |
10 | | Name | Version |
11 | |------|---------|
12 | | [random](#provider\_random) | 3.7.1 |
13 |
14 | ## Modules
15 |
16 | | Name | Source | Version |
17 | |------|--------|---------|
18 | | [client\_vpn](#module\_client\_vpn) | ./modules/client-vpn | n/a |
19 | | [database](#module\_database) | ./modules/database | n/a |
20 | | [ec2](#module\_ec2) | ./modules/ec2 | n/a |
21 | | [key](#module\_key) | ./modules/key | n/a |
22 | | [network](#module\_network) | ./modules/network | n/a |
23 | | [storage](#module\_storage) | ./modules/storage | n/a |
24 |
25 | ## Resources
26 |
27 | | Name | Type |
28 | |------|------|
29 | | [random_pet.prefix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/pet) | resource |
30 |
31 | ## Inputs
32 |
33 | | Name | Description | Type | Default | Required |
34 | |------|-------------|------|---------|:--------:|
35 | | [deployment\_options](#input\_deployment\_options) | Deployment Options for app configuration:
`ConfigRepo` Git Repository for app configuration.
`SiteName` The Site URL
`InitCommand` The command to execute from the config directory
`EnableCWLog` Enable sending Docker daemon log to Cloud Watch.
`CWLogRetention` Retention for Log Group | object({
ConfigRepo = string
SiteName = string
InitCommand = string
EnableCWLog = bool
CWLogRetention = number
})
| {
"CWLogRetention": 3,
"ConfigRepo": "https://github.com/digihunchinc/orthanc-config.git",
"EnableCWLog": true,
"InitCommand": "pwd && echo Custom Init Command (e.g. make aws)",
"SiteName": null
}
| no |
36 | | [ec2\_config](#input\_ec2\_config) | EC2 instance configuration.
`InstanceType` must be amd64 Linux Instance;
`PublicKeyData` is the Public Key (RSA or ED25519) of the administrator; used when deploying from Terraform Cloud; overriden by valid *PublicKeyPath* value;
`PublicKeyPath` is the local file path to the public key. Used when deploying from an environment with access to the public key on the file system. | object({
InstanceType = string
PublicKeyData = string
PublicKeyPath = string
})
| {
"InstanceType": "t3.medium",
"PublicKeyData": null,
"PublicKeyPath": "~/.ssh/id_rsa.pub"
}
| no |
37 | | [network\_config](#input\_network\_config) | Networking Configuration
`vpc_cidr` is the CIDR block for the main VPC.
`dcm_cli_cidrs` represents DICOM client IP address space.
`web_cli_cidrs` represents web client IP address space.
`az_count` sets number of availability zones, to either 2 or 3.
`public_subnet_pfxlen` sets the size of public subnets.
`private_subnet_pfxlen`sets the size of private subnets.
`interface_endpoints` specifies VPC interface endpoints to configure.
`vpn_client_cidr` set to a non-conflicting CIDR of at least /22 to configure client VPN; otherwise leave as `null` or `""` to skip client VPN configuration.
`vpn_cert_cn_suffix` is the suffix of the Common Name of VPN certificates.
`vpn_cert_valid_days` is validity of VPN certificate in days. | object({
vpc_cidr = string
dcm_cli_cidrs = list(string)
web_cli_cidrs = list(string)
az_count = number
public_subnet_pfxlen = number
private_subnet_pfxlen = number
interface_endpoints = list(string)
vpn_client_cidr = string
vpn_cert_cn_suffix = string
vpn_cert_valid_days = number
})
| {
"az_count": 2,
"dcm_cli_cidrs": [
"0.0.0.0/0"
],
"interface_endpoints": [],
"private_subnet_pfxlen": 22,
"public_subnet_pfxlen": 24,
"vpc_cidr": "172.17.0.0/16",
"vpn_cert_cn_suffix": "vpn.digihunch.com",
"vpn_cert_valid_days": 3650,
"vpn_client_cidr": "",
"web_cli_cidrs": [
"0.0.0.0/0"
]
}
| no |
38 | | [provider\_tags](#input\_provider\_tags) | Tags to apply for every resource by default at provider level. | `map(string)` | {
"environment": "dev",
"owner": "info@digihunch.com"
}
| no |
39 |
40 | ## Outputs
41 |
42 | | Name | Description |
43 | |------|-------------|
44 | | [db\_endpoint](#output\_db\_endpoint) | Database endpoint (port 5432 only accessible privately from EC2 Instance) |
45 | | [host\_info](#output\_host\_info) | Instance IDs and Public IPs of EC2 instances |
46 | | [s3\_bucket](#output\_s3\_bucket) | S3 bucket name for data storage |
47 | | [server\_dns](#output\_server\_dns) | DNS names of EC2 instances |
48 |
--------------------------------------------------------------------------------
/terraform/main.tf:
--------------------------------------------------------------------------------
1 | resource "random_pet" "prefix" {}
2 |
3 | locals {
4 | vpc_pfxlen = parseint(regex("/(\\d+)$", var.network_config.vpc_cidr)[0], 10)
5 | # Calculate subnet size for each type of subnet per AZ, in the order of public subnet and private subnet
6 | subnet_sizes = [var.network_config.public_subnet_pfxlen - local.vpc_pfxlen, var.network_config.private_subnet_pfxlen - local.vpc_pfxlen]
7 | # Calculate the Subnet CIDRs for each type of subnet, in all AZs
8 | subnet_cidrs = cidrsubnets(var.network_config.vpc_cidr, flatten([for i in range(var.network_config.az_count) : local.subnet_sizes])...)
9 | # For each type of subnet, build a list of CIDRs for the subnet type in all AZs
10 | public_subnets_cidr_list = [for idx, val in local.subnet_cidrs : val if idx % 2 == 0]
11 | private_subnets_cidr_list = [for idx, val in local.subnet_cidrs : val if idx % 2 == 1]
12 | }
13 |
14 | module "key" {
15 | # This encryption key is used to encrypt s3 bucket, database, secret manager entry, EC2 volume, etc
16 | source = "./modules/key"
17 | resource_prefix = random_pet.prefix.id
18 | }
19 |
20 | module "storage" {
21 | source = "./modules/storage"
22 | custom_key_arn = module.key.custom_key_id
23 | resource_prefix = random_pet.prefix.id
24 | is_prod = var.provider_tags.environment == "prd"
25 | depends_on = [module.key]
26 | }
27 |
28 | module "network" {
29 | source = "./modules/network"
30 | network_cidr_blocks = {
31 | vpc_cidr_block = var.network_config.vpc_cidr
32 | public_subnet_cidr_blocks = local.public_subnets_cidr_list
33 | private_subnet_cidr_blocks = local.private_subnets_cidr_list
34 | }
35 | ifep_services = var.network_config.interface_endpoints
36 | vpc_flow_logging_bucket_arn = module.storage.s3_info.logging_bucket_arn
37 | resource_prefix = random_pet.prefix.id
38 | }
39 |
40 | module "database" {
41 | source = "./modules/database"
42 | vpc_config = {
43 | vpc_id = module.network.vpc_info.vpc_id
44 | private_subnet_ids = module.network.vpc_info.private_subnet_ids
45 | }
46 | custom_key_arn = module.key.custom_key_id
47 | resource_prefix = random_pet.prefix.id
48 | is_prod = var.provider_tags.environment == "prd"
49 | depends_on = [module.key]
50 | }
51 |
52 | module "ec2" {
53 | source = "./modules/ec2"
54 | public_key = var.ec2_config.PublicKeyData != null ? var.ec2_config.PublicKeyData : (fileexists(var.ec2_config.PublicKeyPath) ? file(var.ec2_config.PublicKeyPath) : "")
55 | role_name = "${random_pet.prefix.id}-InstanceRole"
56 | db_info = module.database.db_info
57 | secret_info = module.database.secret_info
58 | s3_bucket_name = module.storage.s3_info.bucket_name
59 | custom_key_arn = module.key.custom_key_id
60 | vpc_config = {
61 | vpc_id = module.network.vpc_info.vpc_id
62 | public_subnet_ids = module.network.vpc_info.public_subnet_ids
63 | public_subnet_cidr_blocks = local.public_subnets_cidr_list
64 | dcm_cli_cidrs = (var.network_config.vpn_client_cidr == "" || var.network_config.vpn_client_cidr == null) ? var.network_config.dcm_cli_cidrs : concat(var.network_config.dcm_cli_cidrs, [var.network_config.vpn_client_cidr])
65 | web_cli_cidrs = var.network_config.web_cli_cidrs
66 | }
67 | ec2_config = {
68 | InstanceType = var.ec2_config.InstanceType
69 | }
70 | deployment_options = var.deployment_options
71 | resource_prefix = random_pet.prefix.id
72 | depends_on = [module.database, module.storage, module.network]
73 | }
74 |
75 | module "client_vpn" {
76 | source = "./modules/client-vpn"
77 | count = (var.network_config.vpn_client_cidr == "" || var.network_config.vpn_client_cidr == null) ? 0 : 1
78 | vpn_config = {
79 | vpc_id = module.network.vpc_info.vpc_id
80 | vpc_cidr = var.network_config.vpc_cidr
81 | private_subnet_ids = module.network.vpc_info.private_subnet_ids
82 | vpn_client_cidr = var.network_config.vpn_client_cidr
83 | vpn_cert_cn_suffix = var.network_config.vpn_cert_cn_suffix
84 | vpn_cert_valid_days = var.network_config.vpn_cert_valid_days
85 | }
86 |
87 | s3_bucket_name = module.storage.s3_info.bucket_name
88 | resource_prefix = random_pet.prefix.id
89 | depends_on = [module.network]
90 | }
--------------------------------------------------------------------------------
/terraform/modules/client-vpn/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | vpncfg_filename = "vpn-config.ovpn"
3 | }
4 |
5 | data "aws_region" "this" {}
6 |
7 | resource "tls_private_key" "ca_key" {
8 | algorithm = "RSA"
9 | rsa_bits = 2048
10 | }
11 |
12 | # Self-signed root CA used to sign both the VPN server and client certificates.
13 | resource "tls_self_signed_cert" "ca_cert" {
14 |   private_key_pem = tls_private_key.ca_key.private_key_pem
15 |
16 |   subject {
17 |     common_name  = "ca.${var.vpn_config.vpn_cert_cn_suffix}"
18 |     organization = "VPN Certificate Organization"
19 |   }
20 |
21 |   is_ca_certificate     = true
22 |   set_authority_key_id  = true
23 |   set_subject_key_id    = true
24 |   # Bug fix: validity_period_hours is in HOURS. The input is in days, so multiply
25 |   # by 24 — matching server_cert/client_cert below — otherwise the CA would expire
26 |   # long before the certificates it signs (e.g. ~152 days vs 10 years by default).
27 |   validity_period_hours = var.vpn_config.vpn_cert_valid_days * 24
28 |
29 |   allowed_uses = [
30 |     "cert_signing",
31 |     "crl_signing",
32 |   ]
33 | }
30 |
31 | resource "tls_private_key" "server_key" {
32 | algorithm = "RSA"
33 | rsa_bits = 2048
34 | }
35 |
36 | resource "tls_private_key" "client_key" {
37 | algorithm = "RSA"
38 | rsa_bits = 2048
39 | }
40 |
41 | resource "tls_cert_request" "server_csr" {
42 | private_key_pem = tls_private_key.server_key.private_key_pem
43 |
44 | subject {
45 | common_name = "server.${var.vpn_config.vpn_cert_cn_suffix}"
46 | }
47 |
48 | dns_names = ["server.${var.vpn_config.vpn_cert_cn_suffix}"]
49 | }
50 |
51 | resource "tls_cert_request" "client_csr" {
52 | private_key_pem = tls_private_key.client_key.private_key_pem
53 |
54 | subject {
55 | common_name = "client.${var.vpn_config.vpn_cert_cn_suffix}"
56 | }
57 |
58 | dns_names = ["client.${var.vpn_config.vpn_cert_cn_suffix}"]
59 | }
60 |
61 | resource "tls_locally_signed_cert" "server_cert" {
62 | cert_request_pem = tls_cert_request.server_csr.cert_request_pem
63 | ca_private_key_pem = tls_private_key.ca_key.private_key_pem
64 | ca_cert_pem = tls_self_signed_cert.ca_cert.cert_pem
65 |
66 | set_subject_key_id = true
67 | is_ca_certificate = false
68 | validity_period_hours = var.vpn_config.vpn_cert_valid_days * 24
69 |
70 | allowed_uses = [
71 | "key_encipherment",
72 | "digital_signature",
73 | "server_auth",
74 | ]
75 | }
76 |
77 | resource "tls_locally_signed_cert" "client_cert" {
78 | cert_request_pem = tls_cert_request.client_csr.cert_request_pem
79 | ca_private_key_pem = tls_private_key.ca_key.private_key_pem
80 | ca_cert_pem = tls_self_signed_cert.ca_cert.cert_pem
81 |
82 | validity_period_hours = var.vpn_config.vpn_cert_valid_days * 24
83 |
84 | allowed_uses = [
85 | "key_encipherment",
86 | "digital_signature",
87 | "client_auth",
88 | ]
89 | }
90 |
91 | resource "aws_acm_certificate" "imported_vpn_server_cert" {
92 | private_key = tls_private_key.server_key.private_key_pem
93 | certificate_body = tls_locally_signed_cert.server_cert.cert_pem
94 | certificate_chain = tls_self_signed_cert.ca_cert.cert_pem
95 | lifecycle {
96 | create_before_destroy = true
97 | }
98 | tags = {
99 | Name = "ImportedVPNServerCertificate"
100 | }
101 | }
102 |
103 | resource "aws_acm_certificate" "imported_vpn_client_cert" {
104 | private_key = tls_private_key.client_key.private_key_pem
105 | certificate_body = tls_locally_signed_cert.client_cert.cert_pem
106 | certificate_chain = tls_self_signed_cert.ca_cert.cert_pem
107 |
108 | lifecycle {
109 | create_before_destroy = true
110 | }
111 | tags = {
112 | Name = "ImportedVPNClientCertificate"
113 | }
114 | }
115 |
116 | resource "aws_ec2_client_vpn_endpoint" "client_vpn" {
117 | description = "Client VPN Endpoint"
118 | server_certificate_arn = aws_acm_certificate.imported_vpn_server_cert.arn
119 | client_cidr_block = var.vpn_config.vpn_client_cidr
120 | vpc_id = var.vpn_config.vpc_id
121 | security_group_ids = [aws_security_group.vpn_secgroup.id]
122 | split_tunnel = true
123 |
124 | authentication_options {
125 | type = "certificate-authentication"
126 | root_certificate_chain_arn = aws_acm_certificate.imported_vpn_client_cert.arn
127 | }
128 |
129 | connection_log_options {
130 | enabled = false
131 | }
132 | tags = {
133 | Name = "ClientVPN-Endpoint"
134 | }
135 | }
136 |
137 | data "external" "vpn_config_base" {
138 | program = ["bash", "-c", "aws ec2 export-client-vpn-client-configuration --client-vpn-endpoint-id ${aws_ec2_client_vpn_endpoint.client_vpn.id} --region ${data.aws_region.this.name} --output json"]
139 | depends_on = [aws_ec2_client_vpn_endpoint.client_vpn]
140 | }
141 |
142 | resource "local_file" "vpn_config" {
143 | depends_on = [aws_ec2_client_vpn_endpoint.client_vpn]
144 | filename = "./out/${local.vpncfg_filename}"
145 | content = <<-EOT
146 | ${data.external.vpn_config_base.result.ClientConfiguration}
147 |
148 |
149 | ${tls_locally_signed_cert.client_cert.cert_pem}
150 |
151 |
152 |
153 | ${tls_private_key.client_key.private_key_pem_pkcs8}
154 |
155 | EOT
156 | }
157 |
158 | # Upload the generated .ovpn client config to the project S3 bucket for administrators to download. NOTE(review): the resource label "vpc_config_file" looks like a typo for "vpn_config_file"; renaming it would move the resource in Terraform state, so it is left unchanged.
159 | resource "aws_s3_object" "vpc_config_file" {
160 |   bucket = var.s3_bucket_name
161 |   key    = "config/${local.vpncfg_filename}"
162 |   source = "./out/${local.vpncfg_filename}"
163 |   #source_hash = fileexists("./out/${local.vpncfg_filename}") ? filebase64sha256("./out/${local.vpncfg_filename}") : null
164 |
165 |   depends_on = [resource.local_file.vpn_config]
166 | }
167 |
# Authorize all connected VPN clients to reach the entire VPC CIDR.
resource "aws_ec2_client_vpn_authorization_rule" "authorization_rule" {
  client_vpn_endpoint_id = aws_ec2_client_vpn_endpoint.client_vpn.id
  target_network_cidr    = var.vpn_config.vpc_cidr ## Where the client VPN can connect.
  authorize_all_groups   = true
}

# Associate the VPN endpoint with every private subnet, one association per
# subnet.
resource "aws_ec2_client_vpn_network_association" "vpn_subnet_association" {
  #for_each = toset(var.vpn_config.private_subnet_ids)
  # for_each doesn't like values derived from resource attributes that cannot be determined until apply
  # therefore for_each would require targeted apply first. count is used instead.
  count                  = length(var.vpn_config.private_subnet_ids)
  client_vpn_endpoint_id = aws_ec2_client_vpn_endpoint.client_vpn.id
  subnet_id              = var.vpn_config.private_subnet_ids[count.index]
}
182 |
# Security group attached to the Client VPN endpoint. Only inbound UDP/443 is
# allowed (the VPN transport); no egress block is declared, so Terraform
# removes the default allow-all egress rule.
resource "aws_security_group" "vpn_secgroup" {
  name        = "${var.resource_prefix}-vpn-secgroup"
  description = "Security group for VPN endpoint"
  vpc_id      = var.vpn_config.vpc_id

  ingress {
    from_port   = 443
    to_port     = 443
    protocol    = "udp"
    cidr_blocks = ["0.0.0.0/0"]
    description = "Allow UDP traffic coming in through port 443"
  }

  tags = { Name = "${var.resource_prefix}-vpn-sg" }
}
198 |
--------------------------------------------------------------------------------
/terraform/modules/client-vpn/variables.tf:
--------------------------------------------------------------------------------
variable "vpn_config" {
  description = "VPN configuration"
  type = object({
    vpc_id              = string       # VPC the Client VPN endpoint attaches to
    vpc_cidr            = string       # CIDR clients are authorized to reach
    vpn_client_cidr     = string       # address pool handed out to VPN clients
    vpn_cert_cn_suffix  = string       # Common Name suffix for generated certs
    private_subnet_ids  = list(string) # subnets to associate with the endpoint
    vpn_cert_valid_days = number       # certificate validity period in days
  })
}
12 |
variable "resource_prefix" {
  type        = string
  description = "Unique prefix of each resource" # fixed typo: "Uniq"
}

variable "s3_bucket_name" {
  type        = string
  description = "Name of the S3 bucket to which the generated VPN client configuration file is uploaded"
}
--------------------------------------------------------------------------------
/terraform/modules/database/main.tf:
--------------------------------------------------------------------------------
locals {
  db_log_exports        = ["postgresql", "upgrade"] # RDS log types exported to CloudWatch
  db_log_retention_days = 7                         # CloudWatch retention for exported DB logs
}

data "aws_caller_identity" "current" {}

# Random master password for the database. The special-character set is
# restricted; presumably to characters the database engine accepts in a
# password — TODO confirm against the RDS engine's constraints.
resource "random_password" "password" {
  length           = 16
  special          = true
  override_special = "!%*-_+:?"
}

# Secrets Manager entry that will hold the database credentials, encrypted
# with the project's customer-managed KMS key.
resource "aws_secretsmanager_secret" "secretDB" {
  name       = "${var.resource_prefix}DatabaseCreds"
  kms_key_id = var.custom_key_arn
  tags       = { Name = "${var.resource_prefix}-DBSecret" }
}
19 |
20 | resource "aws_secretsmanager_secret_version" "sversion" {
21 | secret_id = aws_secretsmanager_secret.secretDB.id
22 | secret_string = <> $ConfigDir/.env
28 | echo S3_BUCKET=${s3_bucket} >> $ConfigDir/.env
29 | echo S3_REGION=${aws_region} >> $ConfigDir/.env
30 |
31 | cd $ConfigDir
32 | ${init_command}
33 | '
34 |
35 | ## Configure Docker daemon
36 |
37 | if [ "${cw_docker_log}" == "true" ]; then
38 | cat </etc/docker/daemon.json
39 | {
40 | "log-driver": "awslogs",
41 | "log-opts": {
42 | "awslogs-region": "${aws_region}",
43 | "awslogs-group": "/${resource_prefix}/orthweb/containers"
44 | }
45 | }
46 | EOF
47 | fi
48 |
49 | systemctl restart docker
50 | echo "Leaving userdata2 script"
51 |
--------------------------------------------------------------------------------
/terraform/modules/ec2/variables.tf:
--------------------------------------------------------------------------------
# Administrator's SSH public key for the EC2 key pair.
# NOTE(review): public keys are not secret; `sensitive = true` appears to be
# for suppressing the value in plan output — confirm intent.
variable "public_key" {
  type      = string
  sensitive = true
}
variable "vpc_config" {
  description = "VPC configuration"
  type = object({
    vpc_id                    = string
    public_subnet_cidr_blocks = list(string)
    public_subnet_ids         = map(string) # keyed by CIDR (see network module output)
    web_cli_cidrs             = list(string) # web client source IP ranges
    dcm_cli_cidrs             = list(string) # DICOM client source IP ranges
  })
}
variable "s3_bucket_name" {
  type        = string
  description = "Name of the S3 bucket for data storage, made available to the instances"
}
variable "role_name" {
  type        = string
  description = "Name of the IAM role assumed by the EC2 instances (via instance profile)"
}
# Connection details of the provisioned database, passed in from the database
# module.
variable "db_info" {
  type = object({
    db_address             = string
    db_port                = string
    db_instance_identifier = string
    db_instance_arn        = string
  })

}
# Location of the database credentials in Secrets Manager.
variable "secret_info" {
  type = object({
    db_secret_arn  = string
    db_secret_name = string
  })
}
# ARN of the customer-managed KMS key used for encryption at rest.
variable "custom_key_arn" {
  type = string
}
# Free-form deployment options (see root variable deployment_options).
# NOTE(review): map(any) is loosely typed; an object type would catch missing
# keys earlier — confirm before tightening.
variable "deployment_options" {
  type = map(any)
}
variable "ec2_config" {
  type = object({
    InstanceType = string # must be an amd64 Linux instance type
  })
}
variable "resource_prefix" {
  type        = string
  description = "Unique prefix of each resource" # fixed typo: "Uniq"
}
51 |
--------------------------------------------------------------------------------
/terraform/modules/key/main.tf:
--------------------------------------------------------------------------------
data "aws_caller_identity" "current" {}

data "aws_region" "this" {}

# Customer-managed KMS key shared by the other modules (secrets, S3 SSE).
# The key policy grants full control to the account root and usage permission
# to the AWS log-delivery services that write encrypted logs.
resource "aws_kms_key" "customKey" {
  description             = "This key is used to encrypt resources"
  deletion_window_in_days = 10
  enable_key_rotation     = true
  policy = jsonencode({
    Version = "2012-10-17"
    Id      = "${var.resource_prefix}-KMS-KeyPolicy"
    Statement = [
      {
        # Required so IAM policies in the account can govern access; without
        # this statement the key would be unmanageable.
        Sid    = "Enable IAM User Permissions"
        Effect = "Allow"
        Principal = {
          "AWS" : "arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"
        }
        Action   = "kms:*"
        Resource = "*"
      }, {
        Sid    = "Allow Cloud Watch, VPC flow log and s3 access logging sources to use the key"
        Effect = "Allow"
        Principal = {
          "Service" : [
            "logs.${data.aws_region.this.name}.amazonaws.com",
            "delivery.logs.amazonaws.com",
            "logging.s3.amazonaws.com"
          ]
        }
        Action = [
          "kms:Encrypt*",
          "kms:Decrypt*",
          "kms:ReEncrypt*",
          "kms:GenerateDataKey*",
          "kms:Describe*",
        ]
        Resource = "*"
      }
    ]
  })
  tags = { Name = "${var.resource_prefix}-Custom-KMS-Key" }
}
44 |
45 |
--------------------------------------------------------------------------------
/terraform/modules/key/output.tf:
--------------------------------------------------------------------------------
# NOTE(review): despite the name "custom_key_id", this exposes the key ARN,
# and callers consume it as an ARN (e.g. var.custom_key_arn downstream).
# Renaming the output would break callers, so it is left as-is.
output "custom_key_id" {
  value = aws_kms_key.customKey.arn
}
--------------------------------------------------------------------------------
/terraform/modules/key/variables.tf:
--------------------------------------------------------------------------------
variable "resource_prefix" {
  type        = string
  description = "Unique prefix of each resource" # fixed typo: "Uniq"
}
5 |
--------------------------------------------------------------------------------
/terraform/modules/network/main.tf:
--------------------------------------------------------------------------------
data "aws_availability_zones" "this" {}
data "aws_region" "this" {}

# Main VPC for the deployment.
resource "aws_vpc" "orthmain" {
  cidr_block           = var.network_cidr_blocks.vpc_cidr_block
  instance_tenancy     = "default"
  enable_dns_hostnames = true
  tags                 = { Name = "${var.resource_prefix}-MainVPC" }
}

# Flow log capturing only REJECTed traffic, delivered to the logging S3
# bucket (S3 destination; no CloudWatch role needed for this path).
resource "aws_flow_log" "mainVPCflowlog" {
  log_destination          = var.vpc_flow_logging_bucket_arn
  log_destination_type     = "s3"
  traffic_type             = "REJECT"
  vpc_id                   = aws_vpc.orthmain.id
  max_aggregation_interval = 600
  destination_options {
    per_hour_partition = true
  }
  tags = { Name = "${var.resource_prefix}-MainVPCFlowLog" }
}
22 |
23 | # Instances are placed in public subnet. Private for DB subnets and endpoint interfaces.
24 | # Private subnets do not need to access the Internet, hence no NAT Gateway
25 |
# One public subnet per CIDR in public_subnet_cidr_blocks, assigned to AZs in
# order. The for_each key is the last two characters of the AZ name (e.g.
# "1a"), which keeps resource addresses stable across CIDR changes.
resource "aws_subnet" "public_subnets" {
  #checkov:skip=CKV_AWS_130: For public subnet, assign public IP by default
  for_each = {
    for cidr in var.network_cidr_blocks.public_subnet_cidr_blocks :
    substr(data.aws_availability_zones.this.names[index(var.network_cidr_blocks.public_subnet_cidr_blocks, cidr)], -2, -1) => {
      subnet_cidr_block = cidr
      availability_zone = data.aws_availability_zones.this.names[index(var.network_cidr_blocks.public_subnet_cidr_blocks, cidr)]
    }
  }
  vpc_id                  = aws_vpc.orthmain.id
  cidr_block              = each.value.subnet_cidr_block
  map_public_ip_on_launch = true
  availability_zone       = each.value.availability_zone
  tags = {
    Name = "${var.resource_prefix}-PublicSubnet${each.key}"
    Type = "Public"
  }
}

# Private subnets, built the same way as the public ones; no public IPs.
resource "aws_subnet" "private_subnets" {
  for_each = {
    for cidr in var.network_cidr_blocks.private_subnet_cidr_blocks :
    substr(data.aws_availability_zones.this.names[index(var.network_cidr_blocks.private_subnet_cidr_blocks, cidr)], -2, -1) => {
      subnet_cidr_block = cidr
      availability_zone = data.aws_availability_zones.this.names[index(var.network_cidr_blocks.private_subnet_cidr_blocks, cidr)]
    }
  }
  vpc_id                  = aws_vpc.orthmain.id
  cidr_block              = each.value.subnet_cidr_block
  map_public_ip_on_launch = false
  availability_zone       = each.value.availability_zone
  tags = {
    Name = "${var.resource_prefix}-PrivateSubnet${each.key}"
    Type = "Private"
  }
}
# Internet gateway for the public subnets.
resource "aws_internet_gateway" "maingw" {
  vpc_id = aws_vpc.orthmain.id
  tags   = { Name = "${var.resource_prefix}-MainGateway" }
}

# Route table sending all non-local traffic from public subnets to the IGW.
resource "aws_route_table" "public_route_table" {
  vpc_id = aws_vpc.orthmain.id
  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.maingw.id
  }
  tags = { Name = "${var.resource_prefix}-PublicRouteTable" }
}

# Associate every public subnet with the public route table.
resource "aws_route_table_association" "pubsub_rt_assocs" {
  for_each       = aws_subnet.public_subnets
  subnet_id      = aws_subnet.public_subnets[each.key].id
  route_table_id = aws_route_table.public_route_table.id
}
81 |
# S3 gateway endpoint so S3 traffic stays on the AWS network.
# NOTE(review): no route_table_ids are set here and no
# aws_vpc_endpoint_route_table_association is visible in this module, so the
# endpoint is not attached to any route table — confirm the association is
# made elsewhere, otherwise traffic will not actually use the endpoint.
resource "aws_vpc_endpoint" "s3_ep" {
  vpc_id            = aws_vpc.orthmain.id
  service_name      = "com.amazonaws.${data.aws_region.this.name}.s3"
  vpc_endpoint_type = "Gateway"
  tags              = { Name = "${var.resource_prefix}-s3-gwep" }
}

# Shared security group for all interface endpoints: HTTPS from within the
# VPC only.
resource "aws_security_group" "ifep_sg" {
  name        = "${var.resource_prefix}-interface-endpoint-sg"
  description = "Security Group For Interface Endpoints"
  vpc_id      = aws_vpc.orthmain.id
  # Inbound rule to allow traffic on port 443 (HTTPS)
  ingress {
    description = "Allow inbound HTTPS traffic"
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = [var.network_cidr_blocks.vpc_cidr_block]
  }
  # No egress rule because an endpoint should not initiate communication
  tags = { Name = "${var.resource_prefix}-interface-endpoint-sg" }
}
104 |
# One interface endpoint per requested service (var.ifep_services), placed in
# every private subnet with private DNS enabled so the default AWS service
# hostnames resolve to the endpoint.
resource "aws_vpc_endpoint" "standard_interface_endpoints" {
  for_each            = toset(var.ifep_services)
  vpc_id              = aws_vpc.orthmain.id
  service_name        = "com.amazonaws.${data.aws_region.this.name}.${each.key}"
  vpc_endpoint_type   = "Interface"
  private_dns_enabled = true
  subnet_ids          = values(aws_subnet.private_subnets)[*].id
  security_group_ids  = [aws_security_group.ifep_sg.id]
  tags                = { Name = "${var.resource_prefix}-${each.key}-ifep" }
}
115 |
116 |
117 |
--------------------------------------------------------------------------------
/terraform/modules/network/output.tf:
--------------------------------------------------------------------------------
# Consolidated network identifiers for downstream modules.
# Note the asymmetry: public_subnet_ids is a map keyed by subnet CIDR, while
# private_subnet_ids is a plain list — callers depend on both shapes.
output "vpc_info" {
  value = {
    vpc_id             = aws_vpc.orthmain.id
    public_subnet_ids  = { for k, v in aws_subnet.public_subnets : v.cidr_block => v.id }
    private_subnet_ids = values(aws_subnet.private_subnets)[*].id
  }
}
8 |
9 |
--------------------------------------------------------------------------------
/terraform/modules/network/variables.tf:
--------------------------------------------------------------------------------
variable "network_cidr_blocks" {
  description = "CIDR blocks at VPC and subnet levels"
  type = object({
    vpc_cidr_block             = string
    public_subnet_cidr_blocks  = list(string)
    private_subnet_cidr_blocks = list(string)
  })
}
variable "ifep_services" {
  description = "Short names of AWS services to expose via VPC interface endpoints (e.g. sts, secretsmanager)"
  type        = list(string)
  default     = ["sts", "secretsmanager"]
}
variable "vpc_flow_logging_bucket_arn" {
  description = "ARN of the S3 bucket that receives the VPC flow logs"
  type        = string
}
variable "resource_prefix" {
  type        = string
  description = "Unique prefix of each resource" # fixed typo: "Uniq"
}
20 |
--------------------------------------------------------------------------------
/terraform/modules/storage/main.tf:
--------------------------------------------------------------------------------
# Key prefix under which the logging bucket stores this bucket's access logs.
locals { access_log_prefix = "accesslog/orthbucket/" }

data "aws_caller_identity" "current" {}

data "aws_region" "this" {}

# Primary data bucket.
resource "aws_s3_bucket" "orthbucket" {
  bucket = "${var.resource_prefix}-orthbucket"

  force_destroy = !var.is_prod # remaining object does not stop bucket from being deleted when force_destroy is true
  tags          = { Name = "${var.resource_prefix}-orthbucket" }
}

# Keep object versions so overwrites/deletes are recoverable.
resource "aws_s3_bucket_versioning" "orthbucket_versioning" {
  bucket = aws_s3_bucket.orthbucket.id
  versioning_configuration {
    status = "Enabled"
  }
}
20 |
# Default SSE-KMS encryption for the data bucket with the customer-managed key.
# NOTE(review): the resource name "test" looks like a leftover; renaming it
# would force a destroy/recreate in existing state, so it is left as-is.
resource "aws_s3_bucket_server_side_encryption_configuration" "test" {
  bucket = aws_s3_bucket.orthbucket.bucket
  rule {
    apply_server_side_encryption_by_default {
      kms_master_key_id = var.custom_key_arn
      sse_algorithm     = "aws:kms"
    }
  }
}

# Block all forms of public access on the data bucket.
resource "aws_s3_bucket_public_access_block" "orthbucketblockpublicaccess" {
  bucket                  = aws_s3_bucket.orthbucket.id
  block_public_acls       = true
  block_public_policy     = true
  ignore_public_acls      = true
  restrict_public_buckets = true
  depends_on              = [aws_s3_bucket.orthbucket] # explicit dependency to avoid errors on conflicting conditional operation
}
39 |
40 | # Ref https://aws.amazon.com/blogs/security/how-to-restrict-amazon-s3-bucket-access-to-a-specific-iam-role/
41 | # Each IAM entity (user or role) has a defined aws:userid variable.
42 |
# Bucket policy for the data bucket: enforce TLS-only access and allow
# object/bucket operations for principals in this account (scoped further by
# their own IAM policies).
resource "aws_s3_bucket_policy" "orthbucketpolicy" {
  bucket = aws_s3_bucket.orthbucket.id
  policy = jsonencode({
    Version = "2012-10-17"
    Id      = "${var.resource_prefix}-OrthBucketPolicy"
    Statement = [
      {
        # Deny any request made over plain HTTP.
        Sid       = "DenyInsecureConnections"
        Effect    = "Deny"
        Principal = "*"
        Action    = "s3:*"
        Resource = [
          aws_s3_bucket.orthbucket.arn,
          "${aws_s3_bucket.orthbucket.arn}/*",
        ]
        Condition = {
          Bool = {
            "aws:SecureTransport" : "false"
          }
        }
      },
      {
        # The account-root principal delegates to IAM; individual identities
        # still need their own allow.
        Sid    = "AllowAccountRoot"
        Effect = "Allow"
        Principal = {
          AWS = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"
        }
        Action = [
          "s3:Put*",
          "s3:Get*",
          "s3:List*",
          "s3:Delete*"
        ]
        Resource = [
          aws_s3_bucket.orthbucket.arn,
          "${aws_s3_bucket.orthbucket.arn}/*",
        ]
      }
    ]
  })
  depends_on = [aws_s3_bucket_public_access_block.orthbucketblockpublicaccess]
}
85 |
# Dedicated bucket receiving S3 server access logs and VPC flow logs.
resource "aws_s3_bucket" "logging_bucket" {
  bucket        = "${var.resource_prefix}-orthweb-logging"
  force_destroy = true
  tags          = { Name = "${var.resource_prefix}-logging" }
}
resource "aws_s3_bucket_versioning" "orthweb_logging_versioning" {
  bucket = aws_s3_bucket.logging_bucket.id
  versioning_configuration {
    status = "Enabled"
  }
}

# NOTE(review): VPC flow logs with an S3 destination are authorized through
# the bucket policy (AWSLogDelivery* statements below), not an IAM role —
# confirm whether this policy/role pair is actually consumed anywhere.
resource "aws_iam_policy" "vpc_flow_logs_policy" {
  name        = "${var.resource_prefix}-vpc-flow-logs-policy"
  description = "IAM policy for VPC flow logs to write logs to S3 bucket"

  policy = jsonencode({
    Version = "2012-10-17",
    Statement = [
      {
        Effect = "Allow",
        Action = [
          "s3:PutObject",
          "s3:GetBucketAcl",
          "s3:GetBucketPolicy",
          "s3:PutObjectAcl"
        ],
        Resource = [
          aws_s3_bucket.logging_bucket.arn,
          "${aws_s3_bucket.logging_bucket.arn}/*"
        ]
      }
    ]
  })
}
121 |
122 | # IAM role for VPC flow logs to assume and write logs to the S3 bucket
# IAM role assumable by the VPC Flow Logs service.
# NOTE(review): aws_flow_log in the network module uses an S3 destination and
# does not reference this role — confirm it is still needed.
resource "aws_iam_role" "vpc_flow_logs_role" {
  name = "${var.resource_prefix}-vpc-flow-logs-role"
  assume_role_policy = jsonencode({
    Version = "2012-10-17",
    Statement = [
      {
        Effect = "Allow",
        Principal = {
          Service = "vpc-flow-logs.amazonaws.com"
        },
        Action = "sts:AssumeRole"
      }
    ]
  })
}

# Attach the IAM policy to the IAM role
resource "aws_iam_role_policy_attachment" "vpc_flow_logs_policy_attachment" {
  role       = aws_iam_role.vpc_flow_logs_role.name
  policy_arn = aws_iam_policy.vpc_flow_logs_policy.arn
}

# Default SSE-KMS encryption on the logging bucket.
# NOTE(review): S3 server access logging historically does not support a
# destination bucket whose default encryption uses SSE-KMS — verify log
# delivery actually succeeds with this configuration.
resource "aws_s3_bucket_server_side_encryption_configuration" "logging_sse" {
  bucket = aws_s3_bucket.logging_bucket.bucket
  rule {
    apply_server_side_encryption_by_default {
      kms_master_key_id = var.custom_key_arn
      sse_algorithm     = "aws:kms"
    }
  }
}
154 |
# Block all forms of public access on the logging bucket.
resource "aws_s3_bucket_public_access_block" "orthweb_loggingbucket_blockpublicaccess" {
  bucket                  = aws_s3_bucket.logging_bucket.id
  block_public_acls       = true
  block_public_policy     = true
  ignore_public_acls      = true
  restrict_public_buckets = true
  depends_on              = [aws_s3_bucket.logging_bucket] # explicit dependency to avoid errors on conflicting conditional operation
}
163 |
# Bucket policy letting the AWS log-delivery services write into the logging
# bucket, each scoped to its expected source account/ARN.
resource "aws_s3_bucket_policy" "orthweb_logging_policy" {
  bucket = aws_s3_bucket.logging_bucket.id
  # https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html
  policy = jsonencode({
    Version = "2012-10-17"
    Id      = "${var.resource_prefix}-OrthwebLoggingBucketPolicy"
    Statement = [
      {
        # S3 server access logs from the data bucket, under the access-log prefix.
        Sid    = "S3ServerAccessLogsPolicy",
        Effect = "Allow",
        Principal = {
          Service = "logging.s3.amazonaws.com"
        },
        Action = [
          "s3:PutObject"
        ],
        Resource = "${aws_s3_bucket.logging_bucket.arn}/${local.access_log_prefix}*",
        Condition = {
          ArnLike = {
            "aws:SourceArn" = "${aws_s3_bucket.orthbucket.arn}"
          },
          StringEquals = {
            "aws:SourceAccount" = "${data.aws_caller_identity.current.account_id}"
          }
        }
      },
      {
        # VPC flow log (and other log-delivery) object writes under AWSLogs/.
        Sid    = "AWSLogDeliveryWrite",
        Effect = "Allow",
        Principal = {
          Service = "delivery.logs.amazonaws.com"
        },
        Action = [
          "s3:PutObject"
        ],
        Resource = "${aws_s3_bucket.logging_bucket.arn}/AWSLogs/${data.aws_caller_identity.current.account_id}/*",
        Condition = {
          StringEquals = {
            "s3:x-amz-acl"      = "bucket-owner-full-control"
            "aws:SourceAccount" = "${data.aws_caller_identity.current.account_id}"
          },
          ArnLike = {
            "aws:SourceArn" = "arn:aws:logs:${data.aws_region.this.name}:${data.aws_caller_identity.current.account_id}:*"
          }
        }
      },
      {
        # Log delivery verifies bucket ownership via GetBucketAcl before writing.
        Sid    = "AWSLogDeliveryAclCheck",
        Effect = "Allow",
        Principal = {
          Service = "delivery.logs.amazonaws.com"
        },
        Action = [
          "s3:GetBucketAcl"
        ],
        Resource = "${aws_s3_bucket.logging_bucket.arn}",
        Condition = {
          StringEquals = {
            "aws:SourceAccount" = "${data.aws_caller_identity.current.account_id}"
          },
          ArnLike = {
            "aws:SourceArn" = "arn:aws:logs:${data.aws_region.this.name}:${data.aws_caller_identity.current.account_id}:*"
          }
        }
      }
    ]
  })
  depends_on = [aws_s3_bucket_public_access_block.orthweb_loggingbucket_blockpublicaccess]
}
233 |
# Turn on server access logging for the data bucket, delivering to the
# logging bucket under the shared access-log prefix.
resource "aws_s3_bucket_logging" "bucket_logging_target_association" {
  bucket = aws_s3_bucket.orthbucket.id

  target_bucket = aws_s3_bucket.logging_bucket.id
  target_prefix = local.access_log_prefix
}
240 |
--------------------------------------------------------------------------------
/terraform/modules/storage/output.tf:
--------------------------------------------------------------------------------
# Storage identifiers consumed by the root module and sibling modules.
output "s3_info" {
  value = {
    bucket_domain_name = aws_s3_bucket.orthbucket.bucket_domain_name
    bucket_name        = aws_s3_bucket.orthbucket.bucket
    logging_bucket_arn = aws_s3_bucket.logging_bucket.arn
  }
}
8 |
--------------------------------------------------------------------------------
/terraform/modules/storage/variables.tf:
--------------------------------------------------------------------------------
variable "custom_key_arn" {
  type        = string
  description = "ARN of the customer-managed KMS key used for bucket default encryption"
}
variable "resource_prefix" {
  type        = string
  description = "Unique prefix of each resource" # fixed typo: "Uniq"
}
variable "is_prod" {
  type        = bool
  description = "whether the resource is in prod environment"
  default     = false
}
--------------------------------------------------------------------------------
/terraform/out/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/digihunch/orthweb/0700f5635def6fd85366e6f22e08ebcc8f126c2a/terraform/out/.gitkeep
--------------------------------------------------------------------------------
/terraform/output.tf:
--------------------------------------------------------------------------------
# Human-readable summary of the EC2 instances: "i-… (Public IP a.b.c.d), …".
output "host_info" {
  value       = join(", ", [for i in range(length(module.ec2.hosts_info.instance_ids)) : format("%s (Public IP %s)", module.ec2.hosts_info.instance_ids[i], module.ec2.hosts_info.public_ips[i])])
  description = "Instance IDs and Public IPs of EC2 instances"
}
output "server_dns" {
  # public_dns is already a list of strings; the former identity for-expression
  # was redundant.
  value       = format("%s %s", join(", ", module.ec2.hosts_info.public_dns), "(HTTPS and DICOM TLS)")
  description = "DNS names of EC2 instances"
}
output "db_endpoint" {
  value       = join(":", [module.database.db_info.db_address, module.database.db_info.db_port])
  description = "Database endpoint (port 5432 only accessible privately from EC2 Instance)" # fixed typo: "endpiont"
}
output "s3_bucket" {
  value       = module.storage.s3_info.bucket_domain_name
  description = "S3 bucket name for data storage"
}
--------------------------------------------------------------------------------
/terraform/provider.tf:
--------------------------------------------------------------------------------
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 5.84.0"
    }
    # Fix: the tls provider is configured below but was missing from
    # required_providers, leaving its source implicit.
    tls = {
      source = "hashicorp/tls"
    }
  }
  required_version = ">= 1.10"
}

provider "aws" {
  # Tags applied to every AWS resource by default; resource-level tags are
  # merged on top.
  default_tags {
    tags = {
      Environment = var.provider_tags.environment
      Owner       = var.provider_tags.owner
      Application = "Orthanc"
    }
  }
}

provider "tls" {}
--------------------------------------------------------------------------------
/terraform/terraform.tfvars:
--------------------------------------------------------------------------------
# Input values for this deployment. See variables.tf for the documentation
# and validation rules of each variable.
ec2_config = {
  InstanceType  = "t3.medium" # amd64, EBS-optimized instance type
  PublicKeyData = null        # unused when PublicKeyPath points at a key file
  PublicKeyPath = "~/.ssh/id_rsa.pub"
}
network_config = {
  vpc_cidr              = "172.17.0.0/16"
  dcm_cli_cidrs         = ["0.0.0.0/0"] # DICOM client source ranges (open)
  web_cli_cidrs         = ["0.0.0.0/0"] # web client source ranges (open)
  az_count              = 2
  public_subnet_pfxlen  = 24
  private_subnet_pfxlen = 22
  interface_endpoints   = []
  vpn_client_cidr       = "" # 192.168.0.0/22; empty string skips client VPN
  vpn_cert_cn_suffix    = "vpn.digihunch.com"
  vpn_cert_valid_days   = 3650
}
provider_tags = {
  environment = "dev"
  owner       = "admin@digihunch.com"
}
deployment_options = {
  ConfigRepo     = "https://github.com/digihunchinc/orthanc-config.git"
  CWLogRetention = 3     # days; see variables.tf for allowed values
  EnableCWLog    = false # do not ship Docker daemon logs to CloudWatch
  SiteName       = null
  InitCommand    = "pwd && echo Custom Command && make aws"
}
--------------------------------------------------------------------------------
/terraform/variables.tf:
--------------------------------------------------------------------------------
# EC2 configuration: exactly one of PublicKeyData / PublicKeyPath must supply
# an OpenSSH public key (validations below enforce presence and format).
variable "ec2_config" {
  description = "EC2 instance configuration.\n `InstanceType` must be amd64 Linux Instance; \n `PublicKeyData` is the Public Key (RSA or ED25519) of the administrator; used when deploying from Terraform Cloud; overriden by valid *PublicKeyPath* value; \n `PublicKeyPath` is the local file path to the public key. Used when deploying from an environment with access to the public key on the file system."
  type = object({
    InstanceType  = string
    PublicKeyData = string
    PublicKeyPath = string
  })
  default = {
    InstanceType  = "t3.medium" # must be an EBS-optimized instance type with amd64 CPU architecture.
    PublicKeyData = null
    PublicKeyPath = "~/.ssh/id_rsa.pub"
  }
  validation {
    # At least one of the two key inputs must be non-empty.
    condition     = (var.ec2_config.PublicKeyData != null && var.ec2_config.PublicKeyData != "") || (var.ec2_config.PublicKeyPath != null && var.ec2_config.PublicKeyPath != "")
    error_message = "Must specify one of ec2_config.PublicKeyData and ec2_config.PublicKeyPath."
  }
  validation {
    # can() also traps file() errors, so a missing file fails this check
    # rather than crashing the plan.
    condition = (
      var.ec2_config.PublicKeyPath == null || var.ec2_config.PublicKeyPath == "" ||
      can(regex("^(ssh-rsa|ssh-ed25519) [A-Za-z0-9+/=]+( [^ ]+)?$", file(var.ec2_config.PublicKeyPath)))
    )
    error_message = "If provided, the file must exist and contain a valid RSA (ssh-rsa) or ED25519 (ssh-ed25519) public key in OpenSSH format."
  }
  validation {
    condition = (
      var.ec2_config.PublicKeyData == null || var.ec2_config.PublicKeyData == "" || can(regex("^(ssh-rsa|ssh-ed25519) [A-Za-z0-9+/=]+( [^ ]+)?$", var.ec2_config.PublicKeyData))
    )
    error_message = "If provided, var.ec2_config.PublicKeyData must be in a valid OpenSSH format (starting with 'ssh-rsa' or 'ssh-ed25519')."
  }
}
31 |
variable "network_config" {
  description = "Networking Configuration\n`vpc_cidr` is the CIDR block for the main VPC.\n`dcm_cli_cidrs` represents DICOM client IP address space.\n`web_cli_cidrs` represents web client IP address space. \n `az_count` sets number of availability zones, to either 2 or 3.\n`public_subnet_pfxlen` sets the size of public subnets.\n`private_subnet_pfxlen`sets the size of private subnets.\n`interface_endpoints` specifies VPC interface endpoints to configure.\n `vpn_client_cidr` set to a non-conflicting CIDR of at least /22 to configure client VPN; otherwise leave as `null` or `\"\"` to skip client VPN configuration.\n`vpn_cert_cn_suffix` is the suffix of the Common Name of VPN certificates.\n`vpn_cert_valid_days` is validity of VPN certificate in days."
  type = object({
    vpc_cidr              = string
    dcm_cli_cidrs         = list(string)
    web_cli_cidrs         = list(string)
    az_count              = number
    public_subnet_pfxlen  = number
    private_subnet_pfxlen = number
    interface_endpoints   = list(string)
    vpn_client_cidr       = string
    vpn_cert_cn_suffix    = string
    vpn_cert_valid_days   = number
  })
  default = {
    vpc_cidr              = "172.17.0.0/16"
    dcm_cli_cidrs         = ["0.0.0.0/0"]
    web_cli_cidrs         = ["0.0.0.0/0"]
    az_count              = 2
    public_subnet_pfxlen  = 24
    private_subnet_pfxlen = 22
    interface_endpoints   = []
    vpn_client_cidr       = ""
    vpn_cert_cn_suffix    = "vpn.digihunch.com"
    vpn_cert_valid_days   = 3650
    # For all management traffic on private route: ["kms","secretsmanager","ec2","ssm","ec2messages","ssmmessages"]
    # For secrets and keys on private route: ["kms","secretsmanager"]
    # For all management traffic via Internet (lowest cost): []
    # View available options: https://docs.aws.amazon.com/vpc/latest/privatelink/aws-services-privatelink-support.html#vpce-view-available-services
  }
  validation {
    condition     = can(cidrhost(var.network_config.vpc_cidr, 32))
    error_message = "Input variable network_config.vpc_cidr must be a valid IPv4 CIDR."
  }
  validation {
    condition = alltrue([
      for cidr in var.network_config.web_cli_cidrs : can(cidrhost(cidr, 0))
    ])
    error_message = "Input variable network_config.web_cli_cidrs must be a list of valid IPv4 CIDRs."
  }
  validation {
    condition = alltrue([
      for cidr in var.network_config.dcm_cli_cidrs : can(cidrhost(cidr, 0))
    ])
    error_message = "Input variable network_config.dcm_cli_cidrs must be a list of valid IPv4 CIDRs."
  }
  validation {
    condition     = var.network_config.az_count >= 1 && var.network_config.az_count <= 3
    error_message = "Input variable network_config.az_count must be 1, 2 or 3."
  }
  validation {
    # Bug fix: cidrhost(cidr, 1023) succeeds only when the block contains at
    # least 1024 addresses, i.e. the prefix is /22 or shorter — matching the
    # documented minimum. The previous host index (32) only enforced ~/26.
    condition     = var.network_config.vpn_client_cidr == null || var.network_config.vpn_client_cidr == "" || can(cidrhost(var.network_config.vpn_client_cidr, 1023))
    error_message = "Input variable network_config.vpn_client_cidr must be either empty or a valid IPv4 CIDR with at least /22 range."
  }
}
87 |
# Tags injected via the AWS provider's default_tags block (see provider.tf).
variable "provider_tags" {
  description = "Tags to apply for every resource by default at provider level."
  type        = map(string)
  default = {
    environment = "dev"
    owner       = "info@digihunch.com"
  }
  validation {
    condition     = contains(["prd", "dev", "tst", "stg"], var.provider_tags.environment)
    error_message = "The environment code must be one of: prd, dev, tst, or stg"
  }
}
100 |
variable "deployment_options" {
  description = "Deployment Options for app configuration:\n `ConfigRepo` Git Repository for app configuration.\n `SiteName` The Site URL\n `InitCommand` The command to execute from the config directory\n `EnableCWLog` Enable sending Docker daemon log to Cloud Watch.\n `CWLogRetention` Retention for Log Group"
  type = object({
    ConfigRepo     = string
    SiteName       = string
    InitCommand    = string
    EnableCWLog    = bool
    CWLogRetention = number
  })
  default = {
    ConfigRepo     = "https://github.com/digihunchinc/orthanc-config.git" # configuration repo to clone.
    SiteName       = null
    InitCommand    = "pwd && echo Custom Init Command (e.g. make aws)" # Command to run from config directory.
    EnableCWLog    = true
    CWLogRetention = 3 # CloudWatch Log group Retention days -1 to disable
  }
  validation {
    # Bug fix: the condition previously rejected -1 although the inline
    # comment documents "-1 to disable", and the error message both listed -1
    # and truncated the value list. Condition and message now agree.
    condition     = contains([-1, 0, 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1096, 1827, 2192, 2557, 2922, 3288, 3653], var.deployment_options.CWLogRetention)
    error_message = "The value of deployment_options.CWLogRetention must be one of the following integers: -1,0,1,3,5,7,14,30,60,90,120,150,180,365,400,545,731,1096,1827,2192,2557,2922,3288,3653."
  }
}
122 |
--------------------------------------------------------------------------------