├── CHANGELOG.md
├── INSTALLING.md
├── README.md
├── aws-extra
│   ├── README.md
│   ├── base-vpc
│   │   ├── .gitignore
│   │   ├── GNUmakefile
│   │   ├── README.md
│   │   └── terraform
│   │       ├── main.tf
│   │       └── modules
│   │           └── vpc
│   │               ├── endpoints.tf
│   │               ├── flowlogs.tf
│   │               ├── interface.tf
│   │               ├── nat.tf
│   │               ├── routing.tf
│   │               └── subnets.tf
│   └── minimum-viable-iam
│       ├── README.md
│       ├── ct.rb
│       ├── mvi.tf
│       └── user.tf
├── aws-standard
│   ├── README.md
│   ├── main.tf
│   └── terraform.tfvars.example
├── docs
│   ├── README.md
│   ├── about-the-ami.md
│   ├── advanced-terraform.md
│   ├── ami-ids.md
│   ├── assets
│   │   ├── admin-tools-edit.png
│   │   ├── admin-tools-index.png
│   │   ├── aws-infra-architecture.png
│   │   ├── billing-checkboxes.png
│   │   ├── bootstrap-status.png
│   │   ├── create-organization.png
│   │   ├── encrypt-ebs.png
│   │   ├── enterprise-disabled.png
│   │   ├── enterprise-enabled.png
│   │   ├── select-ami.png
│   │   ├── terraform-enterprise-banner.png
│   │   └── tfe-data-flow-arch.png
│   ├── configuring.md
│   ├── debug.md
│   ├── disaster-recovery.md
│   ├── encrypt-ami.md
│   ├── legacy.md
│   ├── logs.md
│   ├── managing-tool-versions.md
│   ├── migrating-from-tfe-saas.md
│   ├── network-access.md
│   ├── services
│   │   ├── archivist.md
│   │   ├── atlas.md
│   │   ├── build-pipeline.md
│   │   └── slugs.md
│   ├── settings.md
│   ├── storing-tfe-state.md
│   ├── support.md
│   ├── test-tf-configuration.md
│   ├── tfe-architecture.md
│   ├── vault-rekey.md
│   └── vpc.md
└── modules
    ├── rds
    │   └── rds.tf
    ├── redis
    │   └── redis.tf
    ├── tfe-instance
    │   ├── aws.tf
    │   ├── iam.tf
    │   └── s3.tf
    └── tfe-route53
        └── route53.tf
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | Terraform Enterprise release versions have the format:
4 |
5 | ```
6 | vYYYYMM-N
7 | ```
8 |
9 | Where:
10 |
11 | * `YYYY` and `MM` are the year and month of the release.
12 | * `N` increments with each release in a given month, starting with `1` (for example, `v201808-1` is the first release of August 2018).
13 |
14 | ## v201808-1 (Aug 6, 2018)
15 |
16 | APPLICATION LEVEL FEATURES:
17 |
18 | 1. Added a system email when a site admin is removed. This email is sent to all remaining site admins.
19 | 1. Added settings that allow site admins to configure API rate limiting. By default, API rate limiting is enabled and limits clients to 30 requests per second. However, by visiting the site admin area and navigating to general settings, the global rate limit can be raised or disabled entirely. Due to the number of API requests Terraform Enterprise makes with regular use, the rate limit cannot be set lower than the default value of 30 requests per second.
20 | 1. Added a download button for SAML metadata to the admin settings. An XML file is downloaded that can be imported into an identity provider.
21 | 1. Changed link to module status page to always display if any module versions had ingress errors, instead of only when the latest version had ingress errors.
22 | 1. Changed organization dropdown to simplify it. Organization settings are now linked at the top level.
23 | 1. Changed webpage page titles for all pages to be unique.
24 | 1. Added URL validation, to help prevent typos when adding VCS providers or setting up SAML authentication.
25 | 1. Added ability to create state versions for a workspace using the v2 API. See API documentation for details.
26 | 1. Added current state version as a relationship to workspaces in the API. See API documentation for details.
27 | 1. Changed run comments design to make them more prominent.
28 | 1. Changed the workspace "working directory" setting to be available even if no VCS integration is being used.
29 | 1. Added a new site admin setting that allows customization of the support@hashicorp.com email address.
30 | 1. Added confirmation step before regenerating or deleting API tokens, and before deleting SSH keys.
31 | 1. Added "speculative" as a boolean attribute when creating configuration-versions via the API in order to trigger plan only runs. See the API documentation for details.
32 | 1. Removed plan state versions output in the UI when there are no state versions for a run.
33 | 1. Added enhanced error reporting for Sentinel policy checks. A policy check that encountered multiple evaluation errors will now have all errors displayed in the policy check error output.
34 | 1. Added the ability to use the types standard library import with Sentinel policy checks.
35 | 1. Parsed summary metadata from `terraform plan` output.
36 |
37 | APPLICATION LEVEL BUG FIXES:
38 |
39 | 1. Fixed a sporadically displaying error message when successfully queueing a plan.
40 | 1. Fixed an issue where applies that take longer than 3 hours would fail to upload their state when finished. Applies will still time out after 24 hours.
41 | 1. Fixed issue where the apply or plan logs were blank for the current run.
42 | 1. Fixed an issue that prevented Bitbucket Cloud commits from getting Terraform plan status updates.
43 | 1. Fixed an issue with pull request runs being initiated regardless of the branch the workspace is tracking.
44 | 1. Fixed spacing on "clear filters" icon in Safari.
45 | 1. Fixed an issue that caused malformed GitLab commit URLs.
46 | 1. Fixed message when password update fails.
47 | 1. Fixed an issue with missing SAML settings during migration.
48 | 1. Improved PostgreSQL container startup orchestration.
49 | 1. Fixed a bug in the Run state machine when certain transitions fail.
50 | 1. Fixed an issue where policy checked runs would transition before confirmation.
51 | 1. Fixed an issue that could cause Sentinel policy checks to hang when a configuration uses a module where all provider blocks in the module have aliases assigned to them.
52 | 1. Fixed state truncation issue when Terraform dumps an "errored.tfstate" file.
53 |
54 | APPLICATION API BREAKING CHANGES:
55 |
56 | 1. Removed two-factor settings and recovery codes from API calls with an API token. They can now only be accessed with an active session.
57 |
58 | APPLICATION LEVEL SECURITY FIXES:
59 |
60 | 1. Purify module README Markdown before rendering.
61 |
62 | ## v201807-2 (July 20, 2018)
63 |
64 | APPLICATION LEVEL BUG FIXES:
65 |
66 | 1. Fixed an issue where partial SAML settings prevented the application from booting.
67 |
68 | ## v201807-1 (July 16, 2018)
69 |
70 | APPLICATION LEVEL FEATURES:
71 |
72 | 1. Added ability to cancel a run during a Sentinel policy check.
73 | 1. Added ability for user to update their username and email in the UI.
74 | 1. Added ability to rename a workspace.
75 | 1. Added user's email address to account details API.
76 | 1. Added a new API for configuring an installation's Twilio settings, and a migration from previous settings.
77 | 1. Added a link to runs created from pull requests to the pull request.
78 | 1. Added a new first user creation page for new PTFE installations.
79 | 1. Added a new API for configuring an installation's SMTP settings, and a migration from previous settings.
80 | 1. Changed runs page to display Terraform logs by default when they are the last event.
81 | 1. Added a new API for configuring an installation's SAML settings, and a migration from previous settings.
82 | 1. Added a new site admin user interface, and a link to it in the account drop down.
83 | 1. Added a periodic worker that kills Sentinel jobs that hang or otherwise fail to send a correct response back to TFE, and marks the affected runs as errored.
84 | 1. Added validation of OAuth client API URL at creation time.
85 | 1. Changed the GitLab.com base API URL to the v4 API for new OAuth clients.
86 | 1. Added current user avatar in site header when it is available.
87 | 1. Removed "Plan successfully queued" notification.
88 | 1. Added confirmation dialog when deleting a Sentinel policy.
89 | 1. Added a debug setting for SAML SSO that allows an admin to see the SAMLResponse XML and processed values when login fails.
90 | 1. Added a copyable signup link when adding new team members.
91 | 1. Added ability to migrate legacy environments to workspaces. When creating a workspace, select the "Import from legacy (Atlas) environment" tab. For more information, see the Upgrade guide.
92 | 1. Added an error when unpacking an unknown file type from a slug.
93 | 1. Added warning log and halt execution of runs when any Terraform state file is detected in the configuration version.
94 | 1. Changed registry module repo validation to disallow uppercase characters.
95 | 1. Enabled the bootstrap-less workflow in PTFE.
96 | 1. Split the PostgreSQL URI into component parts.
97 |
98 | APPLICATION LEVEL BUG FIXES:
99 |
100 | 1. Fixed an issue where duplicate headers were being sent to Bitbucket Server, sometimes causing a 400 response.
101 | 1. Fixed an issue where Bitbucket Server would send malformed webhook settings.
102 | 1. Fixed an issue that broke browser history when API or network errors were encountered.
103 | 1. Fixed an issue when Bitbucket Cloud sends a webhook payload with a changeset with no commits.
104 | 1. Fixed an issue where Bitbucket Server returns an empty body during an unauthorized request.
105 | 1. Fixed an issue where an organization could not be deleted due to dependencies being referenced.
106 | 1. Fixed an issue where a run would attempt to queue up the wrong operation.
107 | 1. Fixed an issue where changing workspace VCS settings then navigating away and back resulted in a broken VCS settings page.
108 | 1. Fixed an issue with publishing registry modules inside of organizations with long names.
109 | 1. Fixed an issue that prevented `X-Forwarded-For` headers from being respected and used for audit log entries.
110 | 1. Fixed an issue where the `no_proxy` environment variable was being ignored.
111 | 1. Fixed an issue that prevented editing a variable name immediately after creating it.
112 | 1. Fixed an issue where auto-apply combined with a Sentinel policy check would prevent some runs from applying after confirmation.
113 | 1. Fixed an issue that caused git branches ending in `master` to be treated as the default branch.
114 | 1. Fixed an issue that prevented SAML SSO for admins logging in when SMTP was not properly configured.
115 | 1. Fixed an issue that prevented GitLab merge request webhooks from creating plans and reporting back the plan's status.
116 | 1. Fixed an issue that caused tag creation events to sometimes incorrectly cause a run to be created.
117 | 1. Fixed an issue that prevented providers from being filtered properly during module registry downloads.
118 | 1. Fixed an issue where renaming organizations with published registry modules did not correctly re-namespace the modules and remain published in the same organization.
119 | 1. Fixed an issue that prevented password managers from working well when updating a user's password.
120 | 1. Fixed an issue that prevented sensitive variables from being edited in the UI.
121 | 1. Fixed an issue where the first run on a workspace when using GitLab as the VCS provider displayed a malformed commit URL.
122 | 1. Fixed an issue where applies that take longer than 3 hours would fail to upload their state when finished. Applies will still time out after 24 hours.
123 | 1. Fixed an issue that prevented registry modules with plus signs in their version number from being accessed.
124 |
125 | ## v201806-2 (June 22, 2018)
126 |
127 | APPLICATION LEVEL FEATURES:
128 |
129 | - Add GCS support for artifact storage. (Replicated only)
130 | - Allow for tuning of max usable memory per run. (Replicated only)
131 |
132 | APPLICATION LEVEL BUG FIXES:
133 |
134 | - Error when a state file is found in a VCS repository.
135 | - Fix occasional proxy config failure. (Replicated only)
136 | - Use `X-Forwarded-For` in audit log entries, if provided. (Replicated only)
137 |
138 | ## v201806-1 (June 7th, 2018)
139 |
140 | APPLICATION LEVEL FEATURES:
141 |
142 | - Added links on state version page to run page and VCS commit.
143 | - Added the workspace repo’s webhook URL to the VCS settings page.
144 | - Added ability to submit run comments using the keyboard: just hit Command-Enter or Ctrl-Enter.
145 | - Added appropriate error message when an invalid JSON request body is passed.
146 | - Added ability to disable auto-queueing of runs when creating configuration versions via the API.
147 | - Added pagination support to configuration versions API endpoint.
148 | - Added download button to the modules configuration designer.
149 | - Updated privacy policy.
150 | - Removed SAML "Enable Team Membership Management" setting. Instead of enabling or disabling team membership management within TFE, team membership management is enabled or disabled by adding or removing the "Team Attribute Name" attribute in the identity provider.
151 |
152 | APPLICATION LEVEL BUG FIXES:
153 |
154 | - Fixed bug caused by starting the application while database migrations are running, resulting in inconsistent behavior.
155 | - Fixed Bitbucket Cloud build status update failure for workspaces with long names.
156 | - Fixed bug where spaces were added when copying VCS client URLs and tokens.
157 | - Fixed display of newlines in run comments.
158 | - Fixed display issues when VCS branch information is missing.
159 | - Fixed an issue where runs would not show up for Bitbucket Server if the default branch was not "master".
160 | - Fixed an issue that allowed a run to be applied before Sentinel policy checks have completed.
161 | - Fixed an error when invalid Terraform versions are specified during workspace creation.
162 | - Fixed errors generated when passing invalid filter parameters to the runs API.
163 | - Fixed an issue with Bitbucket Server where non-default branch Runs did not appear.
164 | - Fixed bug preventing users from generating personal authentication tokens via the UI.
165 | - Fixed issue that caused some newly created workspaces to select a disabled Terraform version as the latest version.
166 | - Fixed issues with Terraform modules using semver metadata in their version number.
167 | - Fixed an issue where duplicate headers were being sent to Bitbucket Server, sometimes causing a 400 response.
168 | - Fixed an issue that caused Bitbucket Server to send malformed webhook settings.
169 |
170 | APPLICATION API BREAKING CHANGES:
171 |
172 | - Updated API endpoints to return the external ID as the user's ID, instead of the username.
173 |
174 | ## v201805-1 (May 2nd, 2018)
175 |
176 | APPLICATION LEVEL FEATURES:
177 |
178 | 1. Added API rate request limiting. Attempting to make more than 30 requests/second will result in throttling.
179 | 1. Added Terraform 0.11.2, 0.11.3, 0.11.4, 0.11.5, 0.11.6, and 0.11.7 to available version list.
180 | 1. Added VCS repo autocompletion when adding modules to the registry.
181 | 1. Added `message` as an attribute when creating a run through the API.
182 | 1. Added ability for modules in the registry to include git submodules.
183 | 1. Added compression and encryption for Terraform plan and apply logs at rest.
184 | 1. Added resiliency for temporary network failures when accessing configuration, state, and log files.
185 | 1. Added the ability to enable and disable SMTP integration.
186 | 1. Added a link to Support docs in the footer.
187 | 1. Added automatic unlinking of workspaces connected to VCS repositories when authorization repeatedly fails for the VCS repo.
188 | 1. Added copy to organization API token page to explain permissions for different token types.
189 | 1. Added one-click copy to clipboard for VCS provider callback URLs.
190 | 1. Added pagination controls to module registry list pages.
191 | 1. Added the repo identifier to the run page.
192 | 1. Changed module registry search to only return results from the current organization.
193 | 1. Changed 2FA SMS issue name from `Atlas by HashiCorp` to `Terraform Enterprise`.
194 | 1. Changed 2FA application issuer name from `Atlas by HashiCorp` to `Terraform Enterprise`.
195 | 1. Changed redirect after deleting a module version to go to the module page instead of the index.
196 |
197 | APPLICATION LEVEL BUG FIXES:
198 |
199 | 1. Fixed issue that allowed plan-only destroy runs to be queued when the `CONFIRM_DESTROY` variable was not set.
200 | 1. Fixed issue copying two-factor auth recovery codes.
201 | 1. Fixed issue where returning to edit variables page after update displayed the old values.
202 | 1. Fixed issue creating a workspace with a Bitbucket Server repo that has no preexisting webhooks.
203 | 1. Fixed issue creating a workspace with no VCS repo in UI following validation error.
204 | 1. Fixed issue that allowed runs to be queued from the UI before the configuration version had finished uploading.
205 | 1. Fixed issue that caused unneeded polling for plan logs by stopping polling once the logs are complete.
206 | 1. Fixed issue that caused unwanted scrollbars to show up in run and state version lists.
207 | 1. Fixed issue that prevented auto-refresh on the workspace page while waiting for first run.
208 | 1. Fixed issue that prevented enabling the requirement of 2FA for an organization.
209 | 1. Fixed issue that sometimes caused the list of organizations to be out of date in the UI.
210 | 1. Fixed issue when editing some variables in the UI.
211 | 1. Fixed issue with module designer code generation when using modules with no required variables.
212 | 1. Fixed issue that caused some build status updates to Bitbucket Cloud to fail.
213 | 1. Fixed some cases where editing a workspace would error.
214 | 1. Fixed some outdated documentation links.
215 | 1. Removed error message that displayed when a user visited the login page when they're already signed in.
216 |
217 | APPLICATION API BREAKING CHANGES:
218 |
219 | APPLICATION LEVEL SECURITY FIXES:
220 |
221 | ## v201804-3 (April 17, 2018)
222 |
223 | APPLICATION LEVEL BUG FIXES:
224 |
225 | - Do not configure statsd; works around a possible Rails bug when starting Atlas.
226 | - Remove race condition when starting Vault. (installer only)
227 | - More sane timeouts when unable to download slugs.
228 |
229 | ## v201804-2 (April 10, 2018)
230 |
231 | APPLICATION LEVEL BUG FIXES:
232 |
233 | * Fix Terraform being unable to properly authenticate to the Module Registry running in the cluster (installer only)
234 |
235 | ## v201804-1 (April 5, 2018)
236 |
237 | APPLICATION LEVEL FEATURES:
238 |
239 | * GitLab is now supported as a source for Terraform modules.
240 | * When SAML SSO is enabled, users must have an active session to make API requests. The API token timeout may be set site-wide when SSO is enabled.
241 | * Remove the AuthnContextClassRef element from the SAML SSO request so the identity provider's default AuthenticationMethod setting is used.
242 | * The username of a user can be specified for SAML SSO.
243 | * Add the ability to connect to SMTP without authentication.
244 | * We now enforce that ENV variable names conform to typical `bash` rules (beginning with a letter or underscore, and containing only alphanumerics and underscores) and that Terraform variable names only contain alphanumerics, underscores, and hyphens.
245 | * Allow ecdsa (`EC`) and ed25519 (`OPENSSH`) private keys.
246 | * Add an API and UI to allow organizations and users to manage their 2FA settings.
247 | * Revoking a VCS connection or deleting a VCS client now requires confirmation.
248 | * When a workspace becomes unlinked from its VCS repository, an email is sent to the organization owners.
249 | * Add ability to cancel in-progress plans and applies.
250 | * Workspaces can be created without a VCS connection in the UI.
251 | * A workspace's VCS connection can be edited through the UI and API.
252 | * Configuration error messages are exposed in the UI and API.
253 | * Rename workspace "Integrations" page to "Version Control" in UI.
254 | * Rename organization "OAuth Configuration" page to "Version Control" in UI.
255 | * Add Azure blob storage support.
256 |
257 | APPLICATION LEVEL BUG FIXES:
258 |
259 | * Data cleanup is more reliable via HTTP retries and better exception handling.
260 | * Fixes issue with registry modules that require deep cloning of git submodules.
261 | * Fixed an internal server error that prevented some workspaces from being created with specific repositories.
262 | * Fixed inline error messages for form fields with multi word labels.
263 | * Improved error messages in the API and UI when adding an OAuth client.
264 | * Fixed UI layout widths for widescreen.
265 | * The email address entered when a user forgot their password is no longer case sensitive.
266 | * We now redirect back to the OAuth page with error message when there is a failure to connect the client.
267 | * The UI now clarifies that Bitbucket Server VCS connections require a private SSH key.
268 | * Fixed missing "Override & Continue" button for Sentinel soft-mandatory policy check failures.
269 | * Very long lines in statefile diffs are now wrapped in the UI.
270 | * Flash notices in the UI now auto hide after 3 seconds, instead of 10.
271 | * Users are redirected to the v2 page they were trying to access after login.
272 | * Show an error message in the UI instead of a 500 error when an exception occurs while connecting an org oauth client to a VCS provider.
273 | * Fix a data race that caused some runs to get stuck when using the "Run this plan now" feature.
274 |
275 | APPLICATION API BREAKING CHANGES:
276 |
277 | * The Registry Modules creation API has changed. Instead of supplying `ingress-trigger-attributes`, supply a `vcs-repo` object. Additionally, instead of supplying a `linkable-repo-id`, supply a `vcs-repo.identifier`.
278 | * The `linkable-repos` resource has been renamed to `authorized-repos`, so please use that name in API requests.
279 | * Requests which contain invalid `include` parameters will now return 400 as required by the JSON API spec.
280 |
281 | APPLICATION LEVEL SECURITY FIXES:
282 |
283 | * Upgrade loofah gem to 2.2.1 to address CVE-2018-8048.
284 | * Upgrade rails-html-sanitizer to 1.0.3 to address CVE-2018-3741.
285 |
286 | MACHINE IMAGE FIXES:
287 |
288 | * Adjust for EBS volumes having unexpected tags
289 |
290 | ## v201802-3 (Feb 28, 2018)
291 |
292 | APPLICATION LEVEL FEATURES:
293 |
294 | * Enable the Module Registry for everyone, but disable unsupported VCS providers (GitLab, Bitbucket Cloud).
295 | * Allow site admin membership to be managed through SAML and make the team name configurable.
296 | * The SAML email address setting was removed in favor of always using `urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress`.
297 | * The SAML `AuthnContextClassRef` is hardcoded to `urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport` instead of being sent as a blank value.
298 | * Improved SAML error handling and reporting.
299 | * Adds support for configuring the "including submodules on clone" setting when creating a workspace. Previously this had to be configured after creating a workspace, on the settings page.
300 | * Move workspace SSH key setting from integrations page to settings page in UI.
301 | * Add top bar with controls to plan and apply log viewer when in full screen mode.
302 | * Improvements to VCS repository selection UI when creating a workspace.
303 | * Improvements to error handling in the UI when creating a workspace.
304 | * Remove VCS root path setting from workspaces in UI and API.
305 | * API responses with a 422 status have a new format with better error reporting.
306 | * Remove unused attributes from run API responses: `auto-apply`, `error-text`, `metadata`, `terraform-version`, `input-state-version`, `workspace-run-alerts`.
307 | * Remove the `compound-workspace` API endpoints in favor of the `workspace` API endpoints.
308 | * API responses that contain `may-` keys will have those keys moved into a new part of the response: `actions`.
309 | * Organizations now have names in place of usernames. API responses will no longer serialize a `username` key but will instead serialize a `name`.
310 |
311 | APPLICATION LEVEL BUG FIXES:
312 |
313 | * Allow workspace admins to read and update which SSH key is associated with a workspace. Previously only workspace owners could do this.
314 | * Reject environment variables that contain newlines.
315 | * Fix a bug that caused the VCS repo associated with a workspace to be displayed incorrectly for new workspaces without runs and for workspaces whose current run used a promoted configuration version.
316 | * Fix a bug that resulted in plan and apply log output to sometimes be truncated in the UI.
317 | * Fix a bug that prevented some environments and workspaces from being deleted.
318 | * A few small fixes to Sentinel error reporting during creation and policy check in the UI.
319 | * A few small fixes to UI when managing SSH keys.
320 | * Missing translation added for configuration versions uploaded via the API.
321 |
322 | APPLICATION LEVEL SECURITY FIXES:
323 |
324 | * Upgrade ruby-saml gem to 1.7.0 to address CVE-2017-11428.
325 | * Upgrade sinatra gem to 1.4.8 to address CVE-2018-7212.
326 |
327 | ## v201802-2 (Feb 15, 2018)
328 |
329 | APPLICATION LEVEL FEATURES:
330 | * Ensure the Archivist storage service sets the `x-amz-server-side-encryption` and `x-amz-server-side-encryption-aws-kms-key-id` HTTP headers on all `PutObject` calls to S3, when a KMS key is configured.
331 | * Add new `no_proxy` variable to support hosts to exclude from being proxied via `proxy_url`, if configured.
332 |
333 | APPLICATION LEVEL BUG FIXES:
334 | * Fix a bug related to audit logging that prevented webhooks from being handled properly.
335 |
336 | CUSTOMER NOTES:
337 | * Added documentation to `aws-standard/README.md` describing character constraints on user-provided values, such as `db_password`.
338 |
339 | ## v201802-1 (Feb 6, 2018)
340 |
341 | APPLICATION LEVEL FEATURES:
342 | * Adds support to publish modules from GitHub and GitHub Enterprise to the Module Registry.
343 | * Adds the ability to search by keyword and filter the workspace list.
344 | * Improves UI response to permissions by hiding/disabling forms/pages based on user’s access level.
345 | * Adds audit events to the logs to capture the user identity, operation performed, and target resource.
346 | * Configurations can now be promoted across workspaces via the API.
347 | * Improves SAML team mapping to use custom names to perform matching.
348 |
349 | APPLICATION LEVEL BUG FIXES:
350 | * Fixes the provider and module name parsing when `-` is used in names of modules published to the module registry.
351 | * Fixes broken redirects to external site on bad requests.
352 | * Fixes bug with triggering runs from Bitbucket Server when a module and workspace are both linked to the same source.
353 | * Fixes rendering issues in IE11.
354 | * Fixes rendering issue with scroll bars on some input fields in Chrome 40.0.2214.115+ on Windows.
355 |
356 | ## v201801-2 (Jan 18, 2018)
357 |
358 | APPLICATION LEVEL BUG FIXES:
359 | * Increase memory allocation for Terraform Module Registry to prevent forced termination when processing large modules.
360 |
361 | ## v201801-1 (Jan 12, 2018)
362 |
363 | APPLICATION LEVEL BUG FIXES:
364 | * Fix a bug in the Terraform Module Registry where multiple jobs trying to ingress the same version of the same module concurrently errored and would not be retried
365 |
366 | MACHINE IMAGE BUG FIXES:
367 | * Includes OS-level security updates to address Meltdown (see https://usn.ubuntu.com/usn/usn-3522-1/)
368 |
369 | ## v201712-2 (Dec 18, 2017)
370 |
371 | APPLICATION LEVEL BUG FIXES:
372 | * Clarify the purpose of organization API tokens
373 |
374 | MACHINE IMAGE BUG FIXES:
375 | * Fix postgres compatibility with the private module registry
376 |
377 |
378 | ## v201712-1 (Dec 7, 2017)
379 |
380 | APPLICATION LEVEL FEATURES:
381 | * Includes new Terraform Enterprise interface featuring Workspaces (see https://www.hashicorp.com/blog/hashicorp-terraform-enterprise-beta for details)
382 |
383 | APPLICATION LEVEL BUG FIXES:
384 | * Properly handle repositories with many webhooks
385 | * Screens with many elements now use pages to display all data
386 |
387 | ## v201711-1 (Nov 1, 2017)
388 |
389 | APPLICATION LEVEL BUG FIXES:
390 | * The Bitbucket Server integration no longer sends empty JSON payloads with get requests
391 | * Force Canceled runs will create a run event so that they no longer appear to be planning in the UI
392 |
393 | MACHINE IMAGE BUG FIXES:
394 | * Increase the capacity of the UI to prevent it being unavailable due
395 | to starvation.
396 |
397 | ## v201709-3 (Sep 29, 2017)
398 |
399 | MACHINE IMAGE BUG FIXES:
400 | * Properly write the vault root key envvar file.
401 |
402 | ## v201709-2 (Sep 28, 2017)
403 |
404 | MACHINE IMAGE BUG FIXES:
405 | * cloud.cfg no longer conflicts with the cloud-init package.
406 | * Restoring from an older, timestamp based backup no longer hangs when
407 | there are a large number of backups.
408 |
409 | ## v201709-1 (Sep 13, 2017)
410 |
411 | APPLICATION LEVEL FEATURES:
412 | * Sends a flag to terraform workers on all new runs to enable filesystem
413 | preservation between plan/apply.
414 | * Add support for Terraform 0.10.
415 |
416 | APPLICATION LEVEL BUG FIXES:
417 | * State uploads now validate lineage values.
418 | * Fixes a potential race during Terraform state creation.
419 | * Fixes a subtle bug when loading Terraform states which were created prior
420 | to having the MD5 checksum in a database column.
421 | * Gradually migrate all Terraform states out of the Postgres DB and
422 | into our storage service, Archivist.
423 |
424 | MACHINE IMAGE FEATURES:
425 |
426 | * Add ability to prompt for setup data and store it inside Vault rather than
427 | store it in S3+KMS (activated via new `local_setup` Terraform option).
428 |
429 | TERRAFORM CONFIG FEATURES:
430 |
431 | * Add `local_setup` variable to tell TFE to prompt for setup data on first
432 | boot and store it within Vault rather than rely on S3+KMS for setup data.
433 | * Make `key_name` variable optional, allowing for deployments without SSH
434 | access.
435 |
436 | ## v201708-2 (Aug 16, 2017)
437 |
438 | MACHINE IMAGE BUG FIXES:
439 |
440 | * Correct an out-of-memory condition with various internal services that
441 | prevented proper operation.
442 |
443 | ## v201708-1 (Aug 8, 2017)
444 |
445 | APPLICATION LEVEL BUG FIXES:
446 |
447 | * Fixes a bug where TF slugs would only get encrypted during terraform push.
448 | * Fixes state parser triggering for states stored in external storage (Archivist).
449 | * Fixes a bug where encryption contexts could be overwritten.
450 | * Send commit status updates to the GitHub VCS provider when plan is "running" (cosmetic)
451 |
452 | MACHINE IMAGE BUG FIXES:
453 |
454 | * Manage upgrading from v201706-4 and earlier properly.
455 |
456 | ## v201707-2 (July 26, 2017)
457 |
458 | APPLICATION LEVEL BUG FIXES:
459 |
460 | * Send commit status updates to VCS providers while waiting for MFA input
461 |
462 | ## v201707-1 (July 18, 2017)
463 |
464 | APPLICATION LEVEL FEATURES:
465 |
466 | * Add support for releases up to Terraform 0.9.9.
467 |
468 | APPLICATION LEVEL BUG FIXES:
469 |
470 | * Displays an error message if the incorrect MFA code is entered to confirm a Run.
471 | * Address issue with large recipient groups in new admin notification emails.
472 | * Fixes a 500 error on the Events page for some older accounts.
473 | * Fix provider names in new environment form.
474 | * Update wording in the Event Log for version control linking and unlinking.
475 | * Fetch MFA credential from private registries when enabled.
476 | * Fix ability to cancel Plans, Applies, and Runs
477 |
478 | MACHINE IMAGE FEATURES:
479 |
480 | * Add ability to use local redis.
481 | * This adds a new dependency on EBS to store the redis data.
482 |
483 | TERRAFORM CONFIG FEATURES:
484 |
485 | * Add `local_redis` variable to configure the cluster to use redis locally, eliminating
486 | the dependency on ElastiCache.
487 | * Add `ebs_size` variable to configure size of EBS volumes to create to store local
488 | redis data.
489 | * Add `ebs_redundancy` variable to configure the number of EBS volumes to mirror
490 | together for redundancy in storing redis data.
491 | * Add `iam_role` as an output to allow for additional changes to be applied to
492 | the IAM role used by the cluster instance.
493 |
494 |
495 | ## v201706-4 (June 26, 2017)
496 |
497 | APPLICATION LEVEL FEATURES:
498 |
499 | * Add support for releases up to Terraform 0.9.8.
500 |
501 | APPLICATION LEVEL BUG FIXES:
502 |
503 | * VCS: Send commit status updates after every `terraform plan` that has a
504 | commit.
505 | * Fix admin page that displays Terraform Runs.
506 | * Remove application identifying HTTP headers.
507 |
508 | MACHINE IMAGE BUG FIXES:
509 |
510 | * Fix `rails-console` to be more usable and provide a command prompt.
511 | * Fix DNS servers exposed to builds to use DNS servers that are configured
512 | for the instance.
513 | * Redact sensitive information from error output generated while talking to
514 | VCS providers.
515 | * Refresh tokens for Bitbucket and GitLab properly.
516 | * Update build status on Bitbucket Cloud PRs.
517 |
518 | EXTRAS CHANGES:
519 |
520 | * Parameterize the S3 endpoint region used for setup of S3 <=> VPC peering.
521 |
522 |
523 | ## v201706-3 (June 7, 2017)
524 |
525 | MACHINE IMAGE BUG FIXES:
526 |
527 | * Exclude all cluster local traffic from using the outbound proxy.
528 |
529 | ## v201706-2 (June 5, 2017)
530 |
531 | APPLICATION LEVEL BUG FIXES:
532 |
533 | * Clear all caches on boot to prevent old records from being used.
534 |
535 | MACHINE IMAGE FEATURES:
536 |
537 | * Added `clear-cache` to clear all caches used by the cluster.
538 | * Added `rails-console` to provide swift access to the Ruby on Rails
539 | console, used for low-level application debugging and inspection.
540 |
541 | ## v201706-1 (June 1, 2017)
542 |
543 | APPLICATION LEVEL FEATURES:
544 |
545 | * Improve page load times.
546 | * Add support for releases up to Terraform 0.9.6.
547 | * Make Terraform the default page after logging in.
548 |
549 | APPLICATION LEVEL BUG FIXES:
550 |
551 | * Bitbucket Cloud stability improvements.
552 | * GitLab stability improvements.
553 | * Address a regression for Terraform Runs using Terraform 0.9.x that
554 | caused Plans run on non-default branches (e.g. from Pull Requests)
555 | to push state and possibly conflict with default branch Terraform Runs.
556 | * Ignore any state included by a `terraform push` and always use state
557 | within Terraform Enterprise.
558 | * Prevent `terraform init` from accidentally asking for any input.
559 | * Allow sensitive variables to be updated.
560 | * Fix "Settings" link in some cases.
561 |
562 | MACHINE IMAGE FEATURES:
563 |
564 | * Automatically scale the number of total concurrent builds up based on
565 | the amount of memory available in the instance.
566 | * Add ability to configure instance to send all outbound HTTP and HTTPS
567 | traffic through a user defined proxy server.
568 |
569 | TERRAFORM CONFIG FEATURES:
570 |
571 | * Add `proxy_url` variable to configure outbound HTTP/HTTPS proxy.
572 |
573 | DOCUMENTATION CHANGES:
574 |
575 | * Remove deprecated references to Consul environments.
576 | * Include [Encrypted AMI](docs/encrypt-ami.md) for information on using
577 | encrypted AMIs/EBS.
578 | * Add [`network-access`](docs/network-access.md) with information about
579 | the network access required by TFE.
580 | * Add [`managing-tool-versions`](docs/managing-tool-versions.md) to document
581 | usage of the `/admin/tools` control panel.
582 |
583 | ## v201705-2 (May 23, 2017)
584 |
585 | APPLICATION LEVEL CHANGES:
586 |
587 | * Prevent sensitive variables from being sent in the clear over the API.
588 | * Improve setup UI.
589 | * Add support for releases up to Terraform 0.9.5.
590 | * Fix bug that prevented packer runs from having their logs automatically
591 | display.
592 |
593 | MACHINE IMAGE CHANGES:
594 |
595 | * Fix archivist being misreported as failing checks.
596 | * Add ability to add SSL certificates to be trusted into /etc/ssl/certs.
597 | * Add awscli to build context for use with local\_exec.
598 | * Infer the s3 endpoint from the region to support different AWS partitions.
599 | * Add ability to run custom shell code on the first boot.
600 |
601 | TERRAFORM CONFIG CHANGES:
602 |
603 | * Add `startup_script` to allow custom shell code to be injected and run on
604 | first boot.
605 | * Allow for customer managed security groups.
606 |
607 |
608 | DOCUMENTATION CHANGES:
609 |
610 | * Include [documentation](docs/support.md) on sending support information via the
611 | `hashicorp-support` tool.
612 |
613 | ## v201705-1 (May 12, 2017)
614 |
615 | APPLICATION LEVEL CHANGES:
616 |
617 | * Improve UI performance by reducing how often and when pages poll for new
618 | results.
619 | * Add support for releases up to Terraform 0.9.4.
620 | * Add support for releases up to Packer 0.12.3.
621 | * Fix the From address in email sent by the system.
622 | * Allow amazon-ebssurrogate builder in Packer.
623 | * Handle sensitive variables from `packer push`.
624 | * Improve speed of retrieving and uploading artifacts over HTTP.
625 | * Added integrations with GitLab and Bitbucket Cloud.
626 | * Removed Consul and Applications functionality.
627 |
628 | MACHINE IMAGE CHANGES:
629 |
630 | * Fix an issue preventing the `hashicorp-support` command from successfully
631 | generating a diagnostic bundle.
632 | * Fix ability to handle more complex database passwords.
633 | * More explicit region utilization in S3 access to support S3 in GovCloud.
634 |
635 | TERRAFORM CONFIG CHANGES:
636 |
637 | * Make `region` a required input variable to prevent any confusion from the
638 | default value being set to an unexpected value. Customers who were not
639 | already setting this can populate it with the former default: `"us-west-2"`
640 | * Add ability to specify the AWS partition to support GovCloud.
641 | * Reorganize supporting modules into a separate `aws-extra` directory
642 | * Remove a stale output being referenced in `vpc-base`
643 | * Work around a Terraform bug that prevented the outputs of `vpc-base` from
644 | being used as inputs for data subnets.
645 | * Explicitly specify the IAM policy of the KMS key when creating it.
646 | * Add an alias to the created KMS key so it is more easily identifiable in the
647 | AWS console.
648 | * Add ability to start the ELB in internal mode.
649 | * Specify KMS key policy to allow for utilization of the key explicitly by
650 | the TFE instance role.
651 | * Add KMS alias for key that is utilized for better inventory tracking.
652 |
653 |
654 | ## v201704-3
655 |
656 | APPLICATION LEVEL CHANGES:
657 |
658 | (none)
659 |
660 | MACHINE IMAGE CHANGES:
661 |
662 | * Properly handle database passwords with non-alphanumeric characters
663 | * Remove nginx's `client_max_body_size` limit so users can upload files larger than 1MB
664 |
665 | TERRAFORM CONFIG CHANGES:
666 |
667 | * Fix var reference issues when specifying `kms_key_id` as an input
668 | * Add explicit IAM policy to KMS key when Terraform manages it
669 | * Add explicit KMS Key Alias for more easily referencing the KMS key in the AWS Web Console
670 |
671 | ## v201704-2
672 |
673 | APPLICATION LEVEL CHANGES:
674 |
675 | (none)
676 |
677 | MACHINE IMAGE CHANGES:
678 |
679 | * Add `hashicorp-support` script to create an encrypted bundle of diagnostic information for passing to HashiCorp Support
680 | * Switch main SSH username to `tfe-admin` from default `ubuntu`
681 | * Allow AMI to be used in downstream Packer builds without triggering bootstrap behavior
682 |
683 | TERRAFORM CONFIG CHANGES:
684 |
685 | * Allow `kms_key_id` to be optionally specified as input
686 | * Remove unused `az` variable
687 |
688 | ## v201704-1
689 |
690 | APPLICATION LEVEL CHANGES:
691 |
692 | (none)
693 |
694 | MACHINE IMAGE CHANGES:
695 |
696 | * Disable Consul remote exec
697 | * Install git inside build worker Docker container to facilitate terraform module fetching
698 | * Don't redirect traffic incoming from local build workers
699 |
700 | TERRAFORM CONFIG CHANGES:
701 |
702 | * Prevent extraneous diffs after RDS database creation
703 |
704 | ## v201703-2
705 |
706 | APPLICATION LEVEL CHANGES:
707 |
708 | (none)
709 |
710 | MACHINE IMAGE CHANGES:
711 |
712 | * Prevent race condition by waiting until Consul is running before continuing boot
713 | * Ensure that Vault is unsealed when instance reboots
714 |
715 | ## v201703-1
716 |
717 | * Initial release!
718 |
--------------------------------------------------------------------------------
/INSTALLING.md:
--------------------------------------------------------------------------------
1 | # Installing Terraform Enterprise
2 |
3 | Terraform Enterprise currently targets Amazon Web Services environments. Support for additional infrastructure providers is planned.
4 |
5 | ## Amazon Web Services (AWS)
6 |
7 | 
8 |
9 | In AWS, a Terraform Enterprise install consists of:
10 |
11 | * Compute Tier
12 |   * Elastic Load Balancer (ELB)
13 |   * Single EC2 instance launched as part of an AutoScaling Group (ASG)
14 | * Data Tier
15 |   * RDS PostgreSQL for primary application storage
16 |   * An S3 Bucket for object storage
17 |
18 | ### Primary Installation Config Files
19 |
20 | The [`aws-standard`](aws-standard/) directory contains the primary Terraform Enterprise installation config files. Its [`README`](aws-standard/README.md) is the next step for learning about how to install Terraform Enterprise.
21 |
22 | ### Tertiary Terraform Configs
23 |
24 | The [`aws-extra`](aws-extra/) directory contains supplementary configs. Its [`README`](aws-extra/README.md) contains a list of descriptions of the configs available.
25 |
26 | ## Documentation
27 |
28 | Further documentation about various aspects of the Terraform Enterprise install can be found in the [`docs`](docs/) subdir.
29 |
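30 | ### Example Variable File
31 |
32 | The sketch below is a hypothetical `terraform.tfvars` for the `aws-standard` config; the authoritative variable set lives in [`terraform.tfvars.example`](aws-standard/terraform.tfvars.example). The two variables shown are ones documented in the [CHANGELOG](CHANGELOG.md): `region` is required (its former default was `"us-west-2"`), and `key_name` is optional for deployments without SSH access.
33 |
34 | ```hcl
35 | # Illustrative values only; consult aws-standard/terraform.tfvars.example
36 | # for the full set of required inputs.
37 | region   = "us-west-2"
38 | key_name = "tfe-admin"   # optional; omit for deployments without SSH access
39 | ```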
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Terraform Enterprise Modules
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | These are Terraform modules for setting up Terraform Enterprise inside a customer's infrastructure (aka Private Terraform Enterprise).
14 |
15 | [](https://www.hashicorp.com/products/terraform/)
16 |
17 | ## Interested in Terraform Enterprise?
18 |
19 | Visit the [product page](https://www.hashicorp.com/products/terraform/) for more information.
20 |
21 | ## Installing Terraform Enterprise?
22 |
23 | See [INSTALLING.md](INSTALLING.md)
24 |
--------------------------------------------------------------------------------
/aws-extra/README.md:
--------------------------------------------------------------------------------
1 | # Tertiary AWS Terraform Configs
2 |
3 | Each subdirectory contains a set of Terraform configurations meant to support the primary Terraform Enterprise installation configs present in [`aws-standard`](../aws-standard).
4 |
5 | * [`base-vpc`](base-vpc/) - Configuration for creating a basic VPC and subnets that meet [the documented requirements for TFE installation](../aws-standard/README.md#preflight).
6 | * [`minimum-viable-iam`](minimum-viable-iam/) - Configuration for creating an AWS user with a minimum access policy required to perform a Terraform Enterprise installation.
7 |
--------------------------------------------------------------------------------
/aws-extra/base-vpc/.gitignore:
--------------------------------------------------------------------------------
1 | ## General operating system artefacts
2 | .DS_Store
3 |
4 | ## Vagrant artefacts
5 | .vagrant/
6 |
7 | ## Terraform artefacts
8 | .terraform/
9 | *.tfstate*
10 |
11 | ## IntelliJ
12 | .idea/
13 | *.iml
14 |
--------------------------------------------------------------------------------
/aws-extra/base-vpc/GNUmakefile:
--------------------------------------------------------------------------------
1 | default: help
2 |
3 | ATLAS_ENVIRONMENT = "hashicorp-engservices/demo-vpc"
4 |
5 | .PHONY: config
6 | config: ## Configure remote state for Atlas - set TF_OPTS for additional flags
7 | terraform remote config \
8 | -backend="atlas" \
9 | -backend-config="name=$(ATLAS_ENVIRONMENT)" \
10 | $(TF_OPTS)
11 |
12 | .PHONY: plan
13 | plan: ## Run a Terraform plan operation - set TF_OPTS for additional flags
14 | terraform get $(TF_OPTS) terraform/
15 | terraform plan $(TF_OPTS) terraform/
16 |
17 | .PHONY: apply
18 | apply: ## Run a Terraform apply operation - set TF_OPTS for additional flags
19 | terraform apply $(TF_OPTS) terraform/
20 |
21 | .PHONY: refresh
22 | refresh: ## Run a Terraform refresh operation - set TF_OPTS for additional flags
23 | terraform refresh $(TF_OPTS) terraform/
24 |
25 | .PHONY: destroy
26 | destroy: ## Run a Terraform destroy operation - set TF_OPTS for additional flags
27 | @terraform destroy $(TF_OPTS) terraform/
28 |
29 | .PHONY: output
30 | output: ## Show Terraform outputs - set TF_OPTS for individual fields
31 | @terraform output $(TF_OPTS)
32 |
33 | .PHONY: graph
34 | graph: ## Draw a graph of the infrastructure dependencies
35 | @terraform graph terraform/ | dot -Tpdf > graph.pdf
36 |
37 | .PHONY: help
38 | help:
39 | @echo "Valid targets:"
40 | @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-15s\033[0m %s\n", $$1, $$2}'
41 |
42 |
--------------------------------------------------------------------------------
/aws-extra/base-vpc/README.md:
--------------------------------------------------------------------------------
1 | # VPC and Networking - TFE
2 |
3 | ## Description
4 |
5 | This repository contains Terraform code under the `terraform/` directory for
6 | configuring a VPC in AWS, complete with:
7 |
8 | - Private and public subnets distributed across availability zones accessible
9 | by the account in use.
10 |
11 | - Internet Gateway and associated routing configuration to allow internet
12 | access from instances in public subnets.
13 |
14 | - EIPs and NAT Gateways to allow internet access from instances in private
15 | subnets.
16 |
17 | - A VPC Endpoint for accessing S3 without leaving the confines of the VPC.
18 |
19 | ## Usage
20 |
21 | The `GNUmakefile` in the repository contains targets for most Terraform
22 | operations, and keeps state and variables in the root of the repository. If
23 | running commands manually, be sure to include the path to the configuration
24 | root as `terraform/`.
25 |
26 | Run `make` or `make help` to see a list of valid targets.
27 |
--------------------------------------------------------------------------------
/aws-extra/base-vpc/terraform/main.tf:
--------------------------------------------------------------------------------
1 | variable "region" {}
2 |
3 | variable "vpc_name" {
4 | type = "string"
5 | }
6 |
7 | variable "cidr_block" {
8 | type = "string"
9 | default = "172.23.0.0/16"
10 | }
11 |
12 | provider "aws" {
13 | region = "${var.region}"
14 | }
15 |
16 | data "aws_availability_zones" "zones" {}
17 |
18 | module "vpc" {
19 | source = "modules/vpc"
20 |
21 | vpc_name = "${var.vpc_name}"
22 |
23 | cidr_block = "${var.cidr_block}"
24 |
25 | private_subnets = [
26 | "${cidrsubnet(var.cidr_block, 3, 5)}",
27 | "${cidrsubnet(var.cidr_block, 3, 6)}",
28 | ]
29 |
30 | public_subnets = [
31 | "${cidrsubnet(var.cidr_block, 5, 0)}",
32 | ]
33 |
34 | availability_zones = ["${data.aws_availability_zones.zones.names}"]
35 |
36 | region = "${var.region}"
37 | }
38 |
39 | output "private_subnet_ids" {
40 | value = ["${module.vpc.private_subnets}"]
41 | }
42 |
43 | output "public_subnet_ids" {
44 | value = ["${module.vpc.public_subnets}"]
45 | }
46 |
47 | output "private_availability_zones" {
48 | value = ["${module.vpc.private_availability_zones}"]
49 | }
50 |
51 | output "public_availability_zones" {
52 | value = ["${module.vpc.public_availability_zones}"]
53 | }
54 |
55 | output "vpc_name" {
56 | value = "${module.vpc.vpc_name}"
57 | }
58 |
59 | output "vpc_id" {
60 | value = "${module.vpc.vpc_id}"
61 | }
62 |
63 | output "s3_vpce_id" {
64 | value = "${module.vpc.s3_vpce_id}"
65 | }
66 |
67 | output "cidr_block" {
68 | value = "${module.vpc.cidr_block}"
69 | }
70 |
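71 | # Note: with the default cidr_block of 172.23.0.0/16, the cidrsubnet() calls
72 | # above evaluate to 172.23.160.0/19 and 172.23.192.0/19 for the private
73 | # subnets and 172.23.0.0/21 for the public subnet. A hypothetical invocation
74 | # of this config from the repository root might look like:
75 | #
76 | #   terraform plan -var 'region=us-east-1' -var 'vpc_name=TFE Base VPC' terraform/
77 | #
78 | # (region and vpc_name have no defaults and must be supplied.)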
--------------------------------------------------------------------------------
/aws-extra/base-vpc/terraform/modules/vpc/endpoints.tf:
--------------------------------------------------------------------------------
1 | resource "aws_vpc_endpoint" "private_s3" {
2 | vpc_id = "${aws_vpc.vpc.id}"
3 | service_name = "com.amazonaws.${var.region}.s3"
4 | route_table_ids = [
5 | "${aws_route_table.private.*.id}",
6 | "${aws_route_table.public.*.id}"
7 | ]
8 | }
9 |
--------------------------------------------------------------------------------
/aws-extra/base-vpc/terraform/modules/vpc/flowlogs.tf:
--------------------------------------------------------------------------------
1 | resource "aws_cloudwatch_log_group" "vpc_log_group" {
2 | name = "${lower(replace(var.vpc_name, " ", "-"))}-vpc-flow-logs"
3 | }
4 |
5 | data "aws_iam_policy_document" "flow_log_assume_role" {
6 | statement {
7 | effect = "Allow"
8 | principals {
9 | type = "Service"
10 | identifiers = ["vpc-flow-logs.amazonaws.com"]
11 | }
12 | actions = ["sts:AssumeRole"]
13 | }
14 | }
15 |
16 |
17 | resource "aws_iam_role" "vpc_role" {
18 | name = "${lower(replace(var.vpc_name, " ", "-"))}-vpc-flow-logs"
19 | assume_role_policy = "${data.aws_iam_policy_document.flow_log_assume_role.json}"
20 | }
21 |
22 | data "aws_iam_policy_document" "flow_log" {
23 | statement {
24 | effect = "Allow"
25 | resources = ["*"]
26 | actions = [
27 | "logs:CreateLogGroup",
28 | "logs:CreateLogStream",
29 | "logs:PutLogEvents",
30 | "logs:DescribeLogGroups",
31 | "logs:DescribeLogStreams"
32 | ]
33 | }
34 | }
35 |
36 | resource "aws_iam_role_policy" "vpc_role_policy" {
37 | name = "${lower(replace(var.vpc_name, " ", "-"))}-vpc-flow-logs"
38 | role = "${aws_iam_role.vpc_role.id}"
39 | policy = "${data.aws_iam_policy_document.flow_log.json}"
40 | }
41 |
42 | resource "aws_flow_log" "vpc_flow_log" {
43 | log_group_name = "${aws_cloudwatch_log_group.vpc_log_group.name}"
44 | iam_role_arn = "${aws_iam_role.vpc_role.arn}"
45 | vpc_id = "${aws_vpc.vpc.id}"
46 | traffic_type = "ALL"
47 | }
48 |
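49 | # Note on the pattern above: the assume-role policy lets the VPC Flow Logs
50 | # service (vpc-flow-logs.amazonaws.com) assume the role, while the inline
51 | # role policy grants the CloudWatch Logs permissions the service needs to
52 | # write flow records into the log group.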
--------------------------------------------------------------------------------
/aws-extra/base-vpc/terraform/modules/vpc/interface.tf:
--------------------------------------------------------------------------------
1 | variable "vpc_name" {
2 | description = "The name of the VPC"
3 | type = "string"
4 | }
5 |
6 | variable "cidr_block" {
7 | description = "The VPC-wide CIDR block"
8 | type = "string"
9 | }
10 |
11 | variable "public_subnets" {
12 | description = "List of CIDR blocks for public subnets"
13 | type = "list"
14 | }
15 |
16 | variable "private_subnets" {
17 | description = "List of CIDR blocks for private subnets"
18 | type = "list"
19 | }
20 |
21 | variable "availability_zones" {
22 | description = "List of availability zones over which to distribute subnets"
23 | type = "list"
24 | }
25 |
26 | variable "region" {
27 | description = "The AWS region where the VPC will be created"
28 | type = "string"
29 | }
30 |
31 | output "private_subnets" {
32 | value = ["${aws_subnet.private.*.id}"]
33 | }
34 |
35 | output "public_subnets" {
36 | value = ["${aws_subnet.public.*.id}"]
37 | }
38 |
39 | output "private_availability_zones" {
40 | value = ["${aws_subnet.private.*.availability_zone}"]
41 | }
42 |
43 | output "public_availability_zones" {
44 | value = ["${aws_subnet.public.*.availability_zone}"]
45 | }
46 |
47 | output "vpc_name" {
48 | value = "${var.vpc_name}"
49 | }
50 |
51 | output "vpc_id" {
52 | value = "${aws_vpc.vpc.id}"
53 | }
54 |
55 | output "s3_vpce_id" {
56 | value = "${aws_vpc_endpoint.private_s3.id}"
57 | }
58 |
59 | output "cidr_block" {
60 | value = "${aws_vpc.vpc.cidr_block}"
61 | }
62 |
--------------------------------------------------------------------------------
/aws-extra/base-vpc/terraform/modules/vpc/nat.tf:
--------------------------------------------------------------------------------
1 | resource "aws_route" "nat_routes" {
2 | count = "${length(var.private_subnets)}"
3 | destination_cidr_block = "0.0.0.0/0"
4 |
5 | route_table_id = "${element(aws_route_table.private.*.id, count.index)}"
6 | nat_gateway_id = "${element(aws_nat_gateway.private.*.id, count.index)}"
7 | }
8 |
9 | resource "aws_eip" "nat_eip" {
10 | count = "${length(var.private_subnets)}"
11 | vpc = true
12 | }
13 |
14 | resource "aws_nat_gateway" "private" {
15 | count = "${length(var.private_subnets)}"
16 |
17 | allocation_id = "${element(aws_eip.nat_eip.*.id, count.index)}"
18 | subnet_id = "${element(aws_subnet.public.*.id, count.index)}"
19 | }
20 |
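21 | # Note on the wiring above: element(..., count.index) pairs EIP N, NAT
22 | # gateway N, and private route table N, so each private subnet gets its own
23 | # default route out through its own NAT gateway. The NAT gateways themselves
24 | # are placed in public subnets (element() wraps around when there are fewer
25 | # public subnets than private ones), since they need the Internet Gateway
26 | # route to reach the internet.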
--------------------------------------------------------------------------------
/aws-extra/base-vpc/terraform/modules/vpc/routing.tf:
--------------------------------------------------------------------------------
1 | resource "aws_internet_gateway" "vpc" {
2 | vpc_id = "${aws_vpc.vpc.id}"
3 |
4 | tags {
5 | Name = "${format("%s Gateway", var.vpc_name)}"
6 | }
7 | }
8 |
9 | resource "aws_route_table" "private" {
10 | count = "${length(var.private_subnets)}"
11 | vpc_id = "${aws_vpc.vpc.id}"
12 |
13 | tags {
14 | Name = "${format("%s Private", var.vpc_name)}"
15 | }
16 | }
17 |
18 | resource "aws_route_table_association" "private" {
19 | count = "${length(var.private_subnets)}"
20 |
21 | subnet_id = "${element(aws_subnet.private.*.id, count.index)}"
22 | route_table_id = "${element(aws_route_table.private.*.id, count.index)}"
23 | }
24 |
25 | resource "aws_route_table" "public" {
26 | vpc_id = "${aws_vpc.vpc.id}"
27 |
28 | route {
29 | cidr_block = "0.0.0.0/0"
30 | gateway_id = "${aws_internet_gateway.vpc.id}"
31 | }
32 |
33 | tags {
34 | Name = "${format("%s Public", var.vpc_name)}"
35 | }
36 | }
37 |
38 | resource "aws_route_table_association" "public" {
39 | count = "${length(var.public_subnets)}"
40 |
41 | subnet_id = "${element(aws_subnet.public.*.id, count.index)}"
42 | route_table_id = "${aws_route_table.public.id}"
43 | }
44 |
--------------------------------------------------------------------------------
/aws-extra/base-vpc/terraform/modules/vpc/subnets.tf:
--------------------------------------------------------------------------------
1 | resource "aws_vpc" "vpc" {
2 | cidr_block = "${var.cidr_block}"
3 |
4 | enable_dns_hostnames = true
5 | enable_dns_support = true
6 |
7 | tags {
8 | Name = "${format("%s", var.vpc_name)}"
9 | }
10 | }
11 |
12 | resource "aws_subnet" "private" {
13 | count = "${length(var.private_subnets)}"
14 |
15 | vpc_id = "${aws_vpc.vpc.id}"
16 | cidr_block = "${element(var.private_subnets, count.index)}"
17 | availability_zone = "${element(var.availability_zones, count.index)}"
18 | map_public_ip_on_launch = false
19 |
20 | tags {
21 | Name = "${format("%s Private %d", var.vpc_name, count.index + 1)}"
22 | }
23 | }
24 |
25 | resource "aws_subnet" "public" {
26 | count = "${length(var.public_subnets)}"
27 |
28 | vpc_id = "${aws_vpc.vpc.id}"
29 | cidr_block = "${element(var.public_subnets, count.index)}"
30 | availability_zone = "${element(var.availability_zones, count.index)}"
31 | map_public_ip_on_launch = true
32 |
33 | tags {
34 | Name = "${format("%s Public %d", var.vpc_name, count.index + 1)}"
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/aws-extra/minimum-viable-iam/README.md:
--------------------------------------------------------------------------------
1 | # Minimum Viable IAM Config
2 |
3 | This directory contains Terraform config to create a user with the minimum IAM access policy required to successfully perform a Terraform Enterprise installation.
4 |
--------------------------------------------------------------------------------
/aws-extra/minimum-viable-iam/ct.rb:
--------------------------------------------------------------------------------
1 | # This script can be used to generate a list of IAM perms from
2 | # cloudtrail logs. It was used to generate the mvi profile.
3 | require 'json'
4 |
5 | perms = []
6 |
7 | ARGV.each do |file|
8 | data = JSON.parse File.read(file)
9 |
10 | data['Records'].each do |rec|
11 | user = rec['userIdentity']
12 | next unless user['userName'] == "tfe"
13 |
14 | service = rec['eventSource'][0...-(".amazonaws.com".size)]
15 |
16 | perms << %Q!"#{service}:#{rec['eventName']}",!
17 | end
18 | end
19 |
20 | perms.sort.uniq.each do |perm|
21 | puts perm
22 | end
23 |
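24 | # Example invocation (the log file paths are hypothetical):
25 | #
26 | #   ruby ct.rb cloudtrail-logs/*.json
27 | #
28 | # Prints one quoted "service:EventName" action per line (with a trailing
29 | # comma), suitable for pasting into the actions list in mvi.tf.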
--------------------------------------------------------------------------------
/aws-extra/minimum-viable-iam/mvi.tf:
--------------------------------------------------------------------------------
1 | data "aws_iam_policy_document" "mvi" {
2 | statement {
3 | sid = "tfemvi"
4 |
5 | resources = ["*"]
6 |
7 | // generated by running cloudtrail logs through ct.rb
8 | actions = [
9 | "autoscaling:CreateAutoScalingGroup",
10 | "autoscaling:CreateLaunchConfiguration",
11 | "autoscaling:DescribeAutoScalingGroups",
12 | "autoscaling:DescribeLaunchConfigurations",
13 | "ec2:AuthorizeSecurityGroupEgress",
14 | "ec2:AuthorizeSecurityGroupIngress",
15 | "ec2:CreateSecurityGroup",
16 | "ec2:CreateTags",
17 | "ec2:DescribeAccountAttributes",
18 | "ec2:DescribeImages",
19 | "ec2:DescribeNetworkInterfaces",
20 | "ec2:DescribeSecurityGroups",
21 | "ec2:DescribeSubnets",
22 | "ec2:RevokeSecurityGroupEgress",
23 | "elasticache:CreateCacheCluster",
24 | "elasticache:CreateCacheParameterGroup",
25 | "elasticache:CreateCacheSubnetGroup",
26 | "elasticache:DescribeCacheClusters",
27 | "elasticache:DescribeCacheParameterGroups",
28 | "elasticache:DescribeCacheParameters",
29 | "elasticache:DescribeCacheSubnetGroups",
30 | "elasticache:ListTagsForResource",
31 | "elasticache:ModifyCacheParameterGroup",
32 | "elasticloadbalancing:AddTags",
33 | "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
34 | "elasticloadbalancing:AttachLoadBalancerToSubnets",
35 | "elasticloadbalancing:ConfigureHealthCheck",
36 | "elasticloadbalancing:CreateLoadBalancer",
37 | "elasticloadbalancing:CreateLoadBalancerListeners",
38 | "elasticloadbalancing:DescribeInstanceHealth",
39 | "elasticloadbalancing:DescribeLoadBalancerAttributes",
40 | "elasticloadbalancing:DescribeLoadBalancers",
41 | "elasticloadbalancing:DescribeTags",
42 | "elasticloadbalancing:ModifyLoadBalancerAttributes",
43 | "kms:CreateGrant",
44 | "kms:CreateKey",
45 | "kms:DescribeKey",
46 | "kms:GenerateDataKey",
47 | "kms:GetKeyPolicy",
48 | "kms:GetKeyRotationStatus",
49 | "kms:ListResourceTags",
50 | "kms:ScheduleKeyDeletion",
51 | "kms:UpdateKeyDescription",
52 | "iam:CreateInstanceProfile",
53 | "iam:CreateRole",
54 | "iam:GetInstanceProfile",
55 | "iam:GetRole",
56 | "iam:GetRolePolicy",
57 | "iam:PutRolePolicy",
58 | "iam:AddRoleToInstanceProfile",
59 | "iam:PassRole",
60 | "rds:CreateDBInstance",
61 | "rds:CreateDBSubnetGroup",
62 | "rds:DescribeDBInstances",
63 | "rds:DescribeDBSubnetGroups",
64 | "rds:ListTagsForResource",
65 | "route53:ChangeResourceRecordSets",
66 | "route53:GetHostedZone",
67 | "route53:GetChange",
68 | "route53:ListResourceRecordSets",
69 | "s3:CreateBucket",
70 | "s3:GetBucketCors",
71 | "s3:GetBucketLifecycle",
72 | "s3:GetBucketLocation",
73 | "s3:GetBucketLogging",
74 | "s3:GetBucketReplication",
75 | "s3:GetBucketRequestPayment",
76 | "s3:GetBucketTagging",
77 | "s3:GetBucketVersioning",
78 | "s3:GetBucketWebsite",
79 | "s3:GetObject",
80 | "s3:PutBucketAcl",
81 | "s3:PutBucketVersioning",
82 | "s3:PutObject",
83 | ]
84 | }
85 | }
86 |
--------------------------------------------------------------------------------
/aws-extra/minimum-viable-iam/user.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "us-west-2"
3 | }
4 |
5 | resource "aws_iam_user" "tfe" {
6 | name = "tfe-deploy"
7 | path = "/system/"
8 | }
9 |
10 | resource "aws_iam_access_key" "tfe" {
11 | user = "${aws_iam_user.tfe.name}"
12 | }
13 |
14 | resource "aws_iam_group" "tfe-deploy" {
15 | name = "tfe-deploy"
16 | path = "/system/"
17 | }
18 |
19 | resource "aws_iam_group_policy" "tfe-deploy" {
20 | name = "tfe-deploy"
21 | group = "${aws_iam_group.tfe-deploy.id}"
22 | policy = "${data.aws_iam_policy_document.mvi.json}"
23 | }
24 |
25 | resource "aws_iam_group_membership" "tfe-deploy" {
26 | name = "tfe-deploy"
27 | group = "${aws_iam_group.tfe-deploy.name}"
28 |
29 | users = [
30 | "${aws_iam_user.tfe.name}",
31 | ]
32 | }
33 |
--------------------------------------------------------------------------------
/aws-standard/README.md:
--------------------------------------------------------------------------------
1 | # Private Terraform Enterprise
2 |
3 | ## Delivery
4 |
5 | The goal of this installation procedure is to set up a Terraform Enterprise
6 | cluster that is available at a DNS name and accessed via HTTPS. This
7 | standard configuration package uses Terraform to create both the compute and data layer resources, and optionally uses Route53 to configure DNS automatically.
8 |
9 | ## Preflight
10 |
11 | ### Dependencies
12 |
13 | Before setup begins, a few resources need to be provisioned. We consider these out of scope for the cluster provisioning because they depend on the user's environment.
14 |
15 | The following are required to complete installation:
16 |
17 | * **AWS IAM credentials** capable of creating new IAM roles and configuring various services. We suggest using an admin role for this. The credentials are only used for setup; at runtime, only an assumed role is used.
18 | * **AWS VPC** containing at least 2 subnets. These are the subnets the cluster will be launched into. Subnets do not need to be public, but they do need an internet gateway at present.
19 | * **SSH Key Pair** configured with AWS EC2. This will be used to configure support access to the cluster. This SSH key can be optionally removed from the instance once installation is complete.
20 | * To create a new one: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html
21 | * A **Publicly Trusted TLS certificate** registered with AWS Certificate Manager. This can be a certificate created by ACM for a hostname, or an existing certificate imported into ACM.
22 | * To create a new ACM-managed cert: https://console.aws.amazon.com/acm/home#/wizard/
23 | * To import an existing cert: https://console.aws.amazon.com/acm/home#/importwizard/
24 | * *NOTE:* Certificates are per region, so be sure to create them in the same region as you'll be deploying Terraform Enterprise
25 | * *NOTE:* The certificate must allow the fully qualified hostname that Terraform Enterprise will be using. This means you need to decide on the value of hostname when creating the certificates and use the same value in the configuration.
26 |
27 | The following details will be requested during the application bootstrapping process. It's helpful to have them prepared beforehand.
28 |
29 | * **SMTP Credentials**: Terraform Enterprise requires SMTP information to send email. SMTP configuration can be skipped if necessary during the installation, but HashiCorp recommends configuring it during the initial bootstrapping process.
30 | * Hostname and port of SMTP service
31 | * Type of authentication (plain or login)
32 | * Username and password
33 | * Email address to use as the sender for emails originating from TFE
34 | * (Optional) **Twilio Credentials**: Terraform Enterprise can use Twilio for SMS-based 2-factor authentication. If Twilio is not configured, 2FA based on a virtual MFA device (e.g. Google Authenticator) will still be available.
35 |
36 | ### Required Variables
37 |
38 | The following variables are required inputs and must be populated prior to beginning installation.
39 |
40 | **NOTE**: use only alphanumeric characters (upper- and lower-case), as well as dashes, underscores, colons, and forward-slashes (`-`, `_`, `:`, `/`). Other characters may cause the TFE instance to be unable to boot.
41 |
42 | * `region`: The AWS region to deploy into.
43 | * `ami_id`: The ID of a Terraform Enterprise Base AMI. See [`ami-ids`](../docs/ami-ids.md) to look one up.
44 | * `fqdn`: The hostname that the cluster will be accessible at. This value needs to match the DNS setup for proper operations. Example: `tfe-eng01.mycompany.io`
45 | * `cert_id`: An AWS certificate ARN. This is the certification that will be used by the ELB for the cluster. Example: `arn:aws:acm:us-west-2:241656615859:certificate/f32fa674-de62-4681-8035-21a4c81474c6`
46 | * `instance_subnet_id`: Subnet ID of the subnet that the cluster's instance will be placed into. If this is a public subnet, the instance will be assigned a public IP. This is not required as the primary cluster interface is an ELB registered with the hostname. Example: `subnet-0de26b6a`
47 | * `elb_subnet_id`: Subnet ID of the subnet that the cluster's load balancer will be placed into. If this is a public subnet, the load balancer will be accessible from the public internet. This is not required — the ELB can be marked as private via the `internal_elb` option below.
48 | * `data_subnet_ids`: Subnet IDs that will be used to create the data services (RDS and ElastiCache) used by the cluster. There must be 2 subnet IDs given for proper redundancy. Example: `["subnet-0ce26b6b", "subnet-d0f35099"]`
49 | * `db_password`: Password that will be used to access RDS. Example: `databaseshavesecrets`
50 | * `bucket_name`: Name of the S3 bucket to store artifacts used by the cluster into. This bucket is automatically created. We suggest a name like `tfe-mycompany-data`, as a convention.
51 |
52 | ### Optional Variables
53 |
54 | These variables can be populated, but they have defaults that can also be used.
55 |
56 | **NOTE**: use only alphanumeric characters (upper- and lower-case), as well as dashes, underscores, colons, and forward-slashes (`-`, `_`, `:`, `/`). Other characters may cause the TFE instance to be unable to boot.
57 |
58 | * `key_name`: Name of AWS SSH Key Pair that will be used (as shown in the AWS console). The pair needs to already exist; it will not be created. **If this variable is not set, no SSH access will be available to the Terraform Enterprise instance.**
59 | * `manage_bucket` Indicate if this Terraform state should create and own the bucket. Set this to false if you are reusing an existing bucket.
60 | * `kms_key_id` Specify the ARN for a KMS key to use rather than having one
61 | created automatically.
62 | * `db_username` Username that will be used to access RDS. Default: `atlas`
63 | * `db_size_gb` Disk size of the RDS instance to create. Default: `80`
64 | * `db_instance_class` Instance type of the RDS instance to create. Default: `db.m4.large`
65 | * `db_multi_az` Configure if the RDS cluster should use multiple AZs to improve snapshot performance. Default: `true`
66 | * `db_snapshot_identifier` Previously made snapshot to restore when RDS is created. This is for migration of data between clusters. Default is to create the database fresh.
67 | * `db_name` This only needs to be set if you're migrating from an RDS instance with a different database name.
68 | * `zone_id` The ID of a Route53 zone that a record for the cluster will be installed into. Leave this blank if you need to manage DNS elsewhere. Example: `ZVEF52R7NLTW6`
69 | * `hostname` If specifying `zone_id`, this should be set to the name that is used for the record to be registered with the zone. This value combined with the zone information will form the full DNS name for Terraform Enterprise. Example: `emp-test`
70 | * `arn_partition` Used mostly for govcloud installations. Example: `aws-us-gov`
71 | * `internal_elb` Indicate to AWS that the created ELB is internal only. Example: `true`
72 | * `startup_script` Shell code that should run on the first boot. This is explained in more detail below.
73 | * `external_security_group_ids` The IDs of existing EC2 Security Groups to assign to the ELB for "external" access to the system. By default, a Security Group will be created that allows ingress to ports 80 and 443 from `0.0.0.0/0`.
74 | * `internal_security_group_ids` The IDs of existing EC2 Security Groups to assign to the instance for "internal" access to the system. By default, a Security Group will be created that allows ingress to ports 22 and 8080 from `0.0.0.0/0`.
75 | * `proxy_url` A URL (http or https, with port) to proxy all external HTTP/HTTPS requests from the cluster through. This is explained in more detail below.
76 | * `no_proxy` Hosts to exclude from proxying, in addition to the default set. (Only applies when `proxy_url` is set.)
77 | * `local_redis` If true, use a local Redis server on the cluster instance, eliminating the need for ElastiCache. Default: `false`
78 | * `local_setup` If true, write the setup data to a local file called `tfe-setup-data` instead of into S3. The instance will prompt for this setup data on its first boot, after which point it will be stored in Vault. (Setting this to true requires release `v201709-1` or later.) Default: `false`
79 | * `ebs_size` The size (in GB) to configure the EBS volumes used to store redis data. Default: `100`
80 | * `ebs_redundancy` The number of EBS volumes to mirror together for redundancy in storing redis data. Default: `2`
81 | * `archivist_sse` Setting for server-side encryption of objects in S3; if provided, _must_ be set to `aws:kms`. Default: ``
82 | * `archivist_kms_key_id` KMS key ID (full ARN) for server-side encryption of objects stored in S3.
83 |
84 | #### Startup Script (Optional)
85 |
86 | The `startup_script` variable can contain any shell code and will be executed on
87 | the first boot. This mechanism can be used to customize the AMI, adding
88 | additional software or configuration.
89 |
90 | For example, to install a custom SSL certificate for the services to trust:
91 |
92 | ```
93 | curl -o /usr/local/share/ca-certificates/cert.crt https://my.server.net/custom.pem
94 | update-ca-certificates
95 | ```
96 |
97 | Be sure that files in `/usr/local/share/ca-certificates` end in `.crt` and that `update-ca-certificates` is run after they're placed.
98 |
99 | Or to install additional Ubuntu packages:
100 |
101 | ```
102 | apt-get install -y emacs
103 | ```
104 |
105 | Because the content is likely to be multiple lines, we suggest you use the
106 | heredoc style syntax to define the variable. For example, in your
107 | `terraform.tfvars` file, you'd have:
108 |
109 | ```
110 | startup_script = <<SCRIPT
111 | curl -o /usr/local/share/ca-certificates/cert.crt https://my.server.net/custom.pem
112 | update-ca-certificates
113 | SCRIPT
114 | ```
115 |
116 | #### Proxy Support (Optional)
117 |
118 | The `proxy_url` variable can be set to the URL (http or https, with port)
119 | of a proxy that all external HTTP and HTTPS requests from the cluster
120 | will be routed through.
121 |
122 | Example: `proxy_url = "https://proxy.mycompany.com:3128"`
123 |
124 | Hosts to exclude from proxying, in addition to the default set, can be
125 | listed in the `no_proxy` variable. (This only applies when `proxy_url` is
126 | set.)
127 |
128 | > **Note:** This is only for outbound HTTP and HTTPS requests. Other traffic
129 | such as SMTP and NTP are not proxied and will attempt to connect directly.
130 |
131 | ### Populating Variables
132 |
133 | The values for these variables should be placed into `terraform.tfvars`. Simply copy `terraform.tfvars.example` to `terraform.tfvars` and edit it with the proper values.
134 |
135 | ## Planning
136 |
137 | Terraform Enterprise uses Terraform itself for deployment. Once you have filled in the `terraform.tfvars` file, simply run `terraform plan`. This will output a manifest of all the resources that will be created.
138 |
139 | ## Deployment
140 |
141 | Once you're ready to deploy Terraform Enterprise, run `terraform apply`. This will take approximately 10 minutes (mostly due to RDS creation time).
142 |
143 | ## Upgrade
144 |
145 | To upgrade your instance of Terraform Enterprise, simply update the repository containing the Terraform configuration, then run plan and apply again.
146 |
147 | ## Outputs
148 |
149 | * `dns_name` - The DNS name of the load balancer for TFE. If you are managing
150 | DNS separately, you'll need to make a CNAME record from your indicated
151 | hostname to this value.
152 | * `zone_id` - The Route53 Zone ID of the load balancer for TFE. If you are
153 | managing DNS separately but still using Route53, this value may be useful.
154 | * `url` - The URL where TFE will become available when it boots.
155 |
156 | ## Configuration
157 |
158 | After completing a new install you should head to the
159 | [configuration page](../docs/configuring.md) to create users and teams.
160 |
--------------------------------------------------------------------------------
/aws-standard/main.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 0.9.3"
3 | }
4 |
5 | variable "fqdn" {
6 | description = "The fully qualified domain name the cluster is accessible as"
7 | }
8 |
9 | variable "hostname" {
10 | description = "The name the cluster will be register as under the zone (optional if separately managing DNS)"
11 | default = ""
12 | }
13 |
14 | variable "zone_id" {
15 | description = "The route53 zone id to register the hostname in (optional if separately managing DNS)"
16 | default = ""
17 | }
18 |
19 | variable "cert_id" {
20 | description = "CMS certificate ID to use for TLS attached to the ELB"
21 | }
22 |
23 | variable "instance_subnet_id" {
24 | description = "Subnet to place the instance into"
25 | }
26 |
27 | variable "elb_subnet_id" {
28 | description = "Subnet that will hold the ELB"
29 | }
30 |
31 | variable "data_subnet_ids" {
32 | description = "Subnets to place the data services (RDS) into (2 required for availability)"
33 | type = "list"
34 | }
35 |
36 | variable "db_password" {
37 | description = "RDS password to use"
38 | }
39 |
40 | variable "bucket_name" {
41 | description = "S3 bucket to store artifacts into"
42 | }
43 |
44 | variable "manage_bucket" {
45 | description = "Indicate if the S3 bucket should be created/owned by this terraform state"
46 | default = true
47 | }
48 |
49 | variable "key_name" {
50 | description = "Keypair name to use when started the instances, leave blank for no SSH access"
51 | default = ""
52 | }
53 |
54 | variable "db_username" {
55 | description = "RDS username to use"
56 | default = "atlas"
57 | }
58 |
59 | variable "local_db" {
60 | description = "Use the database on the instance (alpha feature)"
61 | default = false
62 | }
63 |
64 | variable "local_redis" {
65 | description = "Use redis on the instance"
66 | default = true
67 | }
68 |
69 | variable "region" {
70 | description = "AWS region to place cluster into"
71 | }
72 |
73 | variable "ami_id" {
74 | description = "The AMI of a Terraform Enterprise Base image"
75 | }
76 |
77 | variable "ebs_size" {
78 | default = 100
79 | description = "Size of the EBS volume"
80 | }
81 |
82 | variable "ebs_redundancy" {
83 | description = "Number of redundent EBS volumes to configure"
84 | default = 2
85 | }
86 |
87 | variable "local_setup" {
88 | description = "Write the setup configuration data local, not in S3"
89 | default = false
90 | }
91 |
92 | variable "instance_type" {
93 | description = "AWS instance type to use"
94 | default = "m4.2xlarge"
95 | }
96 |
97 | data "aws_subnet" "instance" {
98 | id = "${var.instance_subnet_id}"
99 | }
100 |
101 | data "aws_vpc" "vpc" {
102 | id = "${data.aws_subnet.instance.vpc_id}"
103 | }
104 |
105 | variable "db_size_gb" {
106 | description = "Disk size of the RDS instance to create"
107 | default = "80"
108 | }
109 |
110 | variable "db_instance_class" {
111 | default = "db.m4.large"
112 | }
113 |
114 | variable "db_name" {
115 | description = "Name of the Postgres database. Set this blank on the first run if you are restoring using a snapshot_identifier. Subsequent runs should let it take its default value."
116 | default = "atlas_production"
117 | }
118 |
119 | // Multi AZ allows database snapshots to be taken without incurring an I/O
120 | // penalty on the primary node. This should be `true` for production workloads.
121 | variable "db_multi_az" {
122 | description = "Multi-AZ sets up a second database instance for perforance and availability"
123 | default = true
124 | }
125 |
126 | variable "db_snapshot_identifier" {
127 | description = "Snapshot of database to use upon creation of RDS"
128 | default = ""
129 | }
130 |
131 | variable "bucket_force_destroy" {
132 | description = "Control if terraform should destroy the S3 bucket even if there are contents. This wil destroy any backups."
133 | default = false
134 | }
135 |
136 | variable "kms_key_id" {
137 | description = "A KMS Key to use rather than having a new one created"
138 | default = ""
139 | }
140 |
141 | variable "archivist_sse" {
142 | type = "string"
143 | description = "Setting for server-side encryption of objects in S3; if provided, must be set to 'aws:kms'"
144 | default = ""
145 | }
146 |
147 | variable "archivist_kms_key_id" {
148 | type = "string"
149 | description = "An optional KMS key for use by Archivist to enable S3 server-side encryption"
150 | default = ""
151 | }
152 |
153 | variable "arn_partition" {
154 | description = "AWS partition to use (used mostly by govcloud)"
155 | default = "aws"
156 | }
157 |
158 | variable "internal_elb" {
159 | description = "Indicates that this installation is to be accessed only by a private subnet"
160 | default = false
161 | }
162 |
163 | variable "startup_script" {
164 | description = "Shell or other cloud-init compatible code to run on startup"
165 | default = ""
166 | }
167 |
168 | variable "external_security_group_ids" {
169 | description = "The IDs of existing security groups to use for the ELB instead of creating one."
170 | type = "list"
171 | default = []
172 | }
173 |
174 | variable "internal_security_group_ids" {
175 | description = "The IDs of existing security groups to use for the instance instead of creating one."
176 | type = "list"
177 | default = []
178 | }
179 |
180 | variable "proxy_url" {
181 | description = "A url (http or https, with port) to proxy all external http/https request from the cluster to."
182 | default = ""
183 | }
184 |
185 | variable "no_proxy" {
186 | type = "string"
187 | description = "hosts to exclude from proxying (only applies when proxy_url is set)"
188 | default = ""
189 | }
190 |
191 | # A random identifier to use as a suffix on resource names to prevent
192 | # collisions when multiple instances of TFE are installed in a single AWS
193 | # account.
194 | resource "random_id" "installation-id" {
195 | byte_length = 6
196 | }
197 |
198 | provider "aws" {
199 | region = "${var.region}"
200 | }
201 |
202 | data "aws_caller_identity" "current" {}
203 |
204 | output "account_id" {
205 | value = "${data.aws_caller_identity.current.account_id}"
206 | }
207 |
208 | resource "aws_kms_key" "key" {
209 | count = "${var.kms_key_id != "" ? 0 : 1}"
210 | description = "TFE resource encryption key"
211 |
212 | tags {
213 | Name = "terraform_enterprise-${random_id.installation-id.hex}"
214 | }
215 |
216 | # This references the role created by the instance module as a name
217 | # rather than a resource attribute because it causes too much churn.
218 | # So if the name is changed in the instance module, you need to change
219 | # the name here too.
220 | policy = <<-JSON
221 | {
222 | "Version": "2012-10-17",
223 | "Statement": [
224 | {
225 | "Sid": "Allow KMS for TFE creator",
226 | "Effect": "Allow",
227 | "Principal": {
228 | "AWS": [
229 | "${data.aws_caller_identity.current.arn}",
230 | "arn:${var.arn_partition}:iam::${data.aws_caller_identity.current.account_id}:root",
231 | "arn:${var.arn_partition}:iam::${data.aws_caller_identity.current.account_id}:role/tfe_iam_role-${random_id.installation-id.hex}"
232 | ]
233 | },
234 | "Action": "kms:*",
235 | "Resource": "*"
236 | }
237 | ]
238 | }
239 | JSON
240 | }
241 |
242 | resource "aws_kms_alias" "key" {
243 | name = "alias/terraform_enterprise-${random_id.installation-id.hex}"
244 | target_key_id = "${coalesce(var.kms_key_id, join("", aws_kms_key.key.*.key_id))}"
245 | }
246 |
247 | module "route53" {
248 | source = "../modules/tfe-route53"
249 | hostname = "${var.hostname}"
250 | zone_id = "${var.zone_id}"
251 | alias_dns_name = "${module.instance.dns_name}"
252 | alias_zone_id = "${module.instance.zone_id}"
253 | }
254 |
255 | module "instance" {
256 | source = "../modules/tfe-instance"
257 | installation_id = "${random_id.installation-id.hex}"
258 | ami_id = "${var.ami_id}"
259 | instance_type = "${var.instance_type}"
260 | hostname = "${var.fqdn}"
261 | vpc_id = "${data.aws_subnet.instance.vpc_id}"
262 | cert_id = "${var.cert_id}"
263 | instance_subnet_id = "${var.instance_subnet_id}"
264 | elb_subnet_id = "${var.elb_subnet_id}"
265 | key_name = "${var.key_name}"
266 | db_username = "${var.local_db ? "atlasuser" : var.db_username}"
267 | db_password = "${var.local_db ? "databasepassword" : var.db_password}"
268 | db_endpoint = "${var.local_db ? "127.0.0.1:5432" : module.db.endpoint}"
269 | db_database = "${var.local_db ? "atlas_production" : module.db.database}"
270 | redis_host = "${var.local_redis ? "127.0.0.1" : module.redis.host}"
271 | redis_port = "${var.local_redis ? "6379" : module.redis.port}"
272 | bucket_name = "${var.bucket_name}"
273 | bucket_region = "${var.region}"
274 | kms_key_id = "${coalesce(var.kms_key_id, join("", aws_kms_key.key.*.arn))}"
275 | archivist_sse = "${var.archivist_sse}"
276 | archivist_kms_key_id = "${var.archivist_kms_key_id}"
277 | bucket_force_destroy = "${var.bucket_force_destroy}"
278 | manage_bucket = "${var.manage_bucket}"
279 | arn_partition = "${var.arn_partition}"
280 | internal_elb = "${var.internal_elb}"
281 | ebs_redundancy = "${(var.local_redis || var.local_db) ? var.ebs_redundancy : 0}"
282 | startup_script = "${var.startup_script}"
283 | external_security_group_ids = "${var.external_security_group_ids}"
284 | internal_security_group_ids = "${var.internal_security_group_ids}"
285 | proxy_url = "${var.proxy_url}"
286 | no_proxy = "${var.no_proxy}"
287 | local_setup = "${var.local_setup}"
288 | }
289 |
290 | module "db" {
291 | source = "../modules/rds"
292 | disable = "${var.local_db}"
293 | instance_class = "${var.db_instance_class}"
294 | multi_az = "${var.db_multi_az}"
295 | name = "tfe-${random_id.installation-id.hex}"
296 | username = "${var.db_username}"
297 | password = "${var.db_password}"
298 | storage_gbs = "${var.db_size_gb}"
299 | subnet_ids = "${var.data_subnet_ids}"
300 | engine_version = "9.4"
301 | vpc_cidr = "${data.aws_vpc.vpc.cidr_block}"
302 | vpc_id = "${data.aws_subnet.instance.vpc_id}"
303 | backup_retention_period = "31"
304 | storage_type = "gp2"
305 | kms_key_id = "${coalesce(var.kms_key_id, join("", aws_kms_key.key.*.arn))}"
306 | snapshot_identifier = "${var.db_snapshot_identifier}"
307 | db_name = "${var.db_name}"
308 | }
309 |
310 | module "redis" {
311 | source = "../modules/redis"
312 | disable = "${var.local_redis}"
313 | name = "tfe-${random_id.installation-id.hex}"
314 | subnet_ids = "${var.data_subnet_ids}"
315 | vpc_cidr = "${data.aws_vpc.vpc.cidr_block}"
316 | vpc_id = "${data.aws_subnet.instance.vpc_id}"
317 | instance_type = "cache.m3.medium"
318 | }
319 |
320 | output "kms_key_id" {
321 | value = "${coalesce(var.kms_key_id, join("", aws_kms_key.key.*.arn))}"
322 | }
323 |
324 | output "url" {
325 | value = "https://${var.fqdn}"
326 | }
327 |
328 | output "dns_name" {
329 | value = "${module.instance.dns_name}"
330 | }
331 |
332 | output "zone_id" {
333 | value = "${module.instance.zone_id}"
334 | }
335 |
336 | output "iam_role" {
337 | value = "${module.instance.iam_role}"
338 | }
339 |
--------------------------------------------------------------------------------
/aws-standard/terraform.tfvars.example:
--------------------------------------------------------------------------------
1 | # Please refer to the README.md for additional documentation on each variable and additional optional variables that may be set.
2 |
3 | # Required Variables
4 | #
5 | # The AWS region to deploy into.
6 | # Example: us-east-1
7 | # region =
8 | #
9 | # ami_id =
10 | #
11 | # The name the cluster will be known as. This value needs to match the DNS setup for proper operations.
12 | # Example: tfe-eng01.mycompany.io
13 | # fqdn =
14 | #
15 | # An AWS certificate ARN. This is the certification that will be used by the ELB for the cluster.
16 | # Example: arn:aws:acm:us-west-2:241656615859:certificate/f32fa674-de62-4681-8035-21a4c81474c6
17 | # cert_id =
18 | #
19 | # Subnet id of the subnet that the cluster's instance will be placed into.
20 | # If this is a public subnet, the instance will be assigned a public IP.
21 | # This is not required as the primary cluster interface is an ELB registered with the hostname.
22 | # Example: subnet-0de26b6a
23 | # instance_subnet_id =
24 | #
25 | #
26 | # Subnet id of the subnet that the cluster's load balancer will be placed into.
27 | # If this is a public subnet, the load balancer will be accessible from the
28 | # public internet. This is not required - the ELB can be marked as private via
29 | # the `internal_elb` option below.
30 | # Example: subnet-0de26b6a
31 | # elb_subnet_id =
32 | #
33 | # Subnet ids that will be used to create the data services (RDS and ElastiCache) used by the cluster.
34 | # There must be 2 subnet ids given for proper redundancy.
35 | # Example: ["subnet-0ce26b6b", "subnet-d0f35099"]
36 | # data_subnet_ids =
37 | #
38 | # Password that will be used to access RDS. Example: databaseshavesecrets
39 | # db_password =
40 | #
41 | # Name of the S3 bucket to store artifacts used by the cluster into. This bucket is automatically created.
42 | # Example: tfe-mycompany-data
43 | # bucket_name =
44 |
45 | # Optional variables
46 | # Name of AWS ssh key pair that will be used. The pair must already exist, it
47 | # will not be created. If this variable is not set, no SSH access will be
48 | # available to the TFE instance.
49 | # key_name =
50 | #
51 | # This Terraform config will create and manage the bucket named in `bucket_name`.
52 | # Set this to false if you are reusing an existing bucket.
53 | # Default: true
54 | # manage_bucket =
55 | #
56 | # Specifies the ARN for a KMS key to use rather than having one created automatically.
57 | # Example: arn:aws:kms:us-west-2:98765432123:key/9c54321-fff3-4333-55d5-7f8a9aefb34c
58 | # kms_key_id =
59 | #
60 | # Username that will be used to access RDS.
61 | # Default: atlas
62 | # db_username =
63 | #
64 | # Disk size of the RDS instance to create.
65 | # Default: 80
66 | # db_size_gb =
67 | #
68 | # Instance type of the RDS instance to create.
69 | # Default: db.m4.large
70 | # db_instance_class =
71 | #
72 | # Configure if the RDS cluster should use multiple AZs to improve snapshot performance.
73 | # Default: true
74 | # db_multi_az =
75 | #
76 | # Previously made snapshot to restore when RDS is created.
77 | # This is for migration of data between clusters.
78 | # Default: "" (Create...)
79 | # db_snapshot_identifier =
80 | #
81 | # The id of a Route53 zone that a record for the cluster will be installed into.
82 | # Leave this blank if your DNS is managed elsewhere.
83 | # Example: ZVEF52R7NLTW6
84 | # Default: "" (DNS record will not be managed by this Terraform config)
85 | # zone_id =
86 | #
87 | # If specifying zone_id, this should be set to the name that is used for the record to be registered with the zone.
88 | # This value combined with the zone information will form the full DNS name for TFE.
89 | # Example: emp-test
90 | # Default: "" (DNS record will not be managed by this Terraform config)
91 | # hostname =
92 | #
93 | # Used mostly for govcloud installations.
94 | # Example: aws-us-gov
95 | # Default: "aws" (This is the partition name for all standard AWS Regions)
96 | # arn_partition =
97 | #
98 | # Indicate to AWS that the created ELB is internal only.
99 | # Default: false (Marks the ELB public, assuming it is launched into a public subnet.)
100 | # internal_elb =
101 | #
102 | # Shell code that should run on the first boot.
103 | # Default: ""
104 | # startup_script =
105 | #
106 | # No ElastiCache cluster will be created if this is set to true.
107 | # We recommend setting this to true for new installations, as it will be the default in a future release.
108 | # Default: false
109 | # local_redis =
110 | #
111 | # A url (http or https, with port) to proxy all external http/https request from the cluster to.
112 | # Default: ""
113 | # proxy_url =
114 | #
115 | # Hosts to exclude from proxying, in addition to the default set. (Only applies when proxy_url is set.)
116 | # Default: ""
117 | # no_proxy =
118 | #
119 | # Setting for server-side encryption of objects in S3; if provided, must be set to "aws:kms".
120 | # Default: ""
121 | # archivist_sse =
122 | #
123 | # KMS key ID (full ARN) for server-side encryption of objects stored in S3.
124 | # Default: ""
125 | # archivist_kms_key_id =
126 |
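127 | # As a filled-in illustration, a minimal terraform.tfvars might look like the
128 | # following (all values are placeholders drawn from the examples above):
129 | #
130 | # region             = "us-west-2"
131 | # ami_id             = "ami-37fdda4f"
132 | # fqdn               = "tfe-eng01.mycompany.io"
133 | # cert_id            = "arn:aws:acm:us-west-2:241656615859:certificate/f32fa674-de62-4681-8035-21a4c81474c6"
134 | # instance_subnet_id = "subnet-0de26b6a"
135 | # elb_subnet_id      = "subnet-0de26b6a"
136 | # data_subnet_ids    = ["subnet-0ce26b6b", "subnet-d0f35099"]
137 | # db_password        = "databaseshavesecrets"
138 | # bucket_name        = "tfe-mycompany-data"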
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # TFE Release Documentation
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | This directory contains supporting documentation for Terraform Enterprise (TFE)
14 | releases.
15 |
16 | * [`about-the-ami`](about-the-ami.md) - Details about the TFE Base AMI
17 | * [`ami-ids`](ami-ids.md) - A list of the TFE release AMI IDs for each region
18 | * [`aws-extra`](../aws-extra/README.md) - Some additional configuration information meant to support deployment of Private TFE in AWS.
19 | * [`aws-standard`](../aws-standard/README.md) - Instructions for deploying Private TFE in AWS
20 | * [`legacy`](legacy.md) - Information about upgrading from a Legacy TFE architecture
21 | * [`logs`](logs.md) - Information about working with TFE logs
22 | * [`managing-tool-versions`](managing-tool-versions.md) - Details about managing the versions and locations of Packer and Terraform used by TFE
23 | * [`migrating-from-tfe-saas`](migrating-from-tfe-saas.md) - Instructions on how to move config from TFE SaaS to a Private TFE installation
24 | * [`network-access`](network-access.md) - Information about the network access required by TFE
25 | * [`storing-tfe-state`](storing-tfe-state.md) - Recommendations on how to manage the Terraform State of the TFE install process
26 | * [`support`](support.md) - Getting help when things are not working as expected
27 | * [`tfe-architecture`](tfe-architecture.md) - Information on TFE's architecture
28 | * [`advanced-terraform`](advanced-terraform.md) - Information on more advanced ways to use the terraform modules
29 | * [`vault-rekey`](vault-rekey.md) - Information on rekeying the Vault instance used by TFE
30 |
--------------------------------------------------------------------------------
/docs/about-the-ami.md:
--------------------------------------------------------------------------------
1 | # About the TFE AMI
2 |
3 | This document contains information about the Terraform Enterprise AMI.
4 |
5 | -----
6 |
7 | ## Deprecation warning:
8 |
9 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
10 |
11 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
12 |
13 | -----
14 |
15 | ## Operating System
16 |
17 | The TFE AMI is based on the latest release of Ubuntu 16.04 with all security
18 | patches applied.
19 |
20 | ## Network Ports
21 |
22 | The TFE AMI requires that port :8080 be accessible. This is where all traffic
23 | from the ELB is routed. Many other internal TFE services listen on the host,
24 | but they do not require external traffic. The AWS security group for the
25 | instance as well as software firewall rules within the runtime enforce this.
26 |
27 | ## `ulimits`
28 |
29 | The necessary limits on open file descriptors are raised within
30 | `/etc/security/limits.d/nofile.conf` on the machine image.
31 |
32 | ## Critical Services
33 |
34 | The TFE AMI contains dozens of services that are required for proper operation
35 | of Terraform Enterprise. These services are all configured to launch on boot.
36 | Application-level services are managed via Nomad and system-level automation is
37 | managed via `systemd`.
38 |
39 | More information on the various application-level services can be found in
40 | [`tfe-architecture`](tfe-architecture.md)
41 |
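42 | ## Example Ingress Rule
43 |
44 | As an illustration of the Network Ports requirement above, the rule the
45 | instance's security group needs in order to admit ELB traffic looks roughly
46 | like this (a sketch; the security group IDs are placeholders, not the
47 | module's actual resource names):
48 |
49 | ```hcl
50 | resource "aws_security_group_rule" "elb_to_instance" {
51 |   type                     = "ingress"
52 |   protocol                 = "tcp"
53 |   from_port                = 8080
54 |   to_port                  = 8080
55 |   security_group_id        = "sg-00000001" # placeholder: instance security group
56 |   source_security_group_id = "sg-00000002" # placeholder: ELB security group
57 | }
58 | ```
59 |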
--------------------------------------------------------------------------------
/docs/advanced-terraform.md:
--------------------------------------------------------------------------------
1 | # Advanced Terraform
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | The `aws-standard` configuration can be used as a true Terraform module
14 | to enable some additional features for configuring the cluster.
15 |
16 |
17 | ### Additional IAM Role policies
18 |
19 | The module outputs the role name used by the instance, allowing you
20 | to attach additional policies to configure access:
21 |
22 | ```hcl
23 | provider "aws" {
24 | region = "us-west-2"
25 | }
26 |
27 | module "standard" {
28 | source = "../../terraform/aws-standard"
29 | # Variables that would be in terraform.tfvars go here
30 | }
31 |
32 | data "aws_iam_policy_document" "extra-s3-perms" {
33 | statement {
34 | sid = "AllowS3Access"
35 | effect = "Allow"
36 |
37 | resources = [
38 | "arn:aws:s3:::my-private-artifacts/*",
39 | "arn:aws:s3:::my-private-artifacts",
40 | ]
41 |
42 | actions = [
43 | "s3:*",
44 | ]
45 | }
46 | }
47 |
48 | resource "aws_iam_role_policy" "extra-s3-perms" {
49 | role = "${module.standard.iam_role}"
50 | policy = "${data.aws_iam_policy_document.extra-s3-perms.json}"
51 | }
52 |
53 | ```
54 |
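55 | ### DNS records in an externally managed zone
56 |
57 | The module also outputs `dns_name` and `zone_id` for the cluster's load
58 | balancer, so if you manage DNS in a Route53 zone outside of this
59 | configuration you can point a hostname at the cluster yourself. A sketch,
60 | where the zone ID and record name are placeholders:
61 |
62 | ```hcl
63 | resource "aws_route53_record" "tfe" {
64 |   zone_id = "ZVEF52R7NLTW6" # the zone you manage yourself
65 |   name    = "tfe-eng01.mycompany.io"
66 |   type    = "A"
67 |
68 |   alias {
69 |     name                   = "${module.standard.dns_name}"
70 |     zone_id                = "${module.standard.zone_id}"
71 |     evaluate_target_health = false
72 |   }
73 | }
74 | ```
75 |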
--------------------------------------------------------------------------------
/docs/ami-ids.md:
--------------------------------------------------------------------------------
1 | # Terraform Enterprise AMI IDs
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | Below are the AMI IDs for each Terraform Enterprise release.
14 |
15 | Once your AWS Account is granted launch permissions, these AMI IDs should be
16 | used as inputs to the Terraform configuration in this repository for launching
17 | Terraform Enterprise.
18 |
19 | See also the [CHANGELOG](../CHANGELOG.md).
20 |
21 | If you wish to use an AMI with encrypted EBS snapshots, you'll need to make
22 | a private copy. We have documented that procedure in [Encrypted AMI](encrypt-ami.md).
23 |
24 | ### NOTE
25 |
26 | The `v201707-1` and `v201707-2` releases have issues with being used to upgrade
27 | from previous releases. Best to skip them entirely.
28 |
29 | | TFE Release | Region | AMI ID |
30 | | ------------------------------------------ | ------------- | -------------- |
31 | | **[v201703-1](../CHANGELOG.md#v201703-1)** | us-west-2 | `ami-4844d128` |
32 | | | us-east-1 | `ami-0273cd14` |
33 | | **[v201703-2](../CHANGELOG.md#v201703-2)** | us-west-2 | `ami-a664f0c6` |
34 | | | us-east-1 | `ami-a5dc66b3` |
35 | | **[v201704-1](../CHANGELOG.md#v201704-1)** | us-west-2 | `ami-f81e8898` |
36 | | | us-east-1 | `ami-20ab2836` |
37 | | **[v201704-2](../CHANGELOG.md#v201704-2)** | us-west-2 | `ami-3be5785b` |
38 | | | us-east-1 | `ami-a0b222b6` |
39 | | **[v201704-3](../CHANGELOG.md#v201704-3)** | us-west-2 | `ami-8471ede4` |
40 | | | us-east-1 | `ami-33841b25` |
41 | | **[v201705-1](../CHANGELOG.md#v201705-1)** | us-west-2 | `ami-966105f6` |
42 | | | us-east-1 | `ami-38d2a22e` |
43 | | | us-gov-west-1 | `ami-c120a4a0` |
44 | | **[v201705-2](../CHANGELOG.md#v201705-2)** | us-west-2 | `ami-4d70102d` |
45 | | | us-east-1 | `ami-518ecf47` |
46 | | | us-gov-west-1 | `ami-48fb7c29` |
47 | | **[v201706-1](../CHANGELOG.md#v201706-1)** | us-west-2 | `ami-f00d6190` |
48 | | | us-east-1 | `ami-46530450` |
49 | | | us-gov-west-1 | `ami-5151d630` |
50 | | | eu-west-1 | `ami-14c3d372` |
51 | | **[v201706-2](../CHANGELOG.md#v201706-2)** | us-west-2 | `ami-c0353ab9` |
52 | | | us-east-1 | `ami-cef6a6d8` |
53 | | | us-gov-west-1 | `ami-176ceb76` |
54 | | | eu-west-1 | `ami-c4e0f3a2` |
55 | | **[v201706-3](../CHANGELOG.md#v201706-3)** | us-west-2 | `ami-4a010f33` |
56 | | | us-east-1 | `ami-27623d31` |
57 | | | us-gov-west-1 | `ami-13028572` |
58 | | | eu-west-1 | `ami-0869746e` |
59 | | **[v201706-4](../CHANGELOG.md#v201706-4)** | us-west-2 | `ami-0c5c4a75` |
60 | | | us-east-1 | `ami-dcbb8aca` |
61 | | | us-gov-west-1 | `ami-d0ab2db1` |
62 | | | eu-west-1 | `ami-b72ecbce` |
63 | | ~~**[v201707-1](../CHANGELOG.md#v201707-1)**~~ | us-west-2 | `ami-e8d7c991` |
64 | | | us-east-1 | `ami-91dc83ea` |
65 | | | us-gov-west-1 | `ami-37870656` |
66 | | | eu-west-1 | `ami-90d833e9` |
67 | | ~~**[v201707-2](../CHANGELOG.md#v201707-2)**~~ | us-west-2 | `ami-830218fa` |
68 | | | us-east-1 | `ami-dbda85a0` |
69 | | | us-gov-west-1 | `ami-e764e586` |
70 | | | eu-west-1 | `ami-53d3382a` |
71 | | **[v201708-1](../CHANGELOG.md#v201708-1)** | us-west-2 | `ami-0585657d` |
72 | | | us-east-1 | `ami-42311839` |
73 | | | us-gov-west-1 | `ami-04e16165` |
74 | | | eu-west-1 | `ami-d7f607ae` |
75 | | **[v201708-2](../CHANGELOG.md#v201708-2)** | us-west-2 | `ami-3cb95444` |
76 | | | us-east-1 | `ami-6d4f7c16` |
77 | | | us-gov-west-1 | `ami-ff9e1e9e` |
78 | | | eu-west-1 | `ami-8133c1f8` |
79 | | **[v201709-1](../CHANGELOG.md#v201709-1)** | us-west-2 | `ami-4b15e533` |
80 | | | us-east-1 | `ami-c7ff1bbd` |
81 | | | us-gov-west-1 | `ami-6950d308` |
82 | | | eu-west-1 | `ami-039f5c7a` |
83 | | **[v201709-2](../CHANGELOG.md#v201709-2)** | us-west-2 | `ami-7abe4602` |
84 | | | us-east-1 | `ami-69738113` |
85 | | | us-gov-west-1 | `ami-5efd7f3f` |
86 | | | eu-west-1 | `ami-425a913b` |
87 | | **[v201709-3](../CHANGELOG.md#v201709-3)** | us-west-2 | `ami-d63bc3ae` |
88 | | | us-east-1 | `ami-dece37a4` |
89 | | | us-gov-west-1 | `ami-ad9d1fcc` |
90 | | | eu-west-1 | `ami-ba2ffac3` |
91 | | **[v201711-1](../CHANGELOG.md#v201711-1)** | us-west-2 | `ami-36b8734e` |
92 | | | us-east-1 | `ami-4cd37b36` |
93 | | | us-gov-west-1 | `ami-0e018c6f` |
94 | | | eu-west-1 | `ami-1164c568` |
95 | | **[v201712-1](../CHANGELOG.md#v201712-1)** | us-west-1 | `ami-540a0f34` |
96 | | | us-west-2 | `ami-a53e9bdd` |
97 | | | us-gov-west-1 | `ami-6dbd320c` |
98 | | | us-east-1 | `ami-8e8fedf4` |
99 | | | eu-west-1 | `ami-a27dc5db` |
100 | | | eu-west-2 | `ami-fdeef099` |
101 | | **[v201712-2](../CHANGELOG.md#v201712-2)** | us-west-1 | `ami-9f0f08ff` |
102 | | | us-west-2 | `ami-0c319274` |
103 | | | us-gov-west-1 | `ami-e83db289` |
104 | | | us-east-1 | `ami-fbf48881` |
105 | | | eu-west-1 | `ami-20af2f59` |
106 | | | eu-west-2 | `ami-57243d33` |
107 | | **[v201801-1](../CHANGELOG.md#v201801-1)** | us-west-1 | `ami-94e7e4f4` |
108 | | | us-west-2 | `ami-bf68dec7` |
109 | | | us-gov-west-1 | `ami-93da53f2` |
110 | | | us-east-1 | `ami-5cf3d726` |
111 | | | eu-west-1 | `ami-0225b87b` |
112 | | | eu-west-2 | `ami-34687350` |
113 | | **[v201801-2](../CHANGELOG.md#v201801-2)** | us-west-1 | `ami-73e1e313` |
114 | | | us-west-2 | `ami-f70dbe8f` |
115 | | | us-gov-west-1 | `ami-e3aa2382` |
116 | | | us-east-1 | `ami-c9e9c7b3` |
117 | | | eu-west-1 | `ami-dd6bf3a4` |
118 | | | eu-west-2 | `ami-26fde642` |
119 | | ~~**[v201802-1](../CHANGELOG.md#v201802-1)**~~ | us-west-1 | `ami-eb6e608b` |
120 | | | us-west-2 | `ami-b78106cf` |
121 | | | us-gov-west-1 | `ami-26ad2547` |
122 | | | us-east-1 | `ami-b7e2eacd` |
123 | | | eu-west-1 | `ami-bce78fc5` |
124 | | | eu-west-2 | `ami-7d8d681a` |
125 | | | ap-southeast-1 | `ami-76a5e50a` |
126 | | **[v201802-2](../CHANGELOG.md#v201802-2)** | us-west-1 | `ami-0dd2db6d` |
127 | | | us-west-2 | `ami-4535b63d` |
128 | | | us-gov-west-1 | `ami-17169e76` |
129 | | | us-east-1 | `ami-eb100e91` |
130 | | | eu-west-1 | `ami-eaf48793` |
131 | | | eu-west-2 | `ami-4975902e` |
132 | | | ap-southeast-1 | `ami-d790ddab` |
133 | | **[v201802-3](../CHANGELOG.md#v201802-3)** | us-west-1 | `ami-835258e3` |
134 | | | us-west-2 | `ami-506be028` |
135 | | | us-gov-west-1 | `ami-3e67ec5f` |
136 | | | us-east-1 | `ami-c1d92ebc` |
137 | | | eu-west-1 | `ami-1ed59267` |
138 | | | eu-west-2 | `ami-4f49ad28` |
139 | | | ap-southeast-1 | `ami-08276d74` |
140 | | **[v201804-1](../CHANGELOG.md#v201804-1)** | us-west-1 | `ami-292b3b49` |
141 | | | us-west-2 | `ami-12e5826a` |
142 | | | us-gov-west-1 | `ami-0372e762` |
143 | | | us-east-1 | `ami-9b79d7e6` |
144 | | | eu-west-1 | `ami-5559002c` |
145 | | | eu-west-2 | `ami-5d78993a` |
146 | | | ap-southeast-1 | `ami-efb79293` |
147 | | **[v201804-2](../CHANGELOG.md#v201804-2)** | us-west-1 | `ami-292b3b49` |
148 | | | us-west-2 | `ami-12e5826a` |
149 | | | us-gov-west-1 | `ami-0372e762` |
150 | | | us-east-1 | `ami-9b79d7e6` |
151 | | | eu-west-1 | `ami-5559002c` |
152 | | | eu-west-2 | `ami-5d78993a` |
153 | | | ap-southeast-1 | `ami-efb79293` |
154 | | **[v201804-3](../CHANGELOG.md#v201804-3)** | us-west-1 | `ami-ebfdef8b` |
155 | | | us-west-2 | `ami-f3167a8b` |
156 | | | us-gov-west-1 | `ami-fa81159b` |
157 | | | us-east-1 | `ami-68df7217` |
158 | | | eu-west-1 | `ami-a4d9f8dd` |
159 | | | eu-west-2 | `ami-86be5de1` |
160 | | | ap-southeast-1 | `ami-bd4062c1` |
161 | | **[v201805-1](../CHANGELOG.md#v201805-1)** | us-west-1 | `ami-c9edf1a9` |
162 | | | us-west-2 | `ami-8c4337f4` |
163 | | | us-gov-west-1 | `ami-309b0c51` |
164 | | | us-east-1 | `ami-357cc74a` |
165 | | | eu-west-1 | `ami-5edbf327` |
166 | | | eu-west-2 | `ami-a9f311ce` |
167 | | | ap-southeast-1 | `ami-c4a78fb8` |
168 | | **[v201806-1](../CHANGELOG.md#v201806-1)** | us-west-1 | `ami-4f26c32c` |
169 | | | us-west-2 | `ami-61afe919` |
170 | | | us-gov-west-1 | `ami-7571e014` |
171 | | | us-east-1 | `ami-50f7882f` |
172 | | | eu-west-1 | `ami-bb6969c2` |
173 | | | eu-west-2 | `ami-7839d61f` |
174 | | | ap-southeast-1 | `ami-03251f7f` |
175 | | **[v201806-2](../CHANGELOG.md#v201806-2)** | us-west-1 | `ami-43bf5920` |
176 | | | us-west-2 | `ami-99ce80e1` |
177 | | | us-gov-west-1 | `ami-ff79e99e` |
178 | | | us-east-1 | `ami-2fc69050` |
179 | | | eu-west-1 | `ami-98484772` |
180 | | | eu-west-2 | `ami-abbc55cc` |
181 | | | ap-southeast-1 | `ami-b9eeedc5` |
182 | | **[v201807-1](../CHANGELOG.md#v201807-1)** | us-west-1 | `ami-90ff1df3` |
183 | | | us-west-2 | `ami-c46439bc` |
184 | | | us-gov-west-1 | `ami-d1e97bb0` |
185 | | | us-east-1 | `ami-85f9c7fa` |
186 | | | eu-west-1 | `ami-21b1adcb` |
187 | | | eu-west-2 | `ami-c625cea1` |
188 | | | ap-southeast-1 | `ami-dd780237` |
189 | | **[v201807-2](../CHANGELOG.md#v201807-2)** | us-west-1 | `ami-efb05d8c` |
190 | | | us-west-2 | `ami-ffb0ef87` |
191 | | | us-gov-west-1 | `ami-4744d626` |
192 | | | us-east-1 | `ami-fa333485` |
193 | | | eu-west-1 | `ami-abc9d041` |
194 | | | eu-west-2 | `ami-c0a248a7` |
195 | | | ap-southeast-1 | `ami-221256c8` |
196 | | **[v201808-1](../CHANGELOG.md#v201808-1)** | us-west-1 | `ami-410ce322` |
197 | | | us-west-2 | `ami-37fdda4f` |
198 | | | us-gov-west-1 | `ami-d68c10b7` |
199 | | | us-east-1 | `ami-58342c27` |
200 | | | eu-west-1 | `ami-03b0b941377d5e4f5` |
201 | | | eu-west-2 | `ami-05e816ee` |
202 | | | ap-southeast-1 | `ami-00dda0493c3aa6179` |
203 |
--------------------------------------------------------------------------------
/docs/assets/admin-tools-edit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashicorp/terraform-enterprise-modules/d198a95b9eef64482f3b5fab2ede5da27d149f8e/docs/assets/admin-tools-edit.png
--------------------------------------------------------------------------------
/docs/assets/admin-tools-index.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashicorp/terraform-enterprise-modules/d198a95b9eef64482f3b5fab2ede5da27d149f8e/docs/assets/admin-tools-index.png
--------------------------------------------------------------------------------
/docs/assets/aws-infra-architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashicorp/terraform-enterprise-modules/d198a95b9eef64482f3b5fab2ede5da27d149f8e/docs/assets/aws-infra-architecture.png
--------------------------------------------------------------------------------
/docs/assets/billing-checkboxes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashicorp/terraform-enterprise-modules/d198a95b9eef64482f3b5fab2ede5da27d149f8e/docs/assets/billing-checkboxes.png
--------------------------------------------------------------------------------
/docs/assets/bootstrap-status.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashicorp/terraform-enterprise-modules/d198a95b9eef64482f3b5fab2ede5da27d149f8e/docs/assets/bootstrap-status.png
--------------------------------------------------------------------------------
/docs/assets/create-organization.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashicorp/terraform-enterprise-modules/d198a95b9eef64482f3b5fab2ede5da27d149f8e/docs/assets/create-organization.png
--------------------------------------------------------------------------------
/docs/assets/encrypt-ebs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashicorp/terraform-enterprise-modules/d198a95b9eef64482f3b5fab2ede5da27d149f8e/docs/assets/encrypt-ebs.png
--------------------------------------------------------------------------------
/docs/assets/enterprise-disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashicorp/terraform-enterprise-modules/d198a95b9eef64482f3b5fab2ede5da27d149f8e/docs/assets/enterprise-disabled.png
--------------------------------------------------------------------------------
/docs/assets/enterprise-enabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashicorp/terraform-enterprise-modules/d198a95b9eef64482f3b5fab2ede5da27d149f8e/docs/assets/enterprise-enabled.png
--------------------------------------------------------------------------------
/docs/assets/select-ami.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashicorp/terraform-enterprise-modules/d198a95b9eef64482f3b5fab2ede5da27d149f8e/docs/assets/select-ami.png
--------------------------------------------------------------------------------
/docs/assets/terraform-enterprise-banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashicorp/terraform-enterprise-modules/d198a95b9eef64482f3b5fab2ede5da27d149f8e/docs/assets/terraform-enterprise-banner.png
--------------------------------------------------------------------------------
/docs/assets/tfe-data-flow-arch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hashicorp/terraform-enterprise-modules/d198a95b9eef64482f3b5fab2ede5da27d149f8e/docs/assets/tfe-data-flow-arch.png
--------------------------------------------------------------------------------
/docs/configuring.md:
--------------------------------------------------------------------------------
1 | # Configuring Private Terraform Enterprise
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | After you have completed the installation process you will need to create an
14 | administrator account, configure your Terraform Enterprise installation (TFE) so
15 | it can send emails and SMS, and connect TFE to your git hosting provider.
16 |
17 | When the system configuration has been completed you will proceed with team
18 | setup by creating your first organizations and users, and enabling the
19 | enterprise features for those accounts.
20 |
21 | **Note:** If you are performing an upgrade or restore for an existing
22 | installation you _do not_ need to follow these steps. If your upgraded or
23 | restored installation does not function without the steps below then it was not
24 | correctly restored from backup. Please contact HashiCorp for help.
25 |
26 | Here is a rough overview of what we will cover in these sections:
27 |
28 | 1. System Configuration
29 | 1. Create the administrator account
30 | 2. Configure SMTP for sending emails
31 | 3. Configure Twilio for sending SMS messages
32 | 4. Configure connectivity to git
33 | 2. Team Setup
34 | 1. Inviting Colleagues
35 | 2. Creating an Organization
36 | 3. Creating a Team
37 | 4. Enabling Enterprise Features
38 | 5. Accessing Terraform and Packer Features
39 | 6. Creating Additional Administrators
40 |
41 | ## System Configuration
42 |
43 | Navigate to <https://tfe.example.com/admin/bootstrap> in your browser. You
44 | will be presented with a status page that looks like this:
45 |
46 | 
47 |
48 | Along the left side you will notice a list of items including `Create
49 | Administrator` and several `Configure ...`. These will help you navigate
50 | between the subsequent steps or skip ahead. If at any point you get lost you
51 | can return to <https://tfe.example.com/admin/bootstrap> to pick up where you
52 | left off.
53 |
54 | **Note:** Please substitute the hostname for your TFE installation in place of
55 | `tfe.example.com`.
56 |
57 | ### Creating an Administrator
58 |
59 | First, click on `Create Administrator` and create an administrator account. You
60 | will be able to create additional administrators later.
61 |
62 | After clicking `Save and Continue` you will be prompted to login with the
63 | account credentials you just created. Do so and you will be returned to the
64 | `Configure SMTP` step.
65 |
66 | ### Configure SMTP
67 |
68 | TFE sends emails occasionally for features like password reset and to alert you
69 | when a new administrator is added. Fill out the details for your SMTP server.
70 | You may use an external provider such as Amazon SES or Mailgun.
71 |
72 | When you have added your configuration press `Save and Continue`, or skip to the
73 | next step.
74 |
75 | ### Configure Twilio
76 |
77 | TFE uses Twilio to send SMS if a user selects SMS as their multi-factor
78 | authentication option. If Twilio is not configured they must use the Google
79 | Authenticator app on their smartphone instead.
80 |
81 | When you have added your configuration press `Save and Continue`, or skip to the
82 | next step.
83 |
84 | ### Configure Git(Hub)
85 |
86 | To use most TFE features you will need to configure TFE to ingest data from git.
87 | This step is titled `Configure GitHub`, and the example steps assume GitHub, but
88 | you can select other git providers such as GitLab or Bitbucket from the dropdown.
89 |
90 | 1. Fill in HTTP URL and API URL in TFE for your git provider
91 |
92 | > Note to users of github.com - due to a bug in GitHub's OAuth
93 | authentication, the HTTP URL must be set to https://github.com. This differs
94 | from the example text, which includes a www.
95 |
96 | 2. Create a new OAuth application in GitHub (for example, at
97 |    <https://github.com/settings/applications/new>)
98 |
99 | 3. Fill out the Homepage URL in GitHub to match your TFE URL
100 |
101 | 4. Leave the Authorization callback URL on GitHub **blank** and click `Create
102 | Application`
103 |
104 | 5. Copy the Client ID and Client Secret from GitHub into the corresponding TFE
105 | fields.
106 |
107 | 6. Click `Save and Continue`.
108 |
109 | 7. Copy the new callback URL from TFE into GitHub's `Authorization
110 |    callback URL` field (the one left blank earlier) and click `Update
111 |    application` in GitHub.
112 |
113 | 8. Click `Connect` and follow the GitHub OAuth flow.
114 |
115 | 9. When you see "Successfully linked to GitHub" click `Logged in with GitHub` to
116 | proceed to the next step.
117 |
118 | If you need to change a setting, click `Reconfigure` at the bottom of the page.
119 | If you have already completed this step or navigated away from the page, you can
120 | edit the OAuth configuration by way of the `here` link in the yellow bar at the
121 | top of the `Configure GitHub` page.
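
If the OAuth connection fails, the cause is often network connectivity rather than configuration. A quick check you can run from the TFE instance, assuming `curl` is available and github.com is your provider (for GitHub Enterprise, the API root is `https://<your-ghe-host>/api/v3/`):

```shell
# Expect an HTTP 200-family response from the provider's API root.
curl -sI https://api.github.com/ | head -n 1
```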
122 |
123 | ### Configure Acceptance Tests, Verify, and Complete
124 |
125 | The `Configure Acceptance Tests`, `Configure Acceptance Test GitHub`, `Verify`,
126 | and `Complete` steps are no longer used and will be removed in a future version
127 | of TFE.
128 |
129 | After configuring your git provider, you are finished with the System
130 | Configuration section and should move on to Team Setup.
131 |
132 | ## Team Setup
133 |
134 | TFE models Packer and Terraform projects as Builds and Environments,
135 | respectively.
136 |
137 | TFE models user access via Organizations, Teams, and Users. An organization
138 | contains one or more teams, and a team contains one or more users. Additionally,
139 | each user has a corresponding organization that can be used to share personal
140 | projects with other users.
141 |
142 | Enterprise features such as Terraform and Packer are enabled at the organization
143 | level. Visibility and access controls to Packer Builds and Terraform
144 | Environments can be made at the Organization, Team, or individual user levels.
145 |
146 | Below, we will cover inviting users, creating an organization, and adding
147 | permissions.
148 |
149 | ### Inviting Colleagues
150 |
151 | You must invite your colleagues to create TFE accounts by sharing the signup URL
152 | for your TFE installation. They must complete the signup process before you can
153 | add them to teams or grant them additional privileges. Send this URL to your
154 | colleagues and have them sign up at <https://tfe.example.com/account/new>.
155 |
156 | Remember to replace `tfe.example.com` with your TFE hostname.
157 |
158 | ### Creating an Organization
159 |
160 | Create an organization by navigating to the TFE homepage. You will need to login
161 | if you have not already done so. Then, click your username in the bottom left,
162 | and click `Account Settings`. On the following page, click `Create Organization`
163 | on the left menu.
164 |
165 | 
166 |
167 | You can jump to this page via <https://tfe.example.com/settings/organizations/new>.
168 |
169 | You will need to fill in a name and email for the organization.
170 |
171 | To return to the organization page at a later time, follow the same flow through
172 | the UI. Click your username, click `Account Settings`, and then click the
173 | organization in the left menu.
174 |
175 | You will notice that you also have an organization matching your username. This
176 | allows you to share projects owned by your personal account (rather than an
177 | organization) with other users. You cannot create teams under your personal
178 | organization.
179 |
180 | ### Creating a Team
181 |
182 | After creating an organization in the previous step, choose `Teams` from the
183 | left menu. Type in a name for the team and then click `Create Team`. Next, click
184 | `Manage` next to the new team to add members by typing their names into the
185 | `Username` box. If no one else has joined yet you can add yourself.
186 |
187 | Teams are primarily used to assign read or read/write access controls to various
188 | projects inside an organization. Each organization has an "Owners" team that has
189 | full access to projects in that organization.
190 |
191 | ### Enabling Enterprise Features
192 |
193 | By default organizations do not have enterprise features enabled. You will need
194 | to turn these on in order to use Terraform and Packer features in your TFE
195 | installation.
196 |
197 | Navigate to <https://tfe.example.com/admin/organizations> in your browser. Since
198 | this page is part of the admin interface, there is no way to navigate to it
199 | organically and you will need to remember this URL.
200 |
201 | From the Organizations admin page click the `edit` link next to your
202 | organization, then scroll to the bottom of the page. Check the `Terraform` and
203 | `Packer` checkboxes under `Enterprise billing active?`, and then click `Update
204 | Organization`.
205 |
206 | 
207 |
208 | ### Accessing Terraform and Packer Features
209 |
210 | Finally, you will be able to access Terraform and Packer features by selecting
211 | them from the dropdown in the top left (which shows Vagrant by default). You
212 | should see the following page:
213 |
214 | 
215 |
216 | You are now ready to use Terraform Enterprise!
217 |
218 | If you see this page instead, you must follow the directions under **Enabling
219 | Enterprise Features**, above:
220 |
221 | 
222 |
223 | If you have trouble getting past this page, please reach out to HashiCorp for
224 | help.
225 |
226 | ### Creating Additional Administrators
227 |
228 | Navigate to <https://tfe.example.com/admin/users> in your browser. Since this
229 | page is part of the admin interface, there is no way to navigate to it
230 | organically and you will need to remember this URL.
231 |
232 | Here you can search for and select a user to promote to administrator. Click
233 | `edit` and then click the `Grant Admin Privileges` button.
234 |
235 | Please note that administrators are superusers and are intended to assist with
236 | TFE maintenance, account rescue, and troubleshooting. Normal users should never
237 | need admin privileges and you should audit access to admin accounts, since they
238 | can bypass user security controls such as MFA, impersonate other users, and read
239 | secrets stored in any project.
240 |
241 | ## Known Issues
242 |
243 | The setup process currently includes some leftover steps and UI components.
244 | HashiCorp is working to streamline these procedures in a future release. Please
245 | direct any feedback in this area to HashiCorp support.
246 |
--------------------------------------------------------------------------------
/docs/debug.md:
--------------------------------------------------------------------------------
1 | # Debugging PTFE
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | There will be times when PTFE doesn't behave as you'd like. This document is
14 | designed to provide you, the human trying to run PTFE, with information about
15 | how to figure out what is going wrong.
16 |
17 | ## Problem: Access to HTTPS - DNS
18 |
19 | If you are trying to access the URL you configured for PTFE, check that the
20 | hostname resolves in DNS. If you did not use Route 53, DNS had to be configured
21 | separately from `terraform apply`, so it's possible it was never set up. The
22 | hostname should be a CNAME pointing to the `dns_name` value that was output (an ELB address).
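
You can check the DNS records from any workstation with `dig`. A small sketch, assuming your installation is reached at `tfe.example.com`:

```shell
# The hostname should be a CNAME for the ELB address that terraform
# output as dns_name.
dig +short tfe.example.com CNAME

# Confirm the name ultimately resolves to one or more IP addresses.
dig +short tfe.example.com
```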
23 |
24 | ## Problem: Access to HTTPS - Hanging
25 |
26 | If connections to the hostname hang and do not return any results, check in
27 | EC2 that the instance is booted and registered with the ELB. If the instance
28 | is up but failing health checks for more than a few minutes, you'll need to
29 | SSH to the instance to continue. Jump to [Accessing via SSH](#accessing-via-ssh).
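
You can also check the ELB's view of the instance from the AWS CLI before connecting over SSH. A hedged sketch, where `tfe-elb` is a placeholder for your load balancer's name:

```shell
# An instance stuck in OutOfService indicates failing health checks.
aws elb describe-instance-health --load-balancer-name tfe-elb
```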
30 |
31 | ## Accessing via SSH
32 | Connect as the `tfe-admin` user.
33 |
34 | On the instance, there are a few elements you'll want to check exist before
35 | continuing:
36 |
37 | ## `/etc/atlas/boot.env`
38 |
39 | This file is written to S3 by Terraform during the apply process and
40 | downloaded by the instance using a script that runs at
41 | boot. If this file is absent, that means that the necessary services to run
42 | the system did not start properly. You'll need to look at the output of
43 | `systemctl status cloud-final` and see if it is listed as `active (exited)`.
44 | If so, then there was an error accessing the S3 bucket to download boot.env.
45 |
46 | Run `sudo cat /var/lib/cloud/instance/user-data.txt` and check the bucket
47 | listed on the last line. If it's not the bucket you believe it should be,
48 | then PTFE was misconfigured and you'll need to rebuild the cluster.
49 |
50 | If it is the right bucket, then re-run the boot script and inspect its
51 | output: `sudo bash /var/lib/cloud/instance/user-data.txt`. If the
52 | output mentions permission or access errors, then the IAM role that the
53 | instance is using was not configured properly and does not have access to
54 | the bucket and/or object. Please review the IAM rules that you used in the
55 | terraform configuration.
56 |
57 | ## UI
58 |
59 | You can verify that the UI is running by executing:
60 | `curl -s localhost:80/admin/bootstrap/status.json`. If you get JSON output
61 | that says `"All Systems Operational"`, then the UI is up and running. If
62 | you still can't connect to the hostname for the cluster, then you now know
63 | that the issue is either between the instance and the ELB or between your
64 | browser and the ELB.
65 |
66 | If `curl` returns no output, then the UI job was unable to run. You need to
67 | query the job to find out whether it ran at all: `nomad status atlas`.
68 | If that reports that there was no job found, then the system was unable to
69 | bootstrap at all, and you likely did not have boot.env present when the system
70 | booted. We suggest you rebuild the machine and make sure boot.env is present
71 | at the booting of the instance.
72 |
73 | If the output reports that the status of the allocations is not `running`,
74 | then the system has suffered some unrecoverable damage and needs to be
75 | rebuilt.
76 |
77 | If the output from curl doesn't report that everything is ok, for instance it
78 | says that there was any kind of error, then it's possible that the database
79 | migrations did not run properly. See the `Rails Console` section for that.
80 |
81 | ## Rails Console
82 |
83 | There are generally two activities associated with using the Rails console.
84 |
85 | ### Running Migrations
86 |
87 | To run migrations, exec: `sudo docker exec -ti -u app $(sudo docker ps -q -f name=atlas-frontend) bash -ic 'cd /home/app/atlas && rake db:migrate'`
88 |
89 | You can also simply list all the migrations without running them, and see if
90 | any are pending: `sudo docker exec -ti -u app $(sudo docker ps -q -f name=atlas-frontend) bash -ic 'cd /home/app/atlas && bin/rake db:migrate:status'`
91 |
92 | ### Ruby Console
93 |
94 | To access the Ruby console to be able to type in code, exec: `sudo docker exec -ti -u app $(sudo docker ps -q -f name=atlas-frontend) bash -ic 'cd /home/app/atlas && bin/rails c'`
95 |
--------------------------------------------------------------------------------
/docs/disaster-recovery.md:
--------------------------------------------------------------------------------
1 | # Disaster Recovery (DR)
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | Private Terraform Enterprise (PTFE) is designed to withstand failures of the
14 | primary host and loss of a single availability zone or RDS replica in AWS, and
15 | to be capable of self-upgrade and automatic recovery within a single region.
16 | PTFE relies on features in AWS KMS, S3, RDS, and EC2 to accomplish this.
17 |
18 | For a combination of cost, security, and complexity reasons, HashiCorp does not
19 | configure cross-region backups in a normal installation. You may add this
20 | capability yourself.
21 |
22 | The rest of this document will explain the mechanisms that PTFE uses to perform
23 | automated backup and restore, and how you can reproduce this process across
24 | regions (with some manual intervention). Familiarity with AWS, Terraform, and
25 | Linux is assumed.
26 |
27 | ## Automated Upgrade and Recovery
28 |
29 | When you install PTFE, the default configs place a bundle of data into EC2 user
30 | data. At boot time, the instance reads this data to discover the S3 bucket from
31 | which it retrieves additional encrypted data, including its database password
32 | and any existing instance state. After retrieving this data, the instance
33 | proceeds with the upgrade/recovery process automatically.
34 |
35 | The upgrade/recovery process is idempotent and runs each time the instance
36 | boots, allowing seamless unattended operation under normal circumstances.
37 |
38 | ### Limitations
39 |
40 | Automated upgrade and recovery may cease to work if the instance is booted in a
41 | new region, if configuration changes have been made outside of HashiCorp's
42 | Terraform configuration or provided AMI, or if some other service (such as RDS,
43 | S3, or ElastiCache) is unavailable, such as in a region-wide failure scenario.
44 |
45 | ## State
46 |
47 | PTFE stores state in 6 places:
48 |
49 | - KMS
50 | - S3
51 | - RDS
52 | - EC2 API
53 | - PTFE EC2 instance
54 | - ElastiCache (ephemeral data)
55 |
56 | ### KMS
57 |
58 | PTFE uses AWS KMS to encrypt data at rest, including data in S3 and RDS. KMS
59 | keys cannot be exported or copied between regions. Instead, when migrating data
60 | between regions, data is decrypted using the KMS key in the source region,
61 | encrypted in transit using TLS, and then re-encrypted using a new KMS key in the
62 | target region.
63 |
64 | As such, when performing a cross-region backup you will need to have access to
65 | both keys. However, when performing a restore operation you will only need to
66 | have the KMS key in the target region.
67 |
68 | #### Action Items
69 |
70 | - Perform a cross-region backup using the source KMS key created by HashiCorp's
71 | Terraform configuration and a new target KMS key
72 | - Make sure the target KMS key is available for restore
73 |
74 | ### S3
75 |
76 | PTFE stores blob data (tarballs, logs, etc) in S3. S3 data must be copied in
77 | order for the application to continue functioning after a restore operation. It
78 | should be sufficient to copy the bucket verbatim using any of S3's cross-region
79 | bucket replication features.
80 |
81 | You will want to sync and replicate the entire contents of the bucket _except_
82 | for the `tfe-backup` folder, which includes encrypted backup files. For more
83 | details see the **Internal Data** section, below.
84 |
85 | #### Action Items
86 |
87 | - Configure [cross-region S3 replication](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
88 | - Perform a one-time sync of existing data (replication only applies to _new_
89 | objects after it has been enabled)
90 | - Remember to exclude objects with the `tfe-backup` prefix, which must be backed
91 | up separately
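
As a concrete illustration, the one-time sync might look like the following sketch; the bucket names, regions, and target KMS key are placeholders you must substitute:

```shell
# Copy existing objects to the replica bucket, skipping backup archives,
# and re-encrypt them with the KMS key in the target region.
aws s3 sync s3://tfe-data-source s3://tfe-data-target \
  --source-region us-east-1 --region us-west-2 \
  --exclude "tfe-backup/*" \
  --sse aws:kms --sse-kms-key-id "$TARGET_KMS_KEY_ID"
```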
92 |
93 | ### RDS
94 |
95 | PTFE stores data (builds, ids, etc.) in RDS. RDS data must be copied in order
96 | for the application to continue functioning after a restore operation.
97 |
98 | You may either set up a cross-region streaming backup (read replica) or copy
99 | individual snapshots. We recommend setting up a [cross-region read replica](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html#USER_ReadRepl.XRgn),
100 | which will proactively provide you with a copy of your database in the event of
101 | a total region outage.
102 |
103 | Note that when restoring PTFE using a read replica, you will need to promote the
104 | read replica into primary as well as convert it into a cluster. This will take
105 | some time depending on the size of your database. You may opt to create a read
106 | replica cluster to reduce downtime, though this will incur additional costs.
107 |
108 | Additionally, after you promote your new primary but before you spin up a PTFE
109 | VM, take a snapshot of your cross-region replica. This ensures you can roll your
110 | database back to a consistent state if there is a problem with the restore
111 | process.
112 |
113 | #### Action Items
114 |
115 | - Configure a cross-region encrypted read replica of your PTFE RDS
116 | instance
117 | - (During restore) take a snapshot of your RDS instance so you can restore your
118 | data to a point in time if there is a problem during the restore operation
119 | - (During restore) promote the read-replica to act as a cluster primary
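
The corresponding AWS CLI calls might look like this sketch; the instance identifiers, regions, account ID, and key ARN are placeholders:

```shell
# Create an encrypted cross-region read replica of the PTFE database.
aws rds create-db-instance-read-replica \
  --region us-west-2 \
  --db-instance-identifier tfe-replica \
  --source-db-instance-identifier arn:aws:rds:us-east-1:123456789012:db:tfe-db \
  --kms-key-id "$TARGET_KMS_KEY_ARN"

# During a restore: snapshot the replica first, then promote it.
aws rds create-db-snapshot --region us-west-2 \
  --db-instance-identifier tfe-replica \
  --db-snapshot-identifier tfe-replica-pre-restore
aws rds promote-read-replica --region us-west-2 \
  --db-instance-identifier tfe-replica
```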
120 |
121 | ### EC2 and EC2 User Data
122 |
123 | You will have a different AMI ID in the target region. Make sure this AMI is the
124 | same release or later than the one you are restoring from. For example if you
125 | take the backup with 201705-1 make sure you use that version, or a later
126 | version like 201705-2 or 201706-1. Newer versions of the AMI sometimes include
127 | data migrations but we do not test these in reverse, so downgrading from e.g.
128 | 201705-2 to 201705-1 may cause data loss.
129 |
130 | The default Terraform configuration provided by HashiCorp uses EC2 user data to
131 | specify where the instance should find its bootstrapping data, which includes
132 | the database password and S3 bucket to search for backups.
133 |
134 | When you want to restore your instance to a new region, you will need to specify
135 | the new RDS instance data, password, KMS key, and S3 endpoint in Terraform so
136 | the instance will find it.
137 |
138 | #### Action Items
139 |
140 | - Make sure the PTFE AMI in the target region is at least the same version as
141 | the one you are restoring from
142 | - (During restore) make sure to reconfigure Terraform to reflect the new RDS
143 | read replica, S3 bucket, and KMS key
144 |
145 | ### Internal Data
146 |
147 | PTFE captures a rolling snapshot of its internal data every hour and during a
148 | clean OS shutdown (during typical maintenance, for example). This snapshot data
149 | is pushed into S3 and encrypted at rest using AWS KMS.
150 |
151 | You can invoke this process manually by running `/usr/local/bin/atlas-backup`
152 | inside the instance. The corresponding restore command is
153 | `/usr/local/bin/atlas-restore`. Both of these are bash scripts and you can
154 | inspect them to learn more about how they work.
155 |
156 | Because KMS keys are restricted to individual regions, you will need to decrypt
157 | and re-encrypt the snapshot file(s) in order to copy them to another region and
158 | have them usable by PTFE.
159 |
160 | For example, you may copy from S3 to an EC2 instance in the source region, `scp`
161 | the archive to an EC2 instance in the target region, and then copy to the S3
162 | bucket in the target region. Using this approach you can keep your KMS keys
163 | restricted using IAM roles, and run the backup process on a regular interval.
164 |
165 | # source region EC2 instance
166 | aws configure set s3.signature_version s3v4
167 | aws s3 cp $BACKUP_BUCKET/tfe-backup/$BACKUP_FILE ./$BACKUP_FILE
168 |
169 | scp ./$BACKUP_FILE target.host:/tmp/$BACKUP_FILE
170 |
171 | # target region EC2 instance
172 | aws configure set s3.signature_version s3v4
173 | aws s3 cp --sse=aws:kms --sse-kms-key-id=$KMS_KEY_ID $BACKUP_FILE $BACKUP_BUCKET/tfe-backup/$BACKUP_FILE
174 |
175 | Please note that the path inside the bucket _must_ use `tfe-backup`. For more
176 | details please refer to `/usr/local/bin/atlas-restore` inside the PTFE VM.
177 |
178 | #### Action Items
179 |
180 | - Make sure the latest `atlas-backup-[timestamp].tar.gz` file is copied into the
181 | S3 bucket you will use to restore your PTFE installation.
182 | - Make sure the `atlas-backup-[timestamp].tar.gz` file is decrypted with the KMS
183 |   key from the source region and re-encrypted with the KMS key in the target
184 | region.
185 |
186 | ## Testing the Restore
187 |
188 | Inside a single-region context you can verify the restore process is working
189 | simply by terminating your VM and allowing AWS autoscaling to replace it with
190 | a new one. Using HashiCorp's default configuration, the restore process will
191 | happen automatically.
192 |
193 | In a cross-region context you will need to take the extra steps outlined above,
194 | but if you do so the restore process should also happen automatically. Because
195 | of the way the startup process works, there is no "manual" restore process.
196 | As long as you have your S3 backup in place and correct user data configured for
197 | the instance, it will boot and automatically perform the restore operation.
198 |
199 | It is _possible_ to invoke the various commands manually to restore (for
200 | example) an unencrypted backup archive so in the worst case scenario, as long as
201 | your data is intact, we will be able to assist you with a restore operation.
202 | However, for simplicity and forward-compatibility with new versions of PTFE, we
203 | strongly recommend verifying that the automated restore process works in your
204 | second region before you encounter a disaster scenario.
205 |
206 | ### Validation Steps
207 |
208 | To validate the restore you should login to the Private Terraform Enterprise UI
209 | and inspect a secret from one of your environments, as well as viewing logs from
210 | a past build. This validates the following actions took place:
211 |
212 | 1. The VM is booted correctly
213 | 2. The database was restored
214 | 3. Vault has unsealed and is able to decrypt secret data stored in the database
215 | 4. PTFE is able to retrieve data (logs) from S3
216 |
217 | This is not an exhaustive verification of functionality but these criteria
218 | indicate a successful restore.
219 |
220 | If you have trouble validating the restore the following logs may be helpful:
221 |
222 | journalctl -u atlas-setup
223 |
224 | The primary indicator of a failed restore operation is a sealed vault, or a
225 | vault that has been initialized from scratch. This will prevent you from logging
226 | in with your admin credentials or from decrypting secrets inside the Terraform
227 | Enterprise UI.
228 |
229 | If you are unable to reach the instance (or it is stuck in a getting ready
230 | state) verify that all of the required Terraform resources are available. You
231 | can also inspect:
232 |
233 | journalctl -u nomad
234 | journalctl -u vault
235 | journalctl -u consul
236 |
237 | to see other types of startup issues.
238 |
239 | If you get stuck, please use the `hashicorp-support` command and reach out
240 | to HashiCorp support for assistance.
241 |
242 | ## After a Restore
243 |
244 | After a cross-region restore operation some ephemeral data from PTFE is lost,
245 | including the list of work that is currently in progress. This is mostly a
246 | cosmetic issue. Because Terraform operates in an idempotent, transactional way
247 | all essential data can be refreshed from your cloud provider's APIs.
248 |
249 | However, you may need to manually cancel jobs via the admin UI, and any work
250 | that is in flight at the time of restore (including Packer and Terraform runs)
251 | will need to be re-queued. Additionally, if Terraform was in the process of
252 | creating or destroying resources, these operations may happen twice. Make sure
253 | to check your cloud provider(s) for orphaned resources after performing a
254 | restore.
255 |
--------------------------------------------------------------------------------
/docs/encrypt-ami.md:
--------------------------------------------------------------------------------
1 | # Using an Encrypted AMI
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | If you wish to backup your instance using EBS snapshots with encryption,
14 | you'll need to copy the official AMI to your account and configure it with
15 | encryption.
16 |
17 | In the AWS Console, go to AMIs under the EC2 subsystem. Change the selector to
18 | "Private Images", and select the AMI you wish to make a copy of:
19 |
20 | 
21 |
22 | Then in the copy configuration, select Encryption:
23 |
24 | 
25 |
26 | And click *Copy AMI*. Once the copy operation is finished, you can use the
27 | newly created AMI's ID in your `terraform.tfvars` file to install Terraform
28 | Enterprise.
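
The same copy can be performed with the AWS CLI rather than the console. A hedged sketch, with the AMI ID, region, and KMS key alias as placeholders:

```shell
# Copy the official AMI into your account with EBS encryption enabled;
# omitting --kms-key-id encrypts with the default aws/ebs key.
aws ec2 copy-image \
  --source-image-id ami-0123456789abcdef0 \
  --source-region us-east-1 --region us-east-1 \
  --name "tfe-encrypted" \
  --encrypted --kms-key-id alias/tfe-ebs
```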
29 |
--------------------------------------------------------------------------------
/docs/legacy.md:
--------------------------------------------------------------------------------
1 | *NOTE:* This document only applies to customers who are running the Legacy TFE
2 | architecture (mid-late 2016). If you're unsure if that's you, it likely is not.
3 |
4 | -----
5 |
6 | ## Deprecation warning:
7 |
8 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
9 |
10 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
11 |
12 | -----
13 |
14 | ## Migrating from a Legacy Terraform Enterprise (TFE) Installation
15 |
16 | The legacy TFE platform (v2) shipped as a complex multi-VM deployment. The
17 | current generation (v3) ships as a single VM with supporting AWS services,
18 | including S3 and RDS. This vastly simplifies deployment considerations and
19 | reduces estimated operating costs by almost 90%.
20 |
21 | Additionally, v2 shipped with a proprietary orchestration tool that was critical
22 | for installation and maintenance. In v3, these functions are either performed
23 | by Terraform configuration, with full source provided to you, or by simple
24 | scripts self-contained inside the VM.
25 |
26 | We hope that the improvements in v3 reduce the time you spend managing your TFE
27 | installation and increase your confidence in delivering TFE to the rest of your
28 | organization.
29 |
30 | ### Data
31 |
32 | To upgrade your version of TFE to v3, you will need to access your existing
33 | v2 installation to create backups and copy some configuration. Afterwards, v3
34 | will be ready to resume work where you left off.
35 |
36 | - Both v2 and v3 store most of their data in RDS. v3 is designed to use RDS
37 | encryption by default. When migrating from v2 to v3 we _strongly_ recommend
38 | enabling RDS encryption. To do this, you will need to create an encrypted RDS
39 | snapshot and specify the encrypted snapshot and KMS key in the v3 terraform
40 | config.
41 |
42 | - Vault and Consul data will be migrated using a supplemental backup script
43 | provided by HashiCorp. This must be run from a bastion instance created by the
44 | legacy installer tool. The script will create a v3-compatible backup from your
45 | v2 consul and vault clusters.
46 |
47 | - The primary S3 bucket in the v2 installation is the one with the
48 | `-storagelocker` suffix. This S3 bucket may be left as-is, and you will have
49 | the option to configure your existing S3 bucket in the v3 Terraform
50 | configuration by adding `manage_bucket = false` to your `tfvars` file. Please
51 | note that v3 expects the bucket to be versioned to facilitate cross-region
52 | disaster recovery. If your bucket is not versioned please use this opportunity
53 | to enable versioning. The remainder of the buckets in the v2 installation are
54 | administrative and can be cleaned up after a successful upgrade.
55 |
56 | ### Installation Steps
57 |
58 | #### Step 1. Configuration
59 |
60 | Begin by configuring the `tfvars` file found in the `aws-standard`
61 | directory. Please reference [the README.md](../aws-standard/README.md) for
62 | full descriptions of all the variables. You will provide your existing S3 bucket
63 | as `bucket_name`, and `tfe-legacy-upgrade` as the `db_snapshot_identifier`.
64 |
65 | * Set existing `-storagelocker` S3 bucket as `bucket_name`
66 | * Set `manage_bucket` to `false` to indicate that this Terraform config will
67 | not create and manage a new bucket
68 | * Set `tfe-legacy-upgrade` as the `db_snapshot_identifier` - this will be the
69 | name of the encrypted snapshot copy below.
70 |
71 | Specify `fqdn` as the DNS name used to access your installation, for example
72 | `tfe.mycompany.io`. This value is used internally for redirects and CSRF.
73 | Externally, you will need to direct your DNS server to the CNAME output from
74 | `terraform apply`. **NOTE:** This value does not have to be the same as the
75 | value used for the v2 TFE installation. You're free to pick whatever you'd
76 | like, but the `fqdn` must match your v3 installation's external DNS or you will
77 | be unable to login.
78 |
79 | #### Step 2. KMS key creation
80 |
81 | TFE v3 uses KMS to encrypt sensitive data stored in S3 and RDS. Because you will
82 | be migrating data, you will need to create a KMS key in advance. We will use
83 | this key to create an encrypted RDS snapshot and to encrypt the v2-to-v3 backup
84 | in S3.
85 |
86 | First, plan the change: `$ terraform plan --target=aws_kms_key.key`
87 |
88 | This should create only the KMS key, nothing else. Once this is approved:
89 |
90 | `$ terraform apply --target=aws_kms_key.key`
91 |
92 | This will output a KMS key ARN as `aws_kms_key.key`. You will use this value in
93 | subsequent steps.
94 |
95 | **Note:** The Terraform configuration provided by HashiCorp assumes that the v3
96 | install is taking place in the same account and region where your v2
97 | installation is located. If you are migrating to a new account or region you
98 | will need to make minor adjustments, such as creating a new KMS key in the
99 | target region or sharing a KMS key between accounts. Please refer to the AWS
100 | documentation for details.
101 |
102 | #### Step 3. Shutting down Legacy Application
103 |
104 | Terminate the Atlas job inside your v2 installation. This will allow you
105 | to perform a clean data migration without losing in-flight work.
106 |
107 | To do this, bring up a bastion in the legacy installation and run the following
108 | commands:
109 |
110 | ```
111 | # Back up Atlas job contents
112 | nomad inspect atlas > atlas.job.json
113 | # Stop atlas
114 | nomad stop atlas
115 | ```
116 |
117 | Leave the bastion host running as you will also use it to migrate Consul and
118 | Vault data in a subsequent step.
119 |
120 | **Note:** Make sure the Atlas jobs have completely terminated before you
121 | proceed. This ensures you will produce a consistent snapshot of work in TFE.
122 |
123 | #### Step 4. RDS snapshot creation
124 |
125 | Create an RDS snapshot from your v2 database. You can create a snapshot via the
126 | AWS api or via the AWS console. We suggest naming the snapshot `tfe-legacy-1`.
127 |
128 | Once the snapshot is complete, perform a copy of the snapshot with encryption
129 | enabled. This procedure is documented here:
130 | [Amazon RDS Update - Share Encrypted Snapshots, Encrypt Existing Instances](https://aws.amazon.com/blogs/aws/amazon-rds-update-share-encrypted-snapshots-encrypt-existing-instances/).
131 |
132 | Be sure to select `Yes` to `Enable Encryption` and then select the KMS
133 | key we created in Step 2 as the `Master Key`. We suggest naming the snapshot
134 | `tfe-legacy-upgrade`, which is the value we indicated earlier for
135 | `db_snapshot_identifier` in your `tfvars` file.
136 |
137 | Once the snapshot has completed, move on to the next step.
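
If you prefer the AWS CLI to the console, the snapshot-and-encrypted-copy flow is roughly the following sketch; the source instance identifier and key ARN are placeholders, while the snapshot names match the suggestions above:

```shell
# Snapshot the v2 database.
aws rds create-db-snapshot \
  --db-instance-identifier tfe-v2-db \
  --db-snapshot-identifier tfe-legacy-1

# Copy the snapshot with encryption enabled, using the KMS key from Step 2.
aws rds copy-db-snapshot \
  --source-db-snapshot-identifier tfe-legacy-1 \
  --target-db-snapshot-identifier tfe-legacy-upgrade \
  --kms-key-id "$KMS_KEY_ARN"
```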
138 |
139 | #### Step 5. Consul/Vault data
140 |
141 | The v2 Consul and Vault clusters contain the encryption keys needed to access
142 | encrypted data stored in RDS (note that this application-level encryption is
143 | different from the system-wide RDS encryption we just talked about). To restore
144 | these keys into v3 we will create a special backup from the v2 data that is
145 | compatible with v3's automatic restore feature.
146 |
147 | The tools to perform this step can be found at
148 | [`hashicorp/tfe-v2-to-v3`](https://github.com/hashicorp/tfe-v2-to-v3). You can
149 | request access from HashiCorp if you are not able to see this repository.
150 | Running `make` in that repository will produce `tfe-v2-to-v3.tar.gz`.
151 |
152 | Upload `tfe-v2-to-v3.tar.gz` to the v2 bastion host you created earlier (or
153 | create a new bastion host now).
154 |
155 | Extract the `tfe-v2-to-v3.tar.gz` into a folder like `~/backup` or
156 | `/tmp/backup`. It should contain the following files:
157 |
158 | consul (binary)
159 | consul.json
160 | legacy-backup.sh
161 | vault.hcl
162 |
163 | Make sure the `consul` binary is marked executable, and then invoke
164 | `bash legacy-backup.sh`. This will connect to consul inside your v2 cluster and
165 | produce a file called `atlas-backup-[timestamp].tar.gz`, which v3 can restore.
166 | For additional details please refer to the script itself.
167 |
168 | After you have the atlas-backup file, you will need to put it in S3 and encrypt
169 | it with KMS. It should be placed under the `tfe-backup` folder in your TFE S3
170 | bucket, like `s3://my-tfe-data/tfe-backup/atlas-backup-[timestamp].tar.gz`.
171 |
172 | You must encrypt the archive when copying it to S3. Either ensure that the
173 | bastion host's IAM role can use the KMS key, or pass in a set of credentials
174 | that can access the required resources in KMS and S3. For example:
175 |
176 | aws configure set s3.signature_version s3v4
177 | aws s3 cp --sse=aws:kms --sse-kms-key-id=$KMS_KEY_ID $BACKUP_FILE $BACKUP_BUCKET/tfe-backup/$BACKUP_FILE
178 |
179 | #### Step 6. Full Terraform Run
180 |
181 | Now that all data has been migrated, it's time to run a Terraform plan for the
182 | remainder of the v3 installation:
183 |
184 | `$ terraform plan`
185 |
186 | Take a moment and look over the resources that will be created. It should be
187 | considerably smaller than the existing legacy install. Once you're satisfied:
188 |
189 | `$ terraform apply`
190 |
191 | The apply will take around 30 minutes to create and restore the RDS database,
192 | though the other resources should finish sooner. If there are any problems at
193 | this stage, simply run `terraform apply` again.
194 |
195 | When Terraform apply completes it will output a `dns_name` field. Use this to
196 | configure DNS for your installation by configuring a CNAME record to point to
197 | `dns_name`. **NOTE:** The CNAME you configure must match the one specified
198 | earlier in `tfvars`!
199 |
200 | Once the CNAME has propagated, you can view your v3 installation in your browser.
201 |
202 | #### Step 7. Verification
203 |
204 | Open your selected DNS name in your browser using `https://` (HTTPS is
205 | required). You will see a page indicating that Terraform Enterprise is booting.
206 | This page automatically refreshes and will redirect you to the login screen once
207 | the v3 database migrations and boot process have completed. Depending on the
208 | size of your database this may take some time, possibly up to 1 hour. If you are
209 | unsure whether your installation is making progress at this point, please reach
210 | out to HashiCorp for help.
211 |
212 | Once your instance boots you are ready to login with your existing admin or user
213 | credentials. To verify that the installation is complete, browse to a previous
214 | Terraform run and inspect the state or secrets (environment variables). If
215 | everything loads correctly, then data and encryption keys have been successfully
216 | restored and your upgrade was successful. If you cannot log in, cannot find previous
217 | Terraform runs, or secrets are missing, please reach out to HashiCorp for help.
218 |
219 | ### Known Issues
220 |
221 | #### GitHub Web Hooks and Callbacks
222 |
223 | If you opt to change the hostname during your migration, existing GitHub web
224 | hooks and callbacks will still be pointing to the prior installation. You will
225 | need to update these in two places:
226 |
227 | - You will need to update the GitHub OAuth Application for Terraform Enterprise
228 | so its Authorization callback URL references the new hostname. This enables
229 | users to link their Terraform Environments and Packer Builds to GitHub.
230 |
231 | - Each Terraform Environment and Packer Build Configuration that is already
232 | linked to a GitHub Repo will need to be relinked with GitHub by clicking
233 | "Update VCS Settings" on the "Integrations" page. This will update GitHub
234 | webhooks to point to the new hostname.
235 |
--------------------------------------------------------------------------------
/docs/logs.md:
--------------------------------------------------------------------------------
1 | # Terraform Enterprise Logs
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | This document contains information about interacting with Terraform Enterprise logs.
14 |
15 | ## Application-level Logs
16 |
17 | Terraform Enterprise's application-level services all log to CloudWatch logs, with one stream per service. The stream names take the format:
18 |
19 | ```
20 | {hostname}-{servicename}
21 | ```
22 |
23 | Where `hostname` is the fqdn you provided when setting up TFE, and `servicename` is the name of the service whose logs can be found in the stream. More information about each service can be found in [`tfe-architecture`](tfe-architecture.md).
24 |
25 | For example, if your TFE installation is available at `tfe.mycompany.io`, you'll find CloudWatch Log streams like the following:
26 |
27 | ```
28 | tfe.mycompany.io-atlas-frontend
29 | tfe.mycompany.io-atlas-worker
30 | tfe.mycompany.io-binstore
31 | tfe.mycompany.io-logstream
32 | tfe.mycompany.io-packer-build-manager
33 | tfe.mycompany.io-packer-build-worker
34 | tfe.mycompany.io-slug-extract
35 | tfe.mycompany.io-slug-ingress
36 | tfe.mycompany.io-slug-merge
37 | tfe.mycompany.io-storagelocker
38 | tfe.mycompany.io-terraform-build-manager
39 | tfe.mycompany.io-terraform-build-worker
40 | tfe.mycompany.io-terraform-state-parser
41 | ```
42 |
43 | CloudWatch logs can be searched, filtered, and read either from the AWS Web Console or (recommended) with the command-line [`awslogs`](https://github.com/jorgebastida/awslogs) tool.
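
For example, to tail the last hour of the Atlas frontend stream with `awslogs` (the log group name `tfe` is a placeholder here; run `awslogs groups` to find yours):

```shell
# List the available log groups, then fetch recent events from one stream.
awslogs groups
awslogs get tfe tfe.mycompany.io-atlas-frontend --start='1h ago'
```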
44 |
45 | ## System-level Logs
46 |
47 | All other system-level logs can be found in the standard locations for an Ubuntu 16.04 system.
48 |
--------------------------------------------------------------------------------
/docs/managing-tool-versions.md:
--------------------------------------------------------------------------------
1 | # Managing Tool Versions
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | Terraform Enterprise has a control panel that allows admins to manage the versions of Terraform and Packer and their download locations.
14 |
15 | This control panel is available at the `/admin/tools` path or as a link in the sidebar from the general administrative interface at `/admin/manager`.
16 |
17 | 
18 |
19 | Here you'll find a list of Packer and Terraform versions. If you click the `Edit` button on an individual tool version, you'll see that each version consists of:
20 |
21 | * **Version Number** - will show up in dropdown lists for users to select
22 | * **Download URL** - must point to a `linux-amd64` build of the tool
23 | * **SHA256 Checksum Value** - must match the SHA256 checksum value of the download
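
For example, you can compute the checksum of a downloaded build before pasting it into this form (the version here is illustrative):

```shell
# Download a linux-amd64 build and print its SHA256 checksum.
curl -sO https://releases.hashicorp.com/terraform/0.11.7/terraform_0.11.7_linux_amd64.zip
sha256sum terraform_0.11.7_linux_amd64.zip
```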
24 |
25 | 
26 |
--------------------------------------------------------------------------------
/docs/migrating-from-tfe-saas.md:
--------------------------------------------------------------------------------
1 | # Migrating from Terraform Enterprise SaaS
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | If you are already a user of the Terraform Enterprise SaaS (hereafter "the SaaS"), you may have Environments that you want to migrate over to your new Private Terraform Enterprise (PTFE) installation.
14 |
15 | This document contains instructions on migrating an Environment from the SaaS to PTFE.
16 |
17 | These instructions assume Terraform 0.9 or greater. See [docs on legacy remote state](https://www.terraform.io/docs/backends/legacy-0-8.html) for information on upgrading usage of remote state in prior versions of Terraform.
18 |
19 | ### Prerequisites
20 |
21 | Have an Atlas Token handy for both PTFE and the SaaS. The following examples will assume you have these stored in `PTFE_ATLAS_TOKEN` and `SAAS_ATLAS_TOKEN`, respectively.
22 |
23 | ### Step 1: Connect local config to SaaS
24 |
25 | Set up a local copy of your Terraform config that's wired in to the SaaS via a `backend` block.
26 |
27 | Assuming your environment is located at `my-organization/my-environment` in the SaaS - make your way to a local copy of the Terraform config, and ensure you have a backend configuration like this:
28 |
29 | ```tf
30 | terraform {
31 | backend "atlas" {
32 | name = "my-organization/my-environment"
33 | }
34 | }
35 | ```
36 |
37 | Place your SaaS token in scope and initialize:
38 |
39 | ```
40 | export ATLAS_TOKEN=$SAAS_ATLAS_TOKEN
41 | terraform init
42 | ```
43 |
44 | ### Step 2: Copy state locally
45 |
46 | Now we'll want to get the latest copy of the state locally so we can push it to PTFE - you can do this by commenting out the `backend` section of your config:
47 |
48 | ```tf
49 | # Temporarily commented out to copy state locally
50 | # terraform {
51 | # backend "atlas" {
52 | # name = "my-organization/my-environment"
53 | # }
54 | # }
55 | ```
56 |
57 | Now, rerunning initialization:
58 |
59 | ```
60 | terraform init
61 | ```
62 |
63 | This will cause Terraform to detect the change in backend and ask you if you want to copy the state.
64 |
65 | Type `yes` to allow the state to be copied locally. Your state should now be present on disk as `terraform.tfstate`, ready to be uploaded to the PTFE backend.
66 |
67 | ### Step 3: Update backend configuration for PTFE
68 |
69 | Change the backend config to point to your PTFE installation:
70 |
71 | ```tf
72 | terraform {
73 | backend "atlas" {
74 | address = "https://tfe.mycompany.example.com" # the address of your PTFE installation
75 | name = "my-organization/my-environment"
76 | }
77 | }
78 | ```
79 |
80 | Now, place your PTFE token in scope and re-initialize:
81 |
82 | ```
83 | export ATLAS_TOKEN=$PTFE_ATLAS_TOKEN
84 | terraform init
85 | ```
86 |
87 | You will again be asked if you want to copy the state file. Type `yes` and the state will be uploaded to your PTFE installation.
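
To double-check the migration, you can read the state back out of the PTFE backend; a minimal sketch:

```shell
# Fetch the state now stored in PTFE; the header shows its version and serial.
export ATLAS_TOKEN=$PTFE_ATLAS_TOKEN
terraform state pull | head -n 5
```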
88 |
--------------------------------------------------------------------------------
/docs/network-access.md:
--------------------------------------------------------------------------------
1 | # Network Access
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | This document details the ingress and egress network access required by Terraform Enterprise to function properly.
14 |
15 | ## Ingress Traffic
16 |
17 | Terraform Enterprise (TFE) requires certain ports to be accessible for it to function. The Terraform configuration that ships with TFE will by default create Security Groups (SGs) that make the appropriate ports available, but you can also specify custom SGs to be used instead.
18 |
19 | Here are the two SGs in the system relevant for user access and the ports they require to be open:
20 |
21 | * **Load Balancer SG**: Applied to the Elastic Load Balancer (ELB), controls incoming HTTP traffic from users
22 | * **Port 443** must be accessible to users for basic functionality, must also be accessible from the VPC itself, as certain internal services reach over the ELB to access cross-service APIs
23 |   * **Port 80** is recommended to be left open for convenience - the system is set up to force SSL by redirecting users who visit TFE over HTTP to the HTTPS equivalent URL. If this port is not available, users who mistakenly visit the site over HTTP will see hanging requests in their browser
24 | * **Instance SG**: Applied to the EC2 Instance running the application
25 | * **Port 8080** must be accessible to the ELB to serve traffic
26 | * **Port 22** must be accessible to operators to perform diagnostics and troubleshooting over SSH
27 |
28 | There are also two internal SGs that are not currently user configurable:
29 |
30 | * **Database SG**: Applied to the RDS instance - allows the application to talk to PostgreSQL
31 | * **Redis SG**: Applied to the ElastiCache instance - allows the application to talk to Redis
32 |
33 | ## Egress Traffic
34 |
35 | Terraform Enterprise (TFE) makes several categories of outbound requests, detailed in the sections below.
36 |
37 | ### Primary Data Stores
38 |
39 | **S3** is used for object storage, so access to the AWS S3 API and endpoints is required for basic functionality
40 |
41 | **RDS and ElastiCache** instances are provisioned for application data storage. These instances are within the same VPC as the application, and so communication with them does not constitute outbound traffic
42 |
43 | ### Version Control System Integrations
44 |
45 | TFE can be configured with any of a number of **[Version Control Systems (VCSs)](https://www.terraform.io/docs/enterprise/vcs/index.html)**, some supporting both SaaS and private-network installations.
46 |
47 | In order to perform ingress of Terraform and Packer configuration from a configured VCS, TFE will need to be able to communicate with that provider's API, and webhooks from that provider will need to be able to reach TFE.
48 |
49 | For example, an integration with GitHub will require TFE to have access to https://github.com and for GitHub's webhooks to be able to route back to TFE. Similarly, an integration with GitHub Enterprise will require TFE to have access to the local GitHub instance.
50 |
51 | ### Packer and Terraform Execution
52 |
53 | As a part of their primary mode of operation, Packer and Terraform both make API calls out to infrastructure provider APIs. Since TFE runs Terraform and Packer on behalf of users, TFE will therefore need access to any Provider APIs that your colleagues want to manage with TFE.
54 |
55 | ### Packer and Terraform Release Downloading
56 |
57 | By default, TFE downloads the versions of Packer and Terraform that it executes from https://releases.hashicorp.com/ - though this behavior can be customized by specifying different download locations. See [`managing-tool-versions`](managing-tool-versions.md).
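
A quick way to confirm this egress path from the TFE instance, assuming `curl` is available:

```shell
# Expect an HTTP 200 response from the default release host.
curl -sI https://releases.hashicorp.com/ | head -n 1
```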
58 |
59 | ### Packer and Terraform Latest Version Notifications
60 |
61 | When displaying Terraform Runs and Packer Builds, TFE has JavaScript that reaches out to https://checkpoint.hashicorp.com to determine the latest released version of Packer and Terraform and notify users if there is a newer version available than the one they are running. This functionality is non-essential - new version notifications will not be displayed in the Web UI if checkpoint.hashicorp.com cannot be reached from a user's browser.
62 |
63 | ### Communication Functions
64 |
65 | * TFE uses the configured SMTP endpoint for sending emails
66 | * Twilio can optionally be set up for SMS-based 2FA (virtual TOTP support, which does not make external API calls, is available separately)
67 |
--------------------------------------------------------------------------------
/docs/services/archivist.md:
--------------------------------------------------------------------------------
1 | # Archivist
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | Archivist is the service responsible for uploads, downloads, and log streaming.
14 | It is a single service that will soon replace the three services described below.
15 |
16 | ## `logstream`
17 |
18 | The Logstream service is responsible for streaming logs for Terraform and Packer runs from the service executing them to the user via JavaScript running in a web browser.
19 |
20 | The JavaScript directly polls the Logstream Service and updates the view in the browser with new data available from the run or build.
21 |
22 | This is one of only a few services that are directly accessible to Terraform Enterprise users. Issues with logs not displaying are typically related to the TFE file pipeline or Packer or Terraform execution, as Logstream is just responsible for copying log data from storage to the browser.
23 |
24 | Logstream stores logs in both hot and cold storage, so initial loads can be slightly slower than cached loads.
25 |
26 | ### Impact of Failure
27 |
28 | When failing, builds and runs may not show logs to the user, making it hard to determine progress or, if a build is failing, where the failure is.
29 |
30 | ### Dependencies
31 |
32 | - [Atlas](atlas.md)
33 | - Storagelocker
34 | - Redis
35 |
36 | ## `binstore` & `storagelocker`
37 |
38 | Together, these services provide object storage for other services in the system. This includes configuration files, Terraform Plans, and logs from Packer Builds and Terraform Runs.
39 |
40 | ### Impact of Failure
41 |
42 | Problems with objects making their way through the build pipelines can point to problems with `binstore` or `storagelocker`.
43 |
44 | ### Dependencies
45 |
46 | - S3 for backend storage
47 | - Redis for coordination
48 |
--------------------------------------------------------------------------------
/docs/services/atlas.md:
--------------------------------------------------------------------------------
1 | # Atlas
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | Atlas is the main Rails application at the core of Terraform Enterprise. It runs in two different modes: frontends and workers.
14 |
15 | ## `atlas-frontend`
16 |
17 | The `atlas-frontend` service is responsible for running the main HTTP server used to access the Terraform Enterprise UI and API. This service represents the server responding to HTTP requests made to the central Atlas domain.
18 |
19 | The frontend service also displays and provides monitoring and debugging information, including status UIs for Terraform Enterprise used to diagnose and maintain the system.
20 |
21 | ### Impact of Failure
22 |
23 | Most services depend on the atlas-frontend service to make internal API requests, so this service being unavailable will cause widespread failure. The UI and API of Terraform Enterprise will be unavailable.
24 |
25 | Additionally, debugging and monitoring UIs will be inaccessible and cannot be used.
26 |
27 | ### Restart Behavior
28 |
29 | `atlas-frontend` restarts are not recommended, as they cause the UI to be unavailable while the service restarts. It can take several minutes for the UI to become accessible again.
30 |
31 | Because the Terraform Enterprise API server is unavailable during restarts, alerts, runs, or builds executing during a restart of `atlas-frontend` may fail.
32 |
33 | Restarts to this service should only be issued when directed by HashiCorp support.
34 |
35 | ### Dependencies
36 |
37 | - Postgres
38 | - Redis
39 |
40 | ## atlas-worker
41 |
42 | The `atlas-worker` service is responsible for executing background work for the main Atlas application. This service is critical due to its widespread use by the Atlas Frontend to execute work based on API or UI requests.
43 |
44 | ### Impact of Failure
45 |
46 | When the `atlas-worker` service is unhealthy, it is expected that many features will fail, including all build, run, and alert activity.
47 |
48 | Additionally, the worker queue can build up during unavailability and cause potential performance impact when started again.
49 |
50 | However, the monitoring and diagnosis UI will still be available.
51 |
52 | ### Restart Behavior
53 |
54 | `atlas-worker` restarts are only recommended during service failure to aid in debugging or maintenance. During restart the worker queue will not be processed. This means any alerts or new runs and builds could fail. However, new work will be safely queued during the restart period.
55 |
56 | ### Dependencies
57 |
58 | - Postgres
59 | - Redis
60 |
61 |
--------------------------------------------------------------------------------
/docs/services/build-pipeline.md:
--------------------------------------------------------------------------------
1 | # Build Pipeline
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | The Terraform Enterprise build pipeline consists of a Manager and Worker service for each of Packer and Terraform.
14 |
15 | ## `packer-build-manager`
16 |
17 | The `packer-build-manager` service is responsible for queueing and providing updates for builds running in Packer back to Atlas. Atlas Frontend then presents this information to the user. `packer-build-manager` does not execute Packer.
18 |
19 | Upon running a build in Atlas, `packer-build-manager` queues and begins monitoring a build's progress, which is executed by `packer-build-worker`. Additionally, `packer-build-manager` will upload logs from a build to [`logstream`](archivist.md) for display in the TFE UI.
20 |
21 | `packer-build-manager` also handles build cancelation requests made from the Atlas Frontend.
22 |
23 | ### Dependencies
24 |
25 | - [`atlas-frontend`](atlas.md)
26 | - [`logstream`](archivist.md)
27 | - `packer-build-worker`
28 | - RabbitMQ
29 |
30 | ### Impact of Failure
31 |
32 | When failing, builds can fail to queue, run, and cancel. Builds will appear to not be queued or no progress updates will be given for running builds. Essentially, all Packer related TFE activity will fail.
33 |
34 | ## `packer-build-worker`
35 |
36 | The `packer-build-worker` service is responsible for executing Packer and providing updates for builds running in Packer back to `packer-build-manager` (described above), which then sends that information to the Atlas Frontend service for display to the user.
37 |
38 | Upon running a build in TFE, `packer-build-worker` pulls the build from the queue and executes it with Packer in an isolated network environment. Each build in a Packer template is split and executed independently. This service will send a signal to interrupt Packer if a user requests a cancellation.
39 |
40 | Because of its network isolation, the `packer-build-worker` service will not show up in the admin monitoring UI.
41 |
42 | ### Impact of Failure
43 |
44 | When this service is failing, builds can fail to execute. Essentially, all Packer-related TFE activity will fail.
45 |
46 | ### Dependencies
47 |
48 | - `packer-build-manager`
49 | - RabbitMQ
50 |
51 | ## `terraform-build-manager`
52 |
53 | The `terraform-build-manager` service is responsible for queueing and providing updates for runs (plans, applies) running in Terraform back to Atlas. Atlas then presents this information to the user. `terraform-build-manager` does not execute Terraform.
54 |
55 | Upon running a plan or apply in Atlas, `terraform-build-manager` queues the run and begins monitoring its progress as it is executed by `terraform-build-worker`.
56 | Additionally, `terraform-build-manager` uploads logs from a run to [`logstream`](archivist.md) for display in the Atlas UI, and it also handles run cancellation.
58 |
59 | ### Impact of Failure
60 |
61 | When this service is failing, plans and applies can fail to queue, run, or cancel. Runs will
62 | appear not to be queued, or no progress updates will be given for running
63 | plans or applies. Essentially, all Terraform-related Atlas activity will fail.
64 |
65 | ### Dependencies
66 |
67 | - [`atlas-frontend`](atlas.md)
68 | - [`logstream`](archivist.md)
69 | - `terraform-build-worker`
70 | - RabbitMQ
71 |
72 | ## `terraform-build-worker`
73 |
74 | The `terraform-build-worker` service is responsible for executing Terraform and providing updates for runs (plans, applies) running in Terraform back to the `terraform-build-manager`, which then sends that information to the Atlas Frontend service for display to the user.
75 |
76 | Upon queueing a plan or confirming an apply in Atlas, `terraform-build-worker` pulls the run from the queue and executes it with Terraform in an isolated network environment. Plans and applies are executed independently. This service will send a signal to interrupt Terraform if a user requests a cancellation.
77 |
78 | Because of its network isolation, the `terraform-build-worker` service
79 | will not show up in the admin monitoring UI.
80 |
81 |
82 | ### Impact of Failure
83 |
84 | When this service is failing, runs can fail to execute. Essentially, all Terraform-related TFE activity will fail.
85 |
86 | ### Dependencies
87 |
88 | - `terraform-build-manager`
89 | - RabbitMQ
90 |
--------------------------------------------------------------------------------
/docs/services/slugs.md:
--------------------------------------------------------------------------------
1 | # Slugs
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | The `slug-*` services each perform a function on a bundle of Terraform or Packer config.
14 |
15 | ## `slug-extract`
16 |
17 | The `slug-extract` service is responsible for extracting files from archives of data, typically Packer templates and Terraform Configuration.
18 |
19 | It downloads a target archive (typically in `.tar.gz` format), then unpacks it and extracts a file. This file is then sent to the Atlas API to be stored.
20 |
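21 | For intuition, here is a minimal shell sketch of that flow. This is illustrative only, not the service's actual code; the URL and file name are placeholder assumptions:
22 |
23 | ```bash
24 | # Illustrative sketch only -- placeholder URL and file name.
25 | curl -sSLo slug.tar.gz "https://example.com/slug.tar.gz"  # download the archive
26 | tar -xzf slug.tar.gz main.tf                              # extract a single file
27 | # slug-extract then sends the extracted file to the Atlas API for storage.
28 | ```
29 |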
21 | This service is accessed as part of the file pipeline and may have a status of `extracting` in relation to a job or run.
22 |
23 | ### Impact of Failure
24 |
25 | When failing, builds and runs may fail to proceed past the `extracting` phase. This can cause new builds and runs to not start or appear to be queued or pending.
26 |
27 | ### Dependencies
28 |
29 | - `atlas-frontend`
30 | - `binstore`
31 | - `storagelocker`
32 |
33 | ## `slug-ingress`
34 |
35 | The `slug-ingress` service is responsible for cloning files from version control services, typically git servers and services such as GitHub. These files are used in Terraform, Packer, or applications within Atlas.
36 |
37 | The service clones repositories and checks out specific refs from version control.
38 |
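39 | As a rough illustration, the equivalent of that clone-and-checkout step in plain git, with a placeholder repository and ref:
40 |
41 | ```bash
42 | # Illustrative sketch only -- repository URL and ref are placeholders.
43 | git clone https://github.com/example/infrastructure.git
44 | cd infrastructure
45 | git checkout 0123abc   # the specific ref recorded for the job or run
46 | ```
47 |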
39 | This service is accessed as part of the file pipeline and may have a status of `pending` in relation to a job or run.
40 |
41 | ### Impact of Failure
42 |
43 | When failing, builds and runs may fail to proceed past the `pending` phase. This can cause new builds and runs to not start or appear to be queued or pending.
44 |
45 | ### Dependencies
46 |
47 | - `atlas-frontend`
48 | - `binstore`
49 | - `storagelocker`
50 |
51 | ## `slug-merge`
52 |
53 | The `slug-merge` service is responsible for combining archives of files, typically Packer templates and Terraform Configuration, or scripts sent with Packer and Terraform push commands or via version control services.
54 |
55 | It downloads a set of target archives (typically in `.tar.gz` format), unpacks each one, and then combines, tars, and compresses the result. The resulting archive is then sent to the Binstore service, and a callback is issued to the Atlas Frontend.
56 |
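57 | As a rough illustration of the merge step (illustrative shell only, not the service's actual code; the archive names are placeholders):
58 |
59 | ```bash
60 | # Unpack each input archive into one directory, then re-tar and compress
61 | # the combined tree. Archive names are placeholders.
62 | mkdir -p merged
63 | for archive in config.tar.gz scripts.tar.gz; do
64 |   tar -xzf "$archive" -C merged
65 | done
66 | tar -czf merged.tar.gz -C merged .
67 | # The combined archive is then sent to binstore and a callback is issued
68 | # to the Atlas Frontend.
69 | ```
70 |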
57 | This service is accessed as part of the file pipeline and may have a status of `merging` in relation to a job or run.
58 |
59 | ### Impact of Failure
60 |
61 | When failing, builds and runs may fail to proceed past the `merging` phase. This can cause new builds and runs to not start or appear to be queued or pending.
62 |
63 | ### Dependencies
64 |
65 | - `atlas-frontend`
66 | - `binstore`
67 | - `storagelocker`
68 |
--------------------------------------------------------------------------------
/docs/settings.md:
--------------------------------------------------------------------------------
1 | # Internal Settings
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | The product has a few settings that can be changed to adjust the behavior
14 | of the product. Below is a complete listing of them; please read the caveats
15 | about each before using them.
16 |
17 | These values can be read and written using the `ptfe-settings` command on the
18 | machine.
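19 |
20 | The exact command syntax is not documented here, so the following is only a hypothetical sketch; run `ptfe-settings -help` on the instance to confirm the real usage:
21 |
22 | ```bash
23 | # Hypothetical usage sketch -- the get/set form is an assumption, not
24 | # confirmed syntax; consult `ptfe-settings -help` on the machine.
25 | sudo ptfe-settings get minimum-workers
26 | sudo ptfe-settings set minimum-workers 20
27 | ```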
19 |
20 | ## Worker Autoscaling
21 |
22 | These options control how the product scales the maximum number of concurrent
23 | packer and terraform jobs. It automatically scales up based on the amount of
24 | memory available in the machine, so booting on a larger instance type will
25 | automatically increase the maximum throughput. A worked sizing example follows the list below.
26 |
27 | * _reserved-system-memory_: The amount of memory (in MB) to reserve for system
28 | tasks, such as consul, vault, shells, and anything else that is installed on
29 | the machine by customers. *Default: 4000*
30 | * _minimum-workers_: The minimum number of workers to use when auto-scaling
31 | the worker count. *Default: 10*
32 | * _packer-memory_: The amount of memory (in MB) to give to each packer build
33 |   as it's running. *Default: 256*
34 | * _terraform-memory_: The amount of memory (in MB) to give to each terraform run
35 |   as it's running. *Default: 256*
36 | * _explicit-build-workers_: Specify how many packer and terraform build
37 |   workers to use. *NOTE:* This value is used regardless of the amount of memory in the
38 |   machine; please only use it if the above settings don't auto-scale properly.
39 |   *No Default*
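40 |
41 | To make the scaling rule concrete, here is a back-of-the-envelope sketch. The divide-leftover-memory-evenly formula is an assumption inferred from the description above, not the scaler's confirmed implementation:
42 |
43 | ```bash
44 | # Assumed sizing rule: (total memory - reserved memory) / per-job memory,
45 | # floored at minimum-workers. Illustrative only.
46 | TOTAL_MEMORY_MB=16000           # e.g. an instance with 16 GB of RAM
47 | RESERVED_SYSTEM_MEMORY_MB=4000  # reserved-system-memory default
48 | JOB_MEMORY_MB=256               # packer-memory / terraform-memory default
49 | MINIMUM_WORKERS=10              # minimum-workers default
50 |
51 | WORKERS=$(( (TOTAL_MEMORY_MB - RESERVED_SYSTEM_MEMORY_MB) / JOB_MEMORY_MB ))
52 | if [ "$WORKERS" -lt "$MINIMUM_WORKERS" ]; then
53 |   WORKERS="$MINIMUM_WORKERS"
54 | fi
55 | echo "estimated maximum concurrent jobs: $WORKERS"   # prints 46 here
56 | ```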
40 |
--------------------------------------------------------------------------------
/docs/storing-tfe-state.md:
--------------------------------------------------------------------------------
1 | # Storing Terraform Enterprise State
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | The Terraform Enterprise install process uses Terraform, and therefore must store Terraform State. This presents a bootstrapping problem: while you can generally use Terraform Enterprise to securely store versioned Terraform State, in this case Terraform Enterprise is not ready yet.
14 |
15 | You must therefore choose a mechanism for storing the Terraform State produced by the install process.
16 |
17 | ## Security Considerations for Terraform State
18 |
19 | The Terraform State file for the Terraform Enterprise instance will contain the RDS Database password used by the application. While sensitive fields are separately encrypted-at-rest via Vault, this credential and network access to the database would yield access to all of the unencrypted metadata stored by Terraform Enterprise.
20 |
21 | HashiCorp recommends storing the Terraform State for the install in an encrypted data store.
22 |
23 | ## Recommended State Storage Setup
24 |
25 | Terraform supports various [Remote State](https://www.terraform.io/docs/state/remote.html) backends that can be used to securely store the Terraform State produced by the install.
26 |
27 | HashiCorp recommends a versioned, encrypted-at-rest S3 bucket as a good default choice.
28 |
29 | Here are steps for setting up and using an S3 bucket for Remote State Storage:
30 |
31 | ```bash
32 | # From the root dir of your Terraform Enterprise installation config
33 | BUCKETNAME="mycompany-terraform-enterprise-state"
34 |
35 | # Create bucket
36 | aws s3 mb "s3://${BUCKETNAME}"
37 |
38 | # Turn on versioning for the bucket
39 | aws s3api put-bucket-versioning --bucket "${BUCKETNAME}" --versioning-configuration status=Enabled
40 |
41 | # Configure terraform backend to point to the S3 bucket
42 | cat <<EOF > backend.tf
43 | terraform {
44 | backend "s3" {
45 | bucket = "${BUCKETNAME}"
46 | key = "terraform-enterprise.tfstate"
47 | encrypt = true
48 | }
49 | }
50 | EOF
51 |
52 | # Initialize Terraform with the Remote Backend
53 | terraform init
54 | ```
55 |
56 | Now, if you keep the `backend.tf` file in scope when you run `terraform` operations, all state will be stored in the configured bucket.
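57 |
58 | After a successful `terraform apply`, you can optionally confirm that the state object landed in the bucket:
59 |
60 | ```bash
61 | # List the state object; the key matches the backend configuration above.
62 | aws s3 ls "s3://${BUCKETNAME}/terraform-enterprise.tfstate"
63 | ```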
57 |
--------------------------------------------------------------------------------
/docs/support.md:
--------------------------------------------------------------------------------
1 | # Support for Private Terraform Enterprise
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | If some aspect of Private Terraform Enterprise (PTFE) is not working as
14 | expected, please reach out to support for help.
15 |
16 | ## Email
17 |
18 | You can engage HashiCorp support via [support@hashicorp.com](mailto:support@hashicorp.com). Please make sure
19 | to use your organization email (not your personal email) when contacting us so
20 | we can associate the support request with your organization and expedite our
21 | response.
22 |
23 | ## Diagnostics
24 |
25 | For most technical issues HashiCorp support will ask you to include diagnostic
26 | information in your support request. You can create a support bundle by
27 | connecting to your PTFE instance via SSH and running
28 |
29 | sudo hashicorp-support
30 |
31 | You will see output similar to:
32 |
33 | ==> Creating HashiCorp Support Bundle in /var/lib/hashicorp-support
34 | ==> Wrote support tarball to /var/lib/hashicorp-support/hashicorp-support.tar.gz
35 | gpg: checking the trustdb
36 | gpg: marginals needed: 3 completes needed: 1 trust model: PGP
37 | gpg: depth: 0 valid: 1 signed: 0 trust: 0-, 0q, 0n, 0m, 0f, 1u
38 | gpg: next trustdb check due at 2019-04-14
39 | ==> Wrote encrypted support tarball to /var/lib/hashicorp-support/hashicorp-support.tar.gz.enc
40 | Please send your support bundle to HashiCorp support.
41 |
42 | Attach the `hashicorp-support.tar.gz.enc` file to your support request. If it is
43 | too large to attach you can send this to us via S3, FTP, or another data store
44 | you control.
45 |
46 | **Warning:** Make sure to attach the file ending in `.tar.gz.enc` as the
47 | contents of `.tar.gz` are not encrypted!
48 |
49 | **Note:** The GPG key used to encrypt the bundle is imported for the `root` user
50 | only. If you use `sudo -sH`, change `$HOME`, or take a similar action, the
51 | encryption step will fail. To assume `root` use `sudo -s` instead.
52 |
53 | ### About the Bundle
54 |
55 | The support bundle contains logging and telemetry data from various components
56 | in Private Terraform Enterprise. It may also include log data from Terraform or
57 | Packer builds you have executed on your PTFE installation. For your privacy and
58 | security, the entire contents of the support bundle are encrypted with a 2048
59 | bit RSA key.
60 |
61 | ### Scrubbing Secrets
62 |
63 | If you have extremely sensitive data in your Terraform or Packer build logs you
64 | may opt to omit these logs from your bundle. However, this may impede our
65 | efforts to diagnose any problems you are encountering. To create a custom
66 | support bundle, run the following commands:
67 |
68 | sudo -s
69 | hashicorp-support
70 | cd /var/lib/hashicorp-support
71 | tar -xzf hashicorp-support.tar.gz
72 | rm hashicorp-support.tar.gz*
73 | rm nomad/*build-worker*
74 | tar -czf hashicorp-support.tar.gz *
75 | gpg2 -e -r "Terraform Enterprise Support" \
76 | --cipher-algo AES256 \
77 | --compress-algo ZLIB \
78 | -o hashicorp-support.tar.gz.enc \
79 | hashicorp-support.tar.gz
80 |
81 | You will note that we first create a support bundle using the normal procedure,
82 | extract it, remove the files we want to omit, and then create a new one.
83 |
--------------------------------------------------------------------------------
/docs/test-tf-configuration.md:
--------------------------------------------------------------------------------
1 | -----
2 |
3 | ## Deprecation warning:
4 |
5 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
6 |
7 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
8 |
9 | -----
10 |
11 | _Below is a sample Terraform configuration designed to always have changes. It does not modify any active infrastructure; rather, it outputs a random UUID to allow for quick iteration when testing Terraform Enterprise's plan and apply functionality._
12 |
13 | Copy the below HCL into `main.tf` in a test repository, as this configuration will be used to verify the Private Terraform Enterprise installation:
14 |
15 | ```HCL
16 | resource "random_id" "random" {
17 |   keepers = {
18 | uuid = "${uuid()}"
19 | }
20 |
21 | byte_length = 8
22 | }
23 |
24 | output "random" {
25 | value = "${random_id.random.hex}"
26 | }
27 | ```
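28 |
29 | Because `uuid()` returns a new value on every evaluation, the `keepers` map changes on each plan, forcing Terraform to propose replacing the `random_id` resource; this guarantees a nonempty diff on every run.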
28 |
--------------------------------------------------------------------------------
/docs/tfe-architecture.md:
--------------------------------------------------------------------------------
1 | # Terraform Enterprise Architecture
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | This document describes aspects of the architecture of Terraform Enterprise.
14 |
15 | ## Services
16 |
17 | These are the services used to run Terraform Enterprise. Each service contains a description of what actions it performs, a policy for restarts, impact of failing or degraded performance, and the service's dependencies.
18 |
19 | - [`atlas-frontend` and `atlas-worker`](services/atlas.md)
20 | - [`archivist`, `binstore`, `storagelocker`, and `logstream`](services/archivist.md)
21 | - [`packer-build-manager`, `packer-build-worker`, `terraform-build-manager`, and `terraform-build-worker`](services/build-pipeline.md)
22 | - [`slug-extract`, `slug-ingress`, and `slug-merge`](services/slugs.md)
23 |
24 | ## Data Flow Diagram
25 |
26 | The following diagram shows the way data flows through the various services and data stores in Terraform Enterprise.
27 |
28 | 
29 |
30 | (Note: The services in double square brackets are soon to be replaced by the service that precedes them.)
31 |
32 |
--------------------------------------------------------------------------------
/docs/vault-rekey.md:
--------------------------------------------------------------------------------
1 | # Rekeying Vault
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | (Requires a machine image `v201709-1` or later)
14 |
15 | The Vault instance used by Terraform Enterprise (TFE) self-manages its unseal key by
16 | default. This unseal key is stored in a KMS-encrypted file on S3 and is
17 | downloaded by the instance on boot to automatically unseal Vault.
18 |
19 | If the above configuration is insufficient for your security needs, you can
20 | choose to rekey the Vault instance after bootstrapping is completed. This
21 | allows you to change the key shares and key threshold settings, places the
22 | Vault unseal keys under your control, and deactivates the auto-unseal behavior
23 | of the TFE instance.
24 |
25 | The Vault documentation has a
26 | [guide](https://www.vaultproject.io/guides/rekeying-and-rotating.html#rekeying-vault)
27 | on how to perform a rekey operation and `vault rekey -help` output provides
28 | full docs on the various options available.
29 |
30 | ## Walkthrough of Rekey Operation
31 |
32 | Here is an example of rekeying the TFE vault to use 5 key shares with a key
33 | threshold of 2. These commands are executed from an SSH session on the TFE
34 | instance as the `tfe-admin` user.
35 |
36 | ```
37 | vault rekey -init -key-shares=5 -key-threshold=2
38 |
39 | WARNING: If you lose the keys after they are returned to you, there is no
40 | recovery. Consider using the '-pgp-keys' option to protect the returned unseal
41 | keys along with '-backup=true' to allow recovery of the encrypted keys in case
42 | of emergency. They can easily be deleted at a later time with
43 | 'vault rekey -delete'.
44 |
45 | Nonce: acdd8a46-3b...
46 | Started: true
47 | Key Shares: 5
48 | Key Threshold: 2
49 | Rekey Progress: 0
50 | Required Keys: 1
51 | ```
52 |
53 | The rekey operation has now been started. The printed nonce and the current
54 | unseal key are required to complete it.
55 |
56 | The current unseal key can be found under `/data/vault-unseal-key`:
57 |
58 | ```
59 | VAULT_UNSEAL_KEY=$(sudo cat /data/vault-unseal-key)
60 | vault rekey -nonce=acdd8a46-3b... $VAULT_UNSEAL_KEY
61 |
62 | Key 1: jcLit6uk...
63 | Key 2: qi/AfO30...
64 | Key 3: t3TezCbE...
65 | Key 4: 5O6E8WFU...
66 | Key 5: +bWaQapk...
67 |
68 | Operation nonce: acdd8a46-3b2a-840e-0db8-e53e84fa7e64
69 |
70 | Vault rekeyed with 5 keys and a key threshold of 2. Please
71 | securely distribute the above keys. When the Vault is re-sealed,
72 | restarted, or stopped, you must provide at least 2 of these keys
73 | to unseal it again.
74 |
75 | Vault does not store the master key. Without at least 2 keys,
76 | your Vault will remain permanently sealed.
77 | ```
78 |
79 | ## IMPORTANT: After Rekeying
80 |
81 | **Note**: After performing a rekey it's important to remove the old unseal key
82 | and trigger a backup before rebooting the machine. This will ensure that TFE
83 | knows to prompt for Vault unseal keys.
84 |
85 | ```
86 | sudo rm /data/vault-unseal-key
87 | sudo atlas-backup
88 | ```
89 |
--------------------------------------------------------------------------------
/docs/vpc.md:
--------------------------------------------------------------------------------
1 | # VPC
2 |
3 | -----
4 |
5 | ## Deprecation warning:
6 |
7 | The Terraform Enterprise AMI is no longer actively developed as of 201808-1 and will be fully decommissioned on November 30, 2018. As part of this deprecation, the modules and documentation in this repo are now unmaintained.
8 |
9 | Please see our [Migration Guide](https://www.terraform.io/docs/enterprise/private/migrate.html) to migrate to the new Private Terraform Enterprise Installer.
10 |
11 | -----
12 |
13 | Terraform Enterprise is designed to be installed into a preexisting VPC. This
14 | gives you flexibility in how you run and utilize the product.
15 |
16 | The VPC needs to have the following features (a quick verification sketch follows the list):
17 |
18 | * At least 2 subnets in different availability zones to provide proper RDS
19 |   redundancy.
20 | * If you'll be accessing the product over the internet, you'll need at least
21 |   one public subnet configured for the ELB to use.
22 |
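23 | Assuming the AWS CLI is configured, one quick way to verify is to list the VPC's subnets along with their availability zones; the VPC ID below is a placeholder:
24 |
25 | ```bash
26 | # Show subnet IDs, availability zones, and public-IP mapping for the VPC.
27 | VPC_ID="vpc-0123456789abcdef0"   # placeholder
28 | aws ec2 describe-subnets \
29 |   --filters "Name=vpc-id,Values=${VPC_ID}" \
30 |   --query "Subnets[].{ID:SubnetId,AZ:AvailabilityZone,Public:MapPublicIpOnLaunch}" \
31 |   --output table
32 | ```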
23 |
24 | ### Demo VPC Terraform
25 |
26 | If you wish to build a new VPC from scratch, we have provided a sample
27 | Terraform module you can use to do so. When you run `terraform plan` or
28 | `terraform apply`, it will ask you what to call the VPC and what AWS region to
29 | put it in. Once it's finished applying, you can use the subnet IDs it outputs
30 | to configure the product.
31 |
32 | [Demo VPC Terraform](./demo-base-vpc)
33 |
--------------------------------------------------------------------------------
/modules/rds/rds.tf:
--------------------------------------------------------------------------------
1 | variable "instance_class" {}
2 |
3 | variable "multi_az" {}
4 |
5 | variable "name" {}
6 |
7 | variable "password" {}
8 |
9 | variable "storage_gbs" {}
10 |
11 | variable "subnet_ids" {
12 | type = "list"
13 | }
14 |
15 | variable "username" {}
16 |
17 | variable "engine_version" {}
18 |
19 | variable "vpc_cidr" {}
20 |
21 | variable "vpc_id" {}
22 |
23 | variable "backup_retention_period" {}
24 |
25 | variable "storage_type" {}
26 |
27 | variable "kms_key_id" {}
28 |
29 | variable "snapshot_identifier" {
30 | default = ""
31 | }
32 |
33 | variable "disable" {
34 | default = false
35 | }
36 |
37 | variable "db_name" {
38 | default = "atlas_production"
39 | }
40 |
41 | resource "aws_db_subnet_group" "rds" {
42 | count = "${var.disable ? 0 : 1}"
43 | name = "${var.name}"
44 | description = "${var.name}"
45 |
46 | # In order for this module to work properly with the aws-extra/base-vpc
47 | # module, subnet_ids needs to be wrapped in square brackets even though the
48 | # variable is declared as a list until https://github.com/hashicorp/terraform/issues/13103 is resolved.
49 | subnet_ids = ["${var.subnet_ids}"]
50 | }
51 |
52 | resource "aws_security_group" "rds" {
53 | count = "${var.disable ? 0 : 1}"
54 | name = "${var.name}"
55 | vpc_id = "${var.vpc_id}"
56 |
57 | ingress {
58 | protocol = -1
59 | from_port = 0
60 | to_port = 0
61 | cidr_blocks = ["${var.vpc_cidr}"]
62 | }
63 |
64 | egress {
65 | protocol = -1
66 | from_port = 0
67 | to_port = 0
68 | cidr_blocks = ["0.0.0.0/0"]
69 | }
70 | }
71 |
72 | resource "aws_db_instance" "rds" {
73 | count = "${var.disable ? 0 : 1}"
74 | identifier = "${var.name}"
75 | engine = "postgres"
76 | engine_version = "${var.engine_version}"
77 | multi_az = "${var.multi_az}"
78 | allocated_storage = "${var.storage_gbs}"
79 | db_subnet_group_name = "${aws_db_subnet_group.rds.name}"
80 | username = "${var.username}"
81 | password = "${var.password}"
82 | instance_class = "${var.instance_class}"
83 | vpc_security_group_ids = ["${aws_security_group.rds.id}"]
84 | backup_retention_period = "${var.backup_retention_period}"
85 | storage_type = "${var.storage_type}"
86 | name = "${var.snapshot_identifier == "" ? var.db_name : ""}"
87 | final_snapshot_identifier = "${var.name}"
88 | storage_encrypted = true
89 | kms_key_id = "${var.kms_key_id}"
90 | snapshot_identifier = "${var.snapshot_identifier}"
91 |
92 | # After a snapshot restores, the DB name will be populated from the snapshot,
93 | # *but* we currently need to omit the name parameter with the ternary above.
94 | # To prevent the effective `name = ""` config from triggering a diff after
95 | # initial creation, we need to ignore changes on that field.
96 | #
97 | # After this PR lands we can revert to just a static name value, removing
98 | # both the ternary above and the ignore_changes below:
99 | # https://github.com/hashicorp/terraform/pull/13140
100 | lifecycle {
101 | ignore_changes = ["name"]
102 | }
103 |
104 | timeouts {
105 | create = "2h"
106 | }
107 | }
108 |
109 | output "database" {
110 | value = "${join("", aws_db_instance.rds.*.name)}"
111 | }
112 |
113 | output "endpoint" {
114 | value = "${join("", aws_db_instance.rds.*.endpoint)}"
115 | }
116 |
117 | output "username" {
118 | value = "${var.username}"
119 | }
120 |
121 | output "password" {
122 | value = "${var.password}"
123 | }
124 |
125 | output "address" {
126 | value = "${join("", aws_db_instance.rds.*.address)}"
127 | }
128 |
--------------------------------------------------------------------------------
/modules/redis/redis.tf:
--------------------------------------------------------------------------------
1 | variable "name" {}
2 |
3 | variable "instance_type" {
4 | default = "cache.m3.medium"
5 | }
6 |
7 | variable "subnet_ids" {
8 | type = "list"
9 | }
10 |
11 | variable "vpc_id" {}
12 | variable "vpc_cidr" {}
13 |
14 | variable "port" {
15 | default = "6379"
16 | }
17 |
18 | variable "disable" {
19 | default = false
20 | }
21 |
22 | resource "aws_elasticache_parameter_group" "redis" {
23 | count = "${var.disable ? 0 : 1}"
24 | name = "${var.name}"
25 | family = "redis2.8"
26 | description = "${var.name} parameter group"
27 |
28 | parameter {
29 | name = "appendfsync"
30 | value = "everysec"
31 | }
32 |
33 | parameter {
34 | name = "appendonly"
35 | value = "yes"
36 | }
37 | }
38 |
39 | resource "aws_security_group" "redis" {
40 | count = "${var.disable ? 0 : 1}"
41 | vpc_id = "${var.vpc_id}"
42 |
43 | ingress {
44 | protocol = -1
45 | from_port = 0
46 | to_port = 0
47 | cidr_blocks = ["${var.vpc_cidr}"]
48 | }
49 |
50 | egress {
51 | protocol = -1
52 | from_port = 0
53 | to_port = 0
54 | cidr_blocks = ["0.0.0.0/0"]
55 | }
56 |
57 | tags {
58 | Name = "terraform-enterprise"
59 | }
60 | }
61 |
62 | resource "aws_elasticache_subnet_group" "redis" {
63 | count = "${var.disable ? 0 : 1}"
64 | name = "${var.name}"
65 | description = "${var.name} subnet group"
66 |
67 | # In order for this module to work properly with the aws-extra/base-vpc
68 | # module, subnet_ids needs to be wrapped in square brackets even though the
69 | # variable is declared as a list until https://github.com/hashicorp/terraform/issues/13103 is resolved.
70 | subnet_ids = ["${var.subnet_ids}"]
71 | }
72 |
73 | resource "aws_elasticache_cluster" "redis" {
74 | count = "${var.disable ? 0 : 1}"
75 | cluster_id = "${format("%.*s", 20, var.name)}" # 20 max chars
76 | engine = "redis"
77 | engine_version = "2.8.24"
78 | node_type = "${var.instance_type}"
79 | port = "${var.port}"
80 | num_cache_nodes = "1"
81 | parameter_group_name = "${aws_elasticache_parameter_group.redis.name}"
82 | subnet_group_name = "${aws_elasticache_subnet_group.redis.name}"
83 | security_group_ids = ["${aws_security_group.redis.id}"]
84 | }
85 |
86 | output "host" {
87 | value = "${join("", aws_elasticache_cluster.redis.*.cache_nodes.0.address)}"
88 | }
89 |
90 | output "port" {
91 | value = "${var.port}"
92 | }
93 |
94 | output "password" {
95 | value = ""
96 | } # Elasticache has no auth
97 |
--------------------------------------------------------------------------------
/modules/tfe-instance/aws.tf:
--------------------------------------------------------------------------------
1 | variable "hostname" {}
2 |
3 | variable "vpc_id" {}
4 |
5 | variable "cert_id" {}
6 |
7 | variable "installation_id" {}
8 |
9 | // Used for the instance
10 | variable "instance_subnet_id" {}
11 |
12 | // Used for the ELB
13 | variable "elb_subnet_id" {}
14 |
15 | variable "key_name" {}
16 |
17 | variable "ami_id" {}
18 |
19 | variable "instance_type" {}
20 |
21 | variable "db_username" {}
22 |
23 | variable "db_password" {}
24 |
25 | variable "db_endpoint" {}
26 |
27 | variable "db_database" {}
28 |
29 | variable "redis_host" {}
30 |
31 | variable "redis_port" {}
32 |
33 | variable "kms_key_id" {}
34 |
35 | variable "archivist_sse" {
36 | type = "string"
37 | description = "Setting for server-side encryption of objects in S3; if provided, must be set to 'aws:kms'"
38 | default = ""
39 | }
40 |
41 | variable "archivist_kms_key_id" {
42 | type = "string"
43 | description = "KMS key ID used by Archivist to enable S3 server-side encryption"
44 | default = ""
45 | }
46 |
47 | variable "local_setup" {
48 | default = false
49 | }
50 |
51 | variable "ebs_size" {
52 | description = "Size (in GB) of the EBS volumes"
53 | default = 100
54 | }
55 |
56 | variable "ebs_redundancy" {
57 | description = "Number of redundent EBS volumes to configure"
58 | default = 2
59 | }
60 |
61 | variable "arn_partition" {
62 | description = "AWS partition to use (used mostly by govcloud)"
63 | default = "aws"
64 | }
65 |
66 | variable "internal_elb" {
67 | default = false
68 | }
69 |
70 | variable "startup_script" {
71 | description = "Shell or other cloud-init compatible code to run on startup"
72 | default = ""
73 | }
74 |
75 | variable "external_security_group_ids" {
76 | description = "The IDs of existing security groups to use for the ELB instead of creating one."
77 | type = "list"
78 | default = []
79 | }
80 |
81 | variable "internal_security_group_ids" {
82 | description = "The IDs of existing security groups to use for the instance instead of creating one."
83 | type = "list"
84 | default = []
85 | }
86 |
87 | variable "proxy_url" {
88 |   description = "A URL (http or https, with port) to proxy all external http/https requests from the cluster to."
89 | type = "string"
90 | default = ""
91 | }
92 |
93 | variable "no_proxy" {
94 |   description = "Hosts to exclude from proxying (only applies when proxy_url is set)"
95 | type = "string"
96 | default = ""
97 | }
98 |
99 | resource "aws_security_group" "ptfe" {
100 | vpc_id = "${var.vpc_id}"
101 | count = "${length(var.internal_security_group_ids) != 0 ? 0 : 1}"
102 |
103 | ingress {
104 | from_port = 22
105 | to_port = 22
106 | protocol = "tcp"
107 | cidr_blocks = ["0.0.0.0/0"]
108 | }
109 |
110 | ingress {
111 | from_port = 8080
112 | to_port = 8080
113 | protocol = "tcp"
114 | cidr_blocks = ["0.0.0.0/0"]
115 | }
116 |
117 | # TCP All outbound traffic
118 | egress {
119 | from_port = 0
120 | to_port = 65535
121 | protocol = "tcp"
122 | cidr_blocks = ["0.0.0.0/0"]
123 | }
124 |
125 | # UDP All outbound traffic
126 | egress {
127 | from_port = 0
128 | to_port = 65535
129 | protocol = "udp"
130 | cidr_blocks = ["0.0.0.0/0"]
131 | }
132 |
133 | tags {
134 | Name = "terraform-enterprise"
135 | }
136 | }
137 |
138 | resource "aws_security_group" "ptfe-external" {
139 | count = "${length(var.external_security_group_ids) != 0 ? 0 : 1}"
140 | vpc_id = "${var.vpc_id}"
141 |
142 | ingress {
143 | from_port = 80
144 | to_port = 80
145 | protocol = "tcp"
146 | cidr_blocks = ["0.0.0.0/0"]
147 | }
148 |
149 | ingress {
150 | from_port = 443
151 | to_port = 443
152 | protocol = "tcp"
153 | cidr_blocks = ["0.0.0.0/0"]
154 | }
155 |
156 | # TCP All outbound traffic
157 | egress {
158 | from_port = 0
159 | to_port = 65535
160 | protocol = "tcp"
161 | cidr_blocks = ["0.0.0.0/0"]
162 | }
163 |
164 | # UDP All outbound traffic
165 | egress {
166 | from_port = 0
167 | to_port = 65535
168 | protocol = "udp"
169 | cidr_blocks = ["0.0.0.0/0"]
170 | }
171 |
172 | tags {
173 | Name = "terraform-enterprise-external"
174 | }
175 | }
176 |
177 | data "aws_subnet" "subnet" {
178 | id = "${var.instance_subnet_id}"
179 | }
180 |
181 | resource "aws_ebs_volume" "data" {
182 | count = "${var.ebs_redundancy}"
183 | availability_zone = "${data.aws_subnet.subnet.availability_zone}"
184 | size = "${var.ebs_size}"
185 | type = "gp2"
186 |
187 | tags {
188 | Name = "terraform-enterprise-${var.hostname}"
189 | }
190 |
191 | tags {
192 | InstallationId = "${var.installation_id}"
193 | }
194 | }
195 |
196 | resource "aws_launch_configuration" "ptfe" {
197 | image_id = "${var.ami_id}"
198 | instance_type = "${var.instance_type}"
199 | key_name = "${var.key_name}"
200 | security_groups = ["${concat(var.internal_security_group_ids, aws_security_group.ptfe.*.id)}"]
201 | iam_instance_profile = "${aws_iam_instance_profile.tfe_instance.name}"
202 |
203 | root_block_device {
204 | volume_size = 250
205 | }
206 |
207 | user_data = <<-BASH
208 | #!/bin/sh
209 |
210 | mkdir -p /etc/atlas
211 |
212 | aws configure set s3.signature_version s3v4
213 | aws configure set default.region ${var.bucket_region}
214 | echo "${var.bucket_name}\n${var.bucket_region}" > /etc/ptfe/s3-bucket
215 | ${var.local_setup ? "" : "aws s3 cp s3://${join("", aws_s3_bucket_object.setup.*.bucket)}/${join("", aws_s3_bucket_object.setup.*.key)} /etc/atlas/boot.env"}
216 |
217 | ${var.startup_script}
218 | BASH
219 | }
220 |
221 | resource "aws_autoscaling_group" "ptfe" {
222 | # Interpolating the LC name into the ASG name here causes any changes that
223 | # would replace the LC (like, most commonly, an AMI ID update) to _also_
224 | # replace the ASG.
225 | name = "terraform-enterprise - ${aws_launch_configuration.ptfe.name}"
226 |
227 | launch_configuration = "${aws_launch_configuration.ptfe.name}"
228 | desired_capacity = 1
229 | min_size = 1
230 | max_size = 1
231 | vpc_zone_identifier = ["${var.instance_subnet_id}"]
232 | load_balancers = ["${aws_elb.ptfe.id}"]
233 | wait_for_elb_capacity = 1
234 |
235 | tag {
236 | key = "Name"
237 | value = "terraform-enterprise-${var.hostname}"
238 | propagate_at_launch = true
239 | }
240 |
241 | tag {
242 | key = "Hostname"
243 | value = "${var.hostname}"
244 | propagate_at_launch = true
245 | }
246 |
247 | tag {
248 | key = "InstallationId"
249 | value = "${var.installation_id}"
250 | propagate_at_launch = true
251 | }
252 | }
253 |
254 | resource "aws_s3_bucket_object" "setup" {
255 | # Only store config in S3 if configured as such
256 | count = "${var.local_setup ? 0 : 1}"
257 |
258 | bucket = "${var.bucket_name}"
259 | key = "tfe-setup-data"
260 | kms_key_id = "${var.kms_key_id}"
261 |
262 | # This is to make sure that the bucket exists before
263 | # the object is put there. We use this because the bucket
264 | # might not be created by TF though, just referenced.
265 | depends_on = ["aws_s3_bucket.tfe_bucket"]
266 |
267 | content = <<-BASH
268 | DATABASE_USER="${var.db_username}"
269 | DATABASE_PASSWORD="${var.db_password}"
270 | DATABASE_HOST="${var.db_endpoint}"
271 | DATABASE_DB="${var.db_database}"
272 | REDIS_HOST="${var.redis_host}"
273 | REDIS_PORT="${var.redis_port}"
274 | TFE_HOSTNAME="${var.hostname}"
275 | BUCKET_URL="${var.bucket_name}"
276 | BUCKET_REGION="${var.bucket_region}"
277 | KMS_KEY_ID="${var.kms_key_id}"
278 | INSTALL_ID="${var.installation_id}"
279 | DATA_REDUNDANCY="${var.ebs_redundancy}"
280 | PROXY_URL="${var.proxy_url}"
281 | EXTRA_NO_PROXY="${var.no_proxy}"
282 | ARCHIVIST_SSE="${var.archivist_sse}"
283 | ARCHIVIST_KMS_KEY_ID="${var.archivist_kms_key_id}"
284 | BASH
285 | }
286 |
287 | resource "local_file" "setup" {
288 | count = "${var.local_setup ? 1 : 0}"
289 | filename = "${path.root}/tfe-setup-data"
290 |
291 | content = <<-BASH
292 | DATABASE_USER="${var.db_username}"
293 | DATABASE_PASSWORD="${var.db_password}"
294 | DATABASE_HOST="${var.db_endpoint}"
295 | DATABASE_DB="${var.db_database}"
296 | REDIS_HOST="${var.redis_host}"
297 | REDIS_PORT="${var.redis_port}"
298 | TFE_HOSTNAME="${var.hostname}"
299 | BUCKET_URL="${var.bucket_name}"
300 | BUCKET_REGION="${var.bucket_region}"
301 | KMS_KEY_ID="${var.kms_key_id}"
302 | INSTALL_ID="${var.installation_id}"
303 | DATA_REDUNDANCY="${var.ebs_redundancy}"
304 | PROXY_URL="${var.proxy_url}"
305 | EXTRA_NO_PROXY="${var.no_proxy}"
306 | BASH
307 | }
308 |
309 | resource "aws_elb" "ptfe" {
310 | internal = "${var.internal_elb}"
311 | subnets = ["${var.elb_subnet_id}"]
312 | security_groups = ["${concat(var.external_security_group_ids, aws_security_group.ptfe-external.*.id)}"]
313 |
314 | listener {
315 | instance_port = 8080
316 | instance_protocol = "http"
317 | lb_port = 443
318 | lb_protocol = "https"
319 | ssl_certificate_id = "${var.cert_id}"
320 | }
321 |
322 | listener {
323 | instance_port = 8080
324 | instance_protocol = "http"
325 | lb_port = 80
326 | lb_protocol = "http"
327 | }
328 |
329 | health_check {
330 | healthy_threshold = 2
331 | unhealthy_threshold = 2
332 | timeout = 3
333 | target = "TCP:8080"
334 | interval = 5
335 | }
336 |
337 | tags {
338 | Name = "terraform-enterprise"
339 | }
340 | }
341 |
342 | output "dns_name" {
343 | value = "${aws_elb.ptfe.dns_name}"
344 | }
345 |
346 | output "zone_id" {
347 | value = "${aws_elb.ptfe.zone_id}"
348 | }
349 |
350 | output "hostname" {
351 | value = "${var.hostname}"
352 | }
353 |
--------------------------------------------------------------------------------
/modules/tfe-instance/iam.tf:
--------------------------------------------------------------------------------
1 | resource "aws_iam_role" "tfe_iam_role" {
2 | name = "tfe_iam_role-${var.installation_id}"
3 |
4 | assume_role_policy = <