├── .editorconfig ├── .github └── workflows │ └── build.yml ├── .gitignore ├── .npmignore ├── .prettierrc ├── LICENSE ├── README.md ├── generate_slug.js ├── generate_slug.test.js ├── index.js ├── index.test.js ├── package-lock.json ├── package.json └── plugins ├── DEPRECATED_heading-linkable ├── README.md ├── index.js └── index.test.js ├── DEPRECATED_inline-code-linkable ├── README.md ├── __snapshots__ │ └── index.test.js.snap ├── index.js └── index.test.js ├── anchor-links ├── README.md ├── fixtures │ ├── 00-nested-headings │ │ ├── tutorial-terraform-aks.mdx │ │ ├── tutorial-terraform-gke.mdx │ │ └── tutorials-nomad-format-output-with-templates.mdx │ ├── 01-nested-heading │ │ ├── tutorial-terraform-aks.mdx │ │ ├── tutorial-terraform-gke.mdx │ │ └── tutorials-nomad-format-output-with-templates.mdx │ └── 02-nested-headings │ │ ├── tutorial-terraform-aks.mdx │ │ ├── tutorial-terraform-gke.mdx │ │ └── tutorials-nomad-format-output-with-templates.mdx ├── index.js └── index.test.js ├── include-markdown ├── README.md ├── fixtures │ ├── basic.expected.md │ ├── basic.md │ ├── include-nested-component.mdx │ ├── include-with-comment.mdx │ ├── include-with-component.mdx │ ├── include.js │ ├── include.md │ ├── include.mdx │ ├── invalid-path.md │ ├── mdx-format.expected.md │ ├── mdx-format.md │ ├── nested │ │ ├── include-component.mdx │ │ ├── include2.md │ │ └── include3.md │ ├── non-markdown.expected.md │ ├── non-markdown.md │ ├── resolve-from.expected.md │ └── resolve-from.md ├── index.js ├── index.test.js └── md-ast-to-mdx-ast.js ├── paragraph-custom-alerts ├── README.md ├── index.js └── index.test.js └── typography ├── README.md ├── index.js └── index.test.js /.editorconfig: -------------------------------------------------------------------------------- 1 | # This file is for unifying the coding style for different editors and IDEs 2 | # editorconfig.org 3 | 4 | root = true 5 | 6 | [*] 7 | end_of_line = lf 8 | charset = utf-8 9 | insert_final_newline = true 10 | 
trim_trailing_whitespace = true 11 | indent_style = space 12 | indent_size = 2 13 | 14 | [Makefile] 15 | indent_style = tab 16 | 17 | [{*.md,*.json}] 18 | max_line_length = null 19 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | name: Build 5 | on: 6 | push: 7 | branches: 8 | - main 9 | jobs: 10 | linux: 11 | defaults: 12 | run: 13 | working-directory: '~/repo' 14 | runs-on: ubuntu-latest 15 | container: 16 | image: node:latest 17 | steps: 18 | - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 19 | - name: restore_cache 20 | uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 21 | with: 22 | key: v2-dependencies-linux-{{ checksum "package.json" }} 23 | restore-keys: |- 24 | v2-dependencies-linux-{{ checksum "package.json" }} 25 | v2-dependencies-liux- 26 | path: node_modules 27 | - run: npm install 28 | - run: npm test 29 | windows: 30 | defaults: 31 | run: 32 | working-directory: '~/repo' 33 | runs-on: ubuntu-latest 34 | steps: 35 | - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 36 | - name: restore_cache 37 | uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 38 | with: 39 | key: v2-dependencies-win-{{ checksum "package.json" }} 40 | restore-keys: |- 41 | v2-dependencies-win-{{ checksum "package.json" }} 42 | v2-dependencies-win- 43 | path: node_modules 44 | - run: npm install 45 | - run: npm test 46 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | -------------------------------------------------------------------------------- /.npmignore: 
-------------------------------------------------------------------------------- 1 | *.test.js 2 | .prettierrc 3 | .editorconfig 4 | plugins/*/fixtures 5 | -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "semi": false, 3 | "singleQuote": true 4 | } 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2019 HashiCorp, Inc. 2 | 3 | Mozilla Public License Version 2.0 4 | ================================== 5 | 6 | 1. Definitions 7 | -------------- 8 | 9 | 1.1. "Contributor" 10 | means each individual or legal entity that creates, contributes to 11 | the creation of, or owns Covered Software. 12 | 13 | 1.2. "Contributor Version" 14 | means the combination of the Contributions of others (if any) used 15 | by a Contributor and that particular Contributor's Contribution. 16 | 17 | 1.3. "Contribution" 18 | means Covered Software of a particular Contributor. 19 | 20 | 1.4. "Covered Software" 21 | means Source Code Form to which the initial Contributor has attached 22 | the notice in Exhibit A, the Executable Form of such Source Code 23 | Form, and Modifications of such Source Code Form, in each case 24 | including portions thereof. 25 | 26 | 1.5. "Incompatible With Secondary Licenses" 27 | means 28 | 29 | (a) that the initial Contributor has attached the notice described 30 | in Exhibit B to the Covered Software; or 31 | 32 | (b) that the Covered Software was made available under the terms of 33 | version 1.1 or earlier of the License, but not also under the 34 | terms of a Secondary License. 35 | 36 | 1.6. "Executable Form" 37 | means any form of the work other than Source Code Form. 38 | 39 | 1.7. 
"Larger Work" 40 | means a work that combines Covered Software with other material, in 41 | a separate file or files, that is not Covered Software. 42 | 43 | 1.8. "License" 44 | means this document. 45 | 46 | 1.9. "Licensable" 47 | means having the right to grant, to the maximum extent possible, 48 | whether at the time of the initial grant or subsequently, any and 49 | all of the rights conveyed by this License. 50 | 51 | 1.10. "Modifications" 52 | means any of the following: 53 | 54 | (a) any file in Source Code Form that results from an addition to, 55 | deletion from, or modification of the contents of Covered 56 | Software; or 57 | 58 | (b) any new file in Source Code Form that contains any Covered 59 | Software. 60 | 61 | 1.11. "Patent Claims" of a Contributor 62 | means any patent claim(s), including without limitation, method, 63 | process, and apparatus claims, in any patent Licensable by such 64 | Contributor that would be infringed, but for the grant of the 65 | License, by the making, using, selling, offering for sale, having 66 | made, import, or transfer of either its Contributions or its 67 | Contributor Version. 68 | 69 | 1.12. "Secondary License" 70 | means either the GNU General Public License, Version 2.0, the GNU 71 | Lesser General Public License, Version 2.1, the GNU Affero General 72 | Public License, Version 3.0, or any later versions of those 73 | licenses. 74 | 75 | 1.13. "Source Code Form" 76 | means the form of the work preferred for making modifications. 77 | 78 | 1.14. "You" (or "Your") 79 | means an individual or a legal entity exercising rights under this 80 | License. For legal entities, "You" includes any entity that 81 | controls, is controlled by, or is under common control with You. 
For 82 | purposes of this definition, "control" means (a) the power, direct 83 | or indirect, to cause the direction or management of such entity, 84 | whether by contract or otherwise, or (b) ownership of more than 85 | fifty percent (50%) of the outstanding shares or beneficial 86 | ownership of such entity. 87 | 88 | 2. License Grants and Conditions 89 | -------------------------------- 90 | 91 | 2.1. Grants 92 | 93 | Each Contributor hereby grants You a world-wide, royalty-free, 94 | non-exclusive license: 95 | 96 | (a) under intellectual property rights (other than patent or trademark) 97 | Licensable by such Contributor to use, reproduce, make available, 98 | modify, display, perform, distribute, and otherwise exploit its 99 | Contributions, either on an unmodified basis, with Modifications, or 100 | as part of a Larger Work; and 101 | 102 | (b) under Patent Claims of such Contributor to make, use, sell, offer 103 | for sale, have made, import, and otherwise transfer either its 104 | Contributions or its Contributor Version. 105 | 106 | 2.2. Effective Date 107 | 108 | The licenses granted in Section 2.1 with respect to any Contribution 109 | become effective for each Contribution on the date the Contributor first 110 | distributes such Contribution. 111 | 112 | 2.3. Limitations on Grant Scope 113 | 114 | The licenses granted in this Section 2 are the only rights granted under 115 | this License. No additional rights or licenses will be implied from the 116 | distribution or licensing of Covered Software under this License. 
117 | Notwithstanding Section 2.1(b) above, no patent license is granted by a 118 | Contributor: 119 | 120 | (a) for any code that a Contributor has removed from Covered Software; 121 | or 122 | 123 | (b) for infringements caused by: (i) Your and any other third party's 124 | modifications of Covered Software, or (ii) the combination of its 125 | Contributions with other software (except as part of its Contributor 126 | Version); or 127 | 128 | (c) under Patent Claims infringed by Covered Software in the absence of 129 | its Contributions. 130 | 131 | This License does not grant any rights in the trademarks, service marks, 132 | or logos of any Contributor (except as may be necessary to comply with 133 | the notice requirements in Section 3.4). 134 | 135 | 2.4. Subsequent Licenses 136 | 137 | No Contributor makes additional grants as a result of Your choice to 138 | distribute the Covered Software under a subsequent version of this 139 | License (see Section 10.2) or under the terms of a Secondary License (if 140 | permitted under the terms of Section 3.3). 141 | 142 | 2.5. Representation 143 | 144 | Each Contributor represents that the Contributor believes its 145 | Contributions are its original creation(s) or it has sufficient rights 146 | to grant the rights to its Contributions conveyed by this License. 147 | 148 | 2.6. Fair Use 149 | 150 | This License is not intended to limit any rights You have under 151 | applicable copyright doctrines of fair use, fair dealing, or other 152 | equivalents. 153 | 154 | 2.7. Conditions 155 | 156 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted 157 | in Section 2.1. 158 | 159 | 3. Responsibilities 160 | ------------------- 161 | 162 | 3.1. Distribution of Source Form 163 | 164 | All distribution of Covered Software in Source Code Form, including any 165 | Modifications that You create or to which You contribute, must be under 166 | the terms of this License. 
You must inform recipients that the Source 167 | Code Form of the Covered Software is governed by the terms of this 168 | License, and how they can obtain a copy of this License. You may not 169 | attempt to alter or restrict the recipients' rights in the Source Code 170 | Form. 171 | 172 | 3.2. Distribution of Executable Form 173 | 174 | If You distribute Covered Software in Executable Form then: 175 | 176 | (a) such Covered Software must also be made available in Source Code 177 | Form, as described in Section 3.1, and You must inform recipients of 178 | the Executable Form how they can obtain a copy of such Source Code 179 | Form by reasonable means in a timely manner, at a charge no more 180 | than the cost of distribution to the recipient; and 181 | 182 | (b) You may distribute such Executable Form under the terms of this 183 | License, or sublicense it under different terms, provided that the 184 | license for the Executable Form does not attempt to limit or alter 185 | the recipients' rights in the Source Code Form under this License. 186 | 187 | 3.3. Distribution of a Larger Work 188 | 189 | You may create and distribute a Larger Work under terms of Your choice, 190 | provided that You also comply with the requirements of this License for 191 | the Covered Software. If the Larger Work is a combination of Covered 192 | Software with a work governed by one or more Secondary Licenses, and the 193 | Covered Software is not Incompatible With Secondary Licenses, this 194 | License permits You to additionally distribute such Covered Software 195 | under the terms of such Secondary License(s), so that the recipient of 196 | the Larger Work may, at their option, further distribute the Covered 197 | Software under the terms of either this License or such Secondary 198 | License(s). 199 | 200 | 3.4. 
Notices 201 | 202 | You may not remove or alter the substance of any license notices 203 | (including copyright notices, patent notices, disclaimers of warranty, 204 | or limitations of liability) contained within the Source Code Form of 205 | the Covered Software, except that You may alter any license notices to 206 | the extent required to remedy known factual inaccuracies. 207 | 208 | 3.5. Application of Additional Terms 209 | 210 | You may choose to offer, and to charge a fee for, warranty, support, 211 | indemnity or liability obligations to one or more recipients of Covered 212 | Software. However, You may do so only on Your own behalf, and not on 213 | behalf of any Contributor. You must make it absolutely clear that any 214 | such warranty, support, indemnity, or liability obligation is offered by 215 | You alone, and You hereby agree to indemnify every Contributor for any 216 | liability incurred by such Contributor as a result of warranty, support, 217 | indemnity or liability terms You offer. You may include additional 218 | disclaimers of warranty and limitations of liability specific to any 219 | jurisdiction. 220 | 221 | 4. Inability to Comply Due to Statute or Regulation 222 | --------------------------------------------------- 223 | 224 | If it is impossible for You to comply with any of the terms of this 225 | License with respect to some or all of the Covered Software due to 226 | statute, judicial order, or regulation then You must: (a) comply with 227 | the terms of this License to the maximum extent possible; and (b) 228 | describe the limitations and the code they affect. Such description must 229 | be placed in a text file included with all distributions of the Covered 230 | Software under this License. Except to the extent prohibited by statute 231 | or regulation, such description must be sufficiently detailed for a 232 | recipient of ordinary skill to be able to understand it. 233 | 234 | 5. 
Termination 235 | -------------- 236 | 237 | 5.1. The rights granted under this License will terminate automatically 238 | if You fail to comply with any of its terms. However, if You become 239 | compliant, then the rights granted under this License from a particular 240 | Contributor are reinstated (a) provisionally, unless and until such 241 | Contributor explicitly and finally terminates Your grants, and (b) on an 242 | ongoing basis, if such Contributor fails to notify You of the 243 | non-compliance by some reasonable means prior to 60 days after You have 244 | come back into compliance. Moreover, Your grants from a particular 245 | Contributor are reinstated on an ongoing basis if such Contributor 246 | notifies You of the non-compliance by some reasonable means, this is the 247 | first time You have received notice of non-compliance with this License 248 | from such Contributor, and You become compliant prior to 30 days after 249 | Your receipt of the notice. 250 | 251 | 5.2. If You initiate litigation against any entity by asserting a patent 252 | infringement claim (excluding declaratory judgment actions, 253 | counter-claims, and cross-claims) alleging that a Contributor Version 254 | directly or indirectly infringes any patent, then the rights granted to 255 | You by any and all Contributors for the Covered Software under Section 256 | 2.1 of this License shall terminate. 257 | 258 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all 259 | end user license agreements (excluding distributors and resellers) which 260 | have been validly granted by You or Your distributors under this License 261 | prior to termination shall survive termination. 262 | 263 | ************************************************************************ 264 | * * 265 | * 6. 
Disclaimer of Warranty * 266 | * ------------------------- * 267 | * * 268 | * Covered Software is provided under this License on an "as is" * 269 | * basis, without warranty of any kind, either expressed, implied, or * 270 | * statutory, including, without limitation, warranties that the * 271 | * Covered Software is free of defects, merchantable, fit for a * 272 | * particular purpose or non-infringing. The entire risk as to the * 273 | * quality and performance of the Covered Software is with You. * 274 | * Should any Covered Software prove defective in any respect, You * 275 | * (not any Contributor) assume the cost of any necessary servicing, * 276 | * repair, or correction. This disclaimer of warranty constitutes an * 277 | * essential part of this License. No use of any Covered Software is * 278 | * authorized under this License except under this disclaimer. * 279 | * * 280 | ************************************************************************ 281 | 282 | ************************************************************************ 283 | * * 284 | * 7. Limitation of Liability * 285 | * -------------------------- * 286 | * * 287 | * Under no circumstances and under no legal theory, whether tort * 288 | * (including negligence), contract, or otherwise, shall any * 289 | * Contributor, or anyone who distributes Covered Software as * 290 | * permitted above, be liable to You for any direct, indirect, * 291 | * special, incidental, or consequential damages of any character * 292 | * including, without limitation, damages for lost profits, loss of * 293 | * goodwill, work stoppage, computer failure or malfunction, or any * 294 | * and all other commercial damages or losses, even if such party * 295 | * shall have been informed of the possibility of such damages. 
This * 296 | * limitation of liability shall not apply to liability for death or * 297 | * personal injury resulting from such party's negligence to the * 298 | * extent applicable law prohibits such limitation. Some * 299 | * jurisdictions do not allow the exclusion or limitation of * 300 | * incidental or consequential damages, so this exclusion and * 301 | * limitation may not apply to You. * 302 | * * 303 | ************************************************************************ 304 | 305 | 8. Litigation 306 | ------------- 307 | 308 | Any litigation relating to this License may be brought only in the 309 | courts of a jurisdiction where the defendant maintains its principal 310 | place of business and such litigation shall be governed by laws of that 311 | jurisdiction, without reference to its conflict-of-law provisions. 312 | Nothing in this Section shall prevent a party's ability to bring 313 | cross-claims or counter-claims. 314 | 315 | 9. Miscellaneous 316 | ---------------- 317 | 318 | This License represents the complete agreement concerning the subject 319 | matter hereof. If any provision of this License is held to be 320 | unenforceable, such provision shall be reformed only to the extent 321 | necessary to make it enforceable. Any law or regulation which provides 322 | that the language of a contract shall be construed against the drafter 323 | shall not be used to construe this License against a Contributor. 324 | 325 | 10. Versions of the License 326 | --------------------------- 327 | 328 | 10.1. New Versions 329 | 330 | Mozilla Foundation is the license steward. Except as provided in Section 331 | 10.3, no one other than the license steward has the right to modify or 332 | publish new versions of this License. Each version will be given a 333 | distinguishing version number. 334 | 335 | 10.2. 
Effect of New Versions 336 | 337 | You may distribute the Covered Software under the terms of the version 338 | of the License under which You originally received the Covered Software, 339 | or under the terms of any subsequent version published by the license 340 | steward. 341 | 342 | 10.3. Modified Versions 343 | 344 | If you create software not governed by this License, and you want to 345 | create a new license for such software, you may create and use a 346 | modified version of this License if you rename the license and remove 347 | any references to the name of the license steward (except to note that 348 | such modified license differs from this License). 349 | 350 | 10.4. Distributing Source Code Form that is Incompatible With Secondary 351 | Licenses 352 | 353 | If You choose to distribute Source Code Form that is Incompatible With 354 | Secondary Licenses under the terms of this version of the License, the 355 | notice described in Exhibit B of this License must be attached. 356 | 357 | Exhibit A - Source Code Form License Notice 358 | ------------------------------------------- 359 | 360 | This Source Code Form is subject to the terms of the Mozilla Public 361 | License, v. 2.0. If a copy of the MPL was not distributed with this 362 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 363 | 364 | If it is not possible or desirable to put the notice in a particular 365 | file, then You may include the notice in a location (such as a LICENSE 366 | file in a relevant directory) where a recipient would be likely to look 367 | for such a notice. 368 | 369 | You may add additional accurate notices of copyright ownership. 370 | 371 | Exhibit B - "Incompatible With Secondary Licenses" Notice 372 | --------------------------------------------------------- 373 | 374 | This Source Code Form is "Incompatible With Secondary Licenses", as 375 | defined by the Mozilla Public License, v. 2.0. 
376 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | __Deprecated: This repository has been archived, our remark plugins now live at [hashicorp/web-platform-packages](https://github.com/hashicorp/web-platform-packages/tree/main/packages/remark-plugins)__ 2 | 3 | # HashiCorp Remark Plugins 4 | 5 | A potpourri of [remark](https://github.com/remarkjs/remark) plugins used by [HashiCorp](https://www.hashicorp.com/) to process markdown files. 6 | 7 | ## Overview 8 | 9 | [MDX](https://mdxjs.com) uses [remark](https://github.com/remarkjs/remark) internally to process and transform markdown via [plugins](https://github.com/remarkjs/remark/blob/master/doc/plugins.md#list-of-plugins). We use MDX to process markdown content to build out our docs, learning guides, and write rich content from our CMS. This set of plugins ensures that written markdown is translated properly into markup. 10 | 11 | ### Anchor Links 12 | 13 | The `anchorLinks` plugin adds anchor links to headings and when a list begins with an `inline code` element so that users are able to easily link to a specific place even if it is further down the page. See [its readme](plugins/anchor-links/README.md) for more details. 14 | 15 | ### Include Markdown 16 | 17 | The `includeMarkdown` plugin gives authors the ability to use a directive like `@include "filename.md" to import markdown from a separate file, like a partial. See [its readme](plugins/include-markdown/README.md) for more details. 18 | 19 | ### Custom Alerts 20 | 21 | The `paragraphCustomAlerts` plugin adds a custom syntax for creating alert boxes. See [its readme](plugins/inline-code-linkable/README.md) for more details. This plugin will be deprecated for a `` component in the future in a step to move us toward full [commonmark](https://commonmark.org/) compliance. 
22 | 23 | ### Typography 24 | 25 | The `typography` plugin adds css classes to certain typographical elements so that they adhere to the typography standards from our design system. See [its readme](plugins/inline-code-linkable/README.md) for more details. 26 | 27 | ## Usage 28 | 29 | Each of the plugins are individually exposed from the default export from this module and can be used as any other remark plugin would be normally. For example, with raw mdx: 30 | 31 | ```js 32 | const mdx = require('@mdx-js/mdx') 33 | const {typography, anchorLinks} = require('@hashicorp/remark-plugins') 34 | 35 | console.log(mdx.sync('some markdown content', { 36 | remarkPlugins: [typography, anchorLinks] 37 | }) 38 | ``` 39 | 40 | If you'd like to use all of the plugins in one shot, which is typically the case with this module, an array of all the plugins is returned from the `allPlugins` export, as such: 41 | 42 | ```js 43 | const mdx = require('@mdx-js/mdx') 44 | const {allPlugins} = require('@hashicorp/remark-plugins') 45 | 46 | console.log(mdx.sync('some markdown content', { 47 | remarkPlugins: allPlugins(/* options */) 48 | }) 49 | ``` 50 | 51 | Plugin options can be passed to `allPlugins` as an object, with the keys being plugin names. For example, to pass options to `headingLinkable`, you could call `allPlugins({ headingLinkable: { foo: 'bar' } })`. 52 | 53 | If you are using `next-hashicorp`, all of these plugins will be included by default. 54 | 55 | ## Publishing 56 | 57 | To publish this package to [npm](https://www.npmjs.com/package/@hashicorp/remark-plugins), simply run `npm run publish`. This command will guide you through the versioning/publishing process. 58 | 59 | > **Note**: There is no build step when publishing this library. The consumer is expected to transpile the code appropriately. 
60 | -------------------------------------------------------------------------------- /generate_slug.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | module.exports = function generateSlug(headline, links = []) { 7 | let slug = headline 8 | .toLowerCase() 9 | .trim() 10 | .replace(/<\/?[^>]*>/g, '') // Strip links 11 | .replace(/\(\(#.*?\)\)/g, '') // Strip anchor link aliases 12 | .replace(/\W+/g, '-') // Whitespace to '-' 13 | .replace(/-+/g, '-') // Collapse more than one '-' 14 | .replace(/^\-/g, '') // Remove leading '-' 15 | .replace(/\-$/g, '') // Remove trailing '-' 16 | 17 | // count if there are any duplicates on the page 18 | const dupeCount = links.reduce((m, i) => { 19 | if (slug === i) m++ 20 | return m 21 | }, 0) 22 | links.push(slug) 23 | 24 | // append the count to the end of the slug if necessary 25 | if (dupeCount > 0) slug = `${slug}-${dupeCount}` 26 | 27 | return slug 28 | } 29 | 30 | module.exports.generateAriaLabel = function generateAriaLabel(headline) { 31 | return headline 32 | .toLowerCase() 33 | .replace(/<\/?[^>]*>/g, '') // Strip html 34 | .replace(/\(\(#.*?\)\)/g, '') // Strip anchor link aliases 35 | .replace(/^\-/g, '') // Remove leading '-' 36 | .replace(/\-$/g, '') // Remove trailing '-' 37 | .replace(/\W+/g, ' ') // Collapse whitespace 38 | .trim() 39 | } 40 | -------------------------------------------------------------------------------- /generate_slug.test.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 
3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | const generateSlug = require('./generate_slug') 7 | 8 | test('numbering works', () => { 9 | const links = [] 10 | expect(generateSlug('foo bar', links)).toEqual('foo-bar') 11 | expect(generateSlug('foo bar', links)).toEqual('foo-bar-1') 12 | expect(generateSlug('foo bar', links)).toEqual('foo-bar-2') 13 | }) 14 | 15 | test('strips extra whitespace', () => { 16 | expect(generateSlug('foo bar')).toEqual('foo-bar') 17 | expect(generateSlug(' foo bar ')).toEqual('foo-bar') 18 | }) 19 | 20 | test('strips extra characters and html', () => { 21 | expect(generateSlug('foo bar (wow)')).toEqual('foo-bar-wow') 22 | expect(generateSlug('foo bar--wow -')).toEqual('foo-bar-wow') 23 | expect(generateSlug('foo bar_wow ♥ф你-💣')).toEqual('foo-bar_wow') 24 | expect(generateSlug("foo bar ')).toEqual('foo-bar') 26 | expect(generateSlug('foo bar wow')).toEqual('foo-bar-wow') 27 | }) 28 | 29 | test('downcases', () => { 30 | expect(generateSlug('fOo BAr')).toEqual('foo-bar') 31 | }) 32 | 33 | test('generates aria label', () => { 34 | expect(generateSlug.generateAriaLabel('foo bar wow')).toEqual( 35 | 'foo bar wow' 36 | ) 37 | expect(generateSlug.generateAriaLabel('foo bar ((#wow))')).toEqual('foo bar') 38 | }) 39 | 40 | test('removes anchor link aliases', () => { 41 | expect(generateSlug('foo bar ((#wow))')).toEqual('foo-bar') 42 | expect(generateSlug('foo bar ((#wow, #amaze))')).toEqual('foo-bar') 43 | expect(generateSlug('foo bar ((wow))')).toEqual('foo-bar-wow') 44 | }) 45 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 
3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | const anchorLinks = require('./plugins/anchor-links') 7 | const paragraphCustomAlerts = require('./plugins/paragraph-custom-alerts') 8 | const typography = require('./plugins/typography') 9 | const includeMarkdown = require('./plugins/include-markdown') 10 | 11 | // allow individual plugins to be pulled out and used 12 | module.exports = { 13 | anchorLinks, 14 | paragraphCustomAlerts, 15 | typography, 16 | includeMarkdown, 17 | } 18 | 19 | // for easy use of everything at the same time 20 | module.exports.allPlugins = ({ 21 | anchorLinks: anchorLinksOptions, 22 | typography: typographyOptions, 23 | includeMarkdown: includeMarkdownOptions, 24 | } = {}) => [ 25 | [includeMarkdown, includeMarkdownOptions], 26 | [anchorLinks, anchorLinksOptions], 27 | paragraphCustomAlerts, 28 | [typography, typographyOptions], 29 | ] 30 | -------------------------------------------------------------------------------- /index.test.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 
3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | const remarkPlugins = require('./index') 7 | 8 | it('api works as intended', () => { 9 | expect(remarkPlugins.anchorLinks).toBeTruthy() 10 | expect(remarkPlugins.paragraphCustomAlerts).toBeTruthy() 11 | expect(remarkPlugins.typography).toBeTruthy() 12 | expect(remarkPlugins.includeMarkdown).toBeTruthy() 13 | expect(remarkPlugins.allPlugins().length).toBe(4) 14 | // passes options correctly 15 | expect(remarkPlugins.allPlugins({ anchorLinks: 'foo' })[1][1]).toBe('foo') 16 | expect(remarkPlugins.allPlugins({ includeMarkdown: 'bar' })[0][1]).toBe('bar') 17 | }) 18 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@hashicorp/remark-plugins", 3 | "description": "A potpourri of remark plugins used to process .mdx files", 4 | "version": "4.1.1", 5 | "author": "Jeff Escalante", 6 | "bugs": "https://github.com/hashicorp/remark-plugins/issues", 7 | "contributors": [ 8 | "Kevin Pruett" 9 | ], 10 | "dependencies": { 11 | "@mdx-js/util": "1.6.22", 12 | "github-slugger": "^1.3.0", 13 | "remark": "12.0.1", 14 | "remark-mdx": "1.6.22", 15 | "to-vfile": "^6.1.0", 16 | "unist-util-flatmap": "^1.0.0", 17 | "unist-util-is": "^4.0.2", 18 | "unist-util-map": "^2.0.1", 19 | "unist-util-visit": "^2.0.3" 20 | }, 21 | "devDependencies": { 22 | "@mdx-js/mdx": "^1.6.14", 23 | "@types/jest": "^26.0.7", 24 | "jest": "^26.1.0", 25 | "mdast-util-to-string": "^1.1.0", 26 | "normalize-newline": "^3.0.0", 27 | "rehype-parse": "^7.0.1", 28 | "rehype-stringify": "^8.0.0", 29 | "release": "^6.3.0", 30 | "remark-html": "^12.0.0", 31 | "remark-rehype": "^7.0.0" 32 | }, 33 | "homepage": "https://github.com/hashicorp/remark-plugins#readme", 34 | "keywords": [], 35 | "license": "MPL-2.0", 36 | "main": "index.js", 37 | "publishConfig": { 38 | "access": "public" 39 | }, 40 | "repository": 
"https://github.com/hashicorp/remark-plugins", 41 | "scripts": { 42 | "test": "jest --verbose", 43 | "release:patch": "release patch && npm publish", 44 | "release:minor": "release minor && npm publish", 45 | "release:major": "release major && npm publish", 46 | "release:pre": "release pre canary && npm publish --tag=canary" 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /plugins/DEPRECATED_heading-linkable/README.md: -------------------------------------------------------------------------------- 1 | # Linkable Headings 2 | 3 | This plugin processes headings to generate a slug and adds a **permalink** element and an invisible **target** element. These two elements ensure that users are able to click a link next to the heading to quickly get an anchor link directly to the heading slug, and that developers are able to customize the position that the headline appears when that anchor link is visited, respectively. 4 | 5 | ### Input: 6 | 7 | ```mdx 8 | # First Level Heading 9 | 10 | ## Second Level Heading 11 | 12 | Content here... 13 | ``` 14 | 15 | ### Output: 16 | 17 | ```html 18 |

19 | » 25 | 26 | First Level Heading 27 |

28 | 29 |

30 | » 36 | 37 | Second Level Heading 38 |

39 | 40 |

Content here...

41 | ``` 42 | 43 | Since the `__target` element actually carries the id rather than the headline, it can be positioned independently to pad the headline off the top of the page if necessary, which is the case any time we use a "sticky" navigation. 44 | 45 | ## Options 46 | 47 | - `compatibilitySlug` _(optional)_ - if present, will generate an additional target element using a custom slug creation algorithm. Accepts a function with the following signature `fn(text: string)`. The `text` argument is the headline text; if the `compatibilitySlug` function generates an identical slug to the default, it will not be added at all. 48 | 49 | > **NOTE:** Be conscious of duplicate tracking with your compatibility function. If it needs to keep track of existing slugs on the page to avoid duplicates, it must implement that functionality on its own. Default slugs are not exposed to the `compatibilitySlug` function because this offers a footgun that can easily break compatibility. The `compatibilitySlug` function should operate entirely in its own sphere -- if it happens to generate a duplicate slug, the plugin itself will remove it as compatibility isn't necessary in that instance. 50 | -------------------------------------------------------------------------------- /plugins/DEPRECATED_heading-linkable/index.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 
3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | const generateSlug = require('../../generate_slug') 7 | const map = require('unist-util-map') 8 | const is = require('unist-util-is') 9 | 10 | module.exports = function headingLinkablePlugin({ compatibilitySlug } = {}) { 11 | return function transformer(tree) { 12 | const links = [] 13 | return map(tree, node => { 14 | if (!is(node, 'heading')) return node 15 | const text = node.children.reduce((m, i) => { 16 | m += i.value 17 | return m 18 | }, '') 19 | 20 | const slug = generateSlug(text, links) 21 | node.children.unshift({ 22 | type: 'html', 23 | value: `` 24 | }) 25 | 26 | if (compatibilitySlug) { 27 | const slug2 = compatibilitySlug(text) 28 | if (slug !== slug2) { 29 | node.children.unshift({ 30 | type: 'html', 31 | value: `` 32 | }) 33 | } 34 | } 35 | 36 | node.children.unshift({ 37 | type: 'html', 38 | value: `»` 41 | }) 42 | 43 | return node 44 | }) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /plugins/DEPRECATED_heading-linkable/index.test.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | const remark = require('remark') 7 | const html = require('remark-html') 8 | const headingLinkable = require('./index.js') 9 | 10 | describe('heading-linkable', () => { 11 | test('produces the expected html output', () => { 12 | expect( 13 | remark() 14 | .use(headingLinkable) 15 | .use(html) 16 | .processSync('# hello world') 17 | .toString() 18 | ).toMatch( 19 | [ 20 | '

', 21 | '»', 22 | '', 23 | 'hello world', 24 | '

' 25 | ].join('') 26 | ) 27 | }) 28 | 29 | test('handles duplicate slugs', () => { 30 | expect( 31 | remark() 32 | .use(headingLinkable) 33 | .use(html) 34 | .processSync( 35 | [ 36 | '# hello world', 37 | '# hello world', 38 | '# foo', 39 | '# hello world', 40 | '# foo' 41 | ].join('\n') 42 | ) 43 | .toString() 44 | ).toMatch( 45 | expectedResult([ 46 | ['hello world', 'hello-world', 'hello world'], 47 | ['hello world', 'hello-world-1', 'hello world'], 48 | ['foo', 'foo', 'foo'], 49 | ['hello world', 'hello-world-2', 'hello world'], 50 | ['foo', 'foo-1', 'foo'] 51 | ]) 52 | ) 53 | }) 54 | 55 | test('strips html', () => { 56 | expect( 57 | remark() 58 | .use(headingLinkable) 59 | .use(html) 60 | .processSync( 61 | [ 62 | '# hello world ', 63 | '# hello world' 64 | ].join('\n') 65 | ) 66 | .toString() 67 | ).toMatch( 68 | expectedResult([ 69 | ['hello world ', 'hello-world', 'hello world'], 70 | ['hello world', 'hello-world-1', 'hello world'] 71 | ]) 72 | ) 73 | }) 74 | 75 | test('removes leading hyphens', () => { 76 | expect( 77 | remark() 78 | .use(headingLinkable) 79 | .use(html) 80 | .processSync(['# - hello world', '# hello world'].join('\n')) 81 | .toString() 82 | ).toMatch( 83 | expectedResult([ 84 | ['- hello world', 'hello-world', 'hello world'], 85 | [' hello world', 'hello-world-1', 'hello world'] 86 | ]) 87 | ) 88 | }) 89 | 90 | test('removes double hyphens', () => { 91 | expect( 92 | remark() 93 | .use(headingLinkable) 94 | .use(html) 95 | .processSync( 96 | [ 97 | '# hEllO----world', 98 | '# hello :&-- world', 99 | '# hello world (foo)()' 100 | ].join('\n') 101 | ) 102 | .toString() 103 | ).toMatch( 104 | expectedResult([ 105 | ['hEllO----world', 'hello-world', 'hello world'], 106 | ['hello :&-- world', 'hello-world-1', 'hello world'], 107 | ['hello world (foo)()', 'hello-world-foo', 'hello world foo'] 108 | ]) 109 | ) 110 | }) 111 | 112 | test('generates an extra slug if the argument is provided', () => { 113 | expect( 114 | remark() 115 | 
.use(headingLinkable, { compatibilitySlug: slug => 'foo' }) 116 | .use(html) 117 | .processSync('# hello world') 118 | .toString() 119 | ).toMatch( 120 | [ 121 | '

', 122 | '»', 123 | '', 124 | '', 125 | 'hello world', 126 | '

' 127 | ].join('') 128 | ) 129 | }) 130 | 131 | test('does not render duplicate compatibility slugs', () => { 132 | expect( 133 | remark() 134 | .use(headingLinkable, { compatibilitySlug: slug => 'hello-world' }) 135 | .use(html) 136 | .processSync('# hello world') 137 | .toString() 138 | ).toMatch( 139 | [ 140 | '

', 141 | '»', 142 | '', 143 | 'hello world', 144 | '

' 145 | ].join('') 146 | ) 147 | }) 148 | }) 149 | 150 | // Takes an array of expected results, [literal text, slug, aria label] 151 | function expectedResult(results) { 152 | return results 153 | .map(([text, slug, ariaLabel]) => { 154 | return `

»${text}

` 155 | }) 156 | .join('\n') 157 | } 158 | -------------------------------------------------------------------------------- /plugins/DEPRECATED_inline-code-linkable/README.md: -------------------------------------------------------------------------------- 1 | # Linkable Inline Code Blocks 2 | 3 | This plugin links to any [`InlineCode` Node](https://github.com/syntax-tree/mdast#inlinecode) that appears within a [`ListItem` Node](https://github.com/syntax-tree/mdast#listitem) 4 | 5 | ### Input 6 | 7 | ```mdx 8 | - First item 9 | - Second item 10 | - `Third` item contains code 11 | ``` 12 | 13 | ### Output 14 | 15 | ```html 16 | 24 | ``` 25 | -------------------------------------------------------------------------------- /plugins/DEPRECATED_inline-code-linkable/__snapshots__/index.test.js.snap: -------------------------------------------------------------------------------- 1 | // Jest Snapshot v1, https://goo.gl/fbAQLP 2 | 3 | exports[`real world output, deep nested lists 1`] = ` 4 | "

Field Reference

5 | " 16 | `; 17 | -------------------------------------------------------------------------------- /plugins/DEPRECATED_inline-code-linkable/index.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | const is = require('unist-util-is') 7 | const map = require('unist-util-map') 8 | const generateSlug = require('../../generate_slug') 9 | 10 | module.exports = function inlineCodeLinkablePlugin() { 11 | return function transformer(tree) { 12 | const links = [] 13 | map(tree, node => { 14 | // we're looking for: listItem -> paragraph -> [inlineCode, text] 15 | const liNode = node 16 | if (!is(liNode, 'listItem') || !liNode.children) return node 17 | const pNode = liNode.children[0] 18 | if (!is(pNode, 'paragraph') || !pNode.children) return node 19 | const codeNode = pNode.children[0] 20 | if (!is(codeNode, 'inlineCode')) return node 21 | 22 | // If the above all passes, we have a list item starting with inline code 23 | // Construct an id/slug based on value of node 24 | const codeSlug = generateSlug(`inlinecode-${codeNode.value}`, links) 25 | 26 | // Add slug to parent
  • node's id attribute 27 | const data = liNode.data || (liNode.data = {}) 28 | const props = data.hProperties || (data.hProperties = {}) 29 | props.id = codeSlug 30 | 31 | // Wrap link element around child node 32 | pNode.children[0] = { 33 | type: 'link', 34 | url: `#${codeSlug}`, 35 | title: null, 36 | children: [pNode.children[0]] 37 | } 38 | 39 | return node 40 | }) 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /plugins/DEPRECATED_inline-code-linkable/index.test.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | const remark = require('remark') 7 | const codeBlockLinkable = require('./index.js') 8 | const unified = require('unified') 9 | const rehype = require('remark-rehype') 10 | const markdownParse = require('remark-parse') 11 | const html = require('rehype-stringify') 12 | 13 | describe('inlineCode-linkable', () => { 14 | describe('basic fixture - two list items; one linkable', () => { 15 | const processor = remark().use(codeBlockLinkable) 16 | const ast = processor.runSync( 17 | processor.parse( 18 | '- first\n- `code` here is some code\n- should not link this `codeBlock`' 19 | ) 20 | ) 21 | 22 | const firstListItem = ast.children[0].children[0] 23 | const secondListItem = ast.children[0].children[1] 24 | const thirdListItem = ast.children[0].children[2] 25 | 26 | it('should add an id to an
  • that contains ', () => { 27 | expect(secondListItem.data.hProperties.id).toEqual('inlinecode-code') 28 | }) 29 | 30 | it("should *not* add an id to an
  • that doesn't contain ", () => { 31 | expect(firstListItem.data).not.toBeDefined() 32 | }) 33 | 34 | it('should wrap elements within an tag', () => { 35 | expect(secondListItem.children[0].children[0].type).toEqual('link') 36 | expect(secondListItem.children[0].children[0].children[0].type).toEqual( 37 | 'inlineCode' 38 | ) 39 | }) 40 | 41 | it('should produce and apply matching ids and href attributes for
  • and ', () => { 42 | expect(secondListItem.data.hProperties.id).toEqual( 43 | secondListItem.children[0].children[0].url.slice(1) 44 | ) 45 | }) 46 | 47 | it('should *not* link
  • where appears outside of first position', () => { 48 | expect(thirdListItem.data).not.toBeDefined() 49 | }) 50 | }) 51 | 52 | describe('intermediate fixture - several list items; several linkable', () => { 53 | const processor = remark().use(codeBlockLinkable) 54 | const ast = processor.runSync( 55 | processor.parse( 56 | '- one\n- two\n- `three` docs for **three**\n- four\n- `five` is also linkable!\n- `six` is linkable too!' 57 | ) 58 | ) 59 | 60 | const firstListItem = ast.children[0].children[0] 61 | const secondListItem = ast.children[0].children[1] 62 | const thirdListItem = ast.children[0].children[2] 63 | const fourthListItem = ast.children[0].children[3] 64 | const fifthListItem = ast.children[0].children[4] 65 | const sixthListItem = ast.children[0].children[5] 66 | 67 | it("should make third, fifth, and sixth
  • 's linkable", () => { 68 | expect(thirdListItem.data.hProperties.id).toBeDefined() 69 | expect(fifthListItem.data.hProperties.id).toBeDefined() 70 | expect(sixthListItem.data.hProperties.id).toBeDefined() 71 | }) 72 | 73 | it("should *not* make first, second, and fourth
  • 's linkable", () => { 74 | expect(firstListItem.data).not.toBeDefined() 75 | expect(secondListItem.data).not.toBeDefined() 76 | expect(fourthListItem.data).not.toBeDefined() 77 | }) 78 | }) 79 | }) 80 | 81 | test('real world output, deep nested lists', () => { 82 | const text = 83 | "#### Field Reference\n\n - `TaskStates` - A map of tasks to their current state and the latest events\n that have effected the state. `TaskState` objects contain the following\n fields:\n - `State`: The task's current state. It can have one of the following\n values:\n - `TaskStatePending` - The task is waiting to be run, either for the first\n time or due to a restart." 84 | 85 | unified() 86 | .use(markdownParse) 87 | .use(codeBlockLinkable) 88 | .use(rehype) 89 | .use(html) 90 | .process(text, (_, file) => { 91 | expect(String(file)).toMatchSnapshot() 92 | }) 93 | }) 94 | -------------------------------------------------------------------------------- /plugins/anchor-links/README.md: -------------------------------------------------------------------------------- 1 | # Anchor Links 2 | 3 | This plugin processes headings and inline code blocks at the beginning of a list item to generate a slug and adds a **permalink** element and an invisible **target** element. These two elements ensure that users are able to click a link next to the heading, or click on the inline code block to quickly get an anchor link directly to the corresponding section, and that developers are able to customize the position that the section appears when that anchor link is visited, respectively. 4 | 5 | ## Input: 6 | 7 | ```mdx 8 | # First Level Heading 9 | 10 | - list item 11 | - `code_block` - with text explanation 12 | 13 | Content here... 14 | ``` 15 | 16 | ## Output: 17 | 18 | ```html 19 |

    20 | » 26 | 27 | First Level Heading 28 |

    29 | 30 | 44 | 45 |

    Content here...

    46 | ``` 47 | 48 | Since the `__target` element carries the `id` rather than the headline, it can be positioned independently to pad the headline off the top of the page if necessary, which is the case any time we use a "sticky" navigation. Also worth noting is that the `__target` and `__permalink` elements carry a category identifier after their classname, `h` for "heading" and `lic` for "list inline code", in order to make styling super clear and avoid any chance for conflicts. 49 | 50 | ## Anchor Link Aliases 51 | 52 | This plugin also adds the ability to add **anchor link aliases** via markdown directly. Aliases give authors the ability to specify additional anchors that they would like to link to an existing anchor link. Here's an example of how this might look: 53 | 54 | ```md 55 | # Headline ((#alias, #alias-2)) 56 | 57 | - `code_block` ((#alias-3)) further text, etc 58 | ``` 59 | 60 | This markup would ensure that `#alias` and `#alias-2` also link to `#headline`, and that `#alias-3` also links to `#code_block`. Any number of aliases can be specified as long as they are in this exact format - for a single alias `((#slug))`, or for multiple, `((#slug, #slug2, #slug3))` etc. Anything following a headline or initial inline code element within a list item will be used as aliases and removed from the output. If you are using this syntax and you still see it in the output, this typically means there was an error in the syntax used. 61 | 62 | This feature is intended to be used **very sparingly**. It is a nonstandard markdown feature which we do our best to avoid as an organization. Let's walk through a couple situations where this syntax could be used and examine when it's appropriate. 63 | 64 | - You have written a headline, and would like to add a custom "vanity" permalink, to ensure that it's short and memorable. 
65 | 66 | 🚫 **This is not an appropriate use of an anchor link alias.** As a custom, nonstandard markdown feature, we need to use this functionality sparingly, only when it is essential. This scenario does not qualify as essential. 67 | 68 | - You are changing an existing headline that is linked to internally, which you know will change its permalink slug. It's quicker and easier to add an alias than to find-and-replace all the internal links to the anchor. 69 | 70 | 🚫 **This is not an appropriate use of an anchor link alias.** Any time a headline changes, internal links to its permalink should be manually updated to its new permalink using find-and-replace. 71 | 72 | - You are changing an existing headline, and there are many external links to this headline which we are unable to fix. 73 | 74 | ✅ **This is the only appropriate scenario to be using anchor link aliases.** We track statistics on all anchor links via web analytics - if a headline's text must be changed, ask your manager and/or the digital dev team to check the analytics dashboard and see if there is significant externally-driven traffic to its permalink. If so, an anchor link alias should be used to avoid breaking users' expectations. 75 | 76 | ## Options 77 | 78 | - `compatibilitySlug` _(function, optional)_ - if present, will generate a slug using a custom slug creation algorithm and add it as an additional `__target` element. Accepts a function with the following signature `fn(text: string)`. The `text` argument is the headline/inline code text; if the `compatibilitySlug` function generates an identical slug to the default, it will not be added at all. 79 | 80 | > **NOTE:** Be conscious of duplicate tracking with your compatibility function. If it needs to keep track of existing slugs on the page to avoid duplicates, it must implement that functionality on its own. Default slugs are not exposed to the `compatibilitySlug` function because this offers a footgun that can easily break compatibility. 
The `compatibilitySlug` function should operate entirely in its own sphere -- if it happens to generate a duplicate slug, the plugin itself will remove it as compatibility isn't necessary in that instance. 81 | 82 | - `headings` _(array, optional)_ - if present, data about the headings being processed will be pushed to the array. Each element is an object with the following properties: 83 | 84 | - `aliases`: a string array containing all of the given [anchor link aliases](#anchor-link-aliases) for a heading 85 | - `level`: the level of a heading (e.g. an `

    ` has a level of 1 and an `

    ` has a level of 2) 86 | - `permalinkSlug`: the slug used in the permalink element 87 | - `slug`: the slug generated from a heading's text 88 | - `title`: the content of a heading in plain text (excluding aliases) 89 | 90 | - `listWithInlineCodePrefix` _(string, optional)_ - if present, will append a string to the beginning of each instance where lists with inline code at the beginning get an anchor link. This is also provided for compatibility reasons, as we previously used a separate plugin for lists with inline code that appended an `inlinecode` prefix to avoid conflicts. 91 | -------------------------------------------------------------------------------- /plugins/anchor-links/fixtures/00-nested-headings/tutorial-terraform-aks.mdx: -------------------------------------------------------------------------------- 1 | 8 | 9 | The Azure Kubernetes Service (AKS) is a fully managed Kubernetes service for deploying, managing, and scaling containerized applications on Azure. 10 | 11 | In this tutorial, you will deploy a 2 node AKS cluster on your default VPC using Terraform then access its Kubernetes dashboard. 12 | 13 | ~> **Warning!** If you're not using an account that qualifies under the Azure 14 | [free tier](https://azure.microsoft.com/en-us/free/), you may be charged to run these 15 | examples. The most you should be charged should only be a few dollars, but 16 | we're not responsible for any charges that may incur. 17 | 18 | ### Why deploy with Terraform? 19 | 20 | While you could use the built-in Azure provisioning processes (UI, CLI) for AKS clusters, Terraform provides you with several benefits: 21 | 22 | - **Unified Workflow** - If you are already deploying infrastructure to Azure with Terraform, your AKS cluster can fit into that workflow. You can also deploy applications into your AKS cluster using Terraform. 
23 | 24 | - **Full Lifecycle Management** - Terraform doesn't only create resources, it updates, and deletes tracked resources without requiring you to inspect the API to identify those resources. 25 | 26 | - **Graph of Relationships** - Terraform understands dependency relationships between resources. For example, an Azure Kubernetes cluster needs to be associated with a resource group, Terraform won't attempt to create the cluster if the resource group failed to create. 27 | 28 | ## Prerequisites 29 | 30 | The tutorial assumes some basic familiarity with Kubernetes and `kubectl` but does 31 | not assume any pre-existing deployment. 32 | 33 | It also assumes that you are familiar with the usual Terraform plan/apply 34 | workflow. If you're new to Terraform itself, refer first to the Getting Started 35 | [tutorial](/terraform/tutorials/azure-get-started). 36 | 37 | For this tutorial, you will need 38 | 39 | - an [Azure account](https://portal.azure.com/#home) 40 | - a configured Azure CLI 41 | - `kubectl` 42 | 43 | 44 | 45 | 46 | In order for Terraform to run operations on your behalf, you must install and 47 | configure the Azure CLI tool. To install the Azure CLI, follow 48 | [these instructions](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) or choose a package manager based on your operating system. 49 | 50 | 51 | 52 | 53 | 54 | You can also use the package manager [`homebrew`](https://formulae.brew.sh/) to install the Azure CLI. 55 | 56 | ```shell-session 57 | $ brew install azure-cli 58 | ``` 59 | 60 | 61 | 62 | 63 | 64 | You can also use the package manager [`Chocolatey`](https://chocolatey.org/) to install the Azure CLI. 
65 | 66 | ```shell-session 67 | $ choco install azure-cli 68 | ``` 69 | 70 | 71 | 72 | 73 | 74 | After you've installed the Azure CLI, login into Azure by running: 75 | 76 | ```shell-session 77 | $ az login 78 | ``` 79 | 80 | 81 | 82 | 83 | 84 | To install the `kubectl` (Kubernetes CLI), follow [these instructions](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or choose a package manager based on your operating system. 85 | 86 | 87 | 88 | 89 | 90 | Use the package manager [`homebrew`](https://formulae.brew.sh/) to install `kubectl`. 91 | 92 | ```shell-session 93 | $ brew install kubernetes-cli 94 | ``` 95 | 96 | 97 | 98 | 99 | 100 | Use the package manager [`Chocolatey`](https://chocolatey.org/) to install `kubectl`. 101 | 102 | ```shell-session 103 | $ choco install kubernetes-cli 104 | ``` 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | ## Set up and initialize your Terraform workspace 113 | 114 | In your terminal, clone the [following repository](https://github.com/hashicorp/learn-terraform-provision-aks-cluster). 115 | It contains the example configuration used in this tutorial. 116 | 117 | ```shell-session 118 | $ git clone https://github.com/hashicorp/learn-terraform-provision-aks-cluster 119 | ``` 120 | 121 | You can explore this repository by changing directories or navigating in your UI. 122 | 123 | ```shell-session 124 | $ cd learn-terraform-provision-aks-cluster 125 | ``` 126 | 127 | In here, you will find three files used to provision the AKS cluster. 128 | 129 | 1. [`aks-cluster.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/aks-cluster.tf) provisions a 130 | resource group and an AKS cluster. The `default_node_pool` defines the 131 | number of VMs and the VM type the cluster uses. 
132 | 133 | ```hcl 134 | resource "azurerm_kubernetes_cluster" "default" { 135 | name = "${random_pet.prefix.id}-aks" 136 | location = azurerm_resource_group.default.location 137 | resource_group_name = azurerm_resource_group.default.name 138 | dns_prefix = "${random_pet.prefix.id}-k8s" 139 | 140 | default_node_pool { 141 | name = "default" 142 | node_count = 2 143 | vm_size = "Standard_B2s" 144 | os_disk_size_gb = 30 145 | } 146 | 147 | service_principal { 148 | client_id = var.appId 149 | client_secret = var.password 150 | } 151 | 152 | role_based_access_control_enabled = true 153 | 154 | tags = { 155 | environment = "Demo" 156 | } 157 | } 158 | ``` 159 | 160 | 1. [`variables.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/variables.tf) declares the `appID` and `password` so Terraform can use reference its configuration 161 | 162 | 1. [`terraform.tfvars`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/terraform.tfvars) defines the `appId` and `password` variables to authenticate to Azure 163 | 164 | 1. [`outputs.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf) declares values that can be useful to interact with your AKS cluster 165 | 166 | 1. [`versions.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/versions.tf) sets the Terraform version to at least 0.14 and defines the [`required_provider`](/terraform/language/providers/requirements#requiring-providers) block 167 | 168 | ### Create an Active Directory service principal account 169 | 170 | There are many ways to authenticate to the Azure provider. In this tutorial, you 171 | will use an Active Directory service principal account. You can learn how to 172 | authenticate using a different method [here](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#authenticating-to-azure). 
173 | 174 | First, you need to create an Active Directory service principal account using 175 | the Azure CLI. You should see something like the following. 176 | 177 | ```shell-session 178 | $ az ad sp create-for-rbac --skip-assignment 179 | { 180 | "appId": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", 181 | "displayName": "azure-cli-2019-04-11-00-46-05", 182 | "name": "http://azure-cli-2019-04-11-00-46-05", 183 | "password": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", 184 | "tenant": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" 185 | } 186 | ``` 187 | 188 | ### Update your `terraform.tfvars` file 189 | 190 | Replace the values in your `terraform.tfvars` file with your `appId` and 191 | `password`. Terraform will use these values to authenticate to Azure before 192 | provisioning your resources. Your `terraform.tfvars` file should look like the 193 | following. 194 | 195 | ```plaintext 196 | # terraform.tfvars 197 | appId = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" 198 | password = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" 199 | ``` 200 | 201 | ### Initialize Terraform 202 | 203 | After you have saved your customized variables file, initialize your Terraform 204 | workspace, which will download the provider and initialize it with the values 205 | provided in your `terraform.tfvars` file. 206 | 207 | ```shell-session 208 | $ terraform init 209 | Initializing the backend... 210 | 211 | Initializing provider plugins... 212 | - Reusing previous version of hashicorp/random from the dependency lock file 213 | - Reusing previous version of hashicorp/azurerm from the dependency lock file 214 | - Installing hashicorp/random v3.0.0... 215 | - Installed hashicorp/random v3.0.0 (signed by HashiCorp) 216 | - Installing hashicorp/azurerm v3.0.2... 217 | - Installed hashicorp/azurerm v3.0.2 (signed by HashiCorp) 218 | 219 | Terraform has been successfully initialized! 220 | 221 | You may now begin working with Terraform. 
Try running "terraform plan" to see 222 | any changes that are required for your infrastructure. All Terraform commands 223 | should now work. 224 | 225 | If you ever set or change modules or backend configuration for Terraform, 226 | rerun this command to reinitialize your working directory. If you forget, other 227 | commands will detect it and remind you to do so if necessary. 228 | ``` 229 | 230 | ## Provision the AKS cluster 231 | 232 | In your initialized directory, run `terraform apply` and review the planned actions. 233 | Your terminal output should indicate the plan is running and what resources will be created. 234 | 235 | ```shell-session 236 | $ terraform apply 237 | An execution plan has been generated and is shown below. 238 | Resource actions are indicated with the following symbols: 239 | + create 240 | 241 | Terraform will perform the following actions: 242 | 243 | ## ... 244 | 245 | Plan: 1 to add, 0 to change, 0 to destroy. 246 | 247 | ## ... 248 | ``` 249 | 250 | You can see this terraform apply will provision an Azure resource group and an 251 | AKS cluster. Confirm the apply with a `yes`. 252 | 253 | This process should take approximately 5 minutes. Upon successful application, 254 | your terminal prints the outputs defined in `aks-cluster.tf`. 255 | 256 | ```plaintext hideClipboard 257 | Apply complete! Resources: 1 added, 0 changed, 0 destroyed. 258 | 259 | Outputs: 260 | 261 | kubernetes_cluster_name = light-eagle-aks 262 | resource_group_name = light-eagle-rg 263 | ``` 264 | 265 | ## Configure kubectl 266 | 267 | Now that you've provisioned your AKS cluster, you need to configure `kubectl`. 268 | 269 | Run the following command to retrieve the access credentials for your cluster 270 | and automatically configure `kubectl`. 
271 | 272 | ```shell-session 273 | $ az aks get-credentials --resource-group $(terraform output -raw resource_group_name) --name $(terraform output -raw kubernetes_cluster_name) 274 | Merged "light-eagle-aks" as current context in /Users/dos/.kube/config 275 | ``` 276 | 277 | The [resource group name](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf#L1) 278 | and [Kubernetes Cluster name](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf#L5) 279 | correspond to the output variables showed after the successful Terraform run. 280 | 281 | ## Access Kubernetes Dashboard 282 | 283 | To verify that your cluster's configuration, visit 284 | the Azure Portal's Kubernetes resource view. 285 | [Azure recommends](https://docs.microsoft.com/en-us/azure/aks/kubernetes-dashboard#start-the-kubernetes-dashboard) 286 | using this view over the default Kubernetes dashboard, since the AKS dashboard 287 | add-on is deprecated for Kubernetes versions 1.19+. 288 | 289 | Run the following command to generate the Azure portal link. 290 | 291 | ```shell-session 292 | $ az aks browse --resource-group $(terraform output -raw resource_group_name) --name $(terraform output -raw kubernetes_cluster_name) 293 | Kubernetes resources view on https://portal.azure.com/#resource/subscriptions/aaaaa/resourceGroups/light-eagle-rg/providers/Microsoft.ContainerService/managedClusters/light-eagle-aks/workloads 294 | ``` 295 | 296 | Go to the URL in your preferred browser to view the Kubernetes resource view. 297 | 298 | ![AKS Dashboard](/img/terraform/aks-portal.azure.com.png) 299 | 300 | ## Clean up your workspace 301 | 302 | Congratulations, you have provisioned an AKS cluster, configured `kubectl`, 303 | and visited the Kubernetes dashboard. 
304 | 305 | If you'd like to learn how to manage your AKS cluster using the Terraform 306 | Kubernetes Provider, leave your cluster running and continue to the 307 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider). 308 | 309 | ~> **Note:** This directory is **only** used to provision a AKS cluster with Terraform. 310 | By keeping the Terraform configuration for provisioning a Kubernetes cluster and 311 | managing a Kubernetes cluster resources separate, changes in one repository don't 312 | affect the other. In addition, the modularity makes the configuration more 313 | readable and enables you to scope different permissions to each workspace. 314 | 315 | If not, remember to destroy any resources you create once you are done with this 316 | tutorial. Run the `destroy` command and confirm with `yes` in your terminal. 317 | 318 | ```shell-session 319 | $ terraform destroy 320 | ``` 321 | 322 | ## Next steps 323 | 324 | For more information on the AKS resource, visit the 325 | [Azure provider documentation](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster). 326 | 327 | For steps on how to manage Kubernetes resources your AKS cluster or any other 328 | already created Kubernetes cluster, visit the 329 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider). 330 | 331 | To use run triggers to deploy a Kubernetes Cluster, Consul and Vault 332 | on Google Cloud, visit the [Deploy Consul and Vault on a Kubernetes Cluster using Run Triggers tutorial](/terraform/tutorials/kubernetes/kubernetes-consul-vault-pipeline). 
333 | -------------------------------------------------------------------------------- /plugins/anchor-links/fixtures/00-nested-headings/tutorial-terraform-gke.mdx: -------------------------------------------------------------------------------- 1 | 8 | 9 | The Google Kubernetes Engine (GKE) is a fully managed Kubernetes service for deploying, managing, and scaling containerized applications on Google Cloud. 10 | 11 | In this tutorial, you will deploy a 2-node separately managed node pool GKE cluster using Terraform. This GKE cluster will be distributed across multiple zones for high availability. 12 | Then, you will configure `kubectl` using Terraform output to deploy a Kubernetes dashboard on the cluster. 13 | 14 | ~> **Warning!** Google Cloud charges 15 | [about ten cents per hour management fee for each GKE cluster](https://cloud.google.com/kubernetes-engine/pricing), in addition to the cluster's resource costs. 16 | One zonal cluster per billing account is free. As a result, you may be charged 17 | to run these examples. The most you should be charged should only be a few 18 | dollars, but we're not responsible for any charges that may incur. 19 | 20 | -> **Tip:** This example configuration provisions a GKE cluster with 2 nodes so it's under the default `IN_USE_ADDRESSES` quota. This configuration should be used as a learning exercise only — do not run a 2-node cluster in production. 21 | 22 | ### Why deploy with Terraform? 23 | 24 | While you could use the built-in GCP provisioning processes (UI, SDK/CLI) for GKE clusters, Terraform provides you with several benefits: 25 | 26 | - **Unified Workflow** - If you are already deploying infrastructure to Google Cloud with Terraform, your GKE cluster can fit into that workflow. You can also deploy applications into your GKE cluster using Terraform. 
27 | 28 | - **Full Lifecycle Management** - Terraform doesn't only create resources, it updates, and deletes tracked resources without requiring you to inspect the API to identify those resources. 29 | 30 | - **Graph of Relationships** - Terraform understands dependency relationships between resources. For example, if you require a separately managed node pool, Terraform won't attempt to create the node pool if the GKE cluster failed to create. 31 | 32 | ## Prerequisites 33 | 34 | The tutorial assumes some basic familiarity with Kubernetes and `kubectl` but does 35 | not assume any pre-existing deployment. 36 | 37 | It also assumes that you are familiar with the usual Terraform plan/apply 38 | workflow. If you're new to Terraform itself, refer first to the Getting Started 39 | [tutorial](/terraform/tutorials/gcp-get-started). 40 | 41 | For this tutorial, you will need 42 | 43 | - a [GCP account](https://console.cloud.google.com/) 44 | - a configured gcloud SDK 45 | - `kubectl` 46 | 47 | 48 | 49 | 50 | 51 | In order for Terraform to run operations on your behalf, you must install and 52 | configure the `gcloud` SDK tool. To install the `gcloud` SDK, follow 53 | [these instructions](https://cloud.google.com/sdk/docs/quickstarts) or choose a package manager based on your operating system. 54 | 55 | 56 | 57 | 58 | 59 | You can also use the package manager [`homebrew`](https://formulae.brew.sh/) to install the gcloud SDK. 60 | 61 | ```shell-session 62 | $ brew install --cask google-cloud-sdk 63 | ``` 64 | 65 | 66 | 67 | 68 | 69 | You can also use the package manager [`Chocolatey`](https://chocolatey.org/) to install the gcloud SDK. 70 | 71 | ```shell-session 72 | $ choco install gcloudsdk 73 | ``` 74 | 75 | 76 | 77 | 78 | 79 | After you've installed the `gcloud` SDK, initialize it by running the following 80 | command. 
81 | 82 | ```shell-session 83 | $ gcloud init 84 | ``` 85 | 86 | This will authorize the SDK to access GCP using your user account credentials 87 | and add the SDK to your PATH. This steps requires you to login and select the 88 | project you want to work in. Finally, add your account to the Application 89 | Default Credentials (ADC). This will allow Terraform to access these credentials 90 | to provision resources on GCloud. 91 | 92 | ```shell-session 93 | $ gcloud auth application-default login 94 | ``` 95 | 96 | 97 | 98 | 99 | 100 | To install the `kubectl` (Kubernetes CLI), follow [these instructions](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or choose a package manager based on your operating system. 101 | 102 | 103 | 104 | 105 | 106 | Use the package manager [`homebrew`](https://formulae.brew.sh/) to install `kubectl`. 107 | 108 | ```shell-session 109 | $ brew install kubernetes-cli 110 | ``` 111 | 112 | 113 | 114 | 115 | 116 | Use the package manager [`Chocolatey`](https://chocolatey.org/) to install `kubectl`. 117 | 118 | ```shell-session 119 | $ choco install kubernetes-cli 120 | ``` 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | ## Set up and initialize your Terraform workspace 129 | 130 | In your terminal, clone the [following repository](https://github.com/hashicorp/learn-terraform-provision-gke-cluster). 131 | It contains the example configuration used in this tutorial. 132 | 133 | ```shell-session 134 | $ git clone https://github.com/hashicorp/learn-terraform-provision-gke-cluster 135 | ``` 136 | 137 | You can explore this repository by changing directories or navigating in your UI. 138 | 139 | ```shell-session 140 | $ cd learn-terraform-provision-gke-cluster 141 | ``` 142 | 143 | In here, you will find four files used to provision a VPC, subnets and a GKE cluster. 144 | 145 | 1. [`vpc.tf`](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/vpc.tf) provisions a VPC and subnet. 
A new VPC 146 | is created for this tutorial so it doesn't impact your existing cloud environment 147 | and resources. This file outputs `region`. 148 | 149 | 1. [`gke.tf`](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/gke.tf) provisions a GKE cluster and a 150 | separately managed node pool (recommended). Separately managed node pools 151 | allow you to customize your Kubernetes cluster profile — this is 152 | useful if some Pods require more resources than others. You can learn more 153 | [here](https://cloud.google.com/kubernetes-engine/docs/concepts/node-pools). 154 | The number of nodes in the node pool is also defined 155 | [here](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/gke.tf#L11). 156 | 157 | 1. [`terraform.tfvars`](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/terraform.tfvars) is a template for the `project_id` and `region` variables. 158 | 159 | 1. [`versions.tf`](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/versions.tf) sets the Terraform version to at least 0.14. 160 | 161 | ### Update your `terraform.tfvars` file 162 | 163 | Replace the values in your `terraform.tfvars` file with your `project_id` and 164 | `region`. Terraform will use these values to target your project when 165 | provisioning your resources. Your `terraform.tfvars` file should look like the 166 | following. 167 | 168 | ```plaintext 169 | # terraform.tfvars 170 | project_id = "REPLACE_ME" 171 | region = "us-central1" 172 | ``` 173 | 174 | You can find the project your `gcloud` is configured to with this command. 175 | 176 | ```shell-session 177 | $ gcloud config get-value project 178 | ``` 179 | 180 | The region has been defaulted to `us-central1`; you can find a full list of 181 | gcloud regions [here](https://cloud.google.com/compute/docs/regions-zones).
182 | 183 | ### Initialize Terraform workspace 184 | 185 | After you have saved your customized variables file, initialize your Terraform 186 | workspace, which will download the provider and initialize it with the values 187 | provided in your `terraform.tfvars` file. 188 | 189 | ```shell-session 190 | $ terraform init 191 | 192 | Initializing the backend... 193 | 194 | Initializing provider plugins... 195 | - Reusing previous version of hashicorp/google from the dependency lock file 196 | - Installing hashicorp/google v4.27.0... 197 | - Installed hashicorp/google v4.27.0 (signed by HashiCorp) 198 | 199 | Terraform has been successfully initialized! 200 | 201 | You may now begin working with Terraform. Try running "terraform plan" to see 202 | any changes that are required for your infrastructure. All Terraform commands 203 | should now work. 204 | 205 | If you ever set or change modules or backend configuration for Terraform, 206 | rerun this command to reinitialize your working directory. If you forget, other 207 | commands will detect it and remind you to do so if necessary. 208 | ``` 209 | 210 | ## Provision the GKE cluster 211 | 212 | -> **NOTE** [Compute Engine API](https://console.developers.google.com/apis/api/compute.googleapis.com/overview) 213 | and [Kubernetes Engine API](https://console.cloud.google.com/apis/api/container.googleapis.com/overview) 214 | are required for `terraform apply` to work on this configuration. 215 | Enable both APIs for your Google Cloud project before continuing. 216 | 217 | In your initialized directory, run `terraform apply` and review the planned actions. 218 | Your terminal output should indicate the plan is running and what resources will be created. 219 | 220 | ```shell-session 221 | $ terraform apply 222 | An execution plan has been generated and is shown below. 223 | Resource actions are indicated with the following symbols: 224 | + create 225 | 226 | Terraform will perform the following actions: 227 | 228 | ## ... 
229 | 230 | Plan: 4 to add, 0 to change, 0 to destroy. 231 | 232 | ## ... 233 | ``` 234 | 235 | You can see this terraform apply will provision a VPC, subnet, GKE Cluster and a 236 | GKE node pool. Confirm the apply with a `yes`. 237 | 238 | This process should take approximately 10 minutes. Upon successful application, 239 | your terminal prints the outputs defined in `vpc.tf` and `gke.tf`. 240 | 241 | ```plaintext 242 | Apply complete! Resources: 4 added, 0 changed, 0 destroyed. 243 | 244 | Outputs: 245 | 246 | kubernetes_cluster_host = "35.232.196.187" 247 | kubernetes_cluster_name = "dos-terraform-edu-gke" 248 | project_id = "dos-terraform-edu" 249 | region = "us-central1" 250 | ``` 251 | 252 | ## Configure kubectl 253 | 254 | Now that you've provisioned your GKE cluster, you need to configure `kubectl`. 255 | 256 | Run the following command to retrieve the access credentials for your cluster 257 | and automatically configure `kubectl`. 258 | 259 | ```shell-session 260 | $ gcloud container clusters get-credentials $(terraform output -raw kubernetes_cluster_name) --region $(terraform output -raw region) 261 | Fetching cluster endpoint and auth data. 262 | kubeconfig entry generated for dos-terraform-edu-gke. 263 | ``` 264 | 265 | The 266 | [Kubernetes cluster name](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/gke.tf#L63) 267 | and [region](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/vpc.tf#L29) 268 | correspond to the output variables showed after the successful Terraform run. 269 | 270 | ### Troubleshooting 271 | 272 | You may see the following warning message when you try to retrieve your cluster 273 | credentials. This may be because your Kubernetes cluster is still 274 | initializing/updating. If this happens, you can still proceed to the next step. 275 | 276 | ```plaintext 277 | WARNING: cluster dos-terraform-edu-gke is not running. The kubernetes API may not be available. 
278 | ``` 279 | 280 | ## Deploy and access Kubernetes Dashboard 281 | 282 | To verify your cluster is correctly configured and running, you will deploy the 283 | Kubernetes dashboard and navigate to it in your local browser. 284 | 285 | While you can deploy the Kubernetes dashboard using Terraform, `kubectl` is used in this tutorial so you don't need to configure your Terraform Kubernetes Provider. 286 | 287 | The following command will schedule the resources necessary for the dashboard. 288 | 289 | ```shell-session 290 | $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta8/aio/deploy/recommended.yaml 291 | 292 | namespace/kubernetes-dashboard created 293 | serviceaccount/kubernetes-dashboard created 294 | service/kubernetes-dashboard created 295 | secret/kubernetes-dashboard-certs created 296 | secret/kubernetes-dashboard-csrf created 297 | secret/kubernetes-dashboard-key-holder created 298 | configmap/kubernetes-dashboard-settings created 299 | role.rbac.authorization.k8s.io/kubernetes-dashboard created 300 | clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created 301 | rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created 302 | clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created 303 | deployment.apps/kubernetes-dashboard created 304 | service/dashboard-metrics-scraper created 305 | deployment.apps/dashboard-metrics-scraper created 306 | ``` 307 | 308 | Now, create a proxy server that will allow you to navigate to the dashboard 309 | from the browser on your local machine. This will continue running until you stop the process by pressing `CTRL + C`. 
310 | 311 | ```shell-session 312 | $ kubectl proxy 313 | ``` 314 | 315 | You should be able to access the Kubernetes dashboard [here](http://127.0.0.1:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/) 316 | (`http://127.0.0.1:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/`). 317 | 318 | ![GKE Auth Page](/img/terraform/kubernetes/gke-k8s-dashboard-auth.png) 319 | 320 | ## Authenticate to Kubernetes Dashboard 321 | 322 | To use the Kubernetes dashboard, you need to create a `ClusterRoleBinding` and 323 | provide an authorization token. This gives the `cluster-admin` permission to 324 | access the `kubernetes-dashboard`. 325 | Authenticating using `kubeconfig` is **not** an option. You can read more about 326 | it in the [Kubernetes documentation](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/#accessing-the-dashboard-ui). 327 | 328 | In another terminal (do not close the `kubectl proxy` process), create the 329 | `ClusterRoleBinding` resource. 330 | 331 | ```shell-session 332 | $ kubectl apply -f https://raw.githubusercontent.com/hashicorp/learn-terraform-provision-gke-cluster/main/kubernetes-dashboard-admin.rbac.yaml 333 | ``` 334 | 335 | Then, generate the authorization token. 336 | 337 | ```shell-session 338 | $ kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep service-controller-token | awk '{print $1}') 339 | 340 | Name: service-controller-token-m8m7j 341 | Namespace: kube-system 342 | Labels: 343 | Annotations: kubernetes.io/service-account.name: service-controller 344 | kubernetes.io/service-account.uid: bc99ddad-6be7-11ea-a3c7-42010a800017 345 | 346 | Type: kubernetes.io/service-account-token 347 | 348 | Data 349 | ==== 350 | token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9... 
351 | ca.crt: 1119 bytes 352 | namespace: 11 bytes 353 | ``` 354 | 355 | Select "Token" on the Dashboard UI then copy and paste the entire token you 356 | receive into the 357 | [dashboard authentication screen](http://127.0.0.1:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/) 358 | to sign in. You are now signed in to the dashboard for your Kubernetes cluster. 359 | 360 | ![GKE Dashboard](/img/terraform/kubernetes/gke-k8s-dashboard.png) 361 | 362 | ### (Optional) GKE nodes and node pool 363 | 364 | On the Dashboard UI, click _Nodes_ on the left hand menu. 365 | 366 | Notice there are 6 nodes in your cluster, even though 367 | [`gke_num_nodes` in your `gke.tf` file](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/gke.tf#L11) 368 | was set to 2. This is because a node pool was provisioned in each of the three zones 369 | within the region to provide high availability. 370 | 371 | ```shell-session 372 | $ gcloud container clusters describe dos-terraform-edu-gke --region us-central1 --format='default(locations)' 373 | locations: 374 | - us-central1-b 375 | - us-central1-f 376 | - us-central1-c 377 | ``` 378 | 379 | -> **NOTE** Replace `dos-terraform-edu-gke` with the `kubernetes_cluster_name` value from your Terraform output. 380 | 381 | ![GKE Dashboard](/img/terraform/kubernetes/gke-k8s-dashboard-nodes.png) 382 | 383 | ## Clean up your workspace 384 | 385 | Congratulations, you have provisioned a GKE cluster with a separated node pool, 386 | configured `kubectl`, and deployed the Kubernetes dashboard. 387 | 388 | If you'd like to learn how to manage your GKE cluster using the Terraform 389 | Kubernetes Provider, leave your cluster running and continue to the 390 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider). 391 | 392 | ~> **Note:** This directory is **only** used to provision a GKE cluster with Terraform. 
393 | By keeping the Terraform configuration for provisioning a Kubernetes cluster and 394 | managing a Kubernetes cluster resources separate, changes in one repository don't 395 | affect the other. In addition, the modularity makes the configuration more 396 | readable and enables you to scope different permissions to each workspace. 397 | 398 | If not, remember to destroy any resources you create once you are done with this 399 | tutorial. Run the `destroy` command and confirm with `yes` in your terminal. 400 | 401 | ```shell-session 402 | $ terraform destroy 403 | ``` 404 | 405 | ## Next steps 406 | 407 | For more information on the GKE resource, please visit the 408 | [Google Cloud provider documentation](https://registry.terraform.io/providers/hashicorp/google/3.14.0/docs/resources/container_cluster). 409 | 410 | For steps on how to manage Kubernetes resources your GKE cluster or any other 411 | already created Kubernetes cluster, visit the 412 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider). 413 | 414 | For a more in-depth Kubernetes example, [Deploy Consul and Vault on a Kubernetes Cluster using Run Triggers](/terraform/tutorials/kubernetes/kubernetes-consul-vault-pipeline) (this tutorial is GKE based). 415 | -------------------------------------------------------------------------------- /plugins/anchor-links/fixtures/00-nested-headings/tutorials-nomad-format-output-with-templates.mdx: -------------------------------------------------------------------------------- 1 | 8 | 9 | When using Nomad at an intermediate to advanced level, you'll need to interface with other systems or customize output generated by Nomad. The `-t` flag is a powerful way to pass a template in Go's text/template format to 10 | several of the Nomad commands that generate output based on the API. This allows 11 | you to filter and customize the output to meet your specific needs. 
12 | 13 | The commands that allow for the -t flag are: 14 | 15 | - `nomad acl policy list` 16 | - `nomad acl token list` 17 | - `nomad alloc status` 18 | - `nomad deployment list` 19 | - `nomad deployment status` 20 | - `nomad eval status` 21 | - `nomad job deployments` 22 | - `nomad job history` 23 | - `nomad job inspect` 24 | - `nomad namespace list` 25 | - `nomad node status` 26 | - `nomad plugin status` 27 | - `nomad quota list` 28 | - `nomad volume status` 29 | 30 | This tutorial will teach you how to explore the objects that are returned to 31 | the template engine and how to use template syntax to format the output into 32 | a custom form. 33 | 34 | ## Prerequisites 35 | 36 | This guide assumes the following: 37 | 38 | - Familiarity with Go's text/template syntax. You can learn more about it in the 39 | [Learn Go Template Syntax] tutorial 40 | 41 | - That you are running these commands against a Nomad cluster with an active 42 | workload. You can create a minimal environment using a dev agent, started with 43 | `nomad agent -dev`, then running at least one Nomad job. You can use 44 | `nomad init -short` to create a sample Docker job or provide your own Nomad 45 | job. 46 | 47 | ## Note the shell-specific syntax 48 | 49 | When using the -t flag, you need to correctly handle string literals based on 50 | your shell environment. In a POSIX shell, you can run the following with a 51 | single quote: 52 | 53 | ```shell-session 54 | $ nomad node status -t '{{printf "%#+v" .}}' 55 | ``` 56 | 57 | In a Windows shell (for example, PowerShell), use single 58 | quotes but escape the double quotes inside the parameter as follows: 59 | 60 | ```powershell 61 | PS> nomad node status -t '{{printf \"%#+v\" .}}' 62 | ``` 63 | 64 | In this tutorial, you can select examples with the proper escaping using the 65 | tabs above the snippets. 
66 | 67 | ## Start discovering objects 68 | 69 | The `printf` function and the `"%#+v"` format string are critical tools for you 70 | in exploring an unfamiliar template context. 71 | 72 | Run the following command to output the context being passed to the template 73 | in Go object format. 74 | 75 | 76 | 77 | 78 | ```shell-session 79 | $ nomad node status -t '{{printf "%#+v" .}}' 80 | ``` 81 | 82 | 83 | 84 | 85 | ```powershell 86 | PS> nomad node status -t '{{printf \"%#+v\" .}}' 87 | ``` 88 | 89 | 90 | 91 | 92 | ```plaintext 93 | []*api.NodeListStub{(*api.NodeListStub)(0xc0003fa160), (*api.NodeListStub)(0xc0003fa0b0), (*api.NodeListStub)(0xc0003fa000)} 94 | ``` 95 | 96 | The output indicates that the context consists of a list (`[]`) of pointers 97 | (`*`) to `api.NodeListStub` objects. The list will also show one NodeListStub 98 | object per client node in your cluster's server state. 99 | 100 | You can explore these api.NodeListStub object by using the `range` control over 101 | the list. 102 | 103 | 104 | 105 | 106 | ```shell-session 107 | $ nomad node status -t '{{range .}}{{printf "%#+v" .}}{{end}}' 108 | ``` 109 | 110 | 111 | 112 | 113 | ```powershell 114 | PS> nomad node status -t '{{range .}}{{printf \"%#+v\" .}}{{end}}' 115 | ``` 116 | 117 | 118 | 119 | 120 | ```plaintext 121 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ... 122 | ``` 123 | 124 | If you have a lot of client nodes in your cluster state, this output will be 125 | unwieldy. In that case, you can use `with` and the index function to get the 126 | first list item. 127 | 128 | 129 | 130 | 131 | ```shell-session 132 | $ nomad node status -t '{{with index . 0}}{{printf "%#+v" .}}{{end}}' 133 | ``` 134 | 135 | 136 | 137 | 138 | ```powershell 139 | PS> nomad node status -t '{{with index . 
0}}{{printf \"%#+v\" .}}{{end}}' 140 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ... 141 | ``` 142 | 143 | 144 | 145 | 146 | ```plaintext 147 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ... 148 | ``` 149 | 150 | Finally, output `Name` and `Version` for each client in the cluster. 151 | 152 | 153 | 154 | 155 | ```shell-session 156 | $ nomad node status -t '{{range .}}{{printf "%s: %s\n" .Name .Version}}{{end}}' 157 | ``` 158 | 159 | 160 | 161 | 162 | ```powershell 163 | PS> nomad node status -t '{{range .}}{{printf \"%s: %s\n\" .Name .Version}}{{end}}' 164 | ``` 165 | 166 | 167 | 168 | 169 | ```plaintext 170 | nomad-client-2.node.consul: 0.12.0 171 | nomad-client-3.node.consul: 0.12.0 172 | nomad-client-1.node.consul: 0.12.0 173 | ``` 174 | 175 | ## Make quiet output 176 | 177 | Suppose you want to create a reduced version of the `nomad job status` output 178 | to show just the running job IDs in your cluster and nothing else. 179 | 180 | 181 | 182 | 183 | ```shell-session 184 | $ nomad job inspect -t '{{range .}}{{if eq .Status "running"}}{{ println .Name}}{{end}}{{end}}' 185 | ``` 186 | 187 | 188 | 189 | 190 | ```powershell 191 | PS> nomad job inspect -t '{{range .}}{{if eq .Status \"running\"}}{{ println .Name}}{{end}}{{end}}' 192 | ``` 193 | 194 | 195 | 196 | 197 | Nomad will output the job IDs for every running job in your cluster. 
For example: 198 | 199 | ```plaintext 200 | fabio 201 | sockshop-carts 202 | sockshop-catalogue 203 | sockshop-frontend 204 | sockshop-infra 205 | sockshop-orders 206 | sockshop-payment 207 | sockshop-shipping 208 | sockshop-user 209 | ``` 210 | 211 | ### Challenge yourself 212 | 213 | Allocations have a slightly different shape. How might you create similar output 214 | from the `nomad alloc status` command? Make sure that your Nomad cluster has at 215 | least one allocation running and then use the printf technique from earlier to 216 | explore the values sent into the template. 217 | 218 | 219 | 220 | 221 | Print the context that you are passed from the command using the printf command. 222 | 223 | 224 | 225 | 226 | ```shell-session 227 | $ nomad alloc status -t '{{printf "%#+v" . }}' 228 | ``` 229 | 230 | 231 | 232 | 233 | ```powershell 234 | PS> nomad alloc status -t '{{printf \"%#+v\" . }}' 235 | ``` 236 | 237 | 238 | 239 | 240 | ```plaintext 241 | []*api.AllocationListStub ... 242 | ``` 243 | 244 | Note that the first thing that you receive is a list (`[]`) of pointers (`*`) to 245 | `AllocationListStub` objects. 246 | 247 | Use `range` to traverse each item in the list. 248 | 249 | 250 | 251 | 252 | ```shell-session 253 | $ nomad alloc status -t '{{range .}}{{printf "%#+v" . }}{{end}}' 254 | ``` 255 | 256 | 257 | 258 | 259 | ```powershell 260 | PS> nomad alloc status -t '{{range .}}{{printf \"%#+v\" . }}{{end}}' 261 | ``` 262 | 263 | 264 | 265 | 266 | ```plaintext 267 | &api.AllocationListStub{ID:"30663b68-4d8a-aada-4ad2-011b1acae3a1", EvalID:"c5eda90b-f675-048e-b2f7-9ced30e4916b", Name:"sockshop-user.userdb[0]", Namespace:"default", NodeID:"3be35c12-70aa-8816-195e-a4630a457727", NodeName:"nomad-client-3.node.consul", JobID:"sockshop-user", JobType:"service", JobVersion:0x0, ... 268 | ``` 269 | 270 | If you have a lot of allocations running, this could get unwieldy. In that case, 271 | you can use `with` and the index function to get the first list item. 
272 | 273 | 274 | 275 | 276 | ```shell-session 277 | $ nomad alloc status -t '{{with index . 0}}{{printf "%#+v" . }}{{end}}' 278 | ``` 279 | 280 | 281 | 282 | 283 | ```powershell 284 | PS> nomad alloc status -t '{{with index . 0}}{{printf \"%#+v\" . }}{{end}}' 285 | ``` 286 | 287 | 288 | 289 | 290 | ```plaintext 291 | &api.AllocationListStub{ID:"30663b68-4d8a-aada-4ad2-011b1acae3a1", EvalID:"c5eda90b-f675-048e-b2f7-9ced30e4916b", Name:"sockshop-user.userdb[0]", Namespace:"default", NodeID:"3be35c12-70aa-8816-195e-a4630a457727", NodeName:"nomad-client-3.node.consul", JobID:"sockshop-user", JobType:"service", JobVersion:0x0, ... 292 | ``` 293 | 294 | The fields on the AllocationListStub object that give insight into the running 295 | state of an allocation are `DesiredStatus` and `ClientStatus`. 296 | 297 | -> **Did you know?** The definition of an [AllocationListStub][] object and 298 | valid values for the DesiredStatus and ClientStatus are located in Nomad's 299 | [api package][]. Take a moment to look at it and see what other information you 300 | might be interested in displaying with templates. 301 | 302 | Update your template to show items with a DesiredStatus of "run" and a client 303 | status of "running" or "pending." 304 | 305 | 306 | 307 | 308 | ```shell-session 309 | $ nomad alloc status -t '{{range .}}{{if and (eq .DesiredStatus "run") (or (eq .ClientStatus "running") (eq .ClientStatus "pending"))}}{{println .ID}}{{end}}{{end}}' 310 | ``` 311 | 312 | 313 | 314 | 315 | ```powershell 316 | PS> nomad alloc status -t '{{range .}}{{if and (eq .DesiredStatus \"run\") (or (eq .ClientStatus \"running\") (eq .ClientStatus \"pending\"))}}{{println .ID}}{{end}}{{end}}' 317 | ``` 318 | 319 | 320 | 321 | 322 | ```plaintext 323 | 30663b68-4d8a-aada-4ad2-011b1acae3a1 324 | 11b916da-d679-1718-26f3-f6cd499bfdb8 325 | 68bcb157-359f-9293-d091-5a8ef71475ad 326 | ... 
327 | ``` 328 | 329 | You now have a list of the IDs for all of the allocations running in your Nomad 330 | cluster. 331 | 332 | 333 | 334 | 335 | ## Retrieve a template from file 336 | 337 | Using the command line to write templates becomes challenging 338 | as the template becomes more complex. 339 | 340 | By writing a template in its own file, you can use comments, span multiple lines, and indent conditionals in order to make them more readable to you and to other operators. 341 | 342 | Consider using some of these techniques 343 | to include the template data into the command. 344 | 345 | 346 | 347 | 348 | 349 | Create a file named running_jobs.tmpl with the following content. 350 | 351 | ```plaintext 352 | {{- /* 353 | Get Running Jobs 354 | Run with `nomad job inspect -t "$(cat running_jobs.tmpl)"` 355 | */ -}} 356 | {{- range . -}} 357 | {{- if eq .Status "running" -}} 358 | {{- println .Name -}} 359 | {{- end -}} 360 | {{- end -}} 361 | ``` 362 | 363 | Now, use a subshell to read the file into a variable 364 | 365 | ```shell-session 366 | $ nomad job inspect -t "$(cat running_jobs.tmpl)" 367 | ``` 368 | 369 | 370 | 371 | 372 | 373 | Create a file named running_jobs.tmpl with the following content. 374 | 375 | ```plaintext 376 | {{- /* 377 | Get Running Jobs 378 | Run with: 379 | $content=Get-Content running_jobs.tmpl -Raw; nomad job inspect -t $content 380 | */ -}} 381 | {{- range . -}} 382 | {{- if eq .Status \"running\" -}} 383 | {{- println .Name -}} 384 | {{- end -}} 385 | {{- end -}} 386 | ``` 387 | 388 | Now, use a subshell to read the file into a variable 389 | 390 | ```powershell 391 | PS> $content=Get-Content running_jobs.tmpl -Raw; nomad job inspect -t $content 392 | ``` 393 | 394 | 395 | 396 | 397 | 398 | ## Learn more 399 | 400 | In this tutorial, you learned how to: 401 | 402 | - Customize the output of several Nomad commands using Go's text/template 403 | syntax. 
404 | 405 | - Use the `printf` function to discover what is available in the template's 406 | context. 407 | 408 | - Use a template definition contained in a file as part of the command. 409 | 410 | Learn more about templating in other tutorials in the Nomad Templating 411 | Collection. 412 | 413 | [learn go template syntax]: /nomad/tutorials/templates/go-template-syntax 414 | [allocationliststub]: https://godoc.org/github.com/hashicorp/nomad/api#AllocationListStub 415 | [api package]: https://godoc.org/github.com/hashicorp/nomad/api 416 | -------------------------------------------------------------------------------- /plugins/anchor-links/fixtures/01-nested-heading/tutorial-terraform-aks.mdx: -------------------------------------------------------------------------------- 1 | 10 | 11 | The Azure Kubernetes Service (AKS) is a fully managed Kubernetes service for deploying, managing, and scaling containerized applications on Azure. 12 | 13 | In this tutorial, you will deploy a 2 node AKS cluster on your default VPC using Terraform then access its Kubernetes dashboard. 14 | 15 | ~> **Warning!** If you're not using an account that qualifies under the Azure 16 | [free tier](https://azure.microsoft.com/en-us/free/), you may be charged to run these 17 | examples. The most you should be charged should only be a few dollars, but 18 | we're not responsible for any charges that may incur. 19 | 20 | ### Why deploy with Terraform? 21 | 22 | While you could use the built-in Azure provisioning processes (UI, CLI) for AKS clusters, Terraform provides you with several benefits: 23 | 24 | - **Unified Workflow** - If you are already deploying infrastructure to Azure with Terraform, your AKS cluster can fit into that workflow. You can also deploy applications into your AKS cluster using Terraform. 
25 | 26 | - **Full Lifecycle Management** - Terraform doesn't only create resources, it updates, and deletes tracked resources without requiring you to inspect the API to identify those resources. 27 | 28 | - **Graph of Relationships** - Terraform understands dependency relationships between resources. For example, an Azure Kubernetes cluster needs to be associated with a resource group, Terraform won't attempt to create the cluster if the resource group failed to create. 29 | 30 | ## Prerequisites 31 | 32 | The tutorial assumes some basic familiarity with Kubernetes and `kubectl` but does 33 | not assume any pre-existing deployment. 34 | 35 | It also assumes that you are familiar with the usual Terraform plan/apply 36 | workflow. If you're new to Terraform itself, refer first to the Getting Started 37 | [tutorial](/terraform/tutorials/azure-get-started). 38 | 39 | For this tutorial, you will need 40 | 41 | - an [Azure account](https://portal.azure.com/#home) 42 | - a configured Azure CLI 43 | - `kubectl` 44 | 45 | 46 | 47 | 48 | In order for Terraform to run operations on your behalf, you must install and 49 | configure the Azure CLI tool. To install the Azure CLI, follow 50 | [these instructions](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) or choose a package manager based on your operating system. 51 | 52 | 53 | 54 | 55 | ## This is a single nested heading within two Tabs (tabbedSectionDepth should be 2) 56 | 57 | You can also use the package manager [`homebrew`](https://formulae.brew.sh/) to install the Azure CLI. 58 | 59 | ```shell-session 60 | $ brew install azure-cli 61 | ``` 62 | 63 | 64 | 65 | 66 | 67 | You can also use the package manager [`Chocolatey`](https://chocolatey.org/) to install the Azure CLI. 
68 | 69 | ```shell-session 70 | $ choco install azure-cli 71 | ``` 72 | 73 | 74 | 75 | 76 | 77 | After you've installed the Azure CLI, login into Azure by running: 78 | 79 | ```shell-session 80 | $ az login 81 | ``` 82 | 83 | 84 | 85 | 86 | 87 | To install the `kubectl` (Kubernetes CLI), follow [these instructions](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or choose a package manager based on your operating system. 88 | 89 | 90 | 91 | 92 | 93 | Use the package manager [`homebrew`](https://formulae.brew.sh/) to install `kubectl`. 94 | 95 | ```shell-session 96 | $ brew install kubernetes-cli 97 | ``` 98 | 99 | 100 | 101 | 102 | 103 | Use the package manager [`Chocolatey`](https://chocolatey.org/) to install `kubectl`. 104 | 105 | ```shell-session 106 | $ choco install kubernetes-cli 107 | ``` 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | ## Set up and initialize your Terraform workspace 116 | 117 | In your terminal, clone the [following repository](https://github.com/hashicorp/learn-terraform-provision-aks-cluster). 118 | It contains the example configuration used in this tutorial. 119 | 120 | ```shell-session 121 | $ git clone https://github.com/hashicorp/learn-terraform-provision-aks-cluster 122 | ``` 123 | 124 | You can explore this repository by changing directories or navigating in your UI. 125 | 126 | ```shell-session 127 | $ cd learn-terraform-provision-aks-cluster 128 | ``` 129 | 130 | In here, you will find three files used to provision the AKS cluster. 131 | 132 | 1. [`aks-cluster.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/aks-cluster.tf) provisions a 133 | resource group and an AKS cluster. The `default_node_pool` defines the 134 | number of VMs and the VM type the cluster uses. 
135 | 136 | ```hcl 137 | resource "azurerm_kubernetes_cluster" "default" { 138 | name = "${random_pet.prefix.id}-aks" 139 | location = azurerm_resource_group.default.location 140 | resource_group_name = azurerm_resource_group.default.name 141 | dns_prefix = "${random_pet.prefix.id}-k8s" 142 | 143 | default_node_pool { 144 | name = "default" 145 | node_count = 2 146 | vm_size = "Standard_B2s" 147 | os_disk_size_gb = 30 148 | } 149 | 150 | service_principal { 151 | client_id = var.appId 152 | client_secret = var.password 153 | } 154 | 155 | role_based_access_control_enabled = true 156 | 157 | tags = { 158 | environment = "Demo" 159 | } 160 | } 161 | ``` 162 | 163 | 1. [`variables.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/variables.tf) declares the `appID` and `password` so Terraform can use reference its configuration 164 | 165 | 1. [`terraform.tfvars`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/terraform.tfvars) defines the `appId` and `password` variables to authenticate to Azure 166 | 167 | 1. [`outputs.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf) declares values that can be useful to interact with your AKS cluster 168 | 169 | 1. [`versions.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/versions.tf) sets the Terraform version to at least 0.14 and defines the [`required_provider`](/terraform/language/providers/requirements#requiring-providers) block 170 | 171 | ### Create an Active Directory service principal account 172 | 173 | There are many ways to authenticate to the Azure provider. In this tutorial, you 174 | will use an Active Directory service principal account. You can learn how to 175 | authenticate using a different method [here](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#authenticating-to-azure). 
176 | 177 | First, you need to create an Active Directory service principal account using 178 | the Azure CLI. You should see something like the following. 179 | 180 | ```shell-session 181 | $ az ad sp create-for-rbac --skip-assignment 182 | { 183 | "appId": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", 184 | "displayName": "azure-cli-2019-04-11-00-46-05", 185 | "name": "http://azure-cli-2019-04-11-00-46-05", 186 | "password": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", 187 | "tenant": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" 188 | } 189 | ``` 190 | 191 | ### Update your `terraform.tfvars` file 192 | 193 | Replace the values in your `terraform.tfvars` file with your `appId` and 194 | `password`. Terraform will use these values to authenticate to Azure before 195 | provisioning your resources. Your `terraform.tfvars` file should look like the 196 | following. 197 | 198 | ```plaintext 199 | # terraform.tfvars 200 | appId = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" 201 | password = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" 202 | ``` 203 | 204 | ### Initialize Terraform 205 | 206 | After you have saved your customized variables file, initialize your Terraform 207 | workspace, which will download the provider and initialize it with the values 208 | provided in your `terraform.tfvars` file. 209 | 210 | ```shell-session 211 | $ terraform init 212 | Initializing the backend... 213 | 214 | Initializing provider plugins... 215 | - Reusing previous version of hashicorp/random from the dependency lock file 216 | - Reusing previous version of hashicorp/azurerm from the dependency lock file 217 | - Installing hashicorp/random v3.0.0... 218 | - Installed hashicorp/random v3.0.0 (signed by HashiCorp) 219 | - Installing hashicorp/azurerm v3.0.2... 220 | - Installed hashicorp/azurerm v3.0.2 (signed by HashiCorp) 221 | 222 | Terraform has been successfully initialized! 223 | 224 | You may now begin working with Terraform. 
Try running "terraform plan" to see 225 | any changes that are required for your infrastructure. All Terraform commands 226 | should now work. 227 | 228 | If you ever set or change modules or backend configuration for Terraform, 229 | rerun this command to reinitialize your working directory. If you forget, other 230 | commands will detect it and remind you to do so if necessary. 231 | ``` 232 | 233 | ## Provision the AKS cluster 234 | 235 | In your initialized directory, run `terraform apply` and review the planned actions. 236 | Your terminal output should indicate the plan is running and what resources will be created. 237 | 238 | ```shell-session 239 | $ terraform apply 240 | An execution plan has been generated and is shown below. 241 | Resource actions are indicated with the following symbols: 242 | + create 243 | 244 | Terraform will perform the following actions: 245 | 246 | ## ... 247 | 248 | Plan: 1 to add, 0 to change, 0 to destroy. 249 | 250 | ## ... 251 | ``` 252 | 253 | You can see this terraform apply will provision an Azure resource group and an 254 | AKS cluster. Confirm the apply with a `yes`. 255 | 256 | This process should take approximately 5 minutes. Upon successful application, 257 | your terminal prints the outputs defined in `aks-cluster.tf`. 258 | 259 | ```plaintext hideClipboard 260 | Apply complete! Resources: 1 added, 0 changed, 0 destroyed. 261 | 262 | Outputs: 263 | 264 | kubernetes_cluster_name = light-eagle-aks 265 | resource_group_name = light-eagle-rg 266 | ``` 267 | 268 | ## Configure kubectl 269 | 270 | Now that you've provisioned your AKS cluster, you need to configure `kubectl`. 271 | 272 | Run the following command to retrieve the access credentials for your cluster 273 | and automatically configure `kubectl`. 
274 | 275 | ```shell-session 276 | $ az aks get-credentials --resource-group $(terraform output -raw resource_group_name) --name $(terraform output -raw kubernetes_cluster_name) 277 | Merged "light-eagle-aks" as current context in /Users/dos/.kube/config 278 | ``` 279 | 280 | The [resource group name](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf#L1) 281 | and [Kubernetes Cluster name](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf#L5) 282 | correspond to the output variables shown after the successful Terraform run. 283 | 284 | ## Access Kubernetes Dashboard 285 | 286 | To verify your cluster's configuration, visit 287 | the Azure Portal's Kubernetes resource view. 288 | [Azure recommends](https://docs.microsoft.com/en-us/azure/aks/kubernetes-dashboard#start-the-kubernetes-dashboard) 289 | using this view over the default Kubernetes dashboard, since the AKS dashboard 290 | add-on is deprecated for Kubernetes versions 1.19+. 291 | 292 | Run the following command to generate the Azure portal link. 293 | 294 | ```shell-session 295 | $ az aks browse --resource-group $(terraform output -raw resource_group_name) --name $(terraform output -raw kubernetes_cluster_name) 296 | Kubernetes resources view on https://portal.azure.com/#resource/subscriptions/aaaaa/resourceGroups/light-eagle-rg/providers/Microsoft.ContainerService/managedClusters/light-eagle-aks/workloads 297 | ``` 298 | 299 | Go to the URL in your preferred browser to view the Kubernetes resource view. 300 | 301 | ![AKS Dashboard](/img/terraform/aks-portal.azure.com.png) 302 | 303 | ## Clean up your workspace 304 | 305 | Congratulations, you have provisioned an AKS cluster, configured `kubectl`, 306 | and visited the Kubernetes dashboard. 
307 | 308 | If you'd like to learn how to manage your AKS cluster using the Terraform 309 | Kubernetes Provider, leave your cluster running and continue to the 310 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider). 311 | 312 | ~> **Note:** This directory is **only** used to provision an AKS cluster with Terraform. 313 | By keeping the Terraform configuration for provisioning a Kubernetes cluster and 314 | managing Kubernetes cluster resources separate, changes in one repository don't 315 | affect the other. In addition, the modularity makes the configuration more 316 | readable and enables you to scope different permissions to each workspace. 317 | 318 | If not, remember to destroy any resources you create once you are done with this 319 | tutorial. Run the `destroy` command and confirm with `yes` in your terminal. 320 | 321 | ```shell-session 322 | $ terraform destroy 323 | ``` 324 | 325 | ## Next steps 326 | 327 | For more information on the AKS resource, visit the 328 | [Azure provider documentation](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster). 329 | 330 | For steps on how to manage Kubernetes resources in your AKS cluster or any other 331 | already created Kubernetes cluster, visit the 332 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider). 333 | 334 | To use run triggers to deploy a Kubernetes Cluster, Consul and Vault 335 | on Google Cloud, visit the [Deploy Consul and Vault on a Kubernetes Cluster using Run Triggers tutorial](/terraform/tutorials/kubernetes/kubernetes-consul-vault-pipeline). 
336 | -------------------------------------------------------------------------------- /plugins/anchor-links/fixtures/01-nested-heading/tutorial-terraform-gke.mdx: -------------------------------------------------------------------------------- 1 | 10 | 11 | The Google Kubernetes Engine (GKE) is a fully managed Kubernetes service for deploying, managing, and scaling containerized applications on Google Cloud. 12 | 13 | In this tutorial, you will deploy a 2-node separately managed node pool GKE cluster using Terraform. This GKE cluster will be distributed across multiple zones for high availability. 14 | Then, you will configure `kubectl` using Terraform output to deploy a Kubernetes dashboard on the cluster. 15 | 16 | ~> **Warning!** Google Cloud charges 17 | [about ten cents per hour management fee for each GKE cluster](https://cloud.google.com/kubernetes-engine/pricing), in addition to the cluster's resource costs. 18 | One zonal cluster per billing account is free. As a result, you may be charged 19 | to run these examples. The most you should be charged should only be a few 20 | dollars, but we're not responsible for any charges that may incur. 21 | 22 | -> **Tip:** This example configuration provisions a GKE cluster with 2 nodes so it's under the default `IN_USE_ADDRESSES` quota. This configuration should be used as a learning exercise only — do not run a 2-node cluster in production. 23 | 24 | ### Why deploy with Terraform? 25 | 26 | While you could use the built-in GCP provisioning processes (UI, SDK/CLI) for GKE clusters, Terraform provides you with several benefits: 27 | 28 | - **Unified Workflow** - If you are already deploying infrastructure to Google Cloud with Terraform, your GKE cluster can fit into that workflow. You can also deploy applications into your GKE cluster using Terraform. 
29 | 30 | - **Full Lifecycle Management** - Terraform doesn't only create resources, it updates, and deletes tracked resources without requiring you to inspect the API to identify those resources. 31 | 32 | - **Graph of Relationships** - Terraform understands dependency relationships between resources. For example, if you require a separately managed node pool, Terraform won't attempt to create the node pool if the GKE cluster failed to create. 33 | 34 | ## Prerequisites 35 | 36 | The tutorial assumes some basic familiarity with Kubernetes and `kubectl` but does 37 | not assume any pre-existing deployment. 38 | 39 | It also assumes that you are familiar with the usual Terraform plan/apply 40 | workflow. If you're new to Terraform itself, refer first to the Getting Started 41 | [tutorial](/terraform/tutorials/gcp-get-started). 42 | 43 | For this tutorial, you will need 44 | 45 | - a [GCP account](https://console.cloud.google.com/) 46 | - a configured gcloud SDK 47 | - `kubectl` 48 | 49 | 50 | 51 | 52 | 53 | In order for Terraform to run operations on your behalf, you must install and 54 | configure the `gcloud` SDK tool. To install the `gcloud` SDK, follow 55 | [these instructions](https://cloud.google.com/sdk/docs/quickstarts) or choose a package manager based on your operating system. 56 | 57 | 58 | 59 | 60 | ## This is a single nested heading within two Tabs (tabbedSectionDepth should be 2) 61 | 62 | 63 | You can also use the package manager [`homebrew`](https://formulae.brew.sh/) to install the gcloud SDK. 64 | 65 | ```shell-session 66 | $ brew install --cask google-cloud-sdk 67 | ``` 68 | 69 | 70 | 71 | 72 | 73 | You can also use the package manager [`Chocolatey`](https://chocolatey.org/) to install the gcloud SDK. 74 | 75 | ```shell-session 76 | $ choco install gcloudsdk 77 | ``` 78 | 79 | 80 | 81 | 82 | 83 | After you've installed the `gcloud` SDK, initialize it by running the following 84 | command. 
85 | 86 | ```shell-session 87 | $ gcloud init 88 | ``` 89 | 90 | This will authorize the SDK to access GCP using your user account credentials 91 | and add the SDK to your PATH. This step requires you to log in and select the 92 | project you want to work in. Finally, add your account to the Application 93 | Default Credentials (ADC). This will allow Terraform to access these credentials 94 | to provision resources on GCloud. 95 | 96 | ```shell-session 97 | $ gcloud auth application-default login 98 | ``` 99 | 100 | 101 | 102 | 103 | 104 | To install the `kubectl` (Kubernetes CLI), follow [these instructions](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or choose a package manager based on your operating system. 105 | 106 | 107 | 108 | 109 | 110 | Use the package manager [`homebrew`](https://formulae.brew.sh/) to install `kubectl`. 111 | 112 | ```shell-session 113 | $ brew install kubernetes-cli 114 | ``` 115 | 116 | 117 | 118 | 119 | 120 | Use the package manager [`Chocolatey`](https://chocolatey.org/) to install `kubectl`. 121 | 122 | ```shell-session 123 | $ choco install kubernetes-cli 124 | ``` 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | ## Set up and initialize your Terraform workspace 133 | 134 | In your terminal, clone the [following repository](https://github.com/hashicorp/learn-terraform-provision-gke-cluster). 135 | It contains the example configuration used in this tutorial. 136 | 137 | ```shell-session 138 | $ git clone https://github.com/hashicorp/learn-terraform-provision-gke-cluster 139 | ``` 140 | 141 | You can explore this repository by changing directories or navigating in your UI. 142 | 143 | ```shell-session 144 | $ cd learn-terraform-provision-gke-cluster 145 | ``` 146 | 147 | In here, you will find four files used to provision a VPC, subnets and a GKE cluster. 148 | 149 | 1. [`vpc.tf`](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/vpc.tf) provisions a VPC and subnet. 
A new VPC 150 | is created for this tutorial so it doesn't impact your existing cloud environment 151 | and resources. This file outputs `region`. 152 | 153 | 1. [`gke.tf`](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/gke.tf) provisions a GKE cluster and a 154 | separately managed node pool (recommended). Separately managed node pools 155 | allow you to customize your Kubernetes cluster profile — this is 156 | useful if some Pods require more resources than others. You can learn more 157 | [here](https://cloud.google.com/kubernetes-engine/docs/concepts/node-pools). 158 | The number of nodes in the node pool is also defined 159 | [here](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/gke.tf#L11). 160 | 161 | 1. [`terraform.tfvars`](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/terraform.tfvars) is a template for the `project_id` and `region` variables. 162 | 163 | 1. [`versions.tf`](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/versions.tf) sets the Terraform version to at least 0.14. 164 | 165 | ### Update your `terraform.tfvars` file 166 | 167 | Replace the values in your `terraform.tfvars` file with your `project_id` and 168 | `region`. Terraform will use these values to target your project when 169 | provisioning your resources. Your `terraform.tfvars` file should look like the 170 | following. 171 | 172 | ```plaintext 173 | # terraform.tfvars 174 | project_id = "REPLACE_ME" 175 | region = "us-central1" 176 | ``` 177 | 178 | You can find the project your `gcloud` is configured to with this command. 179 | 180 | ```shell-session 181 | $ gcloud config get-value project 182 | ``` 183 | 184 | The region has been defaulted to `us-central1`; you can find a full list of 185 | gcloud regions [here](https://cloud.google.com/compute/docs/regions-zones). 
186 | 187 | ### Initialize Terraform workspace 188 | 189 | After you have saved your customized variables file, initialize your Terraform 190 | workspace, which will download the provider and initialize it with the values 191 | provided in your `terraform.tfvars` file. 192 | 193 | ```shell-session 194 | $ terraform init 195 | 196 | Initializing the backend... 197 | 198 | Initializing provider plugins... 199 | - Reusing previous version of hashicorp/google from the dependency lock file 200 | - Installing hashicorp/google v4.27.0... 201 | - Installed hashicorp/google v4.27.0 (signed by HashiCorp) 202 | 203 | Terraform has been successfully initialized! 204 | 205 | You may now begin working with Terraform. Try running "terraform plan" to see 206 | any changes that are required for your infrastructure. All Terraform commands 207 | should now work. 208 | 209 | If you ever set or change modules or backend configuration for Terraform, 210 | rerun this command to reinitialize your working directory. If you forget, other 211 | commands will detect it and remind you to do so if necessary. 212 | ``` 213 | 214 | ## Provision the GKE cluster 215 | 216 | -> **NOTE** [Compute Engine API](https://console.developers.google.com/apis/api/compute.googleapis.com/overview) 217 | and [Kubernetes Engine API](https://console.cloud.google.com/apis/api/container.googleapis.com/overview) 218 | are required for `terraform apply` to work on this configuration. 219 | Enable both APIs for your Google Cloud project before continuing. 220 | 221 | In your initialized directory, run `terraform apply` and review the planned actions. 222 | Your terminal output should indicate the plan is running and what resources will be created. 223 | 224 | ```shell-session 225 | $ terraform apply 226 | An execution plan has been generated and is shown below. 227 | Resource actions are indicated with the following symbols: 228 | + create 229 | 230 | Terraform will perform the following actions: 231 | 232 | ## ... 
233 | 234 | Plan: 4 to add, 0 to change, 0 to destroy. 235 | 236 | ## ... 237 | ``` 238 | 239 | You can see this terraform apply will provision a VPC, subnet, GKE Cluster and a 240 | GKE node pool. Confirm the apply with a `yes`. 241 | 242 | This process should take approximately 10 minutes. Upon successful application, 243 | your terminal prints the outputs defined in `vpc.tf` and `gke.tf`. 244 | 245 | ```plaintext 246 | Apply complete! Resources: 4 added, 0 changed, 0 destroyed. 247 | 248 | Outputs: 249 | 250 | kubernetes_cluster_host = "35.232.196.187" 251 | kubernetes_cluster_name = "dos-terraform-edu-gke" 252 | project_id = "dos-terraform-edu" 253 | region = "us-central1" 254 | ``` 255 | 256 | ## Configure kubectl 257 | 258 | Now that you've provisioned your GKE cluster, you need to configure `kubectl`. 259 | 260 | Run the following command to retrieve the access credentials for your cluster 261 | and automatically configure `kubectl`. 262 | 263 | ```shell-session 264 | $ gcloud container clusters get-credentials $(terraform output -raw kubernetes_cluster_name) --region $(terraform output -raw region) 265 | Fetching cluster endpoint and auth data. 266 | kubeconfig entry generated for dos-terraform-edu-gke. 267 | ``` 268 | 269 | The 270 | [Kubernetes cluster name](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/gke.tf#L63) 271 | and [region](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/vpc.tf#L29) 272 | correspond to the output variables showed after the successful Terraform run. 273 | 274 | ### Troubleshooting 275 | 276 | You may see the following warning message when you try to retrieve your cluster 277 | credentials. This may be because your Kubernetes cluster is still 278 | initializing/updating. If this happens, you can still proceed to the next step. 279 | 280 | ```plaintext 281 | WARNING: cluster dos-terraform-edu-gke is not running. The kubernetes API may not be available. 
282 | ``` 283 | 284 | ## Deploy and access Kubernetes Dashboard 285 | 286 | To verify your cluster is correctly configured and running, you will deploy the 287 | Kubernetes dashboard and navigate to it in your local browser. 288 | 289 | While you can deploy the Kubernetes dashboard using Terraform, `kubectl` is used in this tutorial so you don't need to configure your Terraform Kubernetes Provider. 290 | 291 | The following command will schedule the resources necessary for the dashboard. 292 | 293 | ```shell-session 294 | $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta8/aio/deploy/recommended.yaml 295 | 296 | namespace/kubernetes-dashboard created 297 | serviceaccount/kubernetes-dashboard created 298 | service/kubernetes-dashboard created 299 | secret/kubernetes-dashboard-certs created 300 | secret/kubernetes-dashboard-csrf created 301 | secret/kubernetes-dashboard-key-holder created 302 | configmap/kubernetes-dashboard-settings created 303 | role.rbac.authorization.k8s.io/kubernetes-dashboard created 304 | clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created 305 | rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created 306 | clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created 307 | deployment.apps/kubernetes-dashboard created 308 | service/dashboard-metrics-scraper created 309 | deployment.apps/dashboard-metrics-scraper created 310 | ``` 311 | 312 | Now, create a proxy server that will allow you to navigate to the dashboard 313 | from the browser on your local machine. This will continue running until you stop the process by pressing `CTRL + C`. 
314 | 315 | ```shell-session 316 | $ kubectl proxy 317 | ``` 318 | 319 | You should be able to access the Kubernetes dashboard [here](http://127.0.0.1:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/) 320 | (`http://127.0.0.1:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/`). 321 | 322 | ![GKE Auth Page](/img/terraform/kubernetes/gke-k8s-dashboard-auth.png) 323 | 324 | ## Authenticate to Kubernetes Dashboard 325 | 326 | To use the Kubernetes dashboard, you need to create a `ClusterRoleBinding` and 327 | provide an authorization token. This gives the `cluster-admin` permission to 328 | access the `kubernetes-dashboard`. 329 | Authenticating using `kubeconfig` is **not** an option. You can read more about 330 | it in the [Kubernetes documentation](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/#accessing-the-dashboard-ui). 331 | 332 | In another terminal (do not close the `kubectl proxy` process), create the 333 | `ClusterRoleBinding` resource. 334 | 335 | ```shell-session 336 | $ kubectl apply -f https://raw.githubusercontent.com/hashicorp/learn-terraform-provision-gke-cluster/main/kubernetes-dashboard-admin.rbac.yaml 337 | ``` 338 | 339 | Then, generate the authorization token. 340 | 341 | ```shell-session 342 | $ kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep service-controller-token | awk '{print $1}') 343 | 344 | Name: service-controller-token-m8m7j 345 | Namespace: kube-system 346 | Labels: 347 | Annotations: kubernetes.io/service-account.name: service-controller 348 | kubernetes.io/service-account.uid: bc99ddad-6be7-11ea-a3c7-42010a800017 349 | 350 | Type: kubernetes.io/service-account-token 351 | 352 | Data 353 | ==== 354 | token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9... 
355 | ca.crt: 1119 bytes 356 | namespace: 11 bytes 357 | ``` 358 | 359 | Select "Token" on the Dashboard UI then copy and paste the entire token you 360 | receive into the 361 | [dashboard authentication screen](http://127.0.0.1:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/) 362 | to sign in. You are now signed in to the dashboard for your Kubernetes cluster. 363 | 364 | ![GKE Dashboard](/img/terraform/kubernetes/gke-k8s-dashboard.png) 365 | 366 | ### (Optional) GKE nodes and node pool 367 | 368 | On the Dashboard UI, click _Nodes_ on the left hand menu. 369 | 370 | Notice there are 6 nodes in your cluster, even though 371 | [`gke_num_nodes` in your `gke.tf` file](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/gke.tf#L11) 372 | was set to 2. This is because a node pool was provisioned in each of the three zones 373 | within the region to provide high availability. 374 | 375 | ```shell-session 376 | $ gcloud container clusters describe dos-terraform-edu-gke --region us-central1 --format='default(locations)' 377 | locations: 378 | - us-central1-b 379 | - us-central1-f 380 | - us-central1-c 381 | ``` 382 | 383 | -> **NOTE** Replace `dos-terraform-edu-gke` with the `kubernetes_cluster_name` value from your Terraform output. 384 | 385 | ![GKE Dashboard](/img/terraform/kubernetes/gke-k8s-dashboard-nodes.png) 386 | 387 | ## Clean up your workspace 388 | 389 | Congratulations, you have provisioned a GKE cluster with a separated node pool, 390 | configured `kubectl`, and deployed the Kubernetes dashboard. 391 | 392 | If you'd like to learn how to manage your GKE cluster using the Terraform 393 | Kubernetes Provider, leave your cluster running and continue to the 394 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider). 395 | 396 | ~> **Note:** This directory is **only** used to provision a GKE cluster with Terraform. 
397 | By keeping the Terraform configuration for provisioning a Kubernetes cluster and 398 | managing a Kubernetes cluster resources separate, changes in one repository don't 399 | affect the other. In addition, the modularity makes the configuration more 400 | readable and enables you to scope different permissions to each workspace. 401 | 402 | If not, remember to destroy any resources you create once you are done with this 403 | tutorial. Run the `destroy` command and confirm with `yes` in your terminal. 404 | 405 | ```shell-session 406 | $ terraform destroy 407 | ``` 408 | 409 | ## Next steps 410 | 411 | For more information on the GKE resource, please visit the 412 | [Google Cloud provider documentation](https://registry.terraform.io/providers/hashicorp/google/3.14.0/docs/resources/container_cluster). 413 | 414 | For steps on how to manage Kubernetes resources your GKE cluster or any other 415 | already created Kubernetes cluster, visit the 416 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider). 417 | 418 | For a more in-depth Kubernetes example, [Deploy Consul and Vault on a Kubernetes Cluster using Run Triggers](/terraform/tutorials/kubernetes/kubernetes-consul-vault-pipeline) (this tutorial is GKE based). 419 | -------------------------------------------------------------------------------- /plugins/anchor-links/fixtures/01-nested-heading/tutorials-nomad-format-output-with-templates.mdx: -------------------------------------------------------------------------------- 1 | 8 | 9 | When using Nomad at an intermediate to advanced level, you'll need to interface with other systems or customize output generated by Nomad. The `-t` flag is a powerful way to pass a template in Go's text/template format to 10 | several of the Nomad commands that generate output based on the API. This allows 11 | you to filter and customize the output to meet your specific needs. 
12 | 13 | The commands that allow for the -t flag are: 14 | 15 | - `nomad acl policy list` 16 | - `nomad acl token list` 17 | - `nomad alloc status` 18 | - `nomad deployment list` 19 | - `nomad deployment status` 20 | - `nomad eval status` 21 | - `nomad job deployments` 22 | - `nomad job history` 23 | - `nomad job inspect` 24 | - `nomad namespace list` 25 | - `nomad node status` 26 | - `nomad plugin status` 27 | - `nomad quota list` 28 | - `nomad volume status` 29 | 30 | This tutorial will teach you how to explore the objects that are returned to 31 | the template engine and how to use template syntax to format the output into 32 | a custom form. 33 | 34 | ## Prerequisites 35 | 36 | This guide assumes the following: 37 | 38 | - Familiarity with Go's text/template syntax. You can learn more about it in the 39 | [Learn Go Template Syntax] tutorial 40 | 41 | - That you are running these commands against a Nomad cluster with an active 42 | workload. You can create a minimal environment using a dev agent, started with 43 | `nomad agent -dev`, then running at least one Nomad job. You can use 44 | `nomad init -short` to create a sample Docker job or provide your own Nomad 45 | job. 46 | 47 | ## Note the shell-specific syntax 48 | 49 | When using the -t flag, you need to correctly handle string literals based on 50 | your shell environment. In a POSIX shell, you can run the following with a 51 | single quote: 52 | 53 | ```shell-session 54 | $ nomad node status -t '{{printf "%#+v" .}}' 55 | ``` 56 | 57 | In a Windows shell (for example, PowerShell), use single 58 | quotes but escape the double quotes inside the parameter as follows: 59 | 60 | ```powershell 61 | PS> nomad node status -t '{{printf \"%#+v\" .}}' 62 | ``` 63 | 64 | In this tutorial, you can select examples with the proper escaping using the 65 | tabs above the snippets. 
66 | 67 | ## Start discovering objects 68 | 69 | The `printf` function and the `"%#+v"` format string are critical tools for you 70 | in exploring an unfamiliar template context. 71 | 72 | Run the following command to output the context being passed to the template 73 | in Go object format. 74 | 75 | 76 | 77 | ## This is a single nested heading within one Tabs (tabbedSectionDepth should be 1) 78 | 79 | ```shell-session 80 | $ nomad node status -t '{{printf "%#+v" .}}' 81 | ``` 82 | 83 | 84 | 85 | 86 | ```powershell 87 | PS> nomad node status -t '{{printf \"%#+v\" .}}' 88 | ``` 89 | 90 | 91 | 92 | 93 | ```plaintext 94 | []*api.NodeListStub{(*api.NodeListStub)(0xc0003fa160), (*api.NodeListStub)(0xc0003fa0b0), (*api.NodeListStub)(0xc0003fa000)} 95 | ``` 96 | 97 | The output indicates that the context consists of a list (`[]`) of pointers 98 | (`*`) to `api.NodeListStub` objects. The list will also show one NodeListStub 99 | object per client node in your cluster's server state. 100 | 101 | You can explore these api.NodeListStub object by using the `range` control over 102 | the list. 103 | 104 | 105 | 106 | 107 | ```shell-session 108 | $ nomad node status -t '{{range .}}{{printf "%#+v" .}}{{end}}' 109 | ``` 110 | 111 | 112 | 113 | 114 | ```powershell 115 | PS> nomad node status -t '{{range .}}{{printf \"%#+v\" .}}{{end}}' 116 | ``` 117 | 118 | 119 | 120 | 121 | ```plaintext 122 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ... 123 | ``` 124 | 125 | If you have a lot of client nodes in your cluster state, this output will be 126 | unwieldy. In that case, you can use `with` and the index function to get the 127 | first list item. 128 | 129 | 130 | 131 | 132 | ```shell-session 133 | $ nomad node status -t '{{with index . 
0}}{{printf "%#+v" .}}{{end}}' 134 | ``` 135 | 136 | 137 | 138 | 139 | ```powershell 140 | PS> nomad node status -t '{{with index . 0}}{{printf \"%#+v\" .}}{{end}}' 141 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ... 142 | ``` 143 | 144 | 145 | 146 | 147 | ```plaintext 148 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ... 149 | ``` 150 | 151 | Finally, output `Name` and `Version` for each client in the cluster. 152 | 153 | 154 | 155 | 156 | ```shell-session 157 | $ nomad node status -t '{{range .}}{{printf "%s: %s\n" .Name .Version}}{{end}}' 158 | ``` 159 | 160 | 161 | 162 | 163 | ```powershell 164 | PS> nomad node status -t '{{range .}}{{printf \"%s: %s\n\" .Name .Version}}{{end}}' 165 | ``` 166 | 167 | 168 | 169 | 170 | ```plaintext 171 | nomad-client-2.node.consul: 0.12.0 172 | nomad-client-3.node.consul: 0.12.0 173 | nomad-client-1.node.consul: 0.12.0 174 | ``` 175 | 176 | ## Make quiet output 177 | 178 | Suppose you want to create a reduced version of the `nomad job status` output 179 | to show just the running job IDs in your cluster and nothing else. 180 | 181 | 182 | 183 | 184 | ```shell-session 185 | $ nomad job inspect -t '{{range .}}{{if eq .Status "running"}}{{ println .Name}}{{end}}{{end}}' 186 | ``` 187 | 188 | 189 | 190 | 191 | ```powershell 192 | PS> nomad job inspect -t '{{range .}}{{if eq .Status \"running\"}}{{ println .Name}}{{end}}{{end}}' 193 | ``` 194 | 195 | 196 | 197 | 198 | Nomad will output the job IDs for every running job in your cluster. 
For example: 199 | 200 | ```plaintext 201 | fabio 202 | sockshop-carts 203 | sockshop-catalogue 204 | sockshop-frontend 205 | sockshop-infra 206 | sockshop-orders 207 | sockshop-payment 208 | sockshop-shipping 209 | sockshop-user 210 | ``` 211 | 212 | ### Challenge yourself 213 | 214 | Allocations have a slightly different shape. How might you create similar output 215 | from the `nomad alloc status` command? Make sure that your Nomad cluster has at 216 | least one allocation running and then use the printf technique from earlier to 217 | explore the values sent into the template. 218 | 219 | 220 | 221 | 222 | Print the context that you are passed from the command using the printf command. 223 | 224 | 225 | 226 | 227 | ```shell-session 228 | $ nomad alloc status -t '{{printf "%#+v" . }}' 229 | ``` 230 | 231 | 232 | 233 | 234 | ```powershell 235 | PS> nomad alloc status -t '{{printf \"%#+v\" . }}' 236 | ``` 237 | 238 | 239 | 240 | 241 | ```plaintext 242 | []*api.AllocationListStub ... 243 | ``` 244 | 245 | Note that the first thing that you receive is a list (`[]`) of pointers (`*`) to 246 | `AllocationListStub` objects. 247 | 248 | Use `range` to traverse each item in the list. 249 | 250 | 251 | 252 | 253 | ```shell-session 254 | $ nomad alloc status -t '{{range .}}{{printf "%#+v" . }}{{end}}' 255 | ``` 256 | 257 | 258 | 259 | 260 | ```powershell 261 | PS> nomad alloc status -t '{{range .}}{{printf \"%#+v\" . }}{{end}}' 262 | ``` 263 | 264 | 265 | 266 | 267 | ```plaintext 268 | &api.AllocationListStub{ID:"30663b68-4d8a-aada-4ad2-011b1acae3a1", EvalID:"c5eda90b-f675-048e-b2f7-9ced30e4916b", Name:"sockshop-user.userdb[0]", Namespace:"default", NodeID:"3be35c12-70aa-8816-195e-a4630a457727", NodeName:"nomad-client-3.node.consul", JobID:"sockshop-user", JobType:"service", JobVersion:0x0, ... 269 | ``` 270 | 271 | If you have a lot of allocations running, this could get unwieldy. In that case, 272 | you can use `with` and the index function to get the first list item. 
273 | 274 | 275 | 276 | 277 | ```shell-session 278 | $ nomad alloc status -t '{{with index . 0}}{{printf "%#+v" . }}{{end}}' 279 | ``` 280 | 281 | 282 | 283 | 284 | ```powershell 285 | PS> nomad alloc status -t '{{with index . 0}}{{printf \"%#+v\" . }}{{end}}' 286 | ``` 287 | 288 | 289 | 290 | 291 | ```plaintext 292 | &api.AllocationListStub{ID:"30663b68-4d8a-aada-4ad2-011b1acae3a1", EvalID:"c5eda90b-f675-048e-b2f7-9ced30e4916b", Name:"sockshop-user.userdb[0]", Namespace:"default", NodeID:"3be35c12-70aa-8816-195e-a4630a457727", NodeName:"nomad-client-3.node.consul", JobID:"sockshop-user", JobType:"service", JobVersion:0x0, ... 293 | ``` 294 | 295 | The fields on the AllocationListStub object that give insight into the running 296 | state of an allocation are `DesiredStatus` and `ClientStatus`. 297 | 298 | -> **Did you know?** The definition of an [AllocationListStub][] object and 299 | valid values for the DesiredStatus and ClientStatus are located in Nomad's 300 | [api package][]. Take a moment to look at it and see what other information you 301 | might be interested in displaying with templates. 302 | 303 | Update your template to show items with a DesiredStatus of "run" and a client 304 | status of "running" or "pending." 305 | 306 | 307 | 308 | 309 | ```shell-session 310 | $ nomad alloc status -t '{{range .}}{{if and (eq .DesiredStatus "run") (or (eq .ClientStatus "running") (eq .ClientStatus "pending"))}}{{println .ID}}{{end}}{{end}}' 311 | ``` 312 | 313 | 314 | 315 | 316 | ```powershell 317 | PS> nomad alloc status -t '{{range .}}{{if and (eq .DesiredStatus \"run\") (or (eq .ClientStatus \"running\") (eq .ClientStatus \"pending\"))}}{{println .ID}}{{end}}{{end}}' 318 | ``` 319 | 320 | 321 | 322 | 323 | ```plaintext 324 | 30663b68-4d8a-aada-4ad2-011b1acae3a1 325 | 11b916da-d679-1718-26f3-f6cd499bfdb8 326 | 68bcb157-359f-9293-d091-5a8ef71475ad 327 | ... 
328 | ``` 329 | 330 | You now have a list of the IDs for all of the allocations running in your Nomad 331 | cluster. 332 | 333 | 334 | 335 | 336 | ## Retrieve a template from file 337 | 338 | Using the command line to write templates becomes challenging 339 | as the template becomes more complex. 340 | 341 | By writing a template in its own file, you can use comments, span multiple lines, and indent conditionals in order to make them more readable to you and to other operators. 342 | 343 | Consider using some of these techniques 344 | to include the template data into the command. 345 | 346 | 347 | 348 | 349 | 350 | Create a file named running_jobs.tmpl with the following content. 351 | 352 | ```plaintext 353 | {{- /* 354 | Get Running Jobs 355 | Run with `nomad job inspect -t "$(cat running_jobs.tmpl)"` 356 | */ -}} 357 | {{- range . -}} 358 | {{- if eq .Status "running" -}} 359 | {{- println .Name -}} 360 | {{- end -}} 361 | {{- end -}} 362 | ``` 363 | 364 | Now, use a subshell to read the file into a variable 365 | 366 | ```shell-session 367 | $ nomad job inspect -t "$(cat running_jobs.tmpl)" 368 | ``` 369 | 370 | 371 | 372 | 373 | 374 | Create a file named running_jobs.tmpl with the following content. 375 | 376 | ```plaintext 377 | {{- /* 378 | Get Running Jobs 379 | Run with: 380 | $content=Get-Content running_jobs.tmpl -Raw; nomad job inspect -t $content 381 | */ -}} 382 | {{- range . -}} 383 | {{- if eq .Status \"running\" -}} 384 | {{- println .Name -}} 385 | {{- end -}} 386 | {{- end -}} 387 | ``` 388 | 389 | Now, use a subshell to read the file into a variable 390 | 391 | ```powershell 392 | PS> $content=Get-Content running_jobs.tmpl -Raw; nomad job inspect -t $content 393 | ``` 394 | 395 | 396 | 397 | 398 | 399 | ## Learn more 400 | 401 | In this tutorial, you learned how to: 402 | 403 | - Customize the output of several Nomad commands using Go's text/template 404 | syntax. 
405 | 406 | - Use the `printf` function to discover what is available in the template's 407 | context. 408 | 409 | - Use a template definition contained in a file as part of the command. 410 | 411 | Learn more about templating in other tutorials in the Nomad Templating 412 | Collection. 413 | 414 | [learn go template syntax]: /nomad/tutorials/templates/go-template-syntax 415 | [allocationliststub]: https://godoc.org/github.com/hashicorp/nomad/api#AllocationListStub 416 | [api package]: https://godoc.org/github.com/hashicorp/nomad/api 417 | -------------------------------------------------------------------------------- /plugins/anchor-links/fixtures/02-nested-headings/tutorial-terraform-aks.mdx: -------------------------------------------------------------------------------- 1 | 10 | 11 | The Azure Kubernetes Service (AKS) is a fully managed Kubernetes service for deploying, managing, and scaling containerized applications on Azure. 12 | 13 | In this tutorial, you will deploy a 2 node AKS cluster on your default VPC using Terraform then access its Kubernetes dashboard. 14 | 15 | ~> **Warning!** If you're not using an account that qualifies under the Azure 16 | [free tier](https://azure.microsoft.com/en-us/free/), you may be charged to run these 17 | examples. The most you should be charged should only be a few dollars, but 18 | we're not responsible for any charges that may incur. 19 | 20 | ### Why deploy with Terraform? 21 | 22 | While you could use the built-in Azure provisioning processes (UI, CLI) for AKS clusters, Terraform provides you with several benefits: 23 | 24 | - **Unified Workflow** - If you are already deploying infrastructure to Azure with Terraform, your AKS cluster can fit into that workflow. You can also deploy applications into your AKS cluster using Terraform. 
25 | 26 | - **Full Lifecycle Management** - Terraform doesn't only create resources, it updates, and deletes tracked resources without requiring you to inspect the API to identify those resources. 27 | 28 | - **Graph of Relationships** - Terraform understands dependency relationships between resources. For example, an Azure Kubernetes cluster needs to be associated with a resource group, Terraform won't attempt to create the cluster if the resource group failed to create. 29 | 30 | ## Prerequisites 31 | 32 | The tutorial assumes some basic familiarity with Kubernetes and `kubectl` but does 33 | not assume any pre-existing deployment. 34 | 35 | It also assumes that you are familiar with the usual Terraform plan/apply 36 | workflow. If you're new to Terraform itself, refer first to the Getting Started 37 | [tutorial](/terraform/tutorials/azure-get-started). 38 | 39 | For this tutorial, you will need 40 | 41 | - an [Azure account](https://portal.azure.com/#home) 42 | - a configured Azure CLI 43 | - `kubectl` 44 | 45 | 46 | 47 | 48 | ## This is a single nested heading within a Tabs (tabbedSectionDepth should be 1) 49 | 50 | In order for Terraform to run operations on your behalf, you must install and 51 | configure the Azure CLI tool. To install the Azure CLI, follow 52 | [these instructions](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) or choose a package manager based on your operating system. 53 | 54 | 55 | 56 | 57 | ## This is a single nested heading within two Tabs (tabbedSectionDepth should be 2) 58 | 59 | You can also use the package manager [`homebrew`](https://formulae.brew.sh/) to install the Azure CLI. 60 | 61 | ```shell-session 62 | $ brew install azure-cli 63 | ``` 64 | 65 | 66 | 67 | 68 | 69 | You can also use the package manager [`Chocolatey`](https://chocolatey.org/) to install the Azure CLI. 
70 | 71 | ```shell-session 72 | $ choco install azure-cli 73 | ``` 74 | 75 | 76 | 77 | 78 | 79 | After you've installed the Azure CLI, login into Azure by running: 80 | 81 | ```shell-session 82 | $ az login 83 | ``` 84 | 85 | 86 | 87 | 88 | 89 | To install the `kubectl` (Kubernetes CLI), follow [these instructions](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or choose a package manager based on your operating system. 90 | 91 | 92 | 93 | 94 | 95 | Use the package manager [`homebrew`](https://formulae.brew.sh/) to install `kubectl`. 96 | 97 | ```shell-session 98 | $ brew install kubernetes-cli 99 | ``` 100 | 101 | 102 | 103 | 104 | 105 | Use the package manager [`Chocolatey`](https://chocolatey.org/) to install `kubectl`. 106 | 107 | ```shell-session 108 | $ choco install kubernetes-cli 109 | ``` 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | ## Set up and initialize your Terraform workspace 118 | 119 | In your terminal, clone the [following repository](https://github.com/hashicorp/learn-terraform-provision-aks-cluster). 120 | It contains the example configuration used in this tutorial. 121 | 122 | ```shell-session 123 | $ git clone https://github.com/hashicorp/learn-terraform-provision-aks-cluster 124 | ``` 125 | 126 | You can explore this repository by changing directories or navigating in your UI. 127 | 128 | ```shell-session 129 | $ cd learn-terraform-provision-aks-cluster 130 | ``` 131 | 132 | In here, you will find three files used to provision the AKS cluster. 133 | 134 | 1. [`aks-cluster.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/aks-cluster.tf) provisions a 135 | resource group and an AKS cluster. The `default_node_pool` defines the 136 | number of VMs and the VM type the cluster uses. 
137 | 138 | ```hcl 139 | resource "azurerm_kubernetes_cluster" "default" { 140 | name = "${random_pet.prefix.id}-aks" 141 | location = azurerm_resource_group.default.location 142 | resource_group_name = azurerm_resource_group.default.name 143 | dns_prefix = "${random_pet.prefix.id}-k8s" 144 | 145 | default_node_pool { 146 | name = "default" 147 | node_count = 2 148 | vm_size = "Standard_B2s" 149 | os_disk_size_gb = 30 150 | } 151 | 152 | service_principal { 153 | client_id = var.appId 154 | client_secret = var.password 155 | } 156 | 157 | role_based_access_control_enabled = true 158 | 159 | tags = { 160 | environment = "Demo" 161 | } 162 | } 163 | ``` 164 | 165 | 1. [`variables.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/variables.tf) declares the `appId` and `password` so Terraform can reference them in its configuration 166 | 167 | 1. [`terraform.tfvars`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/terraform.tfvars) defines the `appId` and `password` variables to authenticate to Azure 168 | 169 | 1. [`outputs.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf) declares values that can be useful to interact with your AKS cluster 170 | 171 | 1. [`versions.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/versions.tf) sets the Terraform version to at least 0.14 and defines the [`required_provider`](/terraform/language/providers/requirements#requiring-providers) block 172 | 173 | ### Create an Active Directory service principal account 174 | 175 | There are many ways to authenticate to the Azure provider. In this tutorial, you 176 | will use an Active Directory service principal account. You can learn how to 177 | authenticate using a different method [here](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#authenticating-to-azure).
178 | 179 | First, you need to create an Active Directory service principal account using 180 | the Azure CLI. You should see something like the following. 181 | 182 | ```shell-session 183 | $ az ad sp create-for-rbac --skip-assignment 184 | { 185 | "appId": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", 186 | "displayName": "azure-cli-2019-04-11-00-46-05", 187 | "name": "http://azure-cli-2019-04-11-00-46-05", 188 | "password": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", 189 | "tenant": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" 190 | } 191 | ``` 192 | 193 | ### Update your `terraform.tfvars` file 194 | 195 | Replace the values in your `terraform.tfvars` file with your `appId` and 196 | `password`. Terraform will use these values to authenticate to Azure before 197 | provisioning your resources. Your `terraform.tfvars` file should look like the 198 | following. 199 | 200 | ```plaintext 201 | # terraform.tfvars 202 | appId = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" 203 | password = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" 204 | ``` 205 | 206 | ### Initialize Terraform 207 | 208 | After you have saved your customized variables file, initialize your Terraform 209 | workspace, which will download the provider and initialize it with the values 210 | provided in your `terraform.tfvars` file. 211 | 212 | ```shell-session 213 | $ terraform init 214 | Initializing the backend... 215 | 216 | Initializing provider plugins... 217 | - Reusing previous version of hashicorp/random from the dependency lock file 218 | - Reusing previous version of hashicorp/azurerm from the dependency lock file 219 | - Installing hashicorp/random v3.0.0... 220 | - Installed hashicorp/random v3.0.0 (signed by HashiCorp) 221 | - Installing hashicorp/azurerm v3.0.2... 222 | - Installed hashicorp/azurerm v3.0.2 (signed by HashiCorp) 223 | 224 | Terraform has been successfully initialized! 225 | 226 | You may now begin working with Terraform. 
Try running "terraform plan" to see 227 | any changes that are required for your infrastructure. All Terraform commands 228 | should now work. 229 | 230 | If you ever set or change modules or backend configuration for Terraform, 231 | rerun this command to reinitialize your working directory. If you forget, other 232 | commands will detect it and remind you to do so if necessary. 233 | ``` 234 | 235 | ## Provision the AKS cluster 236 | 237 | In your initialized directory, run `terraform apply` and review the planned actions. 238 | Your terminal output should indicate the plan is running and what resources will be created. 239 | 240 | ```shell-session 241 | $ terraform apply 242 | An execution plan has been generated and is shown below. 243 | Resource actions are indicated with the following symbols: 244 | + create 245 | 246 | Terraform will perform the following actions: 247 | 248 | ## ... 249 | 250 | Plan: 1 to add, 0 to change, 0 to destroy. 251 | 252 | ## ... 253 | ``` 254 | 255 | You can see this terraform apply will provision an Azure resource group and an 256 | AKS cluster. Confirm the apply with a `yes`. 257 | 258 | This process should take approximately 5 minutes. Upon successful application, 259 | your terminal prints the outputs defined in `aks-cluster.tf`. 260 | 261 | ```plaintext hideClipboard 262 | Apply complete! Resources: 1 added, 0 changed, 0 destroyed. 263 | 264 | Outputs: 265 | 266 | kubernetes_cluster_name = light-eagle-aks 267 | resource_group_name = light-eagle-rg 268 | ``` 269 | 270 | ## Configure kubectl 271 | 272 | Now that you've provisioned your AKS cluster, you need to configure `kubectl`. 273 | 274 | Run the following command to retrieve the access credentials for your cluster 275 | and automatically configure `kubectl`. 
276 | 277 | ```shell-session 278 | $ az aks get-credentials --resource-group $(terraform output -raw resource_group_name) --name $(terraform output -raw kubernetes_cluster_name) 279 | Merged "light-eagle-aks" as current context in /Users/dos/.kube/config 280 | ``` 281 | 282 | The [resource group name](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf#L1) 283 | and [Kubernetes Cluster name](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf#L5) 284 | correspond to the output variables shown after the successful Terraform run. 285 | 286 | ## Access Kubernetes Dashboard 287 | 288 | To verify your cluster's configuration, visit 289 | the Azure Portal's Kubernetes resource view. 290 | [Azure recommends](https://docs.microsoft.com/en-us/azure/aks/kubernetes-dashboard#start-the-kubernetes-dashboard) 291 | using this view over the default Kubernetes dashboard, since the AKS dashboard 292 | add-on is deprecated for Kubernetes versions 1.19+. 293 | 294 | Run the following command to generate the Azure portal link. 295 | 296 | ```shell-session 297 | $ az aks browse --resource-group $(terraform output -raw resource_group_name) --name $(terraform output -raw kubernetes_cluster_name) 298 | Kubernetes resources view on https://portal.azure.com/#resource/subscriptions/aaaaa/resourceGroups/light-eagle-rg/providers/Microsoft.ContainerService/managedClusters/light-eagle-aks/workloads 299 | ``` 300 | 301 | Go to the URL in your preferred browser to view the Kubernetes resource view. 302 | 303 | ![AKS Dashboard](/img/terraform/aks-portal.azure.com.png) 304 | 305 | ## Clean up your workspace 306 | 307 | Congratulations, you have provisioned an AKS cluster, configured `kubectl`, 308 | and visited the Kubernetes dashboard.
309 | 310 | If you'd like to learn how to manage your AKS cluster using the Terraform 311 | Kubernetes Provider, leave your cluster running and continue to the 312 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider). 313 | 314 | ~> **Note:** This directory is **only** used to provision an AKS cluster with Terraform. 315 | By keeping the Terraform configuration for provisioning a Kubernetes cluster and 316 | managing Kubernetes cluster resources separate, changes in one repository don't 317 | affect the other. In addition, the modularity makes the configuration more 318 | readable and enables you to scope different permissions to each workspace. 319 | 320 | If not, remember to destroy any resources you create once you are done with this 321 | tutorial. Run the `destroy` command and confirm with `yes` in your terminal. 322 | 323 | ```shell-session 324 | $ terraform destroy 325 | ``` 326 | 327 | ## Next steps 328 | 329 | For more information on the AKS resource, visit the 330 | [Azure provider documentation](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster). 331 | 332 | For steps on how to manage Kubernetes resources in your AKS cluster or any other 333 | already created Kubernetes cluster, visit the 334 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider). 335 | 336 | To use run triggers to deploy a Kubernetes Cluster, Consul and Vault 337 | on Google Cloud, visit the [Deploy Consul and Vault on a Kubernetes Cluster using Run Triggers tutorial](/terraform/tutorials/kubernetes/kubernetes-consul-vault-pipeline).
338 | -------------------------------------------------------------------------------- /plugins/anchor-links/fixtures/02-nested-headings/tutorials-nomad-format-output-with-templates.mdx: -------------------------------------------------------------------------------- 1 | 8 | 9 | When using Nomad at an intermediate to advanced level, you'll need to interface with other systems or customize output generated by Nomad. The `-t` flag is a powerful way to pass a template in Go's text/template format to 10 | several of the Nomad commands that generate output based on the API. This allows 11 | you to filter and customize the output to meet your specific needs. 12 | 13 | The commands that allow for the -t flag are: 14 | 15 | - `nomad acl policy list` 16 | - `nomad acl token list` 17 | - `nomad alloc status` 18 | - `nomad deployment list` 19 | - `nomad deployment status` 20 | - `nomad eval status` 21 | - `nomad job deployments` 22 | - `nomad job history` 23 | - `nomad job inspect` 24 | - `nomad namespace list` 25 | - `nomad node status` 26 | - `nomad plugin status` 27 | - `nomad quota list` 28 | - `nomad volume status` 29 | 30 | This tutorial will teach you how to explore the objects that are returned to 31 | the template engine and how to use template syntax to format the output into 32 | a custom form. 33 | 34 | ## Prerequisites 35 | 36 | This guide assumes the following: 37 | 38 | - Familiarity with Go's text/template syntax. You can learn more about it in the 39 | [Learn Go Template Syntax] tutorial 40 | 41 | - That you are running these commands against a Nomad cluster with an active 42 | workload. You can create a minimal environment using a dev agent, started with 43 | `nomad agent -dev`, then running at least one Nomad job. You can use 44 | `nomad init -short` to create a sample Docker job or provide your own Nomad 45 | job. 
46 | 47 | ## Note the shell-specific syntax 48 | 49 | When using the -t flag, you need to correctly handle string literals based on 50 | your shell environment. In a POSIX shell, you can run the following with a 51 | single quote: 52 | 53 | ```shell-session 54 | $ nomad node status -t '{{printf "%#+v" .}}' 55 | ``` 56 | 57 | In a Windows shell (for example, PowerShell), use single 58 | quotes but escape the double quotes inside the parameter as follows: 59 | 60 | ```powershell 61 | PS> nomad node status -t '{{printf \"%#+v\" .}}' 62 | ``` 63 | 64 | In this tutorial, you can select examples with the proper escaping using the 65 | tabs above the snippets. 66 | 67 | ## Start discovering objects 68 | 69 | The `printf` function and the `"%#+v"` format string are critical tools for you 70 | in exploring an unfamiliar template context. 71 | 72 | Run the following command to output the context being passed to the template 73 | in Go object format. 74 | 75 | 76 | 77 | ## This is a single nested heading within one Tabs (tabbedSectionDepth should be 1) 78 | 79 | ```shell-session 80 | $ nomad node status -t '{{printf "%#+v" .}}' 81 | ``` 82 | 83 | 84 | 85 | 86 | ```powershell 87 | PS> nomad node status -t '{{printf \"%#+v\" .}}' 88 | ``` 89 | 90 | 91 | 92 | 93 | ```plaintext 94 | []*api.NodeListStub{(*api.NodeListStub)(0xc0003fa160), (*api.NodeListStub)(0xc0003fa0b0), (*api.NodeListStub)(0xc0003fa000)} 95 | ``` 96 | 97 | The output indicates that the context consists of a list (`[]`) of pointers 98 | (`*`) to `api.NodeListStub` objects. The list will also show one NodeListStub 99 | object per client node in your cluster's server state. 100 | 101 | You can explore these api.NodeListStub object by using the `range` control over 102 | the list. 
103 | 104 | 105 | 106 | 107 | ```shell-session 108 | $ nomad node status -t '{{range .}}{{printf "%#+v" .}}{{end}}' 109 | ``` 110 | 111 | 112 | 113 | 114 | ```powershell 115 | PS> nomad node status -t '{{range .}}{{printf \"%#+v\" .}}{{end}}' 116 | ``` 117 | 118 | 119 | 120 | 121 | ```plaintext 122 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ... 123 | ``` 124 | 125 | If you have a lot of client nodes in your cluster state, this output will be 126 | unwieldy. In that case, you can use `with` and the index function to get the 127 | first list item. 128 | 129 | 130 | 131 | 132 | ```shell-session 133 | $ nomad node status -t '{{with index . 0}}{{printf "%#+v" .}}{{end}}' 134 | ``` 135 | 136 | 137 | 138 | 139 | ```powershell 140 | PS> nomad node status -t '{{with index . 0}}{{printf \"%#+v\" .}}{{end}}' 141 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ... 142 | ``` 143 | 144 | 145 | 146 | 147 | ```plaintext 148 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ... 149 | ``` 150 | 151 | Finally, output `Name` and `Version` for each client in the cluster. 
152 | 153 | 154 | 155 | 156 | ```shell-session 157 | $ nomad node status -t '{{range .}}{{printf "%s: %s\n" .Name .Version}}{{end}}' 158 | ``` 159 | 160 | 161 | 162 | 163 | ```powershell 164 | PS> nomad node status -t '{{range .}}{{printf \"%s: %s\n\" .Name .Version}}{{end}}' 165 | ``` 166 | 167 | 168 | 169 | 170 | ```plaintext 171 | nomad-client-2.node.consul: 0.12.0 172 | nomad-client-3.node.consul: 0.12.0 173 | nomad-client-1.node.consul: 0.12.0 174 | ``` 175 | 176 | ## Make quiet output 177 | 178 | Suppose you want to create a reduced version of the `nomad job status` output 179 | to show just the running job IDs in your cluster and nothing else. 180 | 181 | 182 | 183 | 184 | ```shell-session 185 | $ nomad job inspect -t '{{range .}}{{if eq .Status "running"}}{{ println .Name}}{{end}}{{end}}' 186 | ``` 187 | 188 | 189 | 190 | 191 | ```powershell 192 | PS> nomad job inspect -t '{{range .}}{{if eq .Status \"running\"}}{{ println .Name}}{{end}}{{end}}' 193 | ``` 194 | 195 | 196 | 197 | 198 | Nomad will output the job IDs for every running job in your cluster. For example: 199 | 200 | ```plaintext 201 | fabio 202 | sockshop-carts 203 | sockshop-catalogue 204 | sockshop-frontend 205 | sockshop-infra 206 | sockshop-orders 207 | sockshop-payment 208 | sockshop-shipping 209 | sockshop-user 210 | ``` 211 | 212 | ### Challenge yourself 213 | 214 | Allocations have a slightly different shape. How might you create similar output 215 | from the `nomad alloc status` command? Make sure that your Nomad cluster has at 216 | least one allocation running and then use the printf technique from earlier to 217 | explore the values sent into the template. 218 | 219 | 220 | 221 | 222 | Print the context that you are passed from the command using the printf command. 223 | 224 | 225 | 226 | ## This is a single nested heading within one Tabs (tabbedSectionDepth should be 1) 227 | 228 | 229 | ```shell-session 230 | $ nomad alloc status -t '{{printf "%#+v" . 
}}' 231 | ``` 232 | 233 | 234 | 235 | 236 | ```powershell 237 | PS> nomad alloc status -t '{{printf \"%#+v\" . }}' 238 | ``` 239 | 240 | 241 | 242 | 243 | ```plaintext 244 | []*api.AllocationListStub ... 245 | ``` 246 | 247 | Note that the first thing that you receive is a list (`[]`) of pointers (`*`) to 248 | `AllocationListStub` objects. 249 | 250 | Use `range` to traverse each item in the list. 251 | 252 | 253 | 254 | 255 | ```shell-session 256 | $ nomad alloc status -t '{{range .}}{{printf "%#+v" . }}{{end}}' 257 | ``` 258 | 259 | 260 | 261 | 262 | ```powershell 263 | PS> nomad alloc status -t '{{range .}}{{printf \"%#+v\" . }}{{end}}' 264 | ``` 265 | 266 | 267 | 268 | 269 | ```plaintext 270 | &api.AllocationListStub{ID:"30663b68-4d8a-aada-4ad2-011b1acae3a1", EvalID:"c5eda90b-f675-048e-b2f7-9ced30e4916b", Name:"sockshop-user.userdb[0]", Namespace:"default", NodeID:"3be35c12-70aa-8816-195e-a4630a457727", NodeName:"nomad-client-3.node.consul", JobID:"sockshop-user", JobType:"service", JobVersion:0x0, ... 271 | ``` 272 | 273 | If you have a lot of allocations running, this could get unwieldy. In that case, 274 | you can use `with` and the index function to get the first list item. 275 | 276 | 277 | 278 | 279 | ```shell-session 280 | $ nomad alloc status -t '{{with index . 0}}{{printf "%#+v" . }}{{end}}' 281 | ``` 282 | 283 | 284 | 285 | 286 | ```powershell 287 | PS> nomad alloc status -t '{{with index . 0}}{{printf \"%#+v\" . }}{{end}}' 288 | ``` 289 | 290 | 291 | 292 | 293 | ```plaintext 294 | &api.AllocationListStub{ID:"30663b68-4d8a-aada-4ad2-011b1acae3a1", EvalID:"c5eda90b-f675-048e-b2f7-9ced30e4916b", Name:"sockshop-user.userdb[0]", Namespace:"default", NodeID:"3be35c12-70aa-8816-195e-a4630a457727", NodeName:"nomad-client-3.node.consul", JobID:"sockshop-user", JobType:"service", JobVersion:0x0, ... 
295 | ``` 296 | 297 | The fields on the AllocationListStub object that give insight into the running 298 | state of an allocation are `DesiredStatus` and `ClientStatus`. 299 | 300 | -> **Did you know?** The definition of an [AllocationListStub][] object and 301 | valid values for the DesiredStatus and ClientStatus are located in Nomad's 302 | [api package][]. Take a moment to look at it and see what other information you 303 | might be interested in displaying with templates. 304 | 305 | Update your template to show items with a DesiredStatus of "run" and a client 306 | status of "running" or "pending." 307 | 308 | 309 | 310 | 311 | ```shell-session 312 | $ nomad alloc status -t '{{range .}}{{if and (eq .DesiredStatus "run") (or (eq .ClientStatus "running") (eq .ClientStatus "pending"))}}{{println .ID}}{{end}}{{end}}' 313 | ``` 314 | 315 | 316 | 317 | 318 | ```powershell 319 | PS> nomad alloc status -t '{{range .}}{{if and (eq .DesiredStatus \"run\") (or (eq .ClientStatus \"running\") (eq .ClientStatus \"pending\"))}}{{println .ID}}{{end}}{{end}}' 320 | ``` 321 | 322 | 323 | 324 | 325 | ```plaintext 326 | 30663b68-4d8a-aada-4ad2-011b1acae3a1 327 | 11b916da-d679-1718-26f3-f6cd499bfdb8 328 | 68bcb157-359f-9293-d091-5a8ef71475ad 329 | ... 330 | ``` 331 | 332 | You now have a list of the IDs for all of the allocations running in your Nomad 333 | cluster. 334 | 335 | 336 | 337 | 338 | ## Retrieve a template from file 339 | 340 | Using the command line to write templates becomes challenging 341 | as the template becomes more complex. 342 | 343 | By writing a template in its own file, you can use comments, span multiple lines, and indent conditionals in order to make them more readable to you and to other operators. 344 | 345 | Consider using some of these techniques 346 | to include the template data into the command. 347 | 348 | 349 | 350 | 351 | 352 | Create a file named running_jobs.tmpl with the following content. 
353 | 354 | ```plaintext 355 | {{- /* 356 | Get Running Jobs 357 | Run with `nomad job inspect -t "$(cat running_jobs.tmpl)"` 358 | */ -}} 359 | {{- range . -}} 360 | {{- if eq .Status "running" -}} 361 | {{- println .Name -}} 362 | {{- end -}} 363 | {{- end -}} 364 | ``` 365 | 366 | Now, use a subshell to read the file into a variable 367 | 368 | ```shell-session 369 | $ nomad job inspect -t "$(cat running_jobs.tmpl)" 370 | ``` 371 | 372 | 373 | 374 | 375 | 376 | Create a file named running_jobs.tmpl with the following content. 377 | 378 | ```plaintext 379 | {{- /* 380 | Get Running Jobs 381 | Run with: 382 | $content=Get-Content running_jobs.tmpl -Raw; nomad job inspect -t $content 383 | */ -}} 384 | {{- range . -}} 385 | {{- if eq .Status \"running\" -}} 386 | {{- println .Name -}} 387 | {{- end -}} 388 | {{- end -}} 389 | ``` 390 | 391 | Now, use a subshell to read the file into a variable 392 | 393 | ```powershell 394 | PS> $content=Get-Content running_jobs.tmpl -Raw; nomad job inspect -t $content 395 | ``` 396 | 397 | 398 | 399 | 400 | 401 | ## Learn more 402 | 403 | In this tutorial, you learned how to: 404 | 405 | - Customize the output of several Nomad commands using Go's text/template 406 | syntax. 407 | 408 | - Use the `printf` function to discover what is available in the template's 409 | context. 410 | 411 | - Use a template definition contained in a file as part of the command. 412 | 413 | Learn more about templating in other tutorials in the Nomad Templating 414 | Collection. 
415 | 416 | [learn go template syntax]: /nomad/tutorials/templates/go-template-syntax 417 | [allocationliststub]: https://godoc.org/github.com/hashicorp/nomad/api#AllocationListStub 418 | [api package]: https://godoc.org/github.com/hashicorp/nomad/api 419 | -------------------------------------------------------------------------------- /plugins/anchor-links/index.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | const generateSlug = require('../../generate_slug') 7 | const map = require('unist-util-map') 8 | const is = require('unist-util-is') 9 | 10 | // This plugin adds anchor links to headlines and lists that begin with inline 11 | // code blocks. 12 | // 13 | // NOTE: Some of the HTML code is duplicated in: 14 | // https://github.com/hashicorp/consul/blob/4f15f83dc64e2a9a95cb6b989719838b1f97015b/website/components/config-entry-reference/index.jsx#L84-L105 15 | // If updating the HTML code here, also update there. 16 | module.exports = function anchorLinksPlugin({ 17 | compatibilitySlug, 18 | listWithInlineCodePrefix, 19 | headings, 20 | } = {}) { 21 | return function transformer(tree) { 22 | // this array keeps track of existing slugs to prevent duplicates per-page 23 | const links = [] 24 | 25 | /** 26 | * Keep track of whether we're within . 27 | * If we're in tabbed sections, we may not want to show headings 28 | * in our table of contents. 29 | */ 30 | let tabbedSectionDepth = 0 31 | 32 | return map(tree, (node) => { 33 | /** 34 | * Check if the lines in this node open and/or close . 35 | * - If it opens , increase the tabbedSectionDepth. 36 | * - If it closes , decrease the tabbedSectionDepth. 37 | * 38 | * NOTE: Some nodes are multiple lines and have also have multiple 39 | * opening/closing tags for . 
40 | * 41 | * For example, here is a node that is 4 lines long, and each line must be 42 | * checked at individually: 43 | * 44 | * 45 | * 46 | * 47 | * 48 | * 49 | * Where this has happened in production: 50 | * 51 | * https://github.com/hashicorp/tutorials/blob/50b0284436561e6cbf402fb2aa25b5c0a15ef604/content/tutorials/terraform/aks.mdx?plain=1#L111-L114 52 | */ 53 | const isHtmlOrJsxNode = node.type === 'html' || node.type === 'jsx' 54 | if (isHtmlOrJsxNode) { 55 | // Note that a single HTML node could potentially contain multiple tags 56 | const openTagMatches = node.value.match(/\ paragraph -> [inlineCode, ...etc] 80 | const liNode = node 81 | if (!is(liNode, 'listItem') || !liNode.children) return node 82 | const pNode = liNode.children[0] 83 | if (!is(pNode, 'paragraph') || !pNode.children) return node 84 | const codeNode = pNode.children[0] 85 | if (!is(codeNode, 'inlineCode')) return node 86 | 87 | return processListWithInlineCode( 88 | liNode, 89 | pNode, 90 | codeNode, 91 | compatibilitySlug, 92 | listWithInlineCodePrefix, 93 | links 94 | ) 95 | }) 96 | } 97 | } 98 | 99 | function processHeading( 100 | node, 101 | compatibilitySlug, 102 | links, 103 | headings, 104 | tabbedSectionDepth 105 | ) { 106 | const text = stringifyChildNodes(node) 107 | const level = node.depth 108 | const title = text 109 | .replace(/<\/?[^>]*>/g, '') // Strip html 110 | .replace(/\(\(#.*?\)\)/g, '') // Strip anchor link aliases 111 | .replace(/»/g, '') // Safeguard against double-running this plugin 112 | .replace(/\s+/g, ' ') // Collapse whitespace 113 | .trim() 114 | 115 | // generate the slug and use it as the headline's id property 116 | const slug = generateSlug(text, links) 117 | node.data = { 118 | ...node.data, 119 | hProperties: { ...node.data?.hProperties, id: slug }, 120 | } 121 | 122 | /** 123 | * Handle anchor link aliases 124 | * 125 | * Note: depends on children of heading element! Expects first child, 126 | * at index 0, to be the text element. 
As well, aliases must be attached 127 | * to separate __target-h elements. 128 | */ 129 | const aliases = processAlias(node, 0) 130 | if (aliases.length) node.children.unshift(...aliasesToNodes(aliases, 'h')) 131 | 132 | // if the compatibilitySlug option is present, we generate it and add it 133 | // if it doesn't already match the existing slug 134 | let slug2 135 | if (compatibilitySlug) { 136 | slug2 = compatibilitySlug(text) 137 | if (slug !== slug2) { 138 | node.children.unshift({ 139 | type: 'html', 140 | value: ``, 141 | }) 142 | } 143 | } 144 | 145 | // - if an alias is defined, use that 146 | // - if not, if a compatibilitySlug is defined, use that 147 | // - otherwise use the auto-generated slug 148 | const permalinkSlug = 149 | aliases && aliases.length ? aliases[0] : compatibilitySlug ? slug2 : slug 150 | 151 | // finally, we generate an "permalink" element that can be used to get a quick 152 | // anchor link for any given headline 153 | node.children.unshift({ 154 | type: 'html', 155 | value: `»`, 158 | }) 159 | 160 | const headingData = { 161 | aliases, 162 | level, 163 | permalinkSlug, 164 | slug, 165 | title, 166 | tabbedSectionDepth, 167 | } 168 | headings?.push(headingData) 169 | 170 | return node 171 | } 172 | 173 | function processListWithInlineCode( 174 | liNode, 175 | pNode, 176 | codeNode, 177 | compatibilitySlug, 178 | prefix, 179 | links 180 | ) { 181 | // construct an id/slug based on value of node 182 | // if the prefix option is present, add it before the slug name 183 | const text = codeNode.value 184 | const slug = generateSlug(`${prefix ? 
`${prefix}-` : ''}${text}`, links) 185 | 186 | // handle anchor link aliases 187 | const aliases = processAlias(pNode, 1) 188 | if (aliases.length) liNode.children.unshift(...aliasesToNodes(aliases, 'lic')) 189 | 190 | // if the compatibilitySlug option is present, we generate it and add it 191 | // if it doesn't already match the existing slug 192 | let slug2 193 | if (compatibilitySlug) { 194 | slug2 = compatibilitySlug(text) 195 | if (slug !== slug2) { 196 | liNode.children.unshift({ 197 | type: 'html', 198 | value: ``, 199 | }) 200 | } 201 | } 202 | 203 | // add the target element with the right slug 204 | liNode.children.unshift({ 205 | type: 'html', 206 | value: ``, 207 | }) 208 | 209 | // - if an alias is defined, use that 210 | // - if not, if a compatibilitySlug is defined, use that 211 | // - otherwise use the auto-generated slug 212 | const permalinkSlug = 213 | aliases && aliases.length ? aliases[0] : compatibilitySlug ? slug2 : slug 214 | 215 | // wrap permalink element around child node, so clicking will set 216 | // the url to the anchor link. 217 | pNode.children[0] = { 218 | type: 'link', 219 | url: `#${permalinkSlug}`, 220 | data: { 221 | hProperties: { 222 | ariaLabel: `${generateSlug.generateAriaLabel(text)} permalink`, 223 | class: '__permalink-lic', 224 | }, 225 | }, 226 | children: [pNode.children[0]], 227 | } 228 | 229 | return liNode 230 | } 231 | 232 | function processAlias(node, startIndex = 0) { 233 | // disqualify input that couldn't possibly be an alias 234 | if ( 235 | !node || 236 | !node.children || 237 | !node.children.length || 238 | node.children.length <= startIndex 239 | ) 240 | return [] 241 | 242 | // with the below regex, we look for ((#foo)) or ((#foo, #bar)) 243 | // 244 | // NOTE: There is a potential improvement in the fidelity of this regex, but it's 245 | // an edge case and would make the code more complex, so skipping until we need it. 246 | // Will detail here in case its ever needed in the future though. 
247 | // 248 | // Headline nodes include the headline and alias, like "foo ((#bar))", where inline 249 | // lists that start with code only include the content directly after the code, like 250 | // " ((#bar)) other text". Because of this difference in behavior, this regex does 251 | // not make assumptions about *where* the anchor link alias sits in the string. That 252 | // means that something like "# foo ((#bar)) baz" would still work for a headline, and 253 | // something like "- `foo` some text ((#bar)) more text" would still work for an inline 254 | // list with code. This behavior should not be permitted -- the alias should sit directly 255 | // _after_ the headline or inline code. 256 | // 257 | // It could be enforced by differentiating the regexes that the two types use, such that 258 | // the inline list code uses `/^\s*\(\((#.*?)\)\)/` and headline uses `/\s*\(\((#.*?)\)\)$/` 259 | // but at the moment this seems like unnecessary complexity. 260 | const aliasRegex = /\s*\(\((#.*?)\)\)/ 261 | 262 | // it's possible that the pattern could be broken into multiple nodes 263 | // so we have to check serially. this happens, for example, if an alias 264 | // contains an underscore like ((#\_foo)), which has to be escaped, bc 265 | // markdown. our parser will split escaped characters into multiple nodes, 266 | // for some reason. 267 | // 268 | // the most common scenario, however, is that the first node will match the 269 | // regex entirely, so we check for that first. 270 | const firstNode = node.children[startIndex] 271 | if (firstNode.value && firstNode.value.match(aliasRegex)) { 272 | return _processAliases(firstNode, aliasRegex) 273 | } 274 | 275 | // next, we check for the more unusual scenario of the pattern being broken into 276 | // multiple nodes. the double parens are a "minimum viable match" so we'll look for 277 | // that in the first text node. if we match this, we can continue our search.
278 | const minimumViableRegex = /\s*\(\(#/ 279 | const endRegex = /\)\)/ 280 | if (firstNode.value && firstNode.value.match(minimumViableRegex)) { 281 | // now we need to figure out where the end of our pattern, "))", is. we find 282 | // this, then squash the entire thing together into a single node. any unusual nodes 283 | // other than text will be discarded. we can't deal with that, honestly. 284 | const endIndex = node.children.findIndex( 285 | (node) => node.value && node.value.match(endRegex) 286 | ) 287 | 288 | // If there is a "((" pattern without a closing, never mind 289 | if (endIndex < 0) { 290 | return [] 291 | } 292 | 293 | // we know where the beginning and end nodes containing our pattern are, so we combine 294 | // their values into a single string 295 | const combinedText = node.children 296 | .slice(startIndex, endIndex + 1) 297 | .reduce((m, s) => { 298 | if (s.value) m += s.value 299 | return m 300 | }, '') 301 | 302 | // now, we replace all of the old broken up pieces with a single, combined node containing 303 | // the full text of the alias 304 | const deleteCount = endIndex - startIndex + 1 305 | node.children.splice(startIndex, deleteCount, { 306 | type: 'text', 307 | value: combinedText, 308 | }) 309 | 310 | // and then proceed to process it as if none of this ever happened! 
311 | return _processAliases(node.children[startIndex], aliasRegex) 312 | } 313 | 314 | return [] 315 | } 316 | 317 | function _processAliases(node, aliasRegex) { 318 | // if we have a match, format into an array of slugs without the '#' 319 | const aliases = node.value 320 | .match(aliasRegex)[1] 321 | .split(',') 322 | .map((s) => s.trim().replace(/^#/, '')) 323 | 324 | // then remove the entire match from the element's actual text 325 | node.value = node.value.replace(aliasRegex, '') 326 | 327 | // and return the aliases 328 | return aliases || [] 329 | } 330 | 331 | // This converts a raw array of aliases to html "target" nodes 332 | function aliasesToNodes(aliases, id) { 333 | return aliases.map((alias) => { 334 | return { 335 | type: 'html', 336 | value: ``, 337 | } 338 | }) 339 | } 340 | 341 | // a heading can contain multiple nodes including text, html, etc 342 | // we try to stringify the node here to get its literal text contents 343 | // if that fails due to nonstandard nodes etc. we take a simpler route 344 | // for example, if using mdx, html nodes are encoded as "jsx" which is 345 | // not a type that standard remark recognizes. we can't accommodate all 346 | // types of custom remark setups, so we simply fall back if it doesn't work 347 | function stringifyChildNodes(node) { 348 | return getChildNodesText(node) 349 | } 350 | 351 | /** 352 | * Collect text from children nodes. This will visit 353 | * nodes recursively via "depth-first" strategy. 
354 | * 355 | * @param {import('unist').Parent | import('unist').Node} node 356 | * @returns {string} 357 | */ 358 | function getChildNodesText(node) { 359 | const text = node.children.reduce((acc, child) => { 360 | if ('children' in child) { 361 | acc += getChildNodesText(child) 362 | } else if ('value' in child) { 363 | acc += child.value 364 | } 365 | return acc 366 | }, '') 367 | 368 | return text 369 | } 370 | -------------------------------------------------------------------------------- /plugins/include-markdown/README.md: -------------------------------------------------------------------------------- 1 | # Include Markdown Plugin 2 | 3 | This plugin will transform a custom `@include "filename"` directive into the contents of the specified file, relative to the current file. 4 | 5 | ### Input 6 | 7 | Your main markdown file: 8 | 9 | ```md 10 | # My cool page 11 | 12 | @include "disclaimer.md" 13 | 14 | The rest of the content... 15 | ``` 16 | 17 | `disclaimer.md`, in the same directory: 18 | 19 | ```md 20 | Disclaimer: This content is not guaranteed to be in any way useful or truthful. 21 | ``` 22 | 23 | ### Output 24 | 25 | ```html 26 |

    My cool page

    27 |

    28 | Disclaimer: This content is not guaranteed to be in any way useful or 29 | truthful. 30 |

    31 |

    The rest of the content...

    32 | ``` 33 | 34 | ### File Types 35 | 36 | If you include a `.md` or `.mdx` file, its contents will be imported directly into the file, like a partial. If it has `@include` statements nested within it, they will all resolve recursively, as seen in the primary examples above 37 | 38 | If any other file extension is included, it will be displayed as the contents of a code block, with the code block language tag set as the file extension. For example: 39 | 40 | ### Input 41 | 42 | Your main markdown file: 43 | 44 | ```md 45 | # My cool page 46 | 47 | @include "test.js" 48 | 49 | The rest of the content... 50 | ``` 51 | 52 | `test.js`, in the same directory: 53 | 54 | ```js 55 | function sayHello(name) { 56 | console.log(`hello, ${name}!`) 57 | } 58 | ``` 59 | 60 | ### Output 61 | 62 | ```html 63 |

    My cool page

    64 |
     65 |   
     66 |   function sayHello(name) {
     67 |     console.log(`hello, ${name}!`)
     68 |   }
     69 |   
     70 | 
    71 |

    The rest of the content...

    72 | ``` 73 | 74 | ### Options 75 | 76 | This plugin accepts two optional config options: `resolveFrom` and `resolveMdx`. 77 | 78 | #### `resolveFrom` 79 | 80 | If you pass this option along with a path, all partials will resolve from the path that was passed in. For example: 81 | 82 | ```js 83 | remark().use(includeMarkdown, { resolveFrom: path.join(__dirname, 'partials') }) 84 | ``` 85 | 86 | With this config, you'd be able to put all your includes in a partials folder and require only based on the filename regardless of the location of your markdown file. 87 | 88 | #### `resolveMdx` 89 | 90 | If you pass `true` for this option, `.mdx` partials will be processed using [`remark-mdx`](https://github.com/mdx-js/mdx/tree/main/packages/remark-mdx). This allows the use of custom components within partials. For example, with `next-mdx-remote`: 91 | 92 | ```js 93 | import { serialize } from 'next-mdx-remote/serialize' 94 | import { MDXRemote } from 'next-mdx-remote' 95 | import { includeMarkdown } from '@hashicorp/remark-plugins' 96 | import CustomComponent from '../components/custom-component' 97 | 98 | const components = { CustomComponent } 99 | 100 | export default function TestPage({ source }) { 101 | return ( 102 |
    103 | 104 |
105 | ) 106 | } 107 | 108 | export async function getStaticProps() { 109 | // Imagine "included-file.mdx" has in it... 110 | // it will render as expected, since the @include extension 111 | // is .mdx and resolveMdx is true. 112 | const source = 'Some **mdx** text.\n\n@include "included-file.mdx"' 113 | const mdxSource = await serialize(source, { 114 | mdxOptions: { 115 | remarkPlugins: [[includeMarkdown, { resolveMdx: true }]], 116 | }, 117 | }) 118 | return { props: { source: mdxSource } } 119 | } 120 | ``` 121 | 122 | **Note**: this option should only be used in MDX contexts. This option will likely break where `remark-stringify` is used as the stringify plugin, such as when using `remark` directly. 123 | 124 | ```js 125 | // 🚨 DON'T DO THIS - it will likely just break. 126 | // remark().use(includeMarkdown, { resolveMdx: true }) 127 | ``` 128 | 129 | ### Ordering 130 | 131 | It's important to note that remark applies transforms in the order that they are called. If you want your other plugins to also apply to the contents of included files, you need to make sure that you apply the include content plugin **before all other plugins**. For example, let's say you have two plugins, one is this one to include markdown, and the other capitalizes all text, because yelling makes you more authoritative and also it's easier to read capitalized text. If you want to ensure that your included content is also capitalized, here's how you'd order your plugins: 132 | 133 | ```js 134 | remark().use(includeMarkdown).use(capitalizeAllText) 135 | ``` 136 | 137 | If you order them the opposite way, like this: 138 | 139 | ```js 140 | remark().use(capitalizeAllText).use(includeMarkdown) 141 | ``` 142 | 143 | ...what will happen is that all your text will be capitalized _except_ for the text in included files. And on top of that, the include plugin wouldn't resolve the files properly, because it capitalized the word "include", which is the wrong syntax.
So usually you want to make sure this plugin comes first in your plugin stack. 144 | -------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/basic.expected.md: -------------------------------------------------------------------------------- 1 | hello this is a file that uses an include 2 | 3 | include/before 4 | 5 | nested/include2 6 | 7 | include/after 8 | 9 | nested/include2 10 | 11 | isn't that neat? 12 | -------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/basic.md: -------------------------------------------------------------------------------- 1 | hello this is a file that uses an include 2 | 3 | @include 'include.md' 4 | 5 | @include 'nested/include2.md' 6 | 7 | isn't that neat? 8 | -------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/include-nested-component.mdx: -------------------------------------------------------------------------------- 1 | text at depth one 2 | 3 | @include 'nested/include-component.mdx' 4 | -------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/include-with-comment.mdx: -------------------------------------------------------------------------------- 1 | We should now be able include custom MDX components in partials. For example, a `"official"` `PluginTierLabel` should render below: 2 | 3 | 4 | 5 | Comments should NOT mess things up: 6 | 7 | 8 | 9 | But they seem to be messing things up, apparently due to differences in `remark` 12 vs 13. 
10 | -------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/include-with-component.mdx: -------------------------------------------------------------------------------- 1 | some text in an include 2 | 3 | 4 | -------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/include.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | function sayHello(name) { 7 | console.log(`hello, ${name}!`) 8 | } 9 | -------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/include.md: -------------------------------------------------------------------------------- 1 | include/before 2 | 3 | @include 'nested/include2.md' 4 | 5 | include/after 6 | -------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/include.mdx: -------------------------------------------------------------------------------- 1 | this is an **mdx** include 2 | -------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/invalid-path.md: -------------------------------------------------------------------------------- 1 | hello this is a file that uses an include 2 | 3 | @include 'bskjbfkhj' 4 | 5 | isn't that neat? 
6 | -------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/mdx-format.expected.md: -------------------------------------------------------------------------------- 1 | before 2 | 3 | this is an **mdx** include 4 | 5 | after 6 | -------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/mdx-format.md: -------------------------------------------------------------------------------- 1 | before 2 | 3 | @include 'include.mdx' 4 | 5 | after 6 | -------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/nested/include-component.mdx: -------------------------------------------------------------------------------- 1 | some text in a nested include 2 | 3 | 4 | -------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/nested/include2.md: -------------------------------------------------------------------------------- 1 | nested/include2 2 | -------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/nested/include3.md: -------------------------------------------------------------------------------- 1 | nested/include3 2 | 3 | @include 'include2.md' 4 | -------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/non-markdown.expected.md: -------------------------------------------------------------------------------- 1 | before 2 | 3 | ```js 4 | function sayHello(name) { 5 | console.log(`hello, ${name}!`) 6 | } 7 | ``` 8 | 9 | after 10 | -------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/non-markdown.md: -------------------------------------------------------------------------------- 1 | before 2 | 3 | @include 'include.js' 4 | 5 | after 6 | 
-------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/resolve-from.expected.md: -------------------------------------------------------------------------------- 1 | nested/include3 2 | 3 | nested/include2 4 | -------------------------------------------------------------------------------- /plugins/include-markdown/fixtures/resolve-from.md: -------------------------------------------------------------------------------- 1 | @include 'include3.md' 2 | -------------------------------------------------------------------------------- /plugins/include-markdown/index.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | const path = require('path') 7 | const remark = require('remark') 8 | const remarkMdx = require('remark-mdx') 9 | const flatMap = require('unist-util-flatmap') 10 | const { readSync } = require('to-vfile') 11 | const mdAstToMdxAst = require('./md-ast-to-mdx-ast') 12 | 13 | module.exports = function includeMarkdownPlugin({ 14 | resolveFrom, 15 | resolveMdx, 16 | } = {}) { 17 | return function transformer(tree, file) { 18 | return flatMap(tree, (node) => { 19 | if (node.type !== 'paragraph') return [node] 20 | 21 | // detect an `@include` statement 22 | const includeMatch = 23 | node.children[0].value && 24 | node.children[0].value.match(/^@include\s['"](.*)['"]$/) 25 | if (!includeMatch) return [node] 26 | 27 | // read the file contents 28 | const includePath = path.join( 29 | resolveFrom || file.dirname, 30 | includeMatch[1] 31 | ) 32 | let includeContents 33 | try { 34 | includeContents = readSync(includePath, 'utf8') 35 | } catch (err) { 36 | throw new Error( 37 | `The @include file path at ${includePath} was not found.\n\nInclude Location: ${file.path}:${node.position.start.line}:${node.position.start.column}` 38 | ) 39 | } 40 | 41 | // if we are including a ".md" 
or ".mdx" file, we add the contents as processed markdown 42 | // if any other file type, they are embedded into a code block 43 | if (includePath.match(/\.md(?:x)?$/)) { 44 | // return the file contents in place of the @include 45 | // (takes a couple steps because we're processing includes with remark) 46 | const processor = remark() 47 | // if the include is MDX, and the plugin consumer has confirmed their 48 | // ability to stringify MDX nodes (eg "jsx"), then use remarkMdx to support 49 | // custom components (which would otherwise appear as likely invalid HTML nodes) 50 | const isMdx = includePath.match(/\.mdx$/) 51 | if (isMdx && resolveMdx) processor.use(remarkMdx).use(mdAstToMdxAst) 52 | // use the includeMarkdown plugin to allow recursive includes 53 | processor.use(includeMarkdownPlugin, { resolveFrom, resolveMdx }) 54 | // Process the file contents, then return them 55 | const ast = processor.parse(includeContents) 56 | return processor.runSync(ast, includeContents).children 57 | } else { 58 | // trim trailing newline 59 | includeContents.contents = includeContents.contents.trim() 60 | 61 | // return contents wrapped inside a "code" node 62 | return [ 63 | { 64 | type: 'code', 65 | lang: includePath.match(/\.(\w+)$/)[1], 66 | value: includeContents, 67 | }, 68 | ] 69 | } 70 | }) 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /plugins/include-markdown/index.test.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 
3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | const path = require('path') 7 | const { readSync } = require('to-vfile') 8 | const remark = require('remark') 9 | const includeMarkdown = require('./index.js') 10 | const normalizeNewline = require('normalize-newline') 11 | 12 | describe('include-markdown', () => { 13 | test('basic', () => { 14 | remark() 15 | .use(includeMarkdown) 16 | .process(loadFixture('basic'), (err, file) => { 17 | if (err) throw new Error(err) 18 | expect(file.contents).toEqual(loadFixture('basic.expected').contents) 19 | }) 20 | }) 21 | 22 | test('include mdx', () => { 23 | remark() 24 | .use(includeMarkdown) 25 | .process(loadFixture('mdx-format'), (err, file) => { 26 | if (err) throw new Error(err) 27 | expect(file.contents).toEqual( 28 | loadFixture('mdx-format.expected').contents 29 | ) 30 | }) 31 | }) 32 | 33 | test('include custom mdx components', () => { 34 | // Set up a basic snippet as an mdast tree 35 | const sourceMdx = `hello\n\n@include 'include-with-component.mdx'\n\nworld` 36 | const rawTree = remark().parse(sourceMdx) 37 | // Set up the includes plugin which will also run remark-mdx 38 | const resolveFrom = path.join(__dirname, 'fixtures') 39 | const tree = includeMarkdown({ resolveFrom, resolveMdx: true })(rawTree) 40 | // Expect the custom component to appear in the resulting tree as JSX 41 | expect(tree.children.length).toBe(4) 42 | const [beforeP, includedText, includedComponent, afterP] = tree.children 43 | expect(beforeP.children[0].value).toBe('hello') 44 | expect(includedText.children[0].value).toBe('some text in an include') 45 | expect(includedComponent.type).toBe('jsx') 46 | expect(includedComponent.value).toBe('') 47 | expect(afterP.children[0].value).toBe('world') 48 | }) 49 | 50 | test('include nested custom mdx components', () => { 51 | // Set up a basic snippet as an mdast tree 52 | const sourceMdx = `hello\n\n@include 'include-nested-component.mdx'\n\nworld` 53 | const rawTree = 
remark().parse(sourceMdx) 54 | // Set up the includes plugin which will also run remark-mdx 55 | const resolveFrom = path.join(__dirname, 'fixtures') 56 | const tree = includeMarkdown({ resolveFrom, resolveMdx: true })(rawTree) 57 | // Expect the custom component to appear in the resulting tree as JSX 58 | expect(tree.children.length).toBe(5) 59 | const [beforeP, includedText, nestedText, nestedComponent, afterP] = 60 | tree.children 61 | expect(beforeP.children[0].value).toBe('hello') 62 | expect(includedText.children[0].value).toBe('text at depth one') 63 | expect(nestedText.children[0].value).toBe('some text in a nested include') 64 | expect(nestedComponent.value).toBe('') 65 | expect(nestedComponent.type).toBe('jsx') 66 | expect(afterP.children[0].value).toBe('world') 67 | }) 68 | 69 | test('handles HTML comments when MDX is enabled', () => { 70 | // Set up a basic snippet as an mdast tree 71 | const sourceMdx = `\n\n@include 'include-with-comment.mdx'\n\nworld` 72 | const rawTree = remark().parse(sourceMdx) 73 | // Set up the includes plugin which will also run remark-mdx 74 | const resolveFrom = path.join(__dirname, 'fixtures') 75 | const tree = includeMarkdown({ resolveFrom, resolveMdx: true })(rawTree) 76 | // Expect the tree to have the right number of nodes 77 | expect(tree.children.length).toBe(7) 78 | // Expect the direct comment to be an HTML node, 79 | // as we're not using md-ast-to-mdx-ast at this top level 80 | // (though in our usual MDX contexts, we would be) 81 | const directComment = tree.children[0] 82 | expect(directComment.type).toBe('html') 83 | expect(directComment.value).toBe('') 84 | // Expect the custom component in the include to be a JSX node 85 | const customComponent = tree.children[2] 86 | expect(customComponent.type).toBe('jsx') 87 | expect(customComponent.value).toBe('') 88 | // Expect the comment in the include to be a comment node, 89 | // as it has been parsed with remark-mdx and md-ast-to-mdx-ast, 90 | // the latter of which 
transforms comments from "html" to "comment" nodes 91 | const includedComment = tree.children[4] 92 | expect(includedComment.type).toBe('comment') 93 | expect(includedComment.value).toBe(' HTML comment but nested ') 94 | }) 95 | 96 | test('include non-markdown', () => { 97 | remark() 98 | .use(includeMarkdown) 99 | .process(loadFixture('non-markdown'), (err, file) => { 100 | if (err) throw new Error(err) /* NOTE(review): new Error(err) stringifies the original error and drops its stack; 'throw err' would preserve both */ 101 | expect(file.contents).toEqual( 102 | loadFixture('non-markdown.expected').contents 103 | ) 104 | }) 105 | }) 106 | 107 | test('invalid path', () => { 108 | expect(() => 109 | remark() 110 | .use(includeMarkdown) 111 | .process(loadFixture('invalid-path'), (err) => { 112 | if (err) throw err 113 | }) 114 | ).toThrow( 115 | /The @include file path at .*bskjbfkhj was not found\.\s+Include Location: .*invalid-path\.md:3:1/gm /* NOTE(review): the g and m flags add nothing to a toThrow match, and /g makes RegExp#test stateful via lastIndex */ 116 | ) 117 | }) 118 | 119 | test('resolveFrom option', () => { 120 | remark() 121 | .use(includeMarkdown, { 122 | resolveFrom: path.join(__dirname, 'fixtures/nested'), 123 | }) 124 | .process(loadFixture('resolve-from'), (err, file) => { 125 | if (err) throw new Error(err) /* NOTE(review): same as above — prefer 'throw err' */ 126 | expect(file.contents).toEqual( 127 | loadFixture('resolve-from.expected').contents 128 | ) 129 | }) 130 | }) 131 | }) 132 | 133 | /* Loads the named markdown fixture from ./fixtures as a vfile and normalizes its newlines so expected-output comparisons are platform-independent */ function loadFixture(name) { 134 | const vfile = readSync(path.join(__dirname, 'fixtures', `${name}.md`), 'utf8') 135 | vfile.contents = normalizeNewline(vfile.contents) 136 | return vfile 137 | } 138 | -------------------------------------------------------------------------------- /plugins/include-markdown/md-ast-to-mdx-ast.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | /* 7 | 8 | NOTE: 9 | This file is swiped directly from @mdxjs/mdx's createMdxAstCompiler.
10 | ref: https://github.com/mdx-js/mdx/blob/510bae2580958598ae29047bf755b1a2ea26cf7e/packages/mdx/md-ast-to-mdx-ast.js 11 | 12 | I considered the possibility of using createMdxAstCompiler rather than remark-mdx on its own. 13 | however, crucially, we do NOT want to transform our AST into a MDXAST, we ONLY want to 14 | transform custom component nodes (ie HTML that is really JSX) into JSX nodes. 15 | So it felt duplicative, but necessary, to copypasta this utility in to meet our needs. 16 | 17 | */ 18 | 19 | const visit = require('unist-util-visit') 20 | const { isComment, getCommentContents } = require('@mdx-js/util') 21 | 22 | module.exports = (_options) => (tree) => { 23 | visit(tree, 'jsx', (node) => { 24 | if (isComment(node.value)) { 25 | node.type = 'comment' 26 | node.value = getCommentContents(node.value) 27 | } 28 | }) 29 | 30 | return tree 31 | } 32 | -------------------------------------------------------------------------------- /plugins/paragraph-custom-alerts/README.md: -------------------------------------------------------------------------------- 1 | # Paragraph Custom Alerts 2 | 3 | This plugin allows paragraphs to be "tagged" by custom symbols, affecting their final render. 4 | 5 | | Symbol | Meaning | 6 | | ------ | --------- | 7 | | `=>` | `success` | 8 | | `->` | `info` | 9 | | `~>` | `warning` | 10 | | `!>` | `danger` | 11 | 12 | ### Input: 13 | 14 | ```mdx 15 | Read below for more information... 16 | 17 | !> Here be dragons. Proceed with caution! 18 | 19 | => You are victorious! Great victory! 20 | ``` 21 | 22 | ### Output: 23 | 24 | ```html 25 |

    Read below for more information...

    26 | 29 | 32 | ``` 33 | -------------------------------------------------------------------------------- /plugins/paragraph-custom-alerts/index.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | const is = require('unist-util-is') 7 | const visit = require('unist-util-visit') 8 | 9 | /* Maps each leading sigil to the alert variant used in the wrapper's className */ const sigils = { 10 | '=>': 'success', 11 | '->': 'info', 12 | '~>': 'warning', 13 | '!>': 'danger', 14 | } 15 | /* Remark plugin factory: any paragraph whose text starts with a sigil (e.g. "!> ") has the sigil stripped and is wrapped in a div with class "alert alert-<variant> g-type-body" (emitted via the node's hName/hProperties data) */ 16 | module.exports = function paragraphCustomAlertsPlugin() { 17 | return function transformer(tree) { 18 | visit(tree, 'paragraph', (pNode, _, parent) => { 19 | visit(pNode, 'text', (textNode) => { /* NOTE(review): this visits every text node in the paragraph, not just the first, so a sigil starting a later text segment (e.g. after inline code) would also trigger — confirm intended */ 20 | Object.keys(sigils).forEach((symbol) => { 21 | if (textNode.value.startsWith(`${symbol} `)) { 22 | // Remove the literal sigil symbol from string contents 23 | textNode.value = textNode.value.replace(`${symbol} `, '') 24 | 25 | // Wrap matched nodes with
    (containing proper attributes) 26 | parent.children = parent.children.map((node) => { 27 | return is(pNode, node) /* wrap only the matched paragraph; siblings pass through unchanged */ 28 | ? { 29 | type: 'wrapper', 30 | children: [node], 31 | data: { 32 | hName: 'div', 33 | hProperties: { 34 | className: [ 35 | 'alert', 36 | `alert-${sigils[symbol]}`, 37 | 'g-type-body', 38 | ], 39 | }, 40 | }, 41 | } 42 | : node 43 | }) 44 | } 45 | }) 46 | }) 47 | }) 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /plugins/paragraph-custom-alerts/index.test.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | const remark = require('remark') 7 | const html = require('remark-html') 8 | const paragraphCustomAlerts = require('./index.js') 9 | 10 | describe('paragraph-custom-alerts', () => { 11 | it('should produce the expected html output', () => { 12 | expect( 13 | remark() 14 | .use(paragraphCustomAlerts) 15 | .use(html) 16 | .processSync(`=> this is a success paragraph`) 17 | .toString() 18 | ).toMatch( 19 | '

    this is a success paragraph

    ' 20 | ) 21 | }) 22 | 23 | it('should handle multiple paragraph blocks', () => { 24 | const md = `this is a normal, non-alert paragraph 25 | 26 | ~> this is a warning block 27 | 28 | this is another "normal" block 29 | 30 | => success block here! yeah!` 31 | expect( 32 | remark().use(paragraphCustomAlerts).use(html).processSync(md).toString() 33 | ).toMatch( 34 | `

    this is a normal, non-alert paragraph

    35 |

    this is a warning block

    36 |

    this is another "normal" block

    37 |

    success block here! yeah!

    ` 38 | ) 39 | }) 40 | }) 41 | -------------------------------------------------------------------------------- /plugins/typography/README.md: -------------------------------------------------------------------------------- 1 | # Heading Type Styles 2 | 3 | We use a set of global classes for type styling at HashiCorp. This plugin adds type styles to the appropriate elements so that content looks as intended within rendered markdown blocks without duplicating or extending CSS. 4 | 5 | ### Input 6 | 7 | ```mdx 8 | # Uses 9 | 10 | Here are some uses... 11 | 12 | ## Another title 13 | 14 | Here is some more stuff... 15 | ``` 16 | 17 | ### Output 18 | 19 | ```jsx 20 |

    Uses

    21 | 22 |

    Here are some uses...

    23 | 24 |

    Another title

    25 | 26 |

    Here is some more stuff...

    27 | ``` 28 | 29 | ### Custom Class Mapping 30 | 31 | In rare cases, we may want to map custom `class` attributes onto specific elements. Currently, this plugin supports an `options` object, and `options.map` provides this functionality. 32 | 33 | Here is an imagined use case where all possible elements have custom `class` attributes. Any one of these elements can be omitted from the map, and it will fall back to our default `class` for that element. 34 | 35 | ```js 36 | const options = { 37 | map: { 38 | h1: 'custom-1', 39 | h2: 'custom-2', 40 | h3: 'custom-3', 41 | h4: 'custom-4', 42 | h5: 'custom-5', 43 | h6: 'custom-6', 44 | p: 'custom-paragraph', 45 | li: 'custom-list-item', 46 | }, 47 | } 48 | // example use with `mdx` 49 | const output = mdx.sync(fileContents, { 50 | remarkPlugins: [[typographyPlugin, options]], 51 | }) 52 | ``` 53 | 54 | With this configuration, and the same input as the previous example, we would expect the following output: 55 | 56 | ```jsx 57 |

    Uses

    58 | 59 |

    Here are some uses...

    60 | 61 |

    Another title

    62 | 63 |

    Here is some more stuff...

    64 | ``` 65 | -------------------------------------------------------------------------------- /plugins/typography/index.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | const visit = require('unist-util-visit') 7 | 8 | /* Remark plugin: adds HashiCorp global type-style classNames to headings, paragraphs, and list items; options.map may override the class for any element key, and an empty string suppresses the class entirely */ module.exports = function typographyPlugin(options = {}) { 9 | /* Resolves the className for an element key: a string entry in options.map wins (including ''), otherwise the default below */ function getClassName(elemKey) { 10 | const defaultMap = { 11 | h1: 'g-type-display-2', 12 | h2: 'g-type-display-3', 13 | h3: 'g-type-display-4', 14 | h4: 'g-type-display-5', 15 | h5: 'g-type-display-6', 16 | h6: 'g-type-label', 17 | p: 'g-type-long-body', 18 | li: 'g-type-long-body', 19 | } 20 | const customMap = options.map || {} 21 | return typeof customMap[elemKey] === 'string' 22 | ? customMap[elemKey] 23 | : defaultMap[elemKey] 24 | } 25 | /* Attaches the className to the node's hProperties so the HTML/JSX compiler emits it; a falsy className (e.g. '' from options.map) is a no-op */ 26 | function addClassName(node, className) { 27 | if (!className) return true /* NOTE(review): inconsistent return value — true here, undefined on the normal path; nothing in this file reads it */ 28 | const data = node.data || (node.data = {}) 29 | const props = data.hProperties || (data.hProperties = {}) 30 | data.id = className /* NOTE(review): assigning the className to data.id looks like a copy/paste slip and could clobber a data.id set elsewhere — confirm nothing relies on it; hProperties.className alone should suffice */ 31 | props.className = className 32 | } 33 | 34 | return function transformer(tree) { 35 | // Add typography classes to headings 36 | visit(tree, 'heading', (node) => { 37 | addClassName(node, getClassName(`h${node.depth}`)) 38 | }) 39 | 40 | // Add typography classes to paragraph text 41 | visit(tree, 'paragraph', (node) => { 42 | addClassName(node, getClassName('p')) 43 | }) 44 | 45 | // Add typography classes to list items 46 | visit(tree, 'listItem', (node) => { 47 | addClassName(node, getClassName('li')) 48 | }) 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /plugins/typography/index.test.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc.
3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | const typographyPlugin = require('./index.js') 7 | const mdx = require('@mdx-js/mdx') 8 | 9 | const fileContents = `hi there 10 | 11 | # Heading One 12 | ## Heading Two 13 | sadklfjhlskdjf 14 | 15 | ### Heading Three 16 | #### Heading Four 17 | ##### Heading Five 18 | ###### Heading Six 19 | 20 | Foo bar baz wow *amaze* 21 | 22 | - foo 23 | - bar 24 | ` 25 | 26 | describe('type-styles', () => { 27 | it('adds classNames to headings, paragraphs, and list items', () => { 28 | const output = mdx.sync(fileContents, { remarkPlugins: [typographyPlugin] }) 29 | expect(output).toMatch( 30 | /

    {`Heading One`}<\/h1>/ 31 | ) 32 | expect(output).toMatch( 33 | /

    {`Heading Two`}<\/h2>/ 34 | ) 35 | expect(output).toMatch( 36 | /

    {`Heading Three`}<\/h3>/ 37 | ) 38 | expect(output).toMatch( 39 | /

    {`Heading Four`}<\/h4>/ 40 | ) 41 | expect(output).toMatch( 42 | /

    {`Heading Five`}<\/h5>/ 43 | ) 44 | expect(output).toMatch( 45 | /
    {`Heading Six`}<\/h6>/ 46 | ) 47 | expect(output).toMatch( 48 | /

    {`sadklfjhlskdjf`}<\/p>/ 49 | ) 50 | expect(output).toMatch( 51 | /

  • {`foo`}<\/li>/ 52 | ) 53 | }) 54 | 55 | it('allows empty strings in map to prevent the addition of classNames', () => { 56 | const options = { 57 | map: { 58 | p: '', 59 | }, 60 | } 61 | const output = mdx.sync(fileContents, { 62 | remarkPlugins: [[typographyPlugin, options]], 63 | }) 64 | expect(output).not.toMatch( 65 | /

    {`sadklfjhlskdjf`}<\/p>/ 66 | ) 67 | }) 68 | 69 | it('allows customization of classNames', () => { 70 | const options = { 71 | map: { 72 | h1: 'custom-1', 73 | h2: 'custom-2', 74 | h3: 'custom-3', 75 | h4: 'custom-4', 76 | h5: 'custom-5', 77 | h6: 'custom-6', 78 | p: 'custom-paragraph', 79 | li: 'custom-list-item', 80 | }, 81 | } 82 | const output = mdx.sync(fileContents, { 83 | remarkPlugins: [[typographyPlugin, options]], 84 | }) 85 | expect(output).toMatch( 86 | /

    {`Heading One`}<\/h1>/ 87 | ) 88 | expect(output).toMatch( 89 | /

    {`Heading Two`}<\/h2>/ 90 | ) 91 | expect(output).toMatch( 92 | /

    {`Heading Three`}<\/h3>/ 93 | ) 94 | expect(output).toMatch( 95 | /

    {`Heading Four`}<\/h4>/ 96 | ) 97 | expect(output).toMatch( 98 | /

    {`Heading Five`}<\/h5>/ 99 | ) 100 | expect(output).toMatch( 101 | /
    {`Heading Six`}<\/h6>/ 102 | ) 103 | expect(output).toMatch( 104 | /

    {`sadklfjhlskdjf`}<\/p>/ 105 | ) 106 | expect(output).toMatch( 107 | /

  • {`foo`}<\/li>/ 108 | ) 109 | }) 110 | }) 111 | --------------------------------------------------------------------------------