Content here...
41 | ``` 42 | 43 | Since the `__target` element actually carries the id rather than the headline, it can be positioned independently to pad the headline off the top of the page if necessary, which is the case any time we use a "sticky" navigation. 44 | 45 | ## Options 46 | 47 | - `compatibilitySlug` _(optional)_ - if present, will generate an additional target element using a custom slug creation algorithm. Accepts a function with the following signature `fn(text: string)`. The `text` argument is the headline text, if the `compatibilitySlug` function generates an idential slug as the default, it will not be added at all. 48 | 49 | > **NOTE:** Be conscious of duplicate tracking with your compatibility function. If it needs to keep track of existing slugs on the page to avoid duplicates, it must implement that functionality on its own. Default slugs are not exposed to the `compatibilitySlug` function because this offers a footgun that can easily break compatibility. The `compatibilitySlug` function should operate entirely in its own sphere -- if it happens to generate a duplicate slug, the plugin itself will remove it as compatibility isn't necessary in that instance. 50 | -------------------------------------------------------------------------------- /plugins/DEPRECATED_heading-linkable/index.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 
3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | const generateSlug = require('../../generate_slug') 7 | const map = require('unist-util-map') 8 | const is = require('unist-util-is') 9 | 10 | module.exports = function headingLinkablePlugin({ compatibilitySlug } = {}) { 11 | return function transformer(tree) { 12 | const links = [] 13 | return map(tree, node => { 14 | if (!is(node, 'heading')) return node 15 | const text = node.children.reduce((m, i) => { 16 | m += i.value 17 | return m 18 | }, '') 19 | 20 | const slug = generateSlug(text, links) 21 | node.children.unshift({ 22 | type: 'html', 23 | value: `` 24 | }) 25 | 26 | if (compatibilitySlug) { 27 | const slug2 = compatibilitySlug(text) 28 | if (slug !== slug2) { 29 | node.children.unshift({ 30 | type: 'html', 31 | value: `` 32 | }) 33 | } 34 | } 35 | 36 | node.children.unshift({ 37 | type: 'html', 38 | value: `»` 41 | }) 42 | 43 | return node 44 | }) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /plugins/DEPRECATED_heading-linkable/index.test.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | const remark = require('remark') 7 | const html = require('remark-html') 8 | const headingLinkable = require('./index.js') 9 | 10 | describe('heading-linkable', () => { 11 | test('produces the expected html output', () => { 12 | expect( 13 | remark() 14 | .use(headingLinkable) 15 | .use(html) 16 | .processSync('# hello world') 17 | .toString() 18 | ).toMatch( 19 | [ 20 | 'Third
21 | item contains code
22 | TaskStates
- A map of tasks to their current state and the latest events
7 | that have effected the state. TaskState
objects contain the following
8 | fields:State
: The task's current state. It can have one of the following
10 | values:TaskStatePending
- The task is waiting to be run, either for the first
12 | time or due to a restart. node
24 | const codeSlug = generateSlug(`inlinecode-${codeNode.value}`, links)
25 |
26 | // Add slug to parent node's id attribute
27 | const data = liNode.data || (liNode.data = {})
28 | const props = data.hProperties || (data.hProperties = {})
29 | props.id = codeSlug
30 |
31 | // Wrap link element around child node
32 | pNode.children[0] = {
33 | type: 'link',
34 | url: `#${codeSlug}`,
35 | title: null,
36 | children: [pNode.children[0]]
37 | }
38 |
39 | return node
40 | })
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/plugins/DEPRECATED_inline-code-linkable/index.test.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) HashiCorp, Inc.
3 | * SPDX-License-Identifier: MPL-2.0
4 | */
5 |
6 | const remark = require('remark')
7 | const codeBlockLinkable = require('./index.js')
8 | const unified = require('unified')
9 | const rehype = require('remark-rehype')
10 | const markdownParse = require('remark-parse')
11 | const html = require('rehype-stringify')
12 |
13 | describe('inlineCode-linkable', () => {
14 | describe('basic fixture - two list items; one linkable', () => {
15 | const processor = remark().use(codeBlockLinkable)
16 | const ast = processor.runSync(
17 | processor.parse(
18 | '- first\n- `code` here is some code\n- should not link this `codeBlock`'
19 | )
20 | )
21 |
22 | const firstListItem = ast.children[0].children[0]
23 | const secondListItem = ast.children[0].children[1]
24 | const thirdListItem = ast.children[0].children[2]
25 |
26 | it('should add an id to an that contains ', () => {
27 | expect(secondListItem.data.hProperties.id).toEqual('inlinecode-code')
28 | })
29 |
30 | it("should *not* add an id to an that doesn't contain ", () => {
31 | expect(firstListItem.data).not.toBeDefined()
32 | })
33 |
34 | it('should wrap elements within an tag', () => {
35 | expect(secondListItem.children[0].children[0].type).toEqual('link')
36 | expect(secondListItem.children[0].children[0].children[0].type).toEqual(
37 | 'inlineCode'
38 | )
39 | })
40 |
41 | it('should produce and apply matching ids and href attributes for and ', () => {
42 | expect(secondListItem.data.hProperties.id).toEqual(
43 | secondListItem.children[0].children[0].url.slice(1)
44 | )
45 | })
46 |
47 | it('should *not* link where appears outside of first position', () => {
48 | expect(thirdListItem.data).not.toBeDefined()
49 | })
50 | })
51 |
52 | describe('intermediate fixture - several list items; several linkable', () => {
53 | const processor = remark().use(codeBlockLinkable)
54 | const ast = processor.runSync(
55 | processor.parse(
56 | '- one\n- two\n- `three` docs for **three**\n- four\n- `five` is also linkable!\n- `six` is linkable too!'
57 | )
58 | )
59 |
60 | const firstListItem = ast.children[0].children[0]
61 | const secondListItem = ast.children[0].children[1]
62 | const thirdListItem = ast.children[0].children[2]
63 | const fourthListItem = ast.children[0].children[3]
64 | const fifthListItem = ast.children[0].children[4]
65 | const sixthListItem = ast.children[0].children[5]
66 |
67 | it("should make third, fifth, and sixth 's linkable", () => {
68 | expect(thirdListItem.data.hProperties.id).toBeDefined()
69 | expect(fifthListItem.data.hProperties.id).toBeDefined()
70 | expect(sixthListItem.data.hProperties.id).toBeDefined()
71 | })
72 |
73 | it("should *not* make first, second, and fourth 's linkable", () => {
74 | expect(firstListItem.data).not.toBeDefined()
75 | expect(secondListItem.data).not.toBeDefined()
76 | expect(fourthListItem.data).not.toBeDefined()
77 | })
78 | })
79 | })
80 |
81 | test('real world output, deep nested lists', () => {
82 | const text =
83 | "#### Field Reference\n\n - `TaskStates` - A map of tasks to their current state and the latest events\n that have effected the state. `TaskState` objects contain the following\n fields:\n - `State`: The task's current state. It can have one of the following\n values:\n - `TaskStatePending` - The task is waiting to be run, either for the first\n time or due to a restart."
84 |
85 | unified()
86 | .use(markdownParse)
87 | .use(codeBlockLinkable)
88 | .use(rehype)
89 | .use(html)
90 | .process(text, (_, file) => {
91 | expect(String(file)).toMatchSnapshot()
92 | })
93 | })
94 |
--------------------------------------------------------------------------------
/plugins/anchor-links/README.md:
--------------------------------------------------------------------------------
1 | # Anchor Links
2 |
3 | This plugin processes headings and inline code blocks at the beginning of a list item to generate a slug and adds a **permalink** element and an invisible **target** element. These two elements ensure that users are able to click a link next to the heading, or click on the inline code block to quickly get an anchor link directly to the corresponding section, and that developers are able to customize the position that the section appears when that anchor link is visited, respectively.
4 |
5 | ## Input:
6 |
7 | ```mdx
8 | # First Level Heading
9 |
10 | - list item
11 | - `code_block` - with text explanation
12 |
13 | Content here...
14 | ```
15 |
16 | ## Output:
17 |
18 | ```html
19 |
20 | »
26 |
27 | First Level Heading
28 |
29 |
30 |
31 | - list item
32 | -
33 |
39 |
code_block
40 |
41 | - with text explanation
42 |
43 |
34 |
44 |
45 | Content here...
46 | ```
47 |
48 | Since the `__target` element carries the `id` rather than the headline, it can be positioned independently to pad the headline off the top of the page if necessary, which is the case any time we use a "sticky" navigation. Also worth noting is that the `__target` and `__permalink` elements carry a category identifier after their classname, `h` for "heading" and `lic` for "list inline code", in order to make styling super clear and avoid any chance for conflicts.
49 |
50 | ## Anchor Link Aliases
51 |
52 | This plugin also adds the ability to add **anchor link aliases** via markdown directly. Aliases give authors the ability to specify additional anchors that they would like to link to an existing anchor link. Here's an example of how this might look:
53 |
54 | ```md
55 | # Headline ((#alias, #alias-2))
56 |
57 | - `code_block` ((#alias-3)) further text, etc
58 | ```
59 |
60 | This markup would ensure that `#alias` and `#alias-2` also link to `#headline`, and that `#alias-3` also links to `#code_block`. Any number of aliases can be specified as long as they are in this exact format - for a single alias `((#slug))`, or for multiple, `((#slug, #slug2, #slug3))` etc. Anything following a headline or initial inline code element within a list item will be used as aliases and removed from the output. If you are using this syntax and you still see it in the output, this typically means there was an error in the syntax used.
61 |
62 | This feature is intended to be used **very sparingly**. It is a nonstandard markdown feature which we do our best to avoid as an organization. Let's walk through a couple situations where this syntax could be used and examine when it's appropriate.
63 |
64 | - You have written a headline, and would like to add a custom "vanity" permalink, to ensure that it's short and memorable.
65 |
66 | 🚫 **This is not an appropriate use of an anchor link alias.** As a custom, nonstandard markdown feature, we need to use this functionality sparingly, only when it is essential. This scenario does not qualify as essential.
67 |
68 | - You are changing an existing headline that is linked to internally, which you know will change its permalink slug. It's quicker and easier to add an alias than to find-and-replace all the internal links to the anchor.
69 |
70 | 🚫 **This is not an appropriate use of an anchor link alias.** Any time a headline changes, internal links to its permalink should be manually updated to its new permalink using find-and-replace.
71 |
72 | - You are changing an existing headline, and there are many external links to this headline which we are unable to fix.
73 |
74 | ✅ **This is the only appropriate scenario to be using anchor link aliases.** We track statistics on all anchor links via web analytics - if a headline's text must be changed, ask your manager and/or the digital dev team to check the analytics dashboard and see if there is significant externally-driven traffic to its permalink. If so, an anchor link alias should be used to avoid breaking users' expectations.
75 |
76 | ## Options
77 |
78 | - `compatibilitySlug` _(function, optional)_ - if present, will generate a slug using a custom slug creation algorithm and add it as an additional `__target` element. Accepts a function with the following signature `fn(text: string)`. The `text` argument is the headline/inline code text. If the `compatibilitySlug` function generates a slug identical to the default, it will not be added at all.
79 |
80 | > **NOTE:** Be conscious of duplicate tracking with your compatibility function. If it needs to keep track of existing slugs on the page to avoid duplicates, it must implement that functionality on its own. Default slugs are not exposed to the `compatibilitySlug` function because this offers a footgun that can easily break compatibility. The `compatibilitySlug` function should operate entirely in its own sphere -- if it happens to generate a duplicate slug, the plugin itself will remove it as compatibility isn't necessary in that instance.
81 |
82 | - `headings` _(array, optional)_ - if present, data about the headings being processed will be pushed to the array. Each element is an object with the following properties:
83 |
84 | - `aliases`: a string array containing all of the given [anchor link aliases](#anchor-link-aliases) for a heading
85 | - `level`: the level of a heading (e.g. an `<h1>` has a level of 1 and an `<h2>` has a level of 2)
86 | - `permalinkSlug`: the slug used in the permalink element
87 | - `slug`: the slug generated from a heading's text
88 | - `title`: the content of a heading in plain text (excluding aliases)
89 |
90 | - `listWithInlineCodePrefix` _(string, optional)_ - if present, will append a string to the beginning of each instance where lists with inline code at the beginning get an anchor link. This is also provided for compatibility reasons, as we previously used a separate plugin for lists with inline code that appended an `inlinecode` prefix to avoid conflicts.
91 |
--------------------------------------------------------------------------------
/plugins/anchor-links/fixtures/00-nested-headings/tutorial-terraform-aks.mdx:
--------------------------------------------------------------------------------
1 |
8 |
9 | The Azure Kubernetes Service (AKS) is a fully managed Kubernetes service for deploying, managing, and scaling containerized applications on Azure.
10 |
11 | In this tutorial, you will deploy a 2 node AKS cluster on your default VPC using Terraform then access its Kubernetes dashboard.
12 |
13 | ~> **Warning!** If you're not using an account that qualifies under the Azure
14 | [free tier](https://azure.microsoft.com/en-us/free/), you may be charged to run these
15 | examples. The most you should be charged should only be a few dollars, but
16 | we're not responsible for any charges that you may incur.
17 |
18 | ### Why deploy with Terraform?
19 |
20 | While you could use the built-in Azure provisioning processes (UI, CLI) for AKS clusters, Terraform provides you with several benefits:
21 |
22 | - **Unified Workflow** - If you are already deploying infrastructure to Azure with Terraform, your AKS cluster can fit into that workflow. You can also deploy applications into your AKS cluster using Terraform.
23 |
24 | - **Full Lifecycle Management** - Terraform doesn't only create resources, it updates, and deletes tracked resources without requiring you to inspect the API to identify those resources.
25 |
26 | - **Graph of Relationships** - Terraform understands dependency relationships between resources. For example, an Azure Kubernetes cluster needs to be associated with a resource group, Terraform won't attempt to create the cluster if the resource group failed to create.
27 |
28 | ## Prerequisites
29 |
30 | The tutorial assumes some basic familiarity with Kubernetes and `kubectl` but does
31 | not assume any pre-existing deployment.
32 |
33 | It also assumes that you are familiar with the usual Terraform plan/apply
34 | workflow. If you're new to Terraform itself, refer first to the Getting Started
35 | [tutorial](/terraform/tutorials/azure-get-started).
36 |
37 | For this tutorial, you will need
38 |
39 | - an [Azure account](https://portal.azure.com/#home)
40 | - a configured Azure CLI
41 | - `kubectl`
42 |
43 |
44 |
45 |
46 | In order for Terraform to run operations on your behalf, you must install and
47 | configure the Azure CLI tool. To install the Azure CLI, follow
48 | [these instructions](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) or choose a package manager based on your operating system.
49 |
50 |
51 |
52 |
53 |
54 | You can also use the package manager [`homebrew`](https://formulae.brew.sh/) to install the Azure CLI.
55 |
56 | ```shell-session
57 | $ brew install azure-cli
58 | ```
59 |
60 |
61 |
62 |
63 |
64 | You can also use the package manager [`Chocolatey`](https://chocolatey.org/) to install the Azure CLI.
65 |
66 | ```shell-session
67 | $ choco install azure-cli
68 | ```
69 |
70 |
71 |
72 |
73 |
74 | After you've installed the Azure CLI, login into Azure by running:
75 |
76 | ```shell-session
77 | $ az login
78 | ```
79 |
80 |
81 |
82 |
83 |
84 | To install the `kubectl` (Kubernetes CLI), follow [these instructions](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or choose a package manager based on your operating system.
85 |
86 |
87 |
88 |
89 |
90 | Use the package manager [`homebrew`](https://formulae.brew.sh/) to install `kubectl`.
91 |
92 | ```shell-session
93 | $ brew install kubernetes-cli
94 | ```
95 |
96 |
97 |
98 |
99 |
100 | Use the package manager [`Chocolatey`](https://chocolatey.org/) to install `kubectl`.
101 |
102 | ```shell-session
103 | $ choco install kubernetes-cli
104 | ```
105 |
106 |
107 |
108 |
109 |
110 |
111 |
112 | ## Set up and initialize your Terraform workspace
113 |
114 | In your terminal, clone the [following repository](https://github.com/hashicorp/learn-terraform-provision-aks-cluster).
115 | It contains the example configuration used in this tutorial.
116 |
117 | ```shell-session
118 | $ git clone https://github.com/hashicorp/learn-terraform-provision-aks-cluster
119 | ```
120 |
121 | You can explore this repository by changing directories or navigating in your UI.
122 |
123 | ```shell-session
124 | $ cd learn-terraform-provision-aks-cluster
125 | ```
126 |
127 | In here, you will find three files used to provision the AKS cluster.
128 |
129 | 1. [`aks-cluster.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/aks-cluster.tf) provisions a
130 | resource group and an AKS cluster. The `default_node_pool` defines the
131 | number of VMs and the VM type the cluster uses.
132 |
133 | ```hcl
134 | resource "azurerm_kubernetes_cluster" "default" {
135 | name = "${random_pet.prefix.id}-aks"
136 | location = azurerm_resource_group.default.location
137 | resource_group_name = azurerm_resource_group.default.name
138 | dns_prefix = "${random_pet.prefix.id}-k8s"
139 |
140 | default_node_pool {
141 | name = "default"
142 | node_count = 2
143 | vm_size = "Standard_B2s"
144 | os_disk_size_gb = 30
145 | }
146 |
147 | service_principal {
148 | client_id = var.appId
149 | client_secret = var.password
150 | }
151 |
152 | role_based_access_control_enabled = true
153 |
154 | tags = {
155 | environment = "Demo"
156 | }
157 | }
158 | ```
159 |
160 | 1. [`variables.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/variables.tf) declares the `appId` and `password` so Terraform can reference its configuration
161 |
162 | 1. [`terraform.tfvars`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/terraform.tfvars) defines the `appId` and `password` variables to authenticate to Azure
163 |
164 | 1. [`outputs.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf) declares values that can be useful to interact with your AKS cluster
165 |
166 | 1. [`versions.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/versions.tf) sets the Terraform version to at least 0.14 and defines the [`required_provider`](/terraform/language/providers/requirements#requiring-providers) block
167 |
168 | ### Create an Active Directory service principal account
169 |
170 | There are many ways to authenticate to the Azure provider. In this tutorial, you
171 | will use an Active Directory service principal account. You can learn how to
172 | authenticate using a different method [here](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#authenticating-to-azure).
173 |
174 | First, you need to create an Active Directory service principal account using
175 | the Azure CLI. You should see something like the following.
176 |
177 | ```shell-session
178 | $ az ad sp create-for-rbac --skip-assignment
179 | {
180 | "appId": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
181 | "displayName": "azure-cli-2019-04-11-00-46-05",
182 | "name": "http://azure-cli-2019-04-11-00-46-05",
183 | "password": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
184 | "tenant": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
185 | }
186 | ```
187 |
188 | ### Update your `terraform.tfvars` file
189 |
190 | Replace the values in your `terraform.tfvars` file with your `appId` and
191 | `password`. Terraform will use these values to authenticate to Azure before
192 | provisioning your resources. Your `terraform.tfvars` file should look like the
193 | following.
194 |
195 | ```plaintext
196 | # terraform.tfvars
197 | appId = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
198 | password = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
199 | ```
200 |
201 | ### Initialize Terraform
202 |
203 | After you have saved your customized variables file, initialize your Terraform
204 | workspace, which will download the provider and initialize it with the values
205 | provided in your `terraform.tfvars` file.
206 |
207 | ```shell-session
208 | $ terraform init
209 | Initializing the backend...
210 |
211 | Initializing provider plugins...
212 | - Reusing previous version of hashicorp/random from the dependency lock file
213 | - Reusing previous version of hashicorp/azurerm from the dependency lock file
214 | - Installing hashicorp/random v3.0.0...
215 | - Installed hashicorp/random v3.0.0 (signed by HashiCorp)
216 | - Installing hashicorp/azurerm v3.0.2...
217 | - Installed hashicorp/azurerm v3.0.2 (signed by HashiCorp)
218 |
219 | Terraform has been successfully initialized!
220 |
221 | You may now begin working with Terraform. Try running "terraform plan" to see
222 | any changes that are required for your infrastructure. All Terraform commands
223 | should now work.
224 |
225 | If you ever set or change modules or backend configuration for Terraform,
226 | rerun this command to reinitialize your working directory. If you forget, other
227 | commands will detect it and remind you to do so if necessary.
228 | ```
229 |
230 | ## Provision the AKS cluster
231 |
232 | In your initialized directory, run `terraform apply` and review the planned actions.
233 | Your terminal output should indicate the plan is running and what resources will be created.
234 |
235 | ```shell-session
236 | $ terraform apply
237 | An execution plan has been generated and is shown below.
238 | Resource actions are indicated with the following symbols:
239 | + create
240 |
241 | Terraform will perform the following actions:
242 |
243 | ## ...
244 |
245 | Plan: 1 to add, 0 to change, 0 to destroy.
246 |
247 | ## ...
248 | ```
249 |
250 | You can see this terraform apply will provision an Azure resource group and an
251 | AKS cluster. Confirm the apply with a `yes`.
252 |
253 | This process should take approximately 5 minutes. Upon successful application,
254 | your terminal prints the outputs defined in `aks-cluster.tf`.
255 |
256 | ```plaintext hideClipboard
257 | Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
258 |
259 | Outputs:
260 |
261 | kubernetes_cluster_name = light-eagle-aks
262 | resource_group_name = light-eagle-rg
263 | ```
264 |
265 | ## Configure kubectl
266 |
267 | Now that you've provisioned your AKS cluster, you need to configure `kubectl`.
268 |
269 | Run the following command to retrieve the access credentials for your cluster
270 | and automatically configure `kubectl`.
271 |
272 | ```shell-session
273 | $ az aks get-credentials --resource-group $(terraform output -raw resource_group_name) --name $(terraform output -raw kubernetes_cluster_name)
274 | Merged "light-eagle-aks" as current context in /Users/dos/.kube/config
275 | ```
276 |
277 | The [resource group name](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf#L1)
278 | and [Kubernetes Cluster name](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf#L5)
279 | correspond to the output variables shown after the successful Terraform run.
280 |
281 | ## Access Kubernetes Dashboard
282 |
283 | To verify your cluster's configuration, visit
284 | the Azure Portal's Kubernetes resource view.
285 | [Azure recommends](https://docs.microsoft.com/en-us/azure/aks/kubernetes-dashboard#start-the-kubernetes-dashboard)
286 | using this view over the default Kubernetes dashboard, since the AKS dashboard
287 | add-on is deprecated for Kubernetes versions 1.19+.
288 |
289 | Run the following command to generate the Azure portal link.
290 |
291 | ```shell-session
292 | $ az aks browse --resource-group $(terraform output -raw resource_group_name) --name $(terraform output -raw kubernetes_cluster_name)
293 | Kubernetes resources view on https://portal.azure.com/#resource/subscriptions/aaaaa/resourceGroups/light-eagle-rg/providers/Microsoft.ContainerService/managedClusters/light-eagle-aks/workloads
294 | ```
295 |
296 | Go to the URL in your preferred browser to view the Kubernetes resource view.
297 |
298 | 
299 |
300 | ## Clean up your workspace
301 |
302 | Congratulations, you have provisioned an AKS cluster, configured `kubectl`,
303 | and visited the Kubernetes dashboard.
304 |
305 | If you'd like to learn how to manage your AKS cluster using the Terraform
306 | Kubernetes Provider, leave your cluster running and continue to the
307 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider).
308 |
309 | ~> **Note:** This directory is **only** used to provision an AKS cluster with Terraform.
310 | By keeping the Terraform configuration for provisioning a Kubernetes cluster and
311 | managing a Kubernetes cluster resources separate, changes in one repository don't
312 | affect the other. In addition, the modularity makes the configuration more
313 | readable and enables you to scope different permissions to each workspace.
314 |
315 | If not, remember to destroy any resources you create once you are done with this
316 | tutorial. Run the `destroy` command and confirm with `yes` in your terminal.
317 |
318 | ```shell-session
319 | $ terraform destroy
320 | ```
321 |
322 | ## Next steps
323 |
324 | For more information on the AKS resource, visit the
325 | [Azure provider documentation](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster).
326 |
327 | For steps on how to manage Kubernetes resources in your AKS cluster or any other
328 | already created Kubernetes cluster, visit the
329 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider).
330 |
331 | To use run triggers to deploy a Kubernetes Cluster, Consul and Vault
332 | on Google Cloud, visit the [Deploy Consul and Vault on a Kubernetes Cluster using Run Triggers tutorial](/terraform/tutorials/kubernetes/kubernetes-consul-vault-pipeline).
333 |
--------------------------------------------------------------------------------
/plugins/anchor-links/fixtures/00-nested-headings/tutorial-terraform-gke.mdx:
--------------------------------------------------------------------------------
1 |
8 |
9 | The Google Kubernetes Engine (GKE) is a fully managed Kubernetes service for deploying, managing, and scaling containerized applications on Google Cloud.
10 |
11 | In this tutorial, you will deploy a 2-node separately managed node pool GKE cluster using Terraform. This GKE cluster will be distributed across multiple zones for high availability.
12 | Then, you will configure `kubectl` using Terraform output to deploy a Kubernetes dashboard on the cluster.
13 |
14 | ~> **Warning!** Google Cloud charges
15 | [about ten cents per hour management fee for each GKE cluster](https://cloud.google.com/kubernetes-engine/pricing), in addition to the cluster's resource costs.
16 | One zonal cluster per billing account is free. As a result, you may be charged
17 | to run these examples. The most you should be charged should only be a few
18 | dollars, but we're not responsible for any charges that you may incur.
19 |
20 | -> **Tip:** This example configuration provisions a GKE cluster with 2 nodes so it's under the default `IN_USE_ADDRESSES` quota. This configuration should be used as a learning exercise only — do not run a 2-node cluster in production.
21 |
22 | ### Why deploy with Terraform?
23 |
24 | While you could use the built-in GCP provisioning processes (UI, SDK/CLI) for GKE clusters, Terraform provides you with several benefits:
25 |
26 | - **Unified Workflow** - If you are already deploying infrastructure to Google Cloud with Terraform, your GKE cluster can fit into that workflow. You can also deploy applications into your GKE cluster using Terraform.
27 |
28 | - **Full Lifecycle Management** - Terraform doesn't only create resources, it updates, and deletes tracked resources without requiring you to inspect the API to identify those resources.
29 |
30 | - **Graph of Relationships** - Terraform understands dependency relationships between resources. For example, if you require a separately managed node pool, Terraform won't attempt to create the node pool if the GKE cluster failed to create.
31 |
32 | ## Prerequisites
33 |
34 | The tutorial assumes some basic familiarity with Kubernetes and `kubectl` but does
35 | not assume any pre-existing deployment.
36 |
37 | It also assumes that you are familiar with the usual Terraform plan/apply
38 | workflow. If you're new to Terraform itself, refer first to the Getting Started
39 | [tutorial](/terraform/tutorials/gcp-get-started).
40 |
41 | For this tutorial, you will need
42 |
43 | - a [GCP account](https://console.cloud.google.com/)
44 | - a configured gcloud SDK
45 | - `kubectl`
46 |
47 |
48 |
49 |
50 |
51 | In order for Terraform to run operations on your behalf, you must install and
52 | configure the `gcloud` SDK tool. To install the `gcloud` SDK, follow
53 | [these instructions](https://cloud.google.com/sdk/docs/quickstarts) or choose a package manager based on your operating system.
54 |
55 |
56 |
57 |
58 |
59 | You can also use the package manager [`homebrew`](https://formulae.brew.sh/) to install the gcloud SDK.
60 |
61 | ```shell-session
62 | $ brew install --cask google-cloud-sdk
63 | ```
64 |
65 |
66 |
67 |
68 |
69 | You can also use the package manager [`Chocolatey`](https://chocolatey.org/) to install the gcloud SDK.
70 |
71 | ```shell-session
72 | $ choco install gcloudsdk
73 | ```
74 |
75 |
76 |
77 |
78 |
79 | After you've installed the `gcloud` SDK, initialize it by running the following
80 | command.
81 |
82 | ```shell-session
83 | $ gcloud init
84 | ```
85 |
86 | This will authorize the SDK to access GCP using your user account credentials
87 | and add the SDK to your PATH. This step requires you to log in and select the
88 | project you want to work in. Finally, add your account to the Application
89 | Default Credentials (ADC). This will allow Terraform to access these credentials
90 | to provision resources on GCloud.
91 |
92 | ```shell-session
93 | $ gcloud auth application-default login
94 | ```
95 |
96 |
97 |
98 |
99 |
100 | To install the `kubectl` (Kubernetes CLI), follow [these instructions](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or choose a package manager based on your operating system.
101 |
102 |
103 |
104 |
105 |
106 | Use the package manager [`homebrew`](https://formulae.brew.sh/) to install `kubectl`.
107 |
108 | ```shell-session
109 | $ brew install kubernetes-cli
110 | ```
111 |
112 |
113 |
114 |
115 |
116 | Use the package manager [`Chocolatey`](https://chocolatey.org/) to install `kubectl`.
117 |
118 | ```shell-session
119 | $ choco install kubernetes-cli
120 | ```
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 | ## Set up and initialize your Terraform workspace
129 |
130 | In your terminal, clone the [following repository](https://github.com/hashicorp/learn-terraform-provision-gke-cluster).
131 | It contains the example configuration used in this tutorial.
132 |
133 | ```shell-session
134 | $ git clone https://github.com/hashicorp/learn-terraform-provision-gke-cluster
135 | ```
136 |
137 | You can explore this repository by changing directories or navigating in your UI.
138 |
139 | ```shell-session
140 | $ cd learn-terraform-provision-gke-cluster
141 | ```
142 |
143 | In here, you will find four files used to provision a VPC, subnets and a GKE cluster.
144 |
145 | 1. [`vpc.tf`](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/vpc.tf) provisions a VPC and subnet. A new VPC
146 | is created for this tutorial so it doesn't impact your existing cloud environment
147 | and resources. This file outputs `region`.
148 |
149 | 1. [`gke.tf`](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/gke.tf) provisions a GKE cluster and a
150 | separately managed node pool (recommended). Separately managed node pools
151 | allow you to customize your Kubernetes cluster profile — this is
152 | useful if some Pods require more resources than others. You can learn more
153 | [here](https://cloud.google.com/kubernetes-engine/docs/concepts/node-pools).
154 | The number of nodes in the node pool is also defined
155 | [here](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/gke.tf#L11).
156 |
157 | 1. [`terraform.tfvars`](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/terraform.tfvars) is a template for the `project_id` and `region` variables.
158 |
159 | 1. [`versions.tf`](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/versions.tf) sets the Terraform version to at least 0.14.
160 |
161 | ### Update your `terraform.tfvars` file
162 |
163 | Replace the values in your `terraform.tfvars` file with your `project_id` and
164 | `region`. Terraform will use these values to target your project when
165 | provisioning your resources. Your `terraform.tfvars` file should look like the
166 | following.
167 |
168 | ```plaintext
169 | # terraform.tfvars
170 | project_id = "REPLACE_ME"
171 | region = "us-central1"
172 | ```
173 |
174 | You can find the project your `gcloud` is configured to with this command.
175 |
176 | ```shell-session
177 | $ gcloud config get-value project
178 | ```
179 |
180 | The region has been defaulted to `us-central1`; you can find a full list of
181 | gcloud regions [here](https://cloud.google.com/compute/docs/regions-zones).
182 |
183 | ### Initialize Terraform workspace
184 |
185 | After you have saved your customized variables file, initialize your Terraform
186 | workspace, which will download the provider and initialize it with the values
187 | provided in your `terraform.tfvars` file.
188 |
189 | ```shell-session
190 | $ terraform init
191 |
192 | Initializing the backend...
193 |
194 | Initializing provider plugins...
195 | - Reusing previous version of hashicorp/google from the dependency lock file
196 | - Installing hashicorp/google v4.27.0...
197 | - Installed hashicorp/google v4.27.0 (signed by HashiCorp)
198 |
199 | Terraform has been successfully initialized!
200 |
201 | You may now begin working with Terraform. Try running "terraform plan" to see
202 | any changes that are required for your infrastructure. All Terraform commands
203 | should now work.
204 |
205 | If you ever set or change modules or backend configuration for Terraform,
206 | rerun this command to reinitialize your working directory. If you forget, other
207 | commands will detect it and remind you to do so if necessary.
208 | ```
209 |
210 | ## Provision the GKE cluster
211 |
212 | -> **NOTE** [Compute Engine API](https://console.developers.google.com/apis/api/compute.googleapis.com/overview)
213 | and [Kubernetes Engine API](https://console.cloud.google.com/apis/api/container.googleapis.com/overview)
214 | are required for `terraform apply` to work on this configuration.
215 | Enable both APIs for your Google Cloud project before continuing.
216 |
217 | In your initialized directory, run `terraform apply` and review the planned actions.
218 | Your terminal output should indicate the plan is running and what resources will be created.
219 |
220 | ```shell-session
221 | $ terraform apply
222 | An execution plan has been generated and is shown below.
223 | Resource actions are indicated with the following symbols:
224 | + create
225 |
226 | Terraform will perform the following actions:
227 |
228 | ## ...
229 |
230 | Plan: 4 to add, 0 to change, 0 to destroy.
231 |
232 | ## ...
233 | ```
234 |
235 | You can see this terraform apply will provision a VPC, subnet, GKE Cluster and a
236 | GKE node pool. Confirm the apply with a `yes`.
237 |
238 | This process should take approximately 10 minutes. Upon successful application,
239 | your terminal prints the outputs defined in `vpc.tf` and `gke.tf`.
240 |
241 | ```plaintext
242 | Apply complete! Resources: 4 added, 0 changed, 0 destroyed.
243 |
244 | Outputs:
245 |
246 | kubernetes_cluster_host = "35.232.196.187"
247 | kubernetes_cluster_name = "dos-terraform-edu-gke"
248 | project_id = "dos-terraform-edu"
249 | region = "us-central1"
250 | ```
251 |
252 | ## Configure kubectl
253 |
254 | Now that you've provisioned your GKE cluster, you need to configure `kubectl`.
255 |
256 | Run the following command to retrieve the access credentials for your cluster
257 | and automatically configure `kubectl`.
258 |
259 | ```shell-session
260 | $ gcloud container clusters get-credentials $(terraform output -raw kubernetes_cluster_name) --region $(terraform output -raw region)
261 | Fetching cluster endpoint and auth data.
262 | kubeconfig entry generated for dos-terraform-edu-gke.
263 | ```
264 |
265 | The
266 | [Kubernetes cluster name](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/gke.tf#L63)
267 | and [region](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/vpc.tf#L29)
268 | correspond to the output variables shown after the successful Terraform run.
269 |
270 | ### Troubleshooting
271 |
272 | You may see the following warning message when you try to retrieve your cluster
273 | credentials. This may be because your Kubernetes cluster is still
274 | initializing/updating. If this happens, you can still proceed to the next step.
275 |
276 | ```plaintext
277 | WARNING: cluster dos-terraform-edu-gke is not running. The kubernetes API may not be available.
278 | ```
279 |
280 | ## Deploy and access Kubernetes Dashboard
281 |
282 | To verify your cluster is correctly configured and running, you will deploy the
283 | Kubernetes dashboard and navigate to it in your local browser.
284 |
285 | While you can deploy the Kubernetes dashboard using Terraform, `kubectl` is used in this tutorial so you don't need to configure your Terraform Kubernetes Provider.
286 |
287 | The following command will schedule the resources necessary for the dashboard.
288 |
289 | ```shell-session
290 | $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta8/aio/deploy/recommended.yaml
291 |
292 | namespace/kubernetes-dashboard created
293 | serviceaccount/kubernetes-dashboard created
294 | service/kubernetes-dashboard created
295 | secret/kubernetes-dashboard-certs created
296 | secret/kubernetes-dashboard-csrf created
297 | secret/kubernetes-dashboard-key-holder created
298 | configmap/kubernetes-dashboard-settings created
299 | role.rbac.authorization.k8s.io/kubernetes-dashboard created
300 | clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
301 | rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
302 | clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
303 | deployment.apps/kubernetes-dashboard created
304 | service/dashboard-metrics-scraper created
305 | deployment.apps/dashboard-metrics-scraper created
306 | ```
307 |
308 | Now, create a proxy server that will allow you to navigate to the dashboard
309 | from the browser on your local machine. This will continue running until you stop the process by pressing `CTRL + C`.
310 |
311 | ```shell-session
312 | $ kubectl proxy
313 | ```
314 |
315 | You should be able to access the Kubernetes dashboard [here](http://127.0.0.1:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/)
316 | (`http://127.0.0.1:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/`).
317 |
318 | 
319 |
320 | ## Authenticate to Kubernetes Dashboard
321 |
322 | To use the Kubernetes dashboard, you need to create a `ClusterRoleBinding` and
323 | provide an authorization token. This gives the `cluster-admin` permission to
324 | access the `kubernetes-dashboard`.
325 | Authenticating using `kubeconfig` is **not** an option. You can read more about
326 | it in the [Kubernetes documentation](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/#accessing-the-dashboard-ui).
327 |
328 | In another terminal (do not close the `kubectl proxy` process), create the
329 | `ClusterRoleBinding` resource.
330 |
331 | ```shell-session
332 | $ kubectl apply -f https://raw.githubusercontent.com/hashicorp/learn-terraform-provision-gke-cluster/main/kubernetes-dashboard-admin.rbac.yaml
333 | ```
334 |
335 | Then, generate the authorization token.
336 |
337 | ```shell-session
338 | $ kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep service-controller-token | awk '{print $1}')
339 |
340 | Name: service-controller-token-m8m7j
341 | Namespace: kube-system
342 | Labels:
343 | Annotations: kubernetes.io/service-account.name: service-controller
344 | kubernetes.io/service-account.uid: bc99ddad-6be7-11ea-a3c7-42010a800017
345 |
346 | Type: kubernetes.io/service-account-token
347 |
348 | Data
349 | ====
350 | token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9...
351 | ca.crt: 1119 bytes
352 | namespace: 11 bytes
353 | ```
354 |
355 | Select "Token" on the Dashboard UI then copy and paste the entire token you
356 | receive into the
357 | [dashboard authentication screen](http://127.0.0.1:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/)
358 | to sign in. You are now signed in to the dashboard for your Kubernetes cluster.
359 |
360 | 
361 |
362 | ### (Optional) GKE nodes and node pool
363 |
364 | On the Dashboard UI, click _Nodes_ on the left hand menu.
365 |
366 | Notice there are 6 nodes in your cluster, even though
367 | [`gke_num_nodes` in your `gke.tf` file](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/gke.tf#L11)
368 | was set to 2. This is because a node pool was provisioned in each of the three zones
369 | within the region to provide high availability.
370 |
371 | ```shell-session
372 | $ gcloud container clusters describe dos-terraform-edu-gke --region us-central1 --format='default(locations)'
373 | locations:
374 | - us-central1-b
375 | - us-central1-f
376 | - us-central1-c
377 | ```
378 |
379 | -> **NOTE** Replace `dos-terraform-edu-gke` with the `kubernetes_cluster_name` value from your Terraform output.
380 |
381 | 
382 |
383 | ## Clean up your workspace
384 |
385 | Congratulations, you have provisioned a GKE cluster with a separated node pool,
386 | configured `kubectl`, and deployed the Kubernetes dashboard.
387 |
388 | If you'd like to learn how to manage your GKE cluster using the Terraform
389 | Kubernetes Provider, leave your cluster running and continue to the
390 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider).
391 |
392 | ~> **Note:** This directory is **only** used to provision a GKE cluster with Terraform.
393 | By keeping the Terraform configuration for provisioning a Kubernetes cluster and
394 | managing a Kubernetes cluster resources separate, changes in one repository don't
395 | affect the other. In addition, the modularity makes the configuration more
396 | readable and enables you to scope different permissions to each workspace.
397 |
398 | If not, remember to destroy any resources you create once you are done with this
399 | tutorial. Run the `destroy` command and confirm with `yes` in your terminal.
400 |
401 | ```shell-session
402 | $ terraform destroy
403 | ```
404 |
405 | ## Next steps
406 |
407 | For more information on the GKE resource, please visit the
408 | [Google Cloud provider documentation](https://registry.terraform.io/providers/hashicorp/google/3.14.0/docs/resources/container_cluster).
409 |
410 | For steps on how to manage Kubernetes resources in your GKE cluster or any other
411 | already created Kubernetes cluster, visit the
412 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider).
413 |
414 | For a more in-depth Kubernetes example, [Deploy Consul and Vault on a Kubernetes Cluster using Run Triggers](/terraform/tutorials/kubernetes/kubernetes-consul-vault-pipeline) (this tutorial is GKE based).
415 |
--------------------------------------------------------------------------------
/plugins/anchor-links/fixtures/00-nested-headings/tutorials-nomad-format-output-with-templates.mdx:
--------------------------------------------------------------------------------
1 |
8 |
9 | When using Nomad at an intermediate to advanced level, you'll need to interface with other systems or customize output generated by Nomad. The `-t` flag is a powerful way to pass a template in Go's text/template format to
10 | several of the Nomad commands that generate output based on the API. This allows
11 | you to filter and customize the output to meet your specific needs.
12 |
13 | The commands that allow for the -t flag are:
14 |
15 | - `nomad acl policy list`
16 | - `nomad acl token list`
17 | - `nomad alloc status`
18 | - `nomad deployment list`
19 | - `nomad deployment status`
20 | - `nomad eval status`
21 | - `nomad job deployments`
22 | - `nomad job history`
23 | - `nomad job inspect`
24 | - `nomad namespace list`
25 | - `nomad node status`
26 | - `nomad plugin status`
27 | - `nomad quota list`
28 | - `nomad volume status`
29 |
30 | This tutorial will teach you how to explore the objects that are returned to
31 | the template engine and how to use template syntax to format the output into
32 | a custom form.
33 |
34 | ## Prerequisites
35 |
36 | This guide assumes the following:
37 |
38 | - Familiarity with Go's text/template syntax. You can learn more about it in the
39 | [Learn Go Template Syntax] tutorial
40 |
41 | - That you are running these commands against a Nomad cluster with an active
42 | workload. You can create a minimal environment using a dev agent, started with
43 | `nomad agent -dev`, then running at least one Nomad job. You can use
44 | `nomad init -short` to create a sample Docker job or provide your own Nomad
45 | job.
46 |
47 | ## Note the shell-specific syntax
48 |
49 | When using the -t flag, you need to correctly handle string literals based on
50 | your shell environment. In a POSIX shell, you can run the following with a
51 | single quote:
52 |
53 | ```shell-session
54 | $ nomad node status -t '{{printf "%#+v" .}}'
55 | ```
56 |
57 | In a Windows shell (for example, PowerShell), use single
58 | quotes but escape the double quotes inside the parameter as follows:
59 |
60 | ```powershell
61 | PS> nomad node status -t '{{printf \"%#+v\" .}}'
62 | ```
63 |
64 | In this tutorial, you can select examples with the proper escaping using the
65 | tabs above the snippets.
66 |
67 | ## Start discovering objects
68 |
69 | The `printf` function and the `"%#+v"` format string are critical tools for you
70 | in exploring an unfamiliar template context.
71 |
72 | Run the following command to output the context being passed to the template
73 | in Go object format.
74 |
75 |
76 |
77 |
78 | ```shell-session
79 | $ nomad node status -t '{{printf "%#+v" .}}'
80 | ```
81 |
82 |
83 |
84 |
85 | ```powershell
86 | PS> nomad node status -t '{{printf \"%#+v\" .}}'
87 | ```
88 |
89 |
90 |
91 |
92 | ```plaintext
93 | []*api.NodeListStub{(*api.NodeListStub)(0xc0003fa160), (*api.NodeListStub)(0xc0003fa0b0), (*api.NodeListStub)(0xc0003fa000)}
94 | ```
95 |
96 | The output indicates that the context consists of a list (`[]`) of pointers
97 | (`*`) to `api.NodeListStub` objects. The list will also show one NodeListStub
98 | object per client node in your cluster's server state.
99 |
100 | You can explore these `api.NodeListStub` objects by using the `range` control over
101 | the list.
102 |
103 |
104 |
105 |
106 | ```shell-session
107 | $ nomad node status -t '{{range .}}{{printf "%#+v" .}}{{end}}'
108 | ```
109 |
110 |
111 |
112 |
113 | ```powershell
114 | PS> nomad node status -t '{{range .}}{{printf \"%#+v\" .}}{{end}}'
115 | ```
116 |
117 |
118 |
119 |
120 | ```plaintext
121 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ...
122 | ```
123 |
124 | If you have a lot of client nodes in your cluster state, this output will be
125 | unwieldy. In that case, you can use `with` and the index function to get the
126 | first list item.
127 |
128 |
129 |
130 |
131 | ```shell-session
132 | $ nomad node status -t '{{with index . 0}}{{printf "%#+v" .}}{{end}}'
133 | ```
134 |
135 |
136 |
137 |
138 | ```powershell
139 | PS> nomad node status -t '{{with index . 0}}{{printf \"%#+v\" .}}{{end}}'
140 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ...
141 | ```
142 |
143 |
144 |
145 |
146 | ```plaintext
147 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ...
148 | ```
149 |
150 | Finally, output `Name` and `Version` for each client in the cluster.
151 |
152 |
153 |
154 |
155 | ```shell-session
156 | $ nomad node status -t '{{range .}}{{printf "%s: %s\n" .Name .Version}}{{end}}'
157 | ```
158 |
159 |
160 |
161 |
162 | ```powershell
163 | PS> nomad node status -t '{{range .}}{{printf \"%s: %s\n\" .Name .Version}}{{end}}'
164 | ```
165 |
166 |
167 |
168 |
169 | ```plaintext
170 | nomad-client-2.node.consul: 0.12.0
171 | nomad-client-3.node.consul: 0.12.0
172 | nomad-client-1.node.consul: 0.12.0
173 | ```
174 |
175 | ## Make quiet output
176 |
177 | Suppose you want to create a reduced version of the `nomad job status` output
178 | to show just the running job IDs in your cluster and nothing else.
179 |
180 |
181 |
182 |
183 | ```shell-session
184 | $ nomad job inspect -t '{{range .}}{{if eq .Status "running"}}{{ println .Name}}{{end}}{{end}}'
185 | ```
186 |
187 |
188 |
189 |
190 | ```powershell
191 | PS> nomad job inspect -t '{{range .}}{{if eq .Status \"running\"}}{{ println .Name}}{{end}}{{end}}'
192 | ```
193 |
194 |
195 |
196 |
197 | Nomad will output the job IDs for every running job in your cluster. For example:
198 |
199 | ```plaintext
200 | fabio
201 | sockshop-carts
202 | sockshop-catalogue
203 | sockshop-frontend
204 | sockshop-infra
205 | sockshop-orders
206 | sockshop-payment
207 | sockshop-shipping
208 | sockshop-user
209 | ```
210 |
211 | ### Challenge yourself
212 |
213 | Allocations have a slightly different shape. How might you create similar output
214 | from the `nomad alloc status` command? Make sure that your Nomad cluster has at
215 | least one allocation running and then use the printf technique from earlier to
216 | explore the values sent into the template.
217 |
218 |
219 |
220 |
221 | Print the context that you are passed from the command using the printf command.
222 |
223 |
224 |
225 |
226 | ```shell-session
227 | $ nomad alloc status -t '{{printf "%#+v" . }}'
228 | ```
229 |
230 |
231 |
232 |
233 | ```powershell
234 | PS> nomad alloc status -t '{{printf \"%#+v\" . }}'
235 | ```
236 |
237 |
238 |
239 |
240 | ```plaintext
241 | []*api.AllocationListStub ...
242 | ```
243 |
244 | Note that the first thing that you receive is a list (`[]`) of pointers (`*`) to
245 | `AllocationListStub` objects.
246 |
247 | Use `range` to traverse each item in the list.
248 |
249 |
250 |
251 |
252 | ```shell-session
253 | $ nomad alloc status -t '{{range .}}{{printf "%#+v" . }}{{end}}'
254 | ```
255 |
256 |
257 |
258 |
259 | ```powershell
260 | PS> nomad alloc status -t '{{range .}}{{printf \"%#+v\" . }}{{end}}'
261 | ```
262 |
263 |
264 |
265 |
266 | ```plaintext
267 | &api.AllocationListStub{ID:"30663b68-4d8a-aada-4ad2-011b1acae3a1", EvalID:"c5eda90b-f675-048e-b2f7-9ced30e4916b", Name:"sockshop-user.userdb[0]", Namespace:"default", NodeID:"3be35c12-70aa-8816-195e-a4630a457727", NodeName:"nomad-client-3.node.consul", JobID:"sockshop-user", JobType:"service", JobVersion:0x0, ...
268 | ```
269 |
270 | If you have a lot of allocations running, this could get unwieldy. In that case,
271 | you can use `with` and the index function to get the first list item.
272 |
273 |
274 |
275 |
276 | ```shell-session
277 | $ nomad alloc status -t '{{with index . 0}}{{printf "%#+v" . }}{{end}}'
278 | ```
279 |
280 |
281 |
282 |
283 | ```powershell
284 | PS> nomad alloc status -t '{{with index . 0}}{{printf \"%#+v\" . }}{{end}}'
285 | ```
286 |
287 |
288 |
289 |
290 | ```plaintext
291 | &api.AllocationListStub{ID:"30663b68-4d8a-aada-4ad2-011b1acae3a1", EvalID:"c5eda90b-f675-048e-b2f7-9ced30e4916b", Name:"sockshop-user.userdb[0]", Namespace:"default", NodeID:"3be35c12-70aa-8816-195e-a4630a457727", NodeName:"nomad-client-3.node.consul", JobID:"sockshop-user", JobType:"service", JobVersion:0x0, ...
292 | ```
293 |
294 | The fields on the AllocationListStub object that give insight into the running
295 | state of an allocation are `DesiredStatus` and `ClientStatus`.
296 |
297 | -> **Did you know?** The definition of an [AllocationListStub][] object and
298 | valid values for the DesiredStatus and ClientStatus are located in Nomad's
299 | [api package][]. Take a moment to look at it and see what other information you
300 | might be interested in displaying with templates.
301 |
302 | Update your template to show items with a DesiredStatus of "run" and a client
303 | status of "running" or "pending."
304 |
305 |
306 |
307 |
308 | ```shell-session
309 | $ nomad alloc status -t '{{range .}}{{if and (eq .DesiredStatus "run") (or (eq .ClientStatus "running") (eq .ClientStatus "pending"))}}{{println .ID}}{{end}}{{end}}'
310 | ```
311 |
312 |
313 |
314 |
315 | ```powershell
316 | PS> nomad alloc status -t '{{range .}}{{if and (eq .DesiredStatus \"run\") (or (eq .ClientStatus \"running\") (eq .ClientStatus \"pending\"))}}{{println .ID}}{{end}}{{end}}'
317 | ```
318 |
319 |
320 |
321 |
322 | ```plaintext
323 | 30663b68-4d8a-aada-4ad2-011b1acae3a1
324 | 11b916da-d679-1718-26f3-f6cd499bfdb8
325 | 68bcb157-359f-9293-d091-5a8ef71475ad
326 | ...
327 | ```
328 |
329 | You now have a list of the IDs for all of the allocations running in your Nomad
330 | cluster.
331 |
332 |
333 |
334 |
335 | ## Retrieve a template from file
336 |
337 | Using the command line to write templates becomes challenging
338 | as the template becomes more complex.
339 |
340 | By writing a template in its own file, you can use comments, span multiple lines, and indent conditionals in order to make them more readable to you and to other operators.
341 |
342 | Consider using some of these techniques
343 | to include the template data into the command.
344 |
345 |
346 |
347 |
348 |
349 | Create a file named running_jobs.tmpl with the following content.
350 |
351 | ```plaintext
352 | {{- /*
353 | Get Running Jobs
354 | Run with `nomad job inspect -t "$(cat running_jobs.tmpl)"`
355 | */ -}}
356 | {{- range . -}}
357 | {{- if eq .Status "running" -}}
358 | {{- println .Name -}}
359 | {{- end -}}
360 | {{- end -}}
361 | ```
362 |
363 | Now, use a subshell to read the file into a variable
364 |
365 | ```shell-session
366 | $ nomad job inspect -t "$(cat running_jobs.tmpl)"
367 | ```
368 |
369 |
370 |
371 |
372 |
373 | Create a file named running_jobs.tmpl with the following content.
374 |
375 | ```plaintext
376 | {{- /*
377 | Get Running Jobs
378 | Run with:
379 | $content=Get-Content running_jobs.tmpl -Raw; nomad job inspect -t $content
380 | */ -}}
381 | {{- range . -}}
382 | {{- if eq .Status \"running\" -}}
383 | {{- println .Name -}}
384 | {{- end -}}
385 | {{- end -}}
386 | ```
387 |
388 | Now, read the file into a variable and pass it to the command.
389 |
390 | ```powershell
391 | PS> $content=Get-Content running_jobs.tmpl -Raw; nomad job inspect -t $content
392 | ```
393 |
394 |
395 |
396 |
397 |
398 | ## Learn more
399 |
400 | In this tutorial, you learned how to:
401 |
402 | - Customize the output of several Nomad commands using Go's text/template
403 | syntax.
404 |
405 | - Use the `printf` function to discover what is available in the template's
406 | context.
407 |
408 | - Use a template definition contained in a file as part of the command.
409 |
410 | Learn more about templating in other tutorials in the Nomad Templating
411 | Collection.
412 |
413 | [learn go template syntax]: /nomad/tutorials/templates/go-template-syntax
414 | [allocationliststub]: https://godoc.org/github.com/hashicorp/nomad/api#AllocationListStub
415 | [api package]: https://godoc.org/github.com/hashicorp/nomad/api
416 |
--------------------------------------------------------------------------------
/plugins/anchor-links/fixtures/01-nested-heading/tutorial-terraform-aks.mdx:
--------------------------------------------------------------------------------
1 |
10 |
11 | The Azure Kubernetes Service (AKS) is a fully managed Kubernetes service for deploying, managing, and scaling containerized applications on Azure.
12 |
13 | In this tutorial, you will deploy a 2 node AKS cluster on your default VPC using Terraform then access its Kubernetes dashboard.
14 |
15 | ~> **Warning!** If you're not using an account that qualifies under the Azure
16 | [free tier](https://azure.microsoft.com/en-us/free/), you may be charged to run these
17 | examples. The most you should be charged should only be a few dollars, but
18 | we're not responsible for any charges that may incur.
19 |
20 | ### Why deploy with Terraform?
21 |
22 | While you could use the built-in Azure provisioning processes (UI, CLI) for AKS clusters, Terraform provides you with several benefits:
23 |
24 | - **Unified Workflow** - If you are already deploying infrastructure to Azure with Terraform, your AKS cluster can fit into that workflow. You can also deploy applications into your AKS cluster using Terraform.
25 |
26 | - **Full Lifecycle Management** - Terraform doesn't only create resources, it updates, and deletes tracked resources without requiring you to inspect the API to identify those resources.
27 |
28 | - **Graph of Relationships** - Terraform understands dependency relationships between resources. For example, an Azure Kubernetes cluster needs to be associated with a resource group, Terraform won't attempt to create the cluster if the resource group failed to create.
29 |
30 | ## Prerequisites
31 |
32 | The tutorial assumes some basic familiarity with Kubernetes and `kubectl` but does
33 | not assume any pre-existing deployment.
34 |
35 | It also assumes that you are familiar with the usual Terraform plan/apply
36 | workflow. If you're new to Terraform itself, refer first to the Getting Started
37 | [tutorial](/terraform/tutorials/azure-get-started).
38 |
39 | For this tutorial, you will need
40 |
41 | - an [Azure account](https://portal.azure.com/#home)
42 | - a configured Azure CLI
43 | - `kubectl`
44 |
45 |
46 |
47 |
48 | In order for Terraform to run operations on your behalf, you must install and
49 | configure the Azure CLI tool. To install the Azure CLI, follow
50 | [these instructions](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) or choose a package manager based on your operating system.
51 |
52 |
53 |
54 |
55 | ## This is a single nested heading within two Tabs (tabbedSectionDepth should be 2)
56 |
57 | You can also use the package manager [`homebrew`](https://formulae.brew.sh/) to install the Azure CLI.
58 |
59 | ```shell-session
60 | $ brew install azure-cli
61 | ```
62 |
63 |
64 |
65 |
66 |
67 | You can also use the package manager [`Chocolatey`](https://chocolatey.org/) to install the Azure CLI.
68 |
69 | ```shell-session
70 | $ choco install azure-cli
71 | ```
72 |
73 |
74 |
75 |
76 |
77 | After you've installed the Azure CLI, log in to Azure by running:
78 |
79 | ```shell-session
80 | $ az login
81 | ```
82 |
83 |
84 |
85 |
86 |
87 | To install the `kubectl` (Kubernetes CLI), follow [these instructions](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or choose a package manager based on your operating system.
88 |
89 |
90 |
91 |
92 |
93 | Use the package manager [`homebrew`](https://formulae.brew.sh/) to install `kubectl`.
94 |
95 | ```shell-session
96 | $ brew install kubernetes-cli
97 | ```
98 |
99 |
100 |
101 |
102 |
103 | Use the package manager [`Chocolatey`](https://chocolatey.org/) to install `kubectl`.
104 |
105 | ```shell-session
106 | $ choco install kubernetes-cli
107 | ```
108 |
109 |
110 |
111 |
112 |
113 |
114 |
115 | ## Set up and initialize your Terraform workspace
116 |
117 | In your terminal, clone the [following repository](https://github.com/hashicorp/learn-terraform-provision-aks-cluster).
118 | It contains the example configuration used in this tutorial.
119 |
120 | ```shell-session
121 | $ git clone https://github.com/hashicorp/learn-terraform-provision-aks-cluster
122 | ```
123 |
124 | You can explore this repository by changing directories or navigating in your UI.
125 |
126 | ```shell-session
127 | $ cd learn-terraform-provision-aks-cluster
128 | ```
129 |
130 | In here, you will find three files used to provision the AKS cluster.
131 |
132 | 1. [`aks-cluster.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/aks-cluster.tf) provisions a
133 | resource group and an AKS cluster. The `default_node_pool` defines the
134 | number of VMs and the VM type the cluster uses.
135 |
136 | ```hcl
137 | resource "azurerm_kubernetes_cluster" "default" {
138 | name = "${random_pet.prefix.id}-aks"
139 | location = azurerm_resource_group.default.location
140 | resource_group_name = azurerm_resource_group.default.name
141 | dns_prefix = "${random_pet.prefix.id}-k8s"
142 |
143 | default_node_pool {
144 | name = "default"
145 | node_count = 2
146 | vm_size = "Standard_B2s"
147 | os_disk_size_gb = 30
148 | }
149 |
150 | service_principal {
151 | client_id = var.appId
152 | client_secret = var.password
153 | }
154 |
155 | role_based_access_control_enabled = true
156 |
157 | tags = {
158 | environment = "Demo"
159 | }
160 | }
161 | ```
162 |
163 | 1. [`variables.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/variables.tf) declares the `appID` and `password` so Terraform can reference them in its configuration
164 |
165 | 1. [`terraform.tfvars`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/terraform.tfvars) defines the `appId` and `password` variables to authenticate to Azure
166 |
167 | 1. [`outputs.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf) declares values that can be useful to interact with your AKS cluster
168 |
169 | 1. [`versions.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/versions.tf) sets the Terraform version to at least 0.14 and defines the [`required_provider`](/terraform/language/providers/requirements#requiring-providers) block
170 |
171 | ### Create an Active Directory service principal account
172 |
173 | There are many ways to authenticate to the Azure provider. In this tutorial, you
174 | will use an Active Directory service principal account. You can learn how to
175 | authenticate using a different method [here](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#authenticating-to-azure).
176 |
177 | First, you need to create an Active Directory service principal account using
178 | the Azure CLI. You should see something like the following.
179 |
180 | ```shell-session
181 | $ az ad sp create-for-rbac --skip-assignment
182 | {
183 | "appId": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
184 | "displayName": "azure-cli-2019-04-11-00-46-05",
185 | "name": "http://azure-cli-2019-04-11-00-46-05",
186 | "password": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
187 | "tenant": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
188 | }
189 | ```
190 |
191 | ### Update your `terraform.tfvars` file
192 |
193 | Replace the values in your `terraform.tfvars` file with your `appId` and
194 | `password`. Terraform will use these values to authenticate to Azure before
195 | provisioning your resources. Your `terraform.tfvars` file should look like the
196 | following.
197 |
198 | ```plaintext
199 | # terraform.tfvars
200 | appId = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
201 | password = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
202 | ```
203 |
204 | ### Initialize Terraform
205 |
206 | After you have saved your customized variables file, initialize your Terraform
207 | workspace, which will download the provider and initialize it with the values
208 | provided in your `terraform.tfvars` file.
209 |
210 | ```shell-session
211 | $ terraform init
212 | Initializing the backend...
213 |
214 | Initializing provider plugins...
215 | - Reusing previous version of hashicorp/random from the dependency lock file
216 | - Reusing previous version of hashicorp/azurerm from the dependency lock file
217 | - Installing hashicorp/random v3.0.0...
218 | - Installed hashicorp/random v3.0.0 (signed by HashiCorp)
219 | - Installing hashicorp/azurerm v3.0.2...
220 | - Installed hashicorp/azurerm v3.0.2 (signed by HashiCorp)
221 |
222 | Terraform has been successfully initialized!
223 |
224 | You may now begin working with Terraform. Try running "terraform plan" to see
225 | any changes that are required for your infrastructure. All Terraform commands
226 | should now work.
227 |
228 | If you ever set or change modules or backend configuration for Terraform,
229 | rerun this command to reinitialize your working directory. If you forget, other
230 | commands will detect it and remind you to do so if necessary.
231 | ```
232 |
233 | ## Provision the AKS cluster
234 |
235 | In your initialized directory, run `terraform apply` and review the planned actions.
236 | Your terminal output should indicate the plan is running and what resources will be created.
237 |
238 | ```shell-session
239 | $ terraform apply
240 | An execution plan has been generated and is shown below.
241 | Resource actions are indicated with the following symbols:
242 | + create
243 |
244 | Terraform will perform the following actions:
245 |
246 | ## ...
247 |
248 | Plan: 1 to add, 0 to change, 0 to destroy.
249 |
250 | ## ...
251 | ```
252 |
253 | You can see that this `terraform apply` will provision an Azure resource group and an
254 | AKS cluster. Confirm the apply with a `yes`.
255 |
256 | This process should take approximately 5 minutes. Upon successful application,
257 | your terminal prints the outputs defined in `aks-cluster.tf`.
258 |
259 | ```plaintext hideClipboard
260 | Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
261 |
262 | Outputs:
263 |
264 | kubernetes_cluster_name = light-eagle-aks
265 | resource_group_name = light-eagle-rg
266 | ```
267 |
268 | ## Configure kubectl
269 |
270 | Now that you've provisioned your AKS cluster, you need to configure `kubectl`.
271 |
272 | Run the following command to retrieve the access credentials for your cluster
273 | and automatically configure `kubectl`.
274 |
275 | ```shell-session
276 | $ az aks get-credentials --resource-group $(terraform output -raw resource_group_name) --name $(terraform output -raw kubernetes_cluster_name)
277 | Merged "light-eagle-aks" as current context in /Users/dos/.kube/config
278 | ```
279 |
280 | The [resource group name](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf#L1)
281 | and [Kubernetes Cluster name](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf#L5)
282 | correspond to the output variables shown after the successful Terraform run.
283 |
284 | ## Access Kubernetes Dashboard
285 |
286 | To verify your cluster's configuration, visit
287 | the Azure Portal's Kubernetes resource view.
288 | [Azure recommends](https://docs.microsoft.com/en-us/azure/aks/kubernetes-dashboard#start-the-kubernetes-dashboard)
289 | using this view over the default Kubernetes dashboard, since the AKS dashboard
290 | add-on is deprecated for Kubernetes versions 1.19+.
291 |
292 | Run the following command to generate the Azure portal link.
293 |
294 | ```shell-session
295 | $ az aks browse --resource-group $(terraform output -raw resource_group_name) --name $(terraform output -raw kubernetes_cluster_name)
296 | Kubernetes resources view on https://portal.azure.com/#resource/subscriptions/aaaaa/resourceGroups/light-eagle-rg/providers/Microsoft.ContainerService/managedClusters/light-eagle-aks/workloads
297 | ```
298 |
299 | Go to the URL in your preferred browser to view the Kubernetes resource view.
300 |
301 | 
302 |
303 | ## Clean up your workspace
304 |
305 | Congratulations, you have provisioned an AKS cluster, configured `kubectl`,
306 | and visited the Kubernetes dashboard.
307 |
308 | If you'd like to learn how to manage your AKS cluster using the Terraform
309 | Kubernetes Provider, leave your cluster running and continue to the
310 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider).
311 |
312 | ~> **Note:** This directory is **only** used to provision an AKS cluster with Terraform.
313 | By keeping the Terraform configuration for provisioning a Kubernetes cluster and
314 | managing a Kubernetes cluster resources separate, changes in one repository don't
315 | affect the other. In addition, the modularity makes the configuration more
316 | readable and enables you to scope different permissions to each workspace.
317 |
318 | If not, remember to destroy any resources you create once you are done with this
319 | tutorial. Run the `destroy` command and confirm with `yes` in your terminal.
320 |
321 | ```shell-session
322 | $ terraform destroy
323 | ```
324 |
325 | ## Next steps
326 |
327 | For more information on the AKS resource, visit the
328 | [Azure provider documentation](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster).
329 |
330 | For steps on how to manage Kubernetes resources in your AKS cluster or any other
331 | already created Kubernetes cluster, visit the
332 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider).
333 |
334 | To use run triggers to deploy a Kubernetes Cluster, Consul and Vault
335 | on Google Cloud, visit the [Deploy Consul and Vault on a Kubernetes Cluster using Run Triggers tutorial](/terraform/tutorials/kubernetes/kubernetes-consul-vault-pipeline).
336 |
--------------------------------------------------------------------------------
/plugins/anchor-links/fixtures/01-nested-heading/tutorial-terraform-gke.mdx:
--------------------------------------------------------------------------------
1 |
10 |
11 | The Google Kubernetes Engine (GKE) is a fully managed Kubernetes service for deploying, managing, and scaling containerized applications on Google Cloud.
12 |
13 | In this tutorial, you will deploy a 2-node separately managed node pool GKE cluster using Terraform. This GKE cluster will be distributed across multiple zones for high availability.
14 | Then, you will configure `kubectl` using Terraform output to deploy a Kubernetes dashboard on the cluster.
15 |
16 | ~> **Warning!** Google Cloud charges
17 | [about ten cents per hour management fee for each GKE cluster](https://cloud.google.com/kubernetes-engine/pricing), in addition to the cluster's resource costs.
18 | One zonal cluster per billing account is free. As a result, you may be charged
19 | to run these examples. The most you should be charged should only be a few
20 | dollars, but we're not responsible for any charges that may incur.
21 |
22 | -> **Tip:** This example configuration provisions a GKE cluster with 2 nodes so it's under the default `IN_USE_ADDRESSES` quota. This configuration should be used as a learning exercise only — do not run a 2-node cluster in production.
23 |
24 | ### Why deploy with Terraform?
25 |
26 | While you could use the built-in GCP provisioning processes (UI, SDK/CLI) for GKE clusters, Terraform provides you with several benefits:
27 |
28 | - **Unified Workflow** - If you are already deploying infrastructure to Google Cloud with Terraform, your GKE cluster can fit into that workflow. You can also deploy applications into your GKE cluster using Terraform.
29 |
30 | - **Full Lifecycle Management** - Terraform doesn't only create resources, it updates, and deletes tracked resources without requiring you to inspect the API to identify those resources.
31 |
32 | - **Graph of Relationships** - Terraform understands dependency relationships between resources. For example, if you require a separately managed node pool, Terraform won't attempt to create the node pool if the GKE cluster failed to create.
33 |
34 | ## Prerequisites
35 |
36 | The tutorial assumes some basic familiarity with Kubernetes and `kubectl` but does
37 | not assume any pre-existing deployment.
38 |
39 | It also assumes that you are familiar with the usual Terraform plan/apply
40 | workflow. If you're new to Terraform itself, refer first to the Getting Started
41 | [tutorial](/terraform/tutorials/gcp-get-started).
42 |
43 | For this tutorial, you will need
44 |
45 | - a [GCP account](https://console.cloud.google.com/)
46 | - a configured gcloud SDK
47 | - `kubectl`
48 |
49 |
50 |
51 |
52 |
53 | In order for Terraform to run operations on your behalf, you must install and
54 | configure the `gcloud` SDK tool. To install the `gcloud` SDK, follow
55 | [these instructions](https://cloud.google.com/sdk/docs/quickstarts) or choose a package manager based on your operating system.
56 |
57 |
58 |
59 |
60 | ## This is a single nested heading within two Tabs (tabbedSectionDepth should be 2)
61 |
62 |
63 | You can also use the package manager [`homebrew`](https://formulae.brew.sh/) to install the gcloud SDK.
64 |
65 | ```shell-session
66 | $ brew install --cask google-cloud-sdk
67 | ```
68 |
69 |
70 |
71 |
72 |
73 | You can also use the package manager [`Chocolatey`](https://chocolatey.org/) to install the gcloud SDK.
74 |
75 | ```shell-session
76 | $ choco install gcloudsdk
77 | ```
78 |
79 |
80 |
81 |
82 |
83 | After you've installed the `gcloud` SDK, initialize it by running the following
84 | command.
85 |
86 | ```shell-session
87 | $ gcloud init
88 | ```
89 |
90 | This will authorize the SDK to access GCP using your user account credentials
91 | and add the SDK to your PATH. This steps requires you to login and select the
92 | project you want to work in. Finally, add your account to the Application
93 | Default Credentials (ADC). This will allow Terraform to access these credentials
94 | to provision resources on GCloud.
95 |
96 | ```shell-session
97 | $ gcloud auth application-default login
98 | ```
99 |
100 |
101 |
102 |
103 |
104 | To install the `kubectl` (Kubernetes CLI), follow [these instructions](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or choose a package manager based on your operating system.
105 |
106 |
107 |
108 |
109 |
110 | Use the package manager [`homebrew`](https://formulae.brew.sh/) to install `kubectl`.
111 |
112 | ```shell-session
113 | $ brew install kubernetes-cli
114 | ```
115 |
116 |
117 |
118 |
119 |
120 | Use the package manager [`Chocolatey`](https://chocolatey.org/) to install `kubectl`.
121 |
122 | ```shell-session
123 | $ choco install kubernetes-cli
124 | ```
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 | ## Set up and initialize your Terraform workspace
133 |
134 | In your terminal, clone the [following repository](https://github.com/hashicorp/learn-terraform-provision-gke-cluster).
135 | It contains the example configuration used in this tutorial.
136 |
137 | ```shell-session
138 | $ git clone https://github.com/hashicorp/learn-terraform-provision-gke-cluster
139 | ```
140 |
141 | You can explore this repository by changing directories or navigating in your UI.
142 |
143 | ```shell-session
144 | $ cd learn-terraform-provision-gke-cluster
145 | ```
146 |
147 | In here, you will find four files used to provision a VPC, subnets and a GKE cluster.
148 |
149 | 1. [`vpc.tf`](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/vpc.tf) provisions a VPC and subnet. A new VPC
150 | is created for this tutorial so it doesn't impact your existing cloud environment
151 | and resources. This file outputs `region`.
152 |
153 | 1. [`gke.tf`](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/gke.tf) provisions a GKE cluster and a
154 | separately managed node pool (recommended). Separately managed node pools
155 | allows you to customize your Kubernetes cluster profile — this is
156 | useful if some Pods require more resources than others. You can learn more
157 | [here](https://cloud.google.com/kubernetes-engine/docs/concepts/node-pools).
158 | The number of nodes in the node pool is also defined
159 | [here](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/gke.tf#L11).
160 |
161 | 1. [`terraform.tfvars`](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/terraform.tfvars) is a template for the `project_id` and `region` variables.
162 |
163 | 1. [`versions.tf`](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/versions.tf) sets the Terraform version to at least 0.14.
164 |
165 | ### Update your `terraform.tfvars` file
166 |
167 | Replace the values in your `terraform.tfvars` file with your `project_id` and
168 | `region`. Terraform will use these values to target your project when
169 | provisioning your resources. Your `terraform.tfvars` file should look like the
170 | following.
171 |
172 | ```plaintext
173 | # terraform.tfvars
174 | project_id = "REPLACE_ME"
175 | region = "us-central1"
176 | ```
177 |
178 | You can find the project your `gcloud` is configured to with this command.
179 |
180 | ```shell-session
181 | $ gcloud config get-value project
182 | ```
183 |
184 | The region has been defaulted to `us-central1`; you can find a full list of
185 | gcloud regions [here](https://cloud.google.com/compute/docs/regions-zones).
186 |
187 | ### Initialize Terraform workspace
188 |
189 | After you have saved your customized variables file, initialize your Terraform
190 | workspace, which will download the provider and initialize it with the values
191 | provided in your `terraform.tfvars` file.
192 |
193 | ```shell-session
194 | $ terraform init
195 |
196 | Initializing the backend...
197 |
198 | Initializing provider plugins...
199 | - Reusing previous version of hashicorp/google from the dependency lock file
200 | - Installing hashicorp/google v4.27.0...
201 | - Installed hashicorp/google v4.27.0 (signed by HashiCorp)
202 |
203 | Terraform has been successfully initialized!
204 |
205 | You may now begin working with Terraform. Try running "terraform plan" to see
206 | any changes that are required for your infrastructure. All Terraform commands
207 | should now work.
208 |
209 | If you ever set or change modules or backend configuration for Terraform,
210 | rerun this command to reinitialize your working directory. If you forget, other
211 | commands will detect it and remind you to do so if necessary.
212 | ```
213 |
214 | ## Provision the GKE cluster
215 |
216 | -> **NOTE** [Compute Engine API](https://console.developers.google.com/apis/api/compute.googleapis.com/overview)
217 | and [Kubernetes Engine API](https://console.cloud.google.com/apis/api/container.googleapis.com/overview)
218 | are required for `terraform apply` to work on this configuration.
219 | Enable both APIs for your Google Cloud project before continuing.
220 |
221 | In your initialized directory, run `terraform apply` and review the planned actions.
222 | Your terminal output should indicate the plan is running and what resources will be created.
223 |
224 | ```shell-session
225 | $ terraform apply
226 | An execution plan has been generated and is shown below.
227 | Resource actions are indicated with the following symbols:
228 | + create
229 |
230 | Terraform will perform the following actions:
231 |
232 | ## ...
233 |
234 | Plan: 4 to add, 0 to change, 0 to destroy.
235 |
236 | ## ...
237 | ```
238 |
239 | You can see that this `terraform apply` will provision a VPC, subnet, GKE Cluster and a
240 | GKE node pool. Confirm the apply with a `yes`.
241 |
242 | This process should take approximately 10 minutes. Upon successful application,
243 | your terminal prints the outputs defined in `vpc.tf` and `gke.tf`.
244 |
245 | ```plaintext
246 | Apply complete! Resources: 4 added, 0 changed, 0 destroyed.
247 |
248 | Outputs:
249 |
250 | kubernetes_cluster_host = "35.232.196.187"
251 | kubernetes_cluster_name = "dos-terraform-edu-gke"
252 | project_id = "dos-terraform-edu"
253 | region = "us-central1"
254 | ```
255 |
256 | ## Configure kubectl
257 |
258 | Now that you've provisioned your GKE cluster, you need to configure `kubectl`.
259 |
260 | Run the following command to retrieve the access credentials for your cluster
261 | and automatically configure `kubectl`.
262 |
263 | ```shell-session
264 | $ gcloud container clusters get-credentials $(terraform output -raw kubernetes_cluster_name) --region $(terraform output -raw region)
265 | Fetching cluster endpoint and auth data.
266 | kubeconfig entry generated for dos-terraform-edu-gke.
267 | ```
268 |
269 | The
270 | [Kubernetes cluster name](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/gke.tf#L63)
271 | and [region](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/vpc.tf#L29)
272 | correspond to the output variables shown after the successful Terraform run.
273 |
274 | ### Troubleshooting
275 |
276 | You may see the following warning message when you try to retrieve your cluster
277 | credentials. This may be because your Kubernetes cluster is still
278 | initializing/updating. If this happens, you can still proceed to the next step.
279 |
280 | ```plaintext
281 | WARNING: cluster dos-terraform-edu-gke is not running. The kubernetes API may not be available.
282 | ```
283 |
284 | ## Deploy and access Kubernetes Dashboard
285 |
286 | To verify your cluster is correctly configured and running, you will deploy the
287 | Kubernetes dashboard and navigate to it in your local browser.
288 |
289 | While you can deploy the Kubernetes dashboard using Terraform, `kubectl` is used in this tutorial so you don't need to configure your Terraform Kubernetes Provider.
290 |
291 | The following command will schedule the resources necessary for the dashboard.
292 |
293 | ```shell-session
294 | $ kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta8/aio/deploy/recommended.yaml
295 |
296 | namespace/kubernetes-dashboard created
297 | serviceaccount/kubernetes-dashboard created
298 | service/kubernetes-dashboard created
299 | secret/kubernetes-dashboard-certs created
300 | secret/kubernetes-dashboard-csrf created
301 | secret/kubernetes-dashboard-key-holder created
302 | configmap/kubernetes-dashboard-settings created
303 | role.rbac.authorization.k8s.io/kubernetes-dashboard created
304 | clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
305 | rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
306 | clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
307 | deployment.apps/kubernetes-dashboard created
308 | service/dashboard-metrics-scraper created
309 | deployment.apps/dashboard-metrics-scraper created
310 | ```
311 |
312 | Now, create a proxy server that will allow you to navigate to the dashboard
313 | from the browser on your local machine. This will continue running until you stop the process by pressing `CTRL + C`.
314 |
315 | ```shell-session
316 | $ kubectl proxy
317 | ```
318 |
319 | You should be able to access the Kubernetes dashboard [here](http://127.0.0.1:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/)
320 | (`http://127.0.0.1:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/`).
321 |
322 | 
323 |
324 | ## Authenticate to Kubernetes Dashboard
325 |
326 | To use the Kubernetes dashboard, you need to create a `ClusterRoleBinding` and
327 | provide an authorization token. This gives the `cluster-admin` permission to
328 | access the `kubernetes-dashboard`.
329 | Authenticating using `kubeconfig` is **not** an option. You can read more about
330 | it in the [Kubernetes documentation](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/#accessing-the-dashboard-ui).
331 |
332 | In another terminal (do not close the `kubectl proxy` process), create the
333 | `ClusterRoleBinding` resource.
334 |
335 | ```shell-session
336 | $ kubectl apply -f https://raw.githubusercontent.com/hashicorp/learn-terraform-provision-gke-cluster/main/kubernetes-dashboard-admin.rbac.yaml
337 | ```
338 |
339 | Then, generate the authorization token.
340 |
341 | ```shell-session
342 | $ kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep service-controller-token | awk '{print $1}')
343 |
344 | Name: service-controller-token-m8m7j
345 | Namespace: kube-system
346 | Labels:
347 | Annotations: kubernetes.io/service-account.name: service-controller
348 | kubernetes.io/service-account.uid: bc99ddad-6be7-11ea-a3c7-42010a800017
349 |
350 | Type: kubernetes.io/service-account-token
351 |
352 | Data
353 | ====
354 | token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9...
355 | ca.crt: 1119 bytes
356 | namespace: 11 bytes
357 | ```
358 |
359 | Select "Token" on the Dashboard UI then copy and paste the entire token you
360 | receive into the
361 | [dashboard authentication screen](http://127.0.0.1:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/)
362 | to sign in. You are now signed in to the dashboard for your Kubernetes cluster.
363 |
364 | 
365 |
366 | ### (Optional) GKE nodes and node pool
367 |
368 | On the Dashboard UI, click _Nodes_ on the left hand menu.
369 |
370 | Notice there are 6 nodes in your cluster, even though
371 | [`gke_num_nodes` in your `gke.tf` file](https://github.com/hashicorp/learn-terraform-provision-gke-cluster/blob/main/gke.tf#L11)
372 | was set to 2. This is because a node pool was provisioned in each of the three zones
373 | within the region to provide high availability.
374 |
375 | ```shell-session
376 | $ gcloud container clusters describe dos-terraform-edu-gke --region us-central1 --format='default(locations)'
377 | locations:
378 | - us-central1-b
379 | - us-central1-f
380 | - us-central1-c
381 | ```
382 |
383 | -> **NOTE** Replace `dos-terraform-edu-gke` with the `kubernetes_cluster_name` value from your Terraform output.
384 |
385 | 
386 |
387 | ## Clean up your workspace
388 |
389 | Congratulations, you have provisioned a GKE cluster with a separated node pool,
390 | configured `kubectl`, and deployed the Kubernetes dashboard.
391 |
392 | If you'd like to learn how to manage your GKE cluster using the Terraform
393 | Kubernetes Provider, leave your cluster running and continue to the
394 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider).
395 |
396 | ~> **Note:** This directory is **only** used to provision a GKE cluster with Terraform.
397 | By keeping the Terraform configuration for provisioning a Kubernetes cluster and
398 | managing a Kubernetes cluster resources separate, changes in one repository don't
399 | affect the other. In addition, the modularity makes the configuration more
400 | readable and enables you to scope different permissions to each workspace.
401 |
402 | If not, remember to destroy any resources you create once you are done with this
403 | tutorial. Run the `destroy` command and confirm with `yes` in your terminal.
404 |
405 | ```shell-session
406 | $ terraform destroy
407 | ```
408 |
409 | ## Next steps
410 |
411 | For more information on the GKE resource, please visit the
412 | [Google Cloud provider documentation](https://registry.terraform.io/providers/hashicorp/google/3.14.0/docs/resources/container_cluster).
413 |
414 | For steps on how to manage Kubernetes resources in your GKE cluster or any other
415 | already created Kubernetes cluster, visit the
416 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider).
417 |
418 | For a more in-depth Kubernetes example, [Deploy Consul and Vault on a Kubernetes Cluster using Run Triggers](/terraform/tutorials/kubernetes/kubernetes-consul-vault-pipeline) (this tutorial is GKE based).
419 |
--------------------------------------------------------------------------------
/plugins/anchor-links/fixtures/01-nested-heading/tutorials-nomad-format-output-with-templates.mdx:
--------------------------------------------------------------------------------
1 |
8 |
9 | When using Nomad at an intermediate to advanced level, you'll need to interface with other systems or customize output generated by Nomad. The `-t` flag is a powerful way to pass a template in Go's text/template format to
10 | several of the Nomad commands that generate output based on the API. This allows
11 | you to filter and customize the output to meet your specific needs.
12 |
13 | The commands that allow for the `-t` flag are:
14 |
15 | - `nomad acl policy list`
16 | - `nomad acl token list`
17 | - `nomad alloc status`
18 | - `nomad deployment list`
19 | - `nomad deployment status`
20 | - `nomad eval status`
21 | - `nomad job deployments`
22 | - `nomad job history`
23 | - `nomad job inspect`
24 | - `nomad namespace list`
25 | - `nomad node status`
26 | - `nomad plugin status`
27 | - `nomad quota list`
28 | - `nomad volume status`
29 |
30 | This tutorial will teach you how to explore the objects that are returned to
31 | the template engine and how to use template syntax to format the output into
32 | a custom form.
33 |
34 | ## Prerequisites
35 |
36 | This guide assumes the following:
37 |
38 | - Familiarity with Go's text/template syntax. You can learn more about it in the
39 | [Learn Go Template Syntax] tutorial
40 |
41 | - That you are running these commands against a Nomad cluster with an active
42 | workload. You can create a minimal environment using a dev agent, started with
43 | `nomad agent -dev`, then running at least one Nomad job. You can use
44 | `nomad init -short` to create a sample Docker job or provide your own Nomad
45 | job.
46 |
47 | ## Note the shell-specific syntax
48 |
49 | When using the -t flag, you need to correctly handle string literals based on
50 | your shell environment. In a POSIX shell, you can run the following with a
51 | single quote:
52 |
53 | ```shell-session
54 | $ nomad node status -t '{{printf "%#+v" .}}'
55 | ```
56 |
57 | In a Windows shell (for example, PowerShell), use single
58 | quotes but escape the double quotes inside the parameter as follows:
59 |
60 | ```powershell
61 | PS> nomad node status -t '{{printf \"%#+v\" .}}'
62 | ```
63 |
64 | In this tutorial, you can select examples with the proper escaping using the
65 | tabs above the snippets.
66 |
67 | ## Start discovering objects
68 |
69 | The `printf` function and the `"%#+v"` format string are critical tools for you
70 | in exploring an unfamiliar template context.
71 |
72 | Run the following command to output the context being passed to the template
73 | in Go object format.
74 |
75 |
76 |
77 | ## This is a single nested heading within one Tabs (tabbedSectionDepth should be 1)
78 |
79 | ```shell-session
80 | $ nomad node status -t '{{printf "%#+v" .}}'
81 | ```
82 |
83 |
84 |
85 |
86 | ```powershell
87 | PS> nomad node status -t '{{printf \"%#+v\" .}}'
88 | ```
89 |
90 |
91 |
92 |
93 | ```plaintext
94 | []*api.NodeListStub{(*api.NodeListStub)(0xc0003fa160), (*api.NodeListStub)(0xc0003fa0b0), (*api.NodeListStub)(0xc0003fa000)}
95 | ```
96 |
97 | The output indicates that the context consists of a list (`[]`) of pointers
98 | (`*`) to `api.NodeListStub` objects. The list will also show one `NodeListStub`
99 | object per client node in your cluster's server state.
100 |
101 | You can explore these `api.NodeListStub` objects by using the `range` control over
102 | the list.
103 |
104 |
105 |
106 |
107 | ```shell-session
108 | $ nomad node status -t '{{range .}}{{printf "%#+v" .}}{{end}}'
109 | ```
110 |
111 |
112 |
113 |
114 | ```powershell
115 | PS> nomad node status -t '{{range .}}{{printf \"%#+v\" .}}{{end}}'
116 | ```
117 |
118 |
119 |
120 |
121 | ```plaintext
122 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ...
123 | ```
124 |
125 | If you have a lot of client nodes in your cluster state, this output will be
126 | unwieldy. In that case, you can use `with` and the index function to get the
127 | first list item.
128 |
129 |
130 |
131 |
132 | ```shell-session
133 | $ nomad node status -t '{{with index . 0}}{{printf "%#+v" .}}{{end}}'
134 | ```
135 |
136 |
137 |
138 |
139 | ```powershell
140 | PS> nomad node status -t '{{with index . 0}}{{printf \"%#+v\" .}}{{end}}'
141 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ...
142 | ```
143 |
144 |
145 |
146 |
147 | ```plaintext
148 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ...
149 | ```
150 |
151 | Finally, output `Name` and `Version` for each client in the cluster.
152 |
153 |
154 |
155 |
156 | ```shell-session
157 | $ nomad node status -t '{{range .}}{{printf "%s: %s\n" .Name .Version}}{{end}}'
158 | ```
159 |
160 |
161 |
162 |
163 | ```powershell
164 | PS> nomad node status -t '{{range .}}{{printf \"%s: %s\n\" .Name .Version}}{{end}}'
165 | ```
166 |
167 |
168 |
169 |
170 | ```plaintext
171 | nomad-client-2.node.consul: 0.12.0
172 | nomad-client-3.node.consul: 0.12.0
173 | nomad-client-1.node.consul: 0.12.0
174 | ```
175 |
176 | ## Make quiet output
177 |
178 | Suppose you want to create a reduced version of the `nomad job status` output
179 | to show just the running job IDs in your cluster and nothing else.
180 |
181 |
182 |
183 |
184 | ```shell-session
185 | $ nomad job inspect -t '{{range .}}{{if eq .Status "running"}}{{ println .Name}}{{end}}{{end}}'
186 | ```
187 |
188 |
189 |
190 |
191 | ```powershell
192 | PS> nomad job inspect -t '{{range .}}{{if eq .Status \"running\"}}{{ println .Name}}{{end}}{{end}}'
193 | ```
194 |
195 |
196 |
197 |
198 | Nomad will output the job IDs for every running job in your cluster. For example:
199 |
200 | ```plaintext
201 | fabio
202 | sockshop-carts
203 | sockshop-catalogue
204 | sockshop-frontend
205 | sockshop-infra
206 | sockshop-orders
207 | sockshop-payment
208 | sockshop-shipping
209 | sockshop-user
210 | ```
211 |
212 | ### Challenge yourself
213 |
214 | Allocations have a slightly different shape. How might you create similar output
215 | from the `nomad alloc status` command? Make sure that your Nomad cluster has at
216 | least one allocation running and then use the printf technique from earlier to
217 | explore the values sent into the template.
218 |
219 |
220 |
221 |
222 | Print the context that you are passed from the command using the printf command.
223 |
224 |
225 |
226 |
227 | ```shell-session
228 | $ nomad alloc status -t '{{printf "%#+v" . }}'
229 | ```
230 |
231 |
232 |
233 |
234 | ```powershell
235 | PS> nomad alloc status -t '{{printf \"%#+v\" . }}'
236 | ```
237 |
238 |
239 |
240 |
241 | ```plaintext
242 | []*api.AllocationListStub ...
243 | ```
244 |
245 | Note that the first thing that you receive is a list (`[]`) of pointers (`*`) to
246 | `AllocationListStub` objects.
247 |
248 | Use `range` to traverse each item in the list.
249 |
250 |
251 |
252 |
253 | ```shell-session
254 | $ nomad alloc status -t '{{range .}}{{printf "%#+v" . }}{{end}}'
255 | ```
256 |
257 |
258 |
259 |
260 | ```powershell
261 | PS> nomad alloc status -t '{{range .}}{{printf \"%#+v\" . }}{{end}}'
262 | ```
263 |
264 |
265 |
266 |
267 | ```plaintext
268 | &api.AllocationListStub{ID:"30663b68-4d8a-aada-4ad2-011b1acae3a1", EvalID:"c5eda90b-f675-048e-b2f7-9ced30e4916b", Name:"sockshop-user.userdb[0]", Namespace:"default", NodeID:"3be35c12-70aa-8816-195e-a4630a457727", NodeName:"nomad-client-3.node.consul", JobID:"sockshop-user", JobType:"service", JobVersion:0x0, ...
269 | ```
270 |
271 | If you have a lot of allocations running, this could get unwieldy. In that case,
272 | you can use `with` and the `index` function to get the first list item.
273 |
274 |
275 |
276 |
277 | ```shell-session
278 | $ nomad alloc status -t '{{with index . 0}}{{printf "%#+v" . }}{{end}}'
279 | ```
280 |
281 |
282 |
283 |
284 | ```powershell
285 | PS> nomad alloc status -t '{{with index . 0}}{{printf \"%#+v\" . }}{{end}}'
286 | ```
287 |
288 |
289 |
290 |
291 | ```plaintext
292 | &api.AllocationListStub{ID:"30663b68-4d8a-aada-4ad2-011b1acae3a1", EvalID:"c5eda90b-f675-048e-b2f7-9ced30e4916b", Name:"sockshop-user.userdb[0]", Namespace:"default", NodeID:"3be35c12-70aa-8816-195e-a4630a457727", NodeName:"nomad-client-3.node.consul", JobID:"sockshop-user", JobType:"service", JobVersion:0x0, ...
293 | ```
294 |
295 | The fields on the AllocationListStub object that give insight into the running
296 | state of an allocation are `DesiredStatus` and `ClientStatus`.
297 |
298 | -> **Did you know?** The definition of an [AllocationListStub][] object and
299 | valid values for the DesiredStatus and ClientStatus are located in Nomad's
300 | [api package][]. Take a moment to look at it and see what other information you
301 | might be interested in displaying with templates.
302 |
303 | Update your template to show items with a DesiredStatus of "run" and a client
304 | status of "running" or "pending."
305 |
306 |
307 |
308 |
309 | ```shell-session
310 | $ nomad alloc status -t '{{range .}}{{if and (eq .DesiredStatus "run") (or (eq .ClientStatus "running") (eq .ClientStatus "pending"))}}{{println .ID}}{{end}}{{end}}'
311 | ```
312 |
313 |
314 |
315 |
316 | ```powershell
317 | PS> nomad alloc status -t '{{range .}}{{if and (eq .DesiredStatus \"run\") (or (eq .ClientStatus \"running\") (eq .ClientStatus \"pending\"))}}{{println .ID}}{{end}}{{end}}'
318 | ```
319 |
320 |
321 |
322 |
323 | ```plaintext
324 | 30663b68-4d8a-aada-4ad2-011b1acae3a1
325 | 11b916da-d679-1718-26f3-f6cd499bfdb8
326 | 68bcb157-359f-9293-d091-5a8ef71475ad
327 | ...
328 | ```
329 |
330 | You now have a list of the IDs for all of the allocations running in your Nomad
331 | cluster.
332 |
333 |
334 |
335 |
336 | ## Retrieve a template from file
337 |
338 | Using the command line to write templates becomes challenging
339 | as the template becomes more complex.
340 |
341 | By writing a template in its own file, you can use comments, span multiple lines, and indent conditionals in order to make them more readable to you and to other operators.
342 |
343 | Consider using some of these techniques
344 | to include the template data into the command.
345 |
346 |
347 |
348 |
349 |
350 | Create a file named running_jobs.tmpl with the following content.
351 |
352 | ```plaintext
353 | {{- /*
354 | Get Running Jobs
355 | Run with `nomad job inspect -t "$(cat running_jobs.tmpl)"`
356 | */ -}}
357 | {{- range . -}}
358 | {{- if eq .Status "running" -}}
359 | {{- println .Name -}}
360 | {{- end -}}
361 | {{- end -}}
362 | ```
363 |
364 | Now, use command substitution to pass the template file's contents to the command.
365 |
366 | ```shell-session
367 | $ nomad job inspect -t "$(cat running_jobs.tmpl)"
368 | ```
369 |
370 |
371 |
372 |
373 |
374 | Create a file named running_jobs.tmpl with the following content.
375 |
376 | ```plaintext
377 | {{- /*
378 | Get Running Jobs
379 | Run with:
380 | $content=Get-Content running_jobs.tmpl -Raw; nomad job inspect -t $content
381 | */ -}}
382 | {{- range . -}}
383 | {{- if eq .Status \"running\" -}}
384 | {{- println .Name -}}
385 | {{- end -}}
386 | {{- end -}}
387 | ```
388 |
389 | Now, read the file into a variable and pass that variable to the command.
390 |
391 | ```powershell
392 | PS> $content=Get-Content running_jobs.tmpl -Raw; nomad job inspect -t $content
393 | ```
394 |
395 |
396 |
397 |
398 |
399 | ## Learn more
400 |
401 | In this tutorial, you learned how to:
402 |
403 | - Customize the output of several Nomad commands using Go's text/template
404 | syntax.
405 |
406 | - Use the `printf` function to discover what is available in the template's
407 | context.
408 |
409 | - Use a template definition contained in a file as part of the command.
410 |
411 | Learn more about templating in other tutorials in the Nomad Templating
412 | Collection.
413 |
414 | [learn go template syntax]: /nomad/tutorials/templates/go-template-syntax
415 | [allocationliststub]: https://godoc.org/github.com/hashicorp/nomad/api#AllocationListStub
416 | [api package]: https://godoc.org/github.com/hashicorp/nomad/api
417 |
--------------------------------------------------------------------------------
/plugins/anchor-links/fixtures/02-nested-headings/tutorial-terraform-aks.mdx:
--------------------------------------------------------------------------------
1 |
10 |
11 | The Azure Kubernetes Service (AKS) is a fully managed Kubernetes service for deploying, managing, and scaling containerized applications on Azure.
12 |
13 | In this tutorial, you will deploy a 2 node AKS cluster on your default VPC using Terraform then access its Kubernetes dashboard.
14 |
15 | ~> **Warning!** If you're not using an account that qualifies under the Azure
16 | [free tier](https://azure.microsoft.com/en-us/free/), you may be charged to run these
17 | examples. The most you should be charged should only be a few dollars, but
18 | we're not responsible for any charges that you may incur.
19 |
20 | ### Why deploy with Terraform?
21 |
22 | While you could use the built-in Azure provisioning processes (UI, CLI) for AKS clusters, Terraform provides you with several benefits:
23 |
24 | - **Unified Workflow** - If you are already deploying infrastructure to Azure with Terraform, your AKS cluster can fit into that workflow. You can also deploy applications into your AKS cluster using Terraform.
25 |
26 | - **Full Lifecycle Management** - Terraform doesn't only create resources, it updates, and deletes tracked resources without requiring you to inspect the API to identify those resources.
27 |
28 | - **Graph of Relationships** - Terraform understands dependency relationships between resources. For example, an Azure Kubernetes cluster needs to be associated with a resource group, Terraform won't attempt to create the cluster if the resource group failed to create.
29 |
30 | ## Prerequisites
31 |
32 | The tutorial assumes some basic familiarity with Kubernetes and `kubectl` but does
33 | not assume any pre-existing deployment.
34 |
35 | It also assumes that you are familiar with the usual Terraform plan/apply
36 | workflow. If you're new to Terraform itself, refer first to the Getting Started
37 | [tutorial](/terraform/tutorials/azure-get-started).
38 |
39 | For this tutorial, you will need
40 |
41 | - an [Azure account](https://portal.azure.com/#home)
42 | - a configured Azure CLI
43 | - `kubectl`
44 |
45 |
46 |
47 |
48 | ## This is a single nested heading within a Tabs (tabbedSectionDepth should be 1)
49 |
50 | In order for Terraform to run operations on your behalf, you must install and
51 | configure the Azure CLI tool. To install the Azure CLI, follow
52 | [these instructions](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) or choose a package manager based on your operating system.
53 |
54 |
55 |
56 |
57 | ## This is a single nested heading within two Tabs (tabbedSectionDepth should be 2)
58 |
59 | You can also use the package manager [`homebrew`](https://formulae.brew.sh/) to install the Azure CLI.
60 |
61 | ```shell-session
62 | $ brew install azure-cli
63 | ```
64 |
65 |
66 |
67 |
68 |
69 | You can also use the package manager [`Chocolatey`](https://chocolatey.org/) to install the Azure CLI.
70 |
71 | ```shell-session
72 | $ choco install azure-cli
73 | ```
74 |
75 |
76 |
77 |
78 |
79 | After you've installed the Azure CLI, login into Azure by running:
80 |
81 | ```shell-session
82 | $ az login
83 | ```
84 |
85 |
86 |
87 |
88 |
89 | To install the `kubectl` (Kubernetes CLI), follow [these instructions](https://kubernetes.io/docs/tasks/tools/install-kubectl/) or choose a package manager based on your operating system.
90 |
91 |
92 |
93 |
94 |
95 | Use the package manager [`homebrew`](https://formulae.brew.sh/) to install `kubectl`.
96 |
97 | ```shell-session
98 | $ brew install kubernetes-cli
99 | ```
100 |
101 |
102 |
103 |
104 |
105 | Use the package manager [`Chocolatey`](https://chocolatey.org/) to install `kubectl`.
106 |
107 | ```shell-session
108 | $ choco install kubernetes-cli
109 | ```
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 | ## Set up and initialize your Terraform workspace
118 |
119 | In your terminal, clone the [following repository](https://github.com/hashicorp/learn-terraform-provision-aks-cluster).
120 | It contains the example configuration used in this tutorial.
121 |
122 | ```shell-session
123 | $ git clone https://github.com/hashicorp/learn-terraform-provision-aks-cluster
124 | ```
125 |
126 | You can explore this repository by changing directories or navigating in your UI.
127 |
128 | ```shell-session
129 | $ cd learn-terraform-provision-aks-cluster
130 | ```
131 |
132 | In here, you will find three files used to provision the AKS cluster.
133 |
134 | 1. [`aks-cluster.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/aks-cluster.tf) provisions a
135 | resource group and an AKS cluster. The `default_node_pool` defines the
136 | number of VMs and the VM type the cluster uses.
137 |
138 | ```hcl
139 | resource "azurerm_kubernetes_cluster" "default" {
140 | name = "${random_pet.prefix.id}-aks"
141 | location = azurerm_resource_group.default.location
142 | resource_group_name = azurerm_resource_group.default.name
143 | dns_prefix = "${random_pet.prefix.id}-k8s"
144 |
145 | default_node_pool {
146 | name = "default"
147 | node_count = 2
148 | vm_size = "Standard_B2s"
149 | os_disk_size_gb = 30
150 | }
151 |
152 | service_principal {
153 | client_id = var.appId
154 | client_secret = var.password
155 | }
156 |
157 | role_based_access_control_enabled = true
158 |
159 | tags = {
160 | environment = "Demo"
161 | }
162 | }
163 | ```
164 |
165 | 1. [`variables.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/variables.tf) declares the `appId` and `password` variables so Terraform can reference them in its configuration
166 |
167 | 1. [`terraform.tfvars`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/terraform.tfvars) defines the `appId` and `password` variables to authenticate to Azure
168 |
169 | 1. [`outputs.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf) declares values that can be useful to interact with your AKS cluster
170 |
171 | 1. [`versions.tf`](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/versions.tf) sets the Terraform version to at least 0.14 and defines the [`required_provider`](/terraform/language/providers/requirements#requiring-providers) block
172 |
173 | ### Create an Active Directory service principal account
174 |
175 | There are many ways to authenticate to the Azure provider. In this tutorial, you
176 | will use an Active Directory service principal account. You can learn how to
177 | authenticate using a different method [here](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#authenticating-to-azure).
178 |
179 | First, you need to create an Active Directory service principal account using
180 | the Azure CLI. You should see something like the following.
181 |
182 | ```shell-session
183 | $ az ad sp create-for-rbac --skip-assignment
184 | {
185 | "appId": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
186 | "displayName": "azure-cli-2019-04-11-00-46-05",
187 | "name": "http://azure-cli-2019-04-11-00-46-05",
188 | "password": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
189 | "tenant": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
190 | }
191 | ```
192 |
193 | ### Update your `terraform.tfvars` file
194 |
195 | Replace the values in your `terraform.tfvars` file with your `appId` and
196 | `password`. Terraform will use these values to authenticate to Azure before
197 | provisioning your resources. Your `terraform.tfvars` file should look like the
198 | following.
199 |
200 | ```plaintext
201 | # terraform.tfvars
202 | appId = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
203 | password = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
204 | ```
205 |
206 | ### Initialize Terraform
207 |
208 | After you have saved your customized variables file, initialize your Terraform
209 | workspace, which will download the provider and initialize it with the values
210 | provided in your `terraform.tfvars` file.
211 |
212 | ```shell-session
213 | $ terraform init
214 | Initializing the backend...
215 |
216 | Initializing provider plugins...
217 | - Reusing previous version of hashicorp/random from the dependency lock file
218 | - Reusing previous version of hashicorp/azurerm from the dependency lock file
219 | - Installing hashicorp/random v3.0.0...
220 | - Installed hashicorp/random v3.0.0 (signed by HashiCorp)
221 | - Installing hashicorp/azurerm v3.0.2...
222 | - Installed hashicorp/azurerm v3.0.2 (signed by HashiCorp)
223 |
224 | Terraform has been successfully initialized!
225 |
226 | You may now begin working with Terraform. Try running "terraform plan" to see
227 | any changes that are required for your infrastructure. All Terraform commands
228 | should now work.
229 |
230 | If you ever set or change modules or backend configuration for Terraform,
231 | rerun this command to reinitialize your working directory. If you forget, other
232 | commands will detect it and remind you to do so if necessary.
233 | ```
234 |
235 | ## Provision the AKS cluster
236 |
237 | In your initialized directory, run `terraform apply` and review the planned actions.
238 | Your terminal output should indicate the plan is running and what resources will be created.
239 |
240 | ```shell-session
241 | $ terraform apply
242 | An execution plan has been generated and is shown below.
243 | Resource actions are indicated with the following symbols:
244 | + create
245 |
246 | Terraform will perform the following actions:
247 |
248 | ## ...
249 |
250 | Plan: 1 to add, 0 to change, 0 to destroy.
251 |
252 | ## ...
253 | ```
254 |
255 | You can see this terraform apply will provision an Azure resource group and an
256 | AKS cluster. Confirm the apply with a `yes`.
257 |
258 | This process should take approximately 5 minutes. Upon successful application,
259 | your terminal prints the outputs defined in `aks-cluster.tf`.
260 |
261 | ```plaintext hideClipboard
262 | Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
263 |
264 | Outputs:
265 |
266 | kubernetes_cluster_name = light-eagle-aks
267 | resource_group_name = light-eagle-rg
268 | ```
269 |
270 | ## Configure kubectl
271 |
272 | Now that you've provisioned your AKS cluster, you need to configure `kubectl`.
273 |
274 | Run the following command to retrieve the access credentials for your cluster
275 | and automatically configure `kubectl`.
276 |
277 | ```shell-session
278 | $ az aks get-credentials --resource-group $(terraform output -raw resource_group_name) --name $(terraform output -raw kubernetes_cluster_name)
279 | Merged "light-eagle-aks" as current context in /Users/dos/.kube/config
280 | ```
281 |
282 | The [resource group name](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf#L1)
283 | and [Kubernetes Cluster name](https://github.com/hashicorp/learn-terraform-provision-aks-cluster/blob/main/outputs.tf#L5)
284 | correspond to the output variables shown after the successful Terraform run.
285 |
286 | ## Access Kubernetes Dashboard
287 |
288 | To verify your cluster's configuration, visit
289 | the Azure Portal's Kubernetes resource view.
290 | [Azure recommends](https://docs.microsoft.com/en-us/azure/aks/kubernetes-dashboard#start-the-kubernetes-dashboard)
291 | using this view over the default Kubernetes dashboard, since the AKS dashboard
292 | add-on is deprecated for Kubernetes versions 1.19+.
293 |
294 | Run the following command to generate the Azure portal link.
295 |
296 | ```shell-session
297 | $ az aks browse --resource-group $(terraform output -raw resource_group_name) --name $(terraform output -raw kubernetes_cluster_name)
298 | Kubernetes resources view on https://portal.azure.com/#resource/subscriptions/aaaaa/resourceGroups/light-eagle-rg/providers/Microsoft.ContainerService/managedClusters/light-eagle-aks/workloads
299 | ```
300 |
301 | Go to the URL in your preferred browser to view the Kubernetes resource view.
302 |
303 | 
304 |
305 | ## Clean up your workspace
306 |
307 | Congratulations, you have provisioned an AKS cluster, configured `kubectl`,
308 | and visited the Kubernetes dashboard.
309 |
310 | If you'd like to learn how to manage your AKS cluster using the Terraform
311 | Kubernetes Provider, leave your cluster running and continue to the
312 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider).
313 |
314 | ~> **Note:** This directory is **only** used to provision an AKS cluster with Terraform.
315 | By keeping the Terraform configuration for provisioning a Kubernetes cluster and
316 | managing a Kubernetes cluster resources separate, changes in one repository don't
317 | affect the other. In addition, the modularity makes the configuration more
318 | readable and enables you to scope different permissions to each workspace.
319 |
320 | If not, remember to destroy any resources you create once you are done with this
321 | tutorial. Run the `destroy` command and confirm with `yes` in your terminal.
322 |
323 | ```shell-session
324 | $ terraform destroy
325 | ```
326 |
327 | ## Next steps
328 |
329 | For more information on the AKS resource, visit the
330 | [Azure provider documentation](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster).
331 |
332 | For steps on how to manage Kubernetes resources your AKS cluster or any other
333 | already created Kubernetes cluster, visit the
334 | [Kubernetes provider tutorial](/terraform/tutorials/kubernetes/kubernetes-provider).
335 |
336 | To use run triggers to deploy a Kubernetes Cluster, Consul and Vault
337 | on Google Cloud, visit the [Deploy Consul and Vault on a Kubernetes Cluster using Run Triggers tutorial](/terraform/tutorials/kubernetes/kubernetes-consul-vault-pipeline).
338 |
--------------------------------------------------------------------------------
/plugins/anchor-links/fixtures/02-nested-headings/tutorials-nomad-format-output-with-templates.mdx:
--------------------------------------------------------------------------------
1 |
8 |
9 | When using Nomad at an intermediate to advanced level, you'll need to interface with other systems or customize output generated by Nomad. The `-t` flag is a powerful way to pass a template in Go's text/template format to
10 | several of the Nomad commands that generate output based on the API. This allows
11 | you to filter and customize the output to meet your specific needs.
12 |
13 | The commands that allow for the -t flag are:
14 |
15 | - `nomad acl policy list`
16 | - `nomad acl token list`
17 | - `nomad alloc status`
18 | - `nomad deployment list`
19 | - `nomad deployment status`
20 | - `nomad eval status`
21 | - `nomad job deployments`
22 | - `nomad job history`
23 | - `nomad job inspect`
24 | - `nomad namespace list`
25 | - `nomad node status`
26 | - `nomad plugin status`
27 | - `nomad quota list`
28 | - `nomad volume status`
29 |
30 | This tutorial will teach you how to explore the objects that are returned to
31 | the template engine and how to use template syntax to format the output into
32 | a custom form.
33 |
34 | ## Prerequisites
35 |
36 | This guide assumes the following:
37 |
38 | - Familiarity with Go's text/template syntax. You can learn more about it in the
39 | [Learn Go Template Syntax] tutorial
40 |
41 | - That you are running these commands against a Nomad cluster with an active
42 | workload. You can create a minimal environment using a dev agent, started with
43 | `nomad agent -dev`, then running at least one Nomad job. You can use
44 | `nomad init -short` to create a sample Docker job or provide your own Nomad
45 | job.
46 |
47 | ## Note the shell-specific syntax
48 |
49 | When using the -t flag, you need to correctly handle string literals based on
50 | your shell environment. In a POSIX shell, you can run the following with a
51 | single quote:
52 |
53 | ```shell-session
54 | $ nomad node status -t '{{printf "%#+v" .}}'
55 | ```
56 |
57 | In a Windows shell (for example, PowerShell), use single
58 | quotes but escape the double quotes inside the parameter as follows:
59 |
60 | ```powershell
61 | PS> nomad node status -t '{{printf \"%#+v\" .}}'
62 | ```
63 |
64 | In this tutorial, you can select examples with the proper escaping using the
65 | tabs above the snippets.
66 |
67 | ## Start discovering objects
68 |
69 | The `printf` function and the `"%#+v"` format string are critical tools for you
70 | in exploring an unfamiliar template context.
71 |
72 | Run the following command to output the context being passed to the template
73 | in Go object format.
74 |
75 |
76 |
77 | ## This is a single nested heading within one Tabs (tabbedSectionDepth should be 1)
78 |
79 | ```shell-session
80 | $ nomad node status -t '{{printf "%#+v" .}}'
81 | ```
82 |
83 |
84 |
85 |
86 | ```powershell
87 | PS> nomad node status -t '{{printf \"%#+v\" .}}'
88 | ```
89 |
90 |
91 |
92 |
93 | ```plaintext
94 | []*api.NodeListStub{(*api.NodeListStub)(0xc0003fa160), (*api.NodeListStub)(0xc0003fa0b0), (*api.NodeListStub)(0xc0003fa000)}
95 | ```
96 |
97 | The output indicates that the context consists of a list (`[]`) of pointers
98 | (`*`) to `api.NodeListStub` objects. The list will also show one NodeListStub
99 | object per client node in your cluster's server state.
100 |
101 | You can explore these api.NodeListStub object by using the `range` control over
102 | the list.
103 |
104 |
105 |
106 |
107 | ```shell-session
108 | $ nomad node status -t '{{range .}}{{printf "%#+v" .}}{{end}}'
109 | ```
110 |
111 |
112 |
113 |
114 | ```powershell
115 | PS> nomad node status -t '{{range .}}{{printf \"%#+v\" .}}{{end}}'
116 | ```
117 |
118 |
119 |
120 |
121 | ```plaintext
122 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ...
123 | ```
124 |
125 | If you have a lot of client nodes in your cluster state, this output will be
126 | unwieldy. In that case, you can use `with` and the `index` function to get the
127 | first list item.
128 |
129 |
130 |
131 |
132 | ```shell-session
133 | $ nomad node status -t '{{with index . 0}}{{printf "%#+v" .}}{{end}}'
134 | ```
135 |
136 |
137 |
138 |
139 | ```powershell
140 | PS> nomad node status -t '{{with index . 0}}{{printf \"%#+v\" .}}{{end}}'
141 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ...
142 | ```
143 |
144 |
145 |
146 |
147 | ```plaintext
148 | &api.NodeListStub{Address:"10.0.2.52", ID:"4f60bc83-71a2-7790-b120-4e55d0e6ed34", Datacenter:"dc1", Name:"nomad-client-2.node.consul", NodeClass:"", Version:"0.12.0", Drain:false, SchedulingEligibility:"eligible", Status:"ready", ...
149 | ```
150 |
151 | Finally, output `Name` and `Version` for each client in the cluster.
152 |
153 |
154 |
155 |
156 | ```shell-session
157 | $ nomad node status -t '{{range .}}{{printf "%s: %s\n" .Name .Version}}{{end}}'
158 | ```
159 |
160 |
161 |
162 |
163 | ```powershell
164 | PS> nomad node status -t '{{range .}}{{printf \"%s: %s\n\" .Name .Version}}{{end}}'
165 | ```
166 |
167 |
168 |
169 |
170 | ```plaintext
171 | nomad-client-2.node.consul: 0.12.0
172 | nomad-client-3.node.consul: 0.12.0
173 | nomad-client-1.node.consul: 0.12.0
174 | ```
175 |
176 | ## Make quiet output
177 |
178 | Suppose you want to create a reduced version of the `nomad job status` output
179 | to show just the running job IDs in your cluster and nothing else.
180 |
181 |
182 |
183 |
184 | ```shell-session
185 | $ nomad job inspect -t '{{range .}}{{if eq .Status "running"}}{{ println .Name}}{{end}}{{end}}'
186 | ```
187 |
188 |
189 |
190 |
191 | ```powershell
192 | PS> nomad job inspect -t '{{range .}}{{if eq .Status \"running\"}}{{ println .Name}}{{end}}{{end}}'
193 | ```
194 |
195 |
196 |
197 |
198 | Nomad will output the job IDs for every running job in your cluster. For example:
199 |
200 | ```plaintext
201 | fabio
202 | sockshop-carts
203 | sockshop-catalogue
204 | sockshop-frontend
205 | sockshop-infra
206 | sockshop-orders
207 | sockshop-payment
208 | sockshop-shipping
209 | sockshop-user
210 | ```
211 |
212 | ### Challenge yourself
213 |
214 | Allocations have a slightly different shape. How might you create similar output
215 | from the `nomad alloc status` command? Make sure that your Nomad cluster has at
216 | least one allocation running and then use the printf technique from earlier to
217 | explore the values sent into the template.
218 |
219 |
220 |
221 |
222 | Print the context that you are passed from the command using the printf command.
223 |
224 |
225 |
226 | ## This is a single nested heading within one Tabs (tabbedSectionDepth should be 1)
227 |
228 |
229 | ```shell-session
230 | $ nomad alloc status -t '{{printf "%#+v" . }}'
231 | ```
232 |
233 |
234 |
235 |
236 | ```powershell
237 | PS> nomad alloc status -t '{{printf \"%#+v\" . }}'
238 | ```
239 |
240 |
241 |
242 |
243 | ```plaintext
244 | []*api.AllocationListStub ...
245 | ```
246 |
247 | Note that the first thing that you receive is a list (`[]`) of pointers (`*`) to
248 | `AllocationListStub` objects.
249 |
250 | Use `range` to traverse each item in the list.
251 |
252 |
253 |
254 |
255 | ```shell-session
256 | $ nomad alloc status -t '{{range .}}{{printf "%#+v" . }}{{end}}'
257 | ```
258 |
259 |
260 |
261 |
262 | ```powershell
263 | PS> nomad alloc status -t '{{range .}}{{printf \"%#+v\" . }}{{end}}'
264 | ```
265 |
266 |
267 |
268 |
269 | ```plaintext
270 | &api.AllocationListStub{ID:"30663b68-4d8a-aada-4ad2-011b1acae3a1", EvalID:"c5eda90b-f675-048e-b2f7-9ced30e4916b", Name:"sockshop-user.userdb[0]", Namespace:"default", NodeID:"3be35c12-70aa-8816-195e-a4630a457727", NodeName:"nomad-client-3.node.consul", JobID:"sockshop-user", JobType:"service", JobVersion:0x0, ...
271 | ```
272 |
273 | If you have a lot of allocations running, this could get unwieldy. In that case,
274 | you can use `with` and the `index` function to get the first list item.
275 |
276 |
277 |
278 |
279 | ```shell-session
280 | $ nomad alloc status -t '{{with index . 0}}{{printf "%#+v" . }}{{end}}'
281 | ```
282 |
283 |
284 |
285 |
286 | ```powershell
287 | PS> nomad alloc status -t '{{with index . 0}}{{printf \"%#+v\" . }}{{end}}'
288 | ```
289 |
290 |
291 |
292 |
293 | ```plaintext
294 | &api.AllocationListStub{ID:"30663b68-4d8a-aada-4ad2-011b1acae3a1", EvalID:"c5eda90b-f675-048e-b2f7-9ced30e4916b", Name:"sockshop-user.userdb[0]", Namespace:"default", NodeID:"3be35c12-70aa-8816-195e-a4630a457727", NodeName:"nomad-client-3.node.consul", JobID:"sockshop-user", JobType:"service", JobVersion:0x0, ...
295 | ```
296 |
297 | The fields on the AllocationListStub object that give insight into the running
298 | state of an allocation are `DesiredStatus` and `ClientStatus`.
299 |
300 | -> **Did you know?** The definition of an [AllocationListStub][] object and
301 | valid values for the DesiredStatus and ClientStatus are located in Nomad's
302 | [api package][]. Take a moment to look at it and see what other information you
303 | might be interested in displaying with templates.
304 |
305 | Update your template to show items with a DesiredStatus of "run" and a client
306 | status of "running" or "pending."
307 |
308 |
309 |
310 |
311 | ```shell-session
312 | $ nomad alloc status -t '{{range .}}{{if and (eq .DesiredStatus "run") (or (eq .ClientStatus "running") (eq .ClientStatus "pending"))}}{{println .ID}}{{end}}{{end}}'
313 | ```
314 |
315 |
316 |
317 |
318 | ```powershell
319 | PS> nomad alloc status -t '{{range .}}{{if and (eq .DesiredStatus \"run\") (or (eq .ClientStatus \"running\") (eq .ClientStatus \"pending\"))}}{{println .ID}}{{end}}{{end}}'
320 | ```
321 |
322 |
323 |
324 |
325 | ```plaintext
326 | 30663b68-4d8a-aada-4ad2-011b1acae3a1
327 | 11b916da-d679-1718-26f3-f6cd499bfdb8
328 | 68bcb157-359f-9293-d091-5a8ef71475ad
329 | ...
330 | ```
331 |
332 | You now have a list of the IDs for all of the allocations running in your Nomad
333 | cluster.
334 |
335 |
336 |
337 |
338 | ## Retrieve a template from file
339 |
340 | Using the command line to write templates becomes challenging
341 | as the template becomes more complex.
342 |
343 | By writing a template in its own file, you can use comments, span multiple lines, and indent conditionals in order to make them more readable to you and to other operators.
344 |
345 | Consider using some of these techniques
346 | to include the template data into the command.
347 |
348 |
349 |
350 |
351 |
352 | Create a file named running_jobs.tmpl with the following content.
353 |
354 | ```plaintext
355 | {{- /*
356 | Get Running Jobs
357 | Run with `nomad job inspect -t "$(cat running_jobs.tmpl)"`
358 | */ -}}
359 | {{- range . -}}
360 | {{- if eq .Status "running" -}}
361 | {{- println .Name -}}
362 | {{- end -}}
363 | {{- end -}}
364 | ```
365 |
366 | Now, use command substitution to pass the template file's contents to the command.
367 |
368 | ```shell-session
369 | $ nomad job inspect -t "$(cat running_jobs.tmpl)"
370 | ```
371 |
372 |
373 |
374 |
375 |
376 | Create a file named running_jobs.tmpl with the following content.
377 |
378 | ```plaintext
379 | {{- /*
380 | Get Running Jobs
381 | Run with:
382 | $content=Get-Content running_jobs.tmpl -Raw; nomad job inspect -t $content
383 | */ -}}
384 | {{- range . -}}
385 | {{- if eq .Status \"running\" -}}
386 | {{- println .Name -}}
387 | {{- end -}}
388 | {{- end -}}
389 | ```
390 |
391 | Now, use a subshell to read the file into a variable
392 |
393 | ```powershell
394 | PS> $content=Get-Content running_jobs.tmpl -Raw; nomad job inspect -t $content
395 | ```
396 |
397 |
398 |
399 |
400 |
401 | ## Learn more
402 |
403 | In this tutorial, you learned how to:
404 |
405 | - Customize the output of several Nomad commands using Go's text/template
406 | syntax.
407 |
408 | - Use the `printf` function to discover what is available in the template's
409 | context.
410 |
411 | - Use a template definition contained in a file as part of the command.
412 |
413 | Learn more about templating in other tutorials in the Nomad Templating
414 | Collection.
415 |
416 | [learn go template syntax]: /nomad/tutorials/templates/go-template-syntax
417 | [allocationliststub]: https://godoc.org/github.com/hashicorp/nomad/api#AllocationListStub
418 | [api package]: https://godoc.org/github.com/hashicorp/nomad/api
419 |
--------------------------------------------------------------------------------
/plugins/anchor-links/index.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) HashiCorp, Inc.
3 | * SPDX-License-Identifier: MPL-2.0
4 | */
5 |
6 | const generateSlug = require('../../generate_slug')
7 | const map = require('unist-util-map')
8 | const is = require('unist-util-is')
9 |
10 | // This plugin adds anchor links to headlines and lists that begin with inline
11 | // code blocks.
12 | //
13 | // NOTE: Some of the HTML code is duplicated in:
14 | // https://github.com/hashicorp/consul/blob/4f15f83dc64e2a9a95cb6b989719838b1f97015b/website/components/config-entry-reference/index.jsx#L84-L105
15 | // If updating the HTML code here, also update there.
16 | module.exports = function anchorLinksPlugin({
17 | compatibilitySlug,
18 | listWithInlineCodePrefix,
19 | headings,
20 | } = {}) {
21 | return function transformer(tree) {
22 | // this array keeps track of existing slugs to prevent duplicates per-page
23 | const links = []
24 |
25 | /**
26 | * Keep track of whether we're within .
27 | * If we're in tabbed sections, we may not want to show headings
28 | * in our table of contents.
29 | */
30 | let tabbedSectionDepth = 0
31 |
32 | return map(tree, (node) => {
33 | /**
34 | * Check if the lines in this node open and/or close .
35 | * - If it opens , increase the tabbedSectionDepth.
36 | * - If it closes , decrease the tabbedSectionDepth.
37 | *
38 | * NOTE: Some nodes are multiple lines and have also have multiple
39 | * opening/closing tags for .
40 | *
41 | * For example, here is a node that is 4 lines long, and each line must be
42 | * checked at individually:
43 | *
44 | *
45 | *
46 | *
47 | *
48 | *
49 | * Where this has happened in production:
50 | *
51 | * https://github.com/hashicorp/tutorials/blob/50b0284436561e6cbf402fb2aa25b5c0a15ef604/content/tutorials/terraform/aks.mdx?plain=1#L111-L114
52 | */
53 | const isHtmlOrJsxNode = node.type === 'html' || node.type === 'jsx'
54 | if (isHtmlOrJsxNode) {
55 | // Note that a single HTML node could potentially contain multiple tags
56 | const openTagMatches = node.value.match(/\ paragraph -> [inlineCode, ...etc]
80 | const liNode = node
81 | if (!is(liNode, 'listItem') || !liNode.children) return node
82 | const pNode = liNode.children[0]
83 | if (!is(pNode, 'paragraph') || !pNode.children) return node
84 | const codeNode = pNode.children[0]
85 | if (!is(codeNode, 'inlineCode')) return node
86 |
87 | return processListWithInlineCode(
88 | liNode,
89 | pNode,
90 | codeNode,
91 | compatibilitySlug,
92 | listWithInlineCodePrefix,
93 | links
94 | )
95 | })
96 | }
97 | }
98 |
99 | function processHeading(
100 | node,
101 | compatibilitySlug,
102 | links,
103 | headings,
104 | tabbedSectionDepth
105 | ) {
106 | const text = stringifyChildNodes(node)
107 | const level = node.depth
108 | const title = text
109 | .replace(/<\/?[^>]*>/g, '') // Strip html
110 | .replace(/\(\(#.*?\)\)/g, '') // Strip anchor link aliases
111 | .replace(/»/g, '') // Safeguard against double-running this plugin
112 | .replace(/\s+/g, ' ') // Collapse whitespace
113 | .trim()
114 |
115 | // generate the slug and use it as the headline's id property
116 | const slug = generateSlug(text, links)
117 | node.data = {
118 | ...node.data,
119 | hProperties: { ...node.data?.hProperties, id: slug },
120 | }
121 |
122 | /**
123 | * Handle anchor link aliases
124 | *
125 | * Note: depends on children of heading element! Expects first child,
126 | * at index 0, to be the text element. As well, aliases must be attached
127 | * to separate __target-h elements.
128 | */
129 | const aliases = processAlias(node, 0)
130 | if (aliases.length) node.children.unshift(...aliasesToNodes(aliases, 'h'))
131 |
132 | // if the compatibilitySlug option is present, we generate it and add it
133 | // if it doesn't already match the existing slug
134 | let slug2
135 | if (compatibilitySlug) {
136 | slug2 = compatibilitySlug(text)
137 | if (slug !== slug2) {
138 | node.children.unshift({
139 | type: 'html',
140 | value: `»`,
158 | })
159 |
160 | const headingData = {
161 | aliases,
162 | level,
163 | permalinkSlug,
164 | slug,
165 | title,
166 | tabbedSectionDepth,
167 | }
168 | headings?.push(headingData)
169 |
170 | return node
171 | }
172 |
173 | function processListWithInlineCode(
174 | liNode,
175 | pNode,
176 | codeNode,
177 | compatibilitySlug,
178 | prefix,
179 | links
180 | ) {
181 | // construct an id/slug based on value of node
182 | // if the prefix option is present, add it before the slug name
183 | const text = codeNode.value
184 | const slug = generateSlug(`${prefix ? `${prefix}-` : ''}${text}`, links)
185 |
186 | // handle anchor link aliases
187 | const aliases = processAlias(pNode, 1)
188 | if (aliases.length) liNode.children.unshift(...aliasesToNodes(aliases, 'lic'))
189 |
190 | // if the compatibilitySlug option is present, we generate it and add it
191 | // if it doesn't already match the existing slug
192 | let slug2
193 | if (compatibilitySlug) {
194 | slug2 = compatibilitySlug(text)
195 | if (slug !== slug2) {
196 | liNode.children.unshift({
197 | type: 'html',
198 | value: ` node, so clicking will set
216 | // the url to the anchor link.
217 | pNode.children[0] = {
218 | type: 'link',
219 | url: `#${permalinkSlug}`,
220 | data: {
221 | hProperties: {
222 | ariaLabel: `${generateSlug.generateAriaLabel(text)} permalink`,
223 | class: '__permalink-lic',
224 | },
225 | },
226 | children: [pNode.children[0]],
227 | }
228 |
229 | return liNode
230 | }
231 |
// Extract anchor-link aliases like ((#foo)) or ((#foo, #bar)) from the
// child of `node` at `startIndex`, returning an array of bare slugs
// (without the leading '#'). The alias text is removed from the node's
// visible content as a side effect. Returns [] when no alias is found.
function processAlias(node, startIndex = 0) {
  // Bail out early on anything that cannot possibly hold an alias: a
  // missing node, no children, or a start index past the last child.
  if (
    !node ||
    !node.children ||
    !node.children.length ||
    node.children.length <= startIndex
  )
    return []

  // Matches ((#foo)) or ((#foo, #bar)) anywhere in the string.
  //
  // NOTE: a stricter regex could anchor the alias directly after the
  // headline text (or inline code) rather than allowing it anywhere,
  // but that edge case hasn't warranted the extra complexity yet.
  const aliasRegex = /\s*\(\((#.*?)\)\)/

  // Most common case: the whole alias pattern lives in a single node.
  const candidate = node.children[startIndex]
  if (candidate.value && candidate.value.match(aliasRegex)) {
    return _processAliases(candidate, aliasRegex)
  }

  // Rarer case: the pattern is split across several nodes. This happens
  // when the alias contains escaped characters, e.g. ((#\_foo)) -- the
  // markdown parser breaks escaped characters into separate nodes. Look
  // for the opening "((#" in the first node as a minimum viable match.
  const openingRegex = /\s*\(\(#/
  if (!(candidate.value && candidate.value.match(openingRegex))) return []

  // Find the node containing the closing "))". If the pattern never
  // closes, this wasn't an alias after all.
  const closingRegex = /\)\)/
  const endIndex = node.children.findIndex(
    (child) => child.value && child.value.match(closingRegex)
  )
  if (endIndex < 0) {
    return []
  }

  // Merge the text of every node from the start through the closing node
  // into one string. Nodes without a `value` (non-text) are discarded --
  // we can't meaningfully handle them here.
  const combinedText = node.children
    .slice(startIndex, endIndex + 1)
    .reduce((acc, child) => (child.value ? acc + child.value : acc), '')

  // Replace the broken-up pieces with a single text node holding the
  // full alias text...
  const deleteCount = endIndex - startIndex + 1
  node.children.splice(startIndex, deleteCount, {
    type: 'text',
    value: combinedText,
  })

  // ...then process it exactly like the common single-node case.
  return _processAliases(node.children[startIndex], aliasRegex)
}
316 |
// Given a text node whose value matches aliasRegex, pull the capture
// group (e.g. "#foo, #bar") out, normalize it into an array of slugs
// without the leading '#', and strip the alias pattern from the node's
// visible text. Assumes the caller has verified the match exists.
function _processAliases(node, aliasRegex) {
  const [, captured] = node.value.match(aliasRegex)
  const aliases = captured.split(',').map((raw) => raw.trim().replace(/^#/, ''))

  // remove the entire alias pattern from the element's actual text
  node.value = node.value.replace(aliasRegex, '')

  return aliases || []
}
330 |
331 | // This converts a raw array of aliases to html "target" nodes
332 | function aliasesToNodes(aliases, id) {
333 | return aliases.map((alias) => {
334 | return {
335 | type: 'html',
336 | value: `My cool page
27 |
28 | Disclaimer: This content is not guaranteed to be in any way useful or
29 | truthful.
30 |
31 | The rest of the content...
32 | ```
33 |
34 | ### File Types
35 |
36 | If you include a `.md` or `.mdx` file, its contents will be imported directly into the file, like a partial. If it has `@include` statements nested within it, they will all resolve recursively, as seen in the primary examples above
37 |
38 | If any other file extension is included, it will be displayed as the contents of a code block, with the code block language tag set as the file extension. For example:
39 |
40 | ### Input
41 |
42 | Your main markdown file:
43 |
44 | ```md
45 | # My cool page
46 |
47 | @include "test.js"
48 |
49 | The rest of the content...
50 | ```
51 |
52 | `test.js`, in the same directory:
53 |
54 | ```js
55 | function sayHello(name) {
56 | console.log(`hello, ${name}!`)
57 | }
58 | ```
59 |
60 | ### Output
61 |
62 | ```html
63 | My cool page
64 |
65 |
66 | function sayHello(name) {
67 | console.log(`hello, ${name}!`)
68 | }
69 |
70 |
71 | The rest of the content...
72 | ```
73 |
74 | ### Options
75 |
76 | This plugin accepts two optional config options: `resolveFrom` and `resolveMdx`.
77 |
78 | #### `resolveFrom`
79 |
80 | If you pass this option along with a path, all partials will resolve from the path that was passed in. For example:
81 |
82 | ```js
83 | remark().use(includeMarkdown, { resolveFrom: path.join(__dirname, 'partials') })
84 | ```
85 |
86 | With this config, you'd be able to put all your includes in a partials folder and require only based on the filename regardless of the location of your markdown file.
87 |
88 | #### `resolveMdx`
89 |
90 | If you pass `true` for this option, `.mdx` partials will be processed using [`remark-mdx`](https://github.com/mdx-js/mdx/tree/main/packages/remark-mdx). This allows the use of custom components within partials. For example, with `next-mdx-remote`:
91 |
92 | ```js
93 | import { serialize } from 'next-mdx-remote/serialize'
94 | import { MDXRemote } from 'next-mdx-remote'
95 | import { includeMarkdown } from '@hashicorp/remark-plugins'
96 | import CustomComponent from '../components/custom-component'
97 |
98 | const components = { CustomComponent }
99 |
100 | export default function TestPage({ source }) {
101 | return (
102 |
103 |
104 |
105 | )
106 | }
107 |
108 | export async function getStaticProps() {
109 | // Imagine "included-file.mdx" has in it...
110 | // it will render as expected, since the @include extension
111 | // is .mdx and resolveMdx is true.
112 | const source = 'Some **mdx** text.\n\n@include "included-file.mdx"'
113 | const mdxSource = await serialize(source, {
114 | mdxOptions: {
115 | remarkPlugins: [[includeMarkdown, { resolveMdx: true }]],
116 | },
117 | })
118 | return { props: { source: mdxSource } }
119 | }
120 | ```
121 |
122 | **Note**: this option should only be used in MDX contexts. This option will likely break where `remark-stringify` is used as the stringify plugin, such as when using `remark` directly.
123 |
124 | ```js
125 | // 🚨 DON'T DO THIS - it will likely just break.
126 | // remark().use(includeMarkdown, { resolveMdx: true })
127 | ```
128 |
129 | ### Ordering
130 |
It's important to note that remark applies transforms in the order that they are called. If you want your other plugins to also apply to the contents of included files, you need to make sure that you apply the include content plugin **before all other plugins**. For example, let's say you have two plugins, one is this one to include markdown, and the other capitalizes all text, because yelling makes you more authoritative and also it's easier to read capitalized text. If you want to ensure that your included content is also capitalized, here's how you'd order your plugins:
132 |
133 | ```js
134 | remark().use(includeMarkdown).use(capitalizeAllText)
135 | ```
136 |
137 | If you order them the opposite way, like this:
138 |
139 | ```js
140 | remark().use(capitalizeAllText).use(includeMarkdown)
141 | ```
142 |
143 | ...what will happen is that all your text will be capitalized _except_ for the text in included files. And on top of that, the include plugin wouldn't resolve the files properly, because it capitalized the word "include", which is the wrong syntax. So usually you want to make sure this plugin comes first in your plugin stack.
144 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/basic.expected.md:
--------------------------------------------------------------------------------
1 | hello this is a file that uses an include
2 |
3 | include/before
4 |
5 | nested/include2
6 |
7 | include/after
8 |
9 | nested/include2
10 |
11 | isn't that neat?
12 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/basic.md:
--------------------------------------------------------------------------------
1 | hello this is a file that uses an include
2 |
3 | @include 'include.md'
4 |
5 | @include 'nested/include2.md'
6 |
7 | isn't that neat?
8 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/include-nested-component.mdx:
--------------------------------------------------------------------------------
1 | text at depth one
2 |
3 | @include 'nested/include-component.mdx'
4 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/include-with-comment.mdx:
--------------------------------------------------------------------------------
1 | We should now be able include custom MDX components in partials. For example, a `"official"` `PluginTierLabel` should render below:
2 |
3 |
4 |
5 | Comments should NOT mess things up:
6 |
7 |
8 |
9 | But they seem to be messing things up, apparently due to differences in `remark` 12 vs 13.
10 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/include-with-component.mdx:
--------------------------------------------------------------------------------
1 | some text in an include
2 |
3 |
4 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/include.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) HashiCorp, Inc.
3 | * SPDX-License-Identifier: MPL-2.0
4 | */
5 |
6 | function sayHello(name) {
7 | console.log(`hello, ${name}!`)
8 | }
9 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/include.md:
--------------------------------------------------------------------------------
1 | include/before
2 |
3 | @include 'nested/include2.md'
4 |
5 | include/after
6 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/include.mdx:
--------------------------------------------------------------------------------
1 | this is an **mdx** include
2 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/invalid-path.md:
--------------------------------------------------------------------------------
1 | hello this is a file that uses an include
2 |
3 | @include 'bskjbfkhj'
4 |
5 | isn't that neat?
6 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/mdx-format.expected.md:
--------------------------------------------------------------------------------
1 | before
2 |
3 | this is an **mdx** include
4 |
5 | after
6 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/mdx-format.md:
--------------------------------------------------------------------------------
1 | before
2 |
3 | @include 'include.mdx'
4 |
5 | after
6 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/nested/include-component.mdx:
--------------------------------------------------------------------------------
1 | some text in a nested include
2 |
3 |
4 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/nested/include2.md:
--------------------------------------------------------------------------------
1 | nested/include2
2 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/nested/include3.md:
--------------------------------------------------------------------------------
1 | nested/include3
2 |
3 | @include 'include2.md'
4 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/non-markdown.expected.md:
--------------------------------------------------------------------------------
1 | before
2 |
3 | ```js
4 | function sayHello(name) {
5 | console.log(`hello, ${name}!`)
6 | }
7 | ```
8 |
9 | after
10 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/non-markdown.md:
--------------------------------------------------------------------------------
1 | before
2 |
3 | @include 'include.js'
4 |
5 | after
6 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/resolve-from.expected.md:
--------------------------------------------------------------------------------
1 | nested/include3
2 |
3 | nested/include2
4 |
--------------------------------------------------------------------------------
/plugins/include-markdown/fixtures/resolve-from.md:
--------------------------------------------------------------------------------
1 | @include 'include3.md'
2 |
--------------------------------------------------------------------------------
/plugins/include-markdown/index.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) HashiCorp, Inc.
3 | * SPDX-License-Identifier: MPL-2.0
4 | */
5 |
6 | const path = require('path')
7 | const remark = require('remark')
8 | const remarkMdx = require('remark-mdx')
9 | const flatMap = require('unist-util-flatmap')
10 | const { readSync } = require('to-vfile')
11 | const mdAstToMdxAst = require('./md-ast-to-mdx-ast')
12 |
/**
 * Remark plugin that replaces `@include "path"` paragraphs with the
 * contents of the referenced file.
 *
 * - `.md` / `.mdx` includes are parsed with remark and spliced into the
 *   tree, recursively resolving any nested `@include` statements.
 * - Any other extension is embedded as a code block whose `lang` is the
 *   file extension.
 *
 * @param {Object} [options]
 * @param {string} [options.resolveFrom] - if set, include paths resolve
 *   from this directory instead of the including file's directory.
 * @param {boolean} [options.resolveMdx] - if true, `.mdx` includes are
 *   parsed with remark-mdx so custom components become "jsx" nodes.
 */
module.exports = function includeMarkdownPlugin({
  resolveFrom,
  resolveMdx,
} = {}) {
  return function transformer(tree, file) {
    return flatMap(tree, (node) => {
      // only a paragraph can hold an @include statement
      if (node.type !== 'paragraph') return [node]

      // detect an `@include` statement
      // (only the paragraph's first child is checked; non-text children
      // have no `value`, so the `&&` guard short-circuits safely)
      const includeMatch =
        node.children[0].value &&
        node.children[0].value.match(/^@include\s['"](.*)['"]$/)
      if (!includeMatch) return [node]

      // read the file contents
      const includePath = path.join(
        resolveFrom || file.dirname,
        includeMatch[1]
      )
      let includeContents
      try {
        includeContents = readSync(includePath, 'utf8')
      } catch (err) {
        // surface the exact location of the bad @include for debugging
        throw new Error(
          `The @include file path at ${includePath} was not found.\n\nInclude Location: ${file.path}:${node.position.start.line}:${node.position.start.column}`
        )
      }

      // if we are including a ".md" or ".mdx" file, we add the contents as processed markdown
      // if any other file type, they are embedded into a code block
      if (includePath.match(/\.md(?:x)?$/)) {
        // return the file contents in place of the @include
        // (takes a couple steps because we're processing includes with remark)
        const processor = remark()
        // if the include is MDX, and the plugin consumer has confirmed their
        // ability to stringify MDX nodes (eg "jsx"), then use remarkMdx to support
        // custom components (which would otherwise appear as likely invalid HTML nodes)
        const isMdx = includePath.match(/\.mdx$/)
        if (isMdx && resolveMdx) processor.use(remarkMdx).use(mdAstToMdxAst)
        // use the includeMarkdown plugin to allow recursive includes
        processor.use(includeMarkdownPlugin, { resolveFrom, resolveMdx })
        // Process the file contents, then return them
        const ast = processor.parse(includeContents)
        return processor.runSync(ast, includeContents).children
      } else {
        // trim trailing newline
        includeContents.contents = includeContents.contents.trim()

        // return contents wrapped inside a "code" node
        // NOTE(review): `value` is assigned the vfile itself rather than
        // `includeContents.contents`; this appears to rely on the vfile
        // stringifying to its contents downstream -- confirm before changing.
        return [
          {
            type: 'code',
            lang: includePath.match(/\.(\w+)$/)[1],
            value: includeContents,
          },
        ]
      }
    })
  }
}
73 |
--------------------------------------------------------------------------------
/plugins/include-markdown/index.test.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) HashiCorp, Inc.
3 | * SPDX-License-Identifier: MPL-2.0
4 | */
5 |
6 | const path = require('path')
7 | const { readSync } = require('to-vfile')
8 | const remark = require('remark')
9 | const includeMarkdown = require('./index.js')
10 | const normalizeNewline = require('normalize-newline')
11 |
// Integration tests for the include-markdown remark plugin. Fixtures live
// in ./fixtures; string-output tests compare the processed result against
// a sibling "<name>.expected.md" fixture, while the MDX tests run the
// transformer directly and inspect the resulting mdast tree.
//
// NOTE(review): several expected string literals below look truncated
// (e.g. `.toBe(' ')` and an empty template literal) -- they presumably
// asserted component/comment markup originally. Confirm against the
// fixture files before relying on these expectations.
describe('include-markdown', () => {
  test('basic', () => {
    // Recursive includes: basic.md pulls in include.md, which itself
    // pulls in nested/include2.md.
    remark()
      .use(includeMarkdown)
      .process(loadFixture('basic'), (err, file) => {
        if (err) throw new Error(err)
        expect(file.contents).toEqual(loadFixture('basic.expected').contents)
      })
  })

  test('include mdx', () => {
    // An .mdx include without resolveMdx is still spliced in as markdown.
    remark()
      .use(includeMarkdown)
      .process(loadFixture('mdx-format'), (err, file) => {
        if (err) throw new Error(err)
        expect(file.contents).toEqual(
          loadFixture('mdx-format.expected').contents
        )
      })
  })

  test('include custom mdx components', () => {
    // Set up a basic snippet as an mdast tree
    const sourceMdx = `hello\n\n@include 'include-with-component.mdx'\n\nworld`
    const rawTree = remark().parse(sourceMdx)
    // Set up the includes plugin which will also run remark-mdx
    const resolveFrom = path.join(__dirname, 'fixtures')
    const tree = includeMarkdown({ resolveFrom, resolveMdx: true })(rawTree)
    // Expect the custom component to appear in the resulting tree as JSX
    expect(tree.children.length).toBe(4)
    const [beforeP, includedText, includedComponent, afterP] = tree.children
    expect(beforeP.children[0].value).toBe('hello')
    expect(includedText.children[0].value).toBe('some text in an include')
    expect(includedComponent.type).toBe('jsx')
    expect(includedComponent.value).toBe(' ')
    expect(afterP.children[0].value).toBe('world')
  })

  test('include nested custom mdx components', () => {
    // Set up a basic snippet as an mdast tree
    const sourceMdx = `hello\n\n@include 'include-nested-component.mdx'\n\nworld`
    const rawTree = remark().parse(sourceMdx)
    // Set up the includes plugin which will also run remark-mdx
    const resolveFrom = path.join(__dirname, 'fixtures')
    const tree = includeMarkdown({ resolveFrom, resolveMdx: true })(rawTree)
    // Expect the custom component to appear in the resulting tree as JSX
    expect(tree.children.length).toBe(5)
    const [beforeP, includedText, nestedText, nestedComponent, afterP] =
      tree.children
    expect(beforeP.children[0].value).toBe('hello')
    expect(includedText.children[0].value).toBe('text at depth one')
    expect(nestedText.children[0].value).toBe('some text in a nested include')
    expect(nestedComponent.value).toBe(' ')
    expect(nestedComponent.type).toBe('jsx')
    expect(afterP.children[0].value).toBe('world')
  })

  test('handles HTML comments when MDX is enabled', () => {
    // Set up a basic snippet as an mdast tree
    const sourceMdx = `\n\n@include 'include-with-comment.mdx'\n\nworld`
    const rawTree = remark().parse(sourceMdx)
    // Set up the includes plugin which will also run remark-mdx
    const resolveFrom = path.join(__dirname, 'fixtures')
    const tree = includeMarkdown({ resolveFrom, resolveMdx: true })(rawTree)
    // Expect the tree to have the right number of nodes
    expect(tree.children.length).toBe(7)
    // Expect the direct comment to be an HTML node,
    // as we're not using md-ast-to-mdx-ast at this top level
    // (though in our usual MDX contexts, we would be)
    const directComment = tree.children[0]
    expect(directComment.type).toBe('html')
    expect(directComment.value).toBe('')
    // Expect the custom component in the include to be a JSX node
    const customComponent = tree.children[2]
    expect(customComponent.type).toBe('jsx')
    expect(customComponent.value).toBe(' ')
    // Expect the comment in the include to be a comment node,
    // as it has been parsed with remark-mdx and md-ast-to-mdx-ast,
    // the latter of which transforms comments from "html" to "comment" nodes
    const includedComment = tree.children[4]
    expect(includedComment.type).toBe('comment')
    expect(includedComment.value).toBe(' HTML comment but nested ')
  })

  test('include non-markdown', () => {
    // Non-markdown includes are embedded as a fenced code block whose
    // lang is taken from the file extension.
    remark()
      .use(includeMarkdown)
      .process(loadFixture('non-markdown'), (err, file) => {
        if (err) throw new Error(err)
        expect(file.contents).toEqual(
          loadFixture('non-markdown.expected').contents
        )
      })
  })

  test('invalid path', () => {
    // The plugin throws synchronously with the include location baked
    // into the message when the referenced file does not exist.
    expect(() =>
      remark()
        .use(includeMarkdown)
        .process(loadFixture('invalid-path'), (err) => {
          if (err) throw err
        })
    ).toThrow(
      /The @include file path at .*bskjbfkhj was not found\.\s+Include Location: .*invalid-path\.md:3:1/gm
    )
  })

  test('resolveFrom option', () => {
    // With resolveFrom set, include paths resolve against that directory
    // rather than the including file's own directory.
    remark()
      .use(includeMarkdown, {
        resolveFrom: path.join(__dirname, 'fixtures/nested'),
      })
      .process(loadFixture('resolve-from'), (err, file) => {
        if (err) throw new Error(err)
        expect(file.contents).toEqual(
          loadFixture('resolve-from.expected').contents
        )
      })
  })
})
132 |
// Read the named markdown fixture from ./fixtures as a vfile, with
// newlines normalized so expectations hold across platforms.
function loadFixture(name) {
  const fixturePath = path.join(__dirname, 'fixtures', `${name}.md`)
  const fixture = readSync(fixturePath, 'utf8')
  fixture.contents = normalizeNewline(fixture.contents)
  return fixture
}
138 |
--------------------------------------------------------------------------------
/plugins/include-markdown/md-ast-to-mdx-ast.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) HashiCorp, Inc.
3 | * SPDX-License-Identifier: MPL-2.0
4 | */
5 |
6 | /*
7 |
8 | NOTE:
9 | This file is swiped directly from @mdxjs/mdx's createMdxAstCompiler.
10 | ref: https://github.com/mdx-js/mdx/blob/510bae2580958598ae29047bf755b1a2ea26cf7e/packages/mdx/md-ast-to-mdx-ast.js
11 |
12 | I considered the possibility of using createMdxAstCompiler rather than remark-mdx on its own.
13 | however, crucially, we do NOT want to transform our AST into a MDXAST, we ONLY want to
14 | transform custom component nodes (ie HTML that is really JSX) into JSX nodes.
15 | So it felt duplicative, but necessary, to copypasta this utility in to meet our needs.
16 |
17 | */
18 |
19 | const visit = require('unist-util-visit')
20 | const { isComment, getCommentContents } = require('@mdx-js/util')
21 |
22 | module.exports = (_options) => (tree) => {
23 | visit(tree, 'jsx', (node) => {
24 | if (isComment(node.value)) {
25 | node.type = 'comment'
26 | node.value = getCommentContents(node.value)
27 | }
28 | })
29 |
30 | return tree
31 | }
32 |
--------------------------------------------------------------------------------
/plugins/paragraph-custom-alerts/README.md:
--------------------------------------------------------------------------------
1 | # Paragraph Custom Alerts
2 |
This plugin allows paragraphs to be "tagged" by custom symbols, affecting their final render.
4 |
5 | | Symbol | Meaning |
6 | | ------ | --------- |
7 | | `=>` | `success` |
8 | | `->` | `info` |
9 | | `~>` | `warning` |
10 | | `!>` | `danger` |
11 |
12 | ### Input:
13 |
14 | ```mdx
15 | Read below for more information...
16 |
17 | !> Here be dragons. Proceed with caution!
18 |
19 | => You are victorious! Great victory!
20 | ```
21 |
22 | ### Output:
23 |
24 | ```html
<p>Read below for more information...</p>

<div class="alert alert-danger g-type-body">
  <p>Here be dragons. Proceed with caution!</p>
</div>

<div class="alert alert-success g-type-body">
  <p>You are victorious! Great victory!</p>
</div>
32 | ```
33 |
--------------------------------------------------------------------------------
/plugins/paragraph-custom-alerts/index.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) HashiCorp, Inc.
3 | * SPDX-License-Identifier: MPL-2.0
4 | */
5 |
6 | const is = require('unist-util-is')
7 | const visit = require('unist-util-visit')
8 |
9 | const sigils = {
10 | '=>': 'success',
11 | '->': 'info',
12 | '~>': 'warning',
13 | '!>': 'danger',
14 | }
15 |
16 | module.exports = function paragraphCustomAlertsPlugin() {
17 | return function transformer(tree) {
18 | visit(tree, 'paragraph', (pNode, _, parent) => {
19 | visit(pNode, 'text', (textNode) => {
20 | Object.keys(sigils).forEach((symbol) => {
21 | if (textNode.value.startsWith(`${symbol} `)) {
22 | // Remove the literal sigil symbol from string contents
23 | textNode.value = textNode.value.replace(`${symbol} `, '')
24 |
25 | // Wrap matched nodes with (containing proper attributes)
26 | parent.children = parent.children.map((node) => {
27 | return is(pNode, node)
28 | ? {
29 | type: 'wrapper',
30 | children: [node],
31 | data: {
32 | hName: 'div',
33 | hProperties: {
34 | className: [
35 | 'alert',
36 | `alert-${sigils[symbol]}`,
37 | 'g-type-body',
38 | ],
39 | },
40 | },
41 | }
42 | : node
43 | })
44 | }
45 | })
46 | })
47 | })
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/plugins/paragraph-custom-alerts/index.test.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) HashiCorp, Inc.
3 | * SPDX-License-Identifier: MPL-2.0
4 | */
5 |
6 | const remark = require('remark')
7 | const html = require('remark-html')
8 | const paragraphCustomAlerts = require('./index.js')
9 |
describe('paragraph-custom-alerts', () => {
  it('should produce the expected html output', () => {
    // NOTE(review): the expected-output string below looks truncated -- the
    // wrapping alert <div> markup this plugin emits appears to have been
    // stripped from this copy of the file, and as written the literal is not
    // valid JS (raw newline inside a single-quoted string). Restore the full
    // expectation from version control before relying on this test.
    expect(
      remark()
        .use(paragraphCustomAlerts)
        .use(html)
        .processSync(`=> this is a success paragraph`)
        .toString()
    ).toMatch(
      'this is a success paragraph
'
    )
  })

  it('should handle multiple paragraph blocks', () => {
    // Mixes plain paragraphs with warning (~>) and success (=>) sigils to
    // confirm only the tagged paragraphs are transformed.
    const md = `this is a normal, non-alert paragraph

~> this is a warning block

this is another "normal" block

=> success block here! yeah!`
    // NOTE(review): same truncation issue as above -- the expected markup is
    // missing its wrapping HTML tags; verify against version control.
    expect(
      remark().use(paragraphCustomAlerts).use(html).processSync(md).toString()
    ).toMatch(
      `this is a normal, non-alert paragraph
this is a warning block
this is another "normal" block
success block here! yeah!
`
    )
  })
})
41 |
--------------------------------------------------------------------------------
/plugins/typography/README.md:
--------------------------------------------------------------------------------
1 | # Heading Type Styles
2 |
3 | We use a set of global classes for type styling at HashiCorp. This plugin adds type styles to the appropriate elements so that content looks as intended within rendered markdown blocks without duplicating or extending CSS.
4 |
5 | ### Input
6 |
7 | ```mdx
8 | # Uses
9 |
10 | Here are some uses...
11 |
12 | ## Another title
13 |
14 | Here is some more stuff...
15 | ```
16 |
17 | ### Output
18 |
19 | ```jsx
<h1 className="g-type-display-2">Uses</h1>

<p className="g-type-long-body">Here are some uses...</p>

<h2 className="g-type-display-3">Another title</h2>

<p className="g-type-long-body">Here is some more stuff...</p>
27 | ```
28 |
29 | ### Custom Class Mapping
30 |
31 | In rare cases, we may want to map custom `class` attributes onto specific elements. Currently, this plugin supports an `options` object, and `options.map` provides this functionality.
32 |
33 | Here is an imagined use case where all possible elements have custom `class` attributes. Any one of these elements can be omitted from the map, and it will fall back to our default `class` for that element.
34 |
35 | ```js
36 | const options = {
37 | map: {
38 | h1: 'custom-1',
39 | h2: 'custom-2',
40 | h3: 'custom-3',
41 | h4: 'custom-4',
42 | h5: 'custom-5',
43 | h6: 'custom-6',
44 | p: 'custom-paragraph',
45 | li: 'custom-list-item',
46 | },
47 | }
48 | // example use with `mdx`
49 | const output = mdx.sync(fileContents, {
50 | remarkPlugins: [[typographyPlugin, options]],
51 | })
52 | ```
53 |
54 | With this configuration, and the same input as the previous example, we would expect the following output:
55 |
56 | ```jsx
<h1 className="custom-1">Uses</h1>

<p className="custom-paragraph">Here are some uses...</p>

<h2 className="custom-2">Another title</h2>

<p className="custom-paragraph">Here is some more stuff...</p>
64 | ```
65 |
--------------------------------------------------------------------------------
/plugins/typography/index.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) HashiCorp, Inc.
3 | * SPDX-License-Identifier: MPL-2.0
4 | */
5 |
6 | const visit = require('unist-util-visit')
7 |
8 | module.exports = function typographyPlugin(options = {}) {
9 | function getClassName(elemKey) {
10 | const defaultMap = {
11 | h1: 'g-type-display-2',
12 | h2: 'g-type-display-3',
13 | h3: 'g-type-display-4',
14 | h4: 'g-type-display-5',
15 | h5: 'g-type-display-6',
16 | h6: 'g-type-label',
17 | p: 'g-type-long-body',
18 | li: 'g-type-long-body',
19 | }
20 | const customMap = options.map || {}
21 | return typeof customMap[elemKey] === 'string'
22 | ? customMap[elemKey]
23 | : defaultMap[elemKey]
24 | }
25 |
26 | function addClassName(node, className) {
27 | if (!className) return true
28 | const data = node.data || (node.data = {})
29 | const props = data.hProperties || (data.hProperties = {})
30 | data.id = className
31 | props.className = className
32 | }
33 |
34 | return function transformer(tree) {
35 | // Add typography classes to headings
36 | visit(tree, 'heading', (node) => {
37 | addClassName(node, getClassName(`h${node.depth}`))
38 | })
39 |
40 | // Add typography classes to paragraph text
41 | visit(tree, 'paragraph', (node) => {
42 | addClassName(node, getClassName('p'))
43 | })
44 |
45 | // Add typography classes to list items
46 | visit(tree, 'listItem', (node) => {
47 | addClassName(node, getClassName('li'))
48 | })
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/plugins/typography/index.test.js:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) HashiCorp, Inc.
3 | * SPDX-License-Identifier: MPL-2.0
4 | */
5 |
6 | const typographyPlugin = require('./index.js')
7 | const mdx = require('@mdx-js/mdx')
8 |
// Markdown fixture covering every element this plugin decorates:
// all six heading depths, paragraphs, and list items.
const fileContents = `hi there

# Heading One
## Heading Two
sadklfjhlskdjf

### Heading Three
#### Heading Four
##### Heading Five
###### Heading Six

Foo bar baz wow *amaze*

- foo
- bar
`

describe('type-styles', () => {
  // NOTE(review): the regex literals in this suite look truncated -- the
  // element markup (e.g. an opening tag with a className attribute) appears
  // to have been stripped from this copy of the file, leaving several
  // regexes split across lines and not syntactically valid. Restore them
  // from version control before running this suite.
  it('adds classNames to headings, paragraphs, and list items', () => {
    const output = mdx.sync(fileContents, { remarkPlugins: [typographyPlugin] })
    expect(output).toMatch(
      /{`Heading One`}<\/h1>/
    )
    expect(output).toMatch(
      /{`Heading Two`}<\/h2>/
    )
    expect(output).toMatch(
      /{`Heading Three`}<\/h3>/
    )
    expect(output).toMatch(
      /{`Heading Four`}<\/h4>/
    )
    expect(output).toMatch(
      /{`Heading Five`}<\/h5>/
    )
    expect(output).toMatch(
      /{`Heading Six`}<\/h6>/
    )
    expect(output).toMatch(
      /
{`sadklfjhlskdjf`}<\/p>/
    )
    expect(output).toMatch(
      /
{`foo`}<\/li>/
    )
  })

  it('allows empty strings in map to prevent the addition of classNames', () => {
    // An empty-string override for `p` should suppress the class entirely.
    const options = {
      map: {
        p: '',
      },
    }
    const output = mdx.sync(fileContents, {
      remarkPlugins: [[typographyPlugin, options]],
    })
    expect(output).not.toMatch(
      /{`sadklfjhlskdjf`}<\/p>/
    )
  })

  it('allows customization of classNames', () => {
    // Every element gets a custom class; defaults should not appear.
    const options = {
      map: {
        h1: 'custom-1',
        h2: 'custom-2',
        h3: 'custom-3',
        h4: 'custom-4',
        h5: 'custom-5',
        h6: 'custom-6',
        p: 'custom-paragraph',
        li: 'custom-list-item',
      },
    }
    const output = mdx.sync(fileContents, {
      remarkPlugins: [[typographyPlugin, options]],
    })
    expect(output).toMatch(
      /
{`Heading One`}<\/h1>/
    )
    expect(output).toMatch(
      /{`Heading Two`}<\/h2>/
    )
    expect(output).toMatch(
      /{`Heading Three`}<\/h3>/
    )
    expect(output).toMatch(
      /{`Heading Four`}<\/h4>/
    )
    expect(output).toMatch(
      /{`Heading Five`}<\/h5>/
    )
    expect(output).toMatch(
      /{`Heading Six`}<\/h6>/
    )
    expect(output).toMatch(
      /
{`sadklfjhlskdjf`}<\/p>/
    )
    expect(output).toMatch(
      /
{`foo`}<\/li>/
    )
  })
})
111 |
--------------------------------------------------------------------------------
`,
337 | }
338 | })
339 | }
340 |
// A heading can contain multiple nodes (text, html, etc). This helper
// returns the heading's literal text content by concatenating the values of
// all descendant leaf nodes via a depth-first walk.
// NOTE(review): the previous comment here described attempting full remark
// stringification with a fallback for nonstandard (e.g. mdx "jsx") nodes;
// the current implementation always uses the simple text walk -- confirm
// the simplification was intentional.
function stringifyChildNodes(node) {
  return getChildNodesText(node)
}
350 |
/**
 * Collect the literal text of a node's descendants via a depth-first walk.
 *
 * Parent nodes are recursed into; leaf nodes contribute their `value`.
 * Nodes carrying neither `children` nor `value` contribute nothing. A bare
 * leaf Node (no `children` array at all) yields an empty string rather than
 * throwing, matching the `Parent | Node` contract below.
 *
 * @param {import('unist').Parent | import('unist').Node} node
 * @returns {string} concatenated text of all descendant leaf values
 */
function getChildNodesText(node) {
  // Guard: a plain Node may have no `children` array at all; previously this
  // threw a TypeError on `.reduce` despite the documented Node support.
  const children = node.children || []

  const text = children.reduce((acc, child) => {
    if ('children' in child) {
      acc += getChildNodesText(child)
    } else if ('value' in child) {
      acc += child.value
    }
    return acc
  }, '')

  return text
}
370 |
--------------------------------------------------------------------------------
/plugins/include-markdown/README.md:
--------------------------------------------------------------------------------
1 | # Include Markdown Plugin
2 |
3 | This plugin will transform a custom `@include "filename"` directive into the contents of the specified file, relative to the current file.
4 |
5 | ### Input
6 |
7 | Your main markdown file:
8 |
9 | ```md
10 | # My cool page
11 |
12 | @include "disclaimer.md"
13 |
14 | The rest of the content...
15 | ```
16 |
17 | `disclaimer.md`, in the same directory:
18 |
19 | ```md
20 | Disclaimer: This content is not guaranteed to be in any way useful or truthful.
21 | ```
22 |
23 | ### Output
24 |
25 | ```html
26 |
`,
199 | })
200 | }
201 | }
202 |
203 | // add the target element with the right slug
204 | liNode.children.unshift({
205 | type: 'html',
206 | value: ` `,
207 | })
208 |
209 | // - if an alias is defined, use that
210 | // - if not, if a compatibilitySlug is defined, use that
211 | // - otherwise use the auto-generated slug
212 | const permalinkSlug =
213 | aliases && aliases.length ? aliases[0] : compatibilitySlug ? slug2 : slug
214 |
215 | // wrap permalink element around child `,
141 | })
142 | }
143 | }
144 |
145 | // - if an alias is defined, use that
146 | // - if not, if a compatibilitySlug is defined, use that
147 | // - otherwise use the auto-generated slug
148 | const permalinkSlug =
149 | aliases && aliases.length ? aliases[0] : compatibilitySlug ? slug2 : slug
150 |
151 | // finally, we generate an "permalink" element that can be used to get a quick
152 | // anchor link for any given headline
153 | node.children.unshift({
154 | type: 'html',
155 | value: `