├── .editorconfig
├── .github
│   ├── FUNDING.yml
│   ├── components
│   │   ├── alertmanager_ui.png
│   │   ├── cartography_elk.png
│   │   ├── cartography_setup.png
│   │   ├── elk_kibana.png
│   │   ├── grafana_ui.png
│   │   ├── neo4j_ui.png
│   │   ├── prometheus_ui.png
│   │   ├── vault_ui.png
│   │   └── yopass.png
│   ├── dependabot.yaml
│   ├── k8s_lab_plz_logo.png
│   ├── labeler.yml
│   └── workflows
│       └── admin-labeler.yml
├── .github_changelog_generator
├── .gitignore
├── .plzconfig
├── .pre-commit-config.yaml
├── .prettierignore
├── .prettierrc.json
├── CHANGELOG.md
├── CODEOWNERS
├── README.md
├── build_defs
│   ├── BUILD
│   ├── docker_repository.build_defs
│   ├── helm.build_defs
│   ├── k8s_namespaced.build_defs
│   └── kustomize.build_defs
├── common
│   ├── BUILD
│   └── utils
│       ├── get_resource_from_selector.sh
│       └── wait_pod.sh
├── components
│   ├── baremetal
│   │   ├── BUILD
│   │   ├── README.md
│   │   ├── deploy.sh
│   │   ├── k8s
│   │   │   ├── haproxy-ingress-values.yaml
│   │   │   ├── haproxy-namespace.yaml
│   │   │   ├── metallb-config.yaml
│   │   │   ├── sample-ingress.yaml
│   │   │   └── sample-pvc.yaml
│   │   └── scripts
│   │       ├── 1_crio_install.sh
│   │       ├── 2_crio_config.sh
│   │       ├── 3_tools_install.sh
│   │       ├── 4_tools_config.sh
│   │       ├── 5_cluster_install.sh
│   │       └── config.fcc
│   ├── cartography
│   │   ├── BUILD
│   │   ├── README.md
│   │   ├── consumers
│   │   │   └── elasticsearch
│   │   │       ├── deployment
│   │   │       │   ├── base
│   │   │       │   │   ├── es-index.json
│   │   │       │   │   ├── ingestor-cronjob.yaml
│   │   │       │   │   └── kustomization.yaml
│   │   │       │   ├── docker
│   │   │       │   │   ├── Dockerfile
│   │   │       │   │   └── requirements.txt
│   │   │       │   └── overlays
│   │   │       │       ├── baremetal
│   │   │       │       │   └── kustomization.yaml
│   │   │       │       └── minikube
│   │   │       │           └── kustomization.yaml
│   │   │       └── py
│   │   │           ├── elastic_connector.py
│   │   │           ├── elastic_ingestor.py
│   │   │           └── neo4j_connector.py
│   │   ├── deploy.sh
│   │   ├── deployment
│   │   │   ├── cartography-namespace.yaml
│   │   │   ├── cartography
│   │   │   │   ├── base
│   │   │   │   │   ├── cartography-job.yaml
│   │   │   │   │   └── kustomization.yaml
│   │   │   │   ├── docker
│   │   │   │   │   ├── Dockerfile
│   │   │   │   │   ├── docker-entrypoint.sh
│   │   │   │   │   └── requirements.txt
│   │   │   │   └── overlays
│   │   │   │       ├── baremetal
│   │   │   │       │   └── kustomization.yaml
│   │   │   │       └── minikube
│   │   │   │           └── kustomization.yaml
│   │   │   └── neo4j
│   │   │       ├── base
│   │   │       │   ├── kustomization.yaml
│   │   │       │   ├── neo4j-service.yaml
│   │   │       │   ├── neo4j-statefulset.yaml
│   │   │       │   └── vault-agent-sa.yaml
│   │   │       └── overlays
│   │   │           ├── baremetal
│   │   │           │   ├── kustomization.yaml
│   │   │           │   ├── neo4j-ingress.yaml
│   │   │           │   ├── neo4j-pv.yaml
│   │   │           │   └── neo4j-statefulset.yaml
│   │   │           └── minikube
│   │   │               ├── kustomization.yaml
│   │   │               └── neo4j-statefulset.yaml
│   │   ├── forward-ui.sh
│   │   └── setup
│   │       ├── cartography.sh
│   │       ├── elastic-ingestor.sh
│   │       └── neo4j.sh
│   ├── elk
│   │   ├── BUILD
│   │   ├── README.md
│   │   ├── deploy.sh
│   │   ├── deployment
│   │   │   ├── base
│   │   │   │   ├── elasticsearch.yaml
│   │   │   │   ├── kibana.yaml
│   │   │   │   └── kustomization.yaml
│   │   │   ├── elk-namespace.yaml
│   │   │   └── overlays
│   │   │       ├── baremetal
│   │   │       │   ├── elastic-pv.yaml
│   │   │       │   ├── elasticsearch.yaml
│   │   │       │   ├── kibana-ui-ingress.yaml
│   │   │       │   └── kustomization.yaml
│   │   │       └── minikube
│   │   │           ├── elasticsearch.yaml
│   │   │           └── kustomization.yaml
│   │   └── forward-ui.sh
│   ├── kafka
│   │   ├── BUILD
│   │   ├── README.md
│   │   ├── deploy.sh
│   │   ├── k8s
│   │   │   ├── kafka-cluster.yaml
│   │   │   └── kafka-namespace.yaml
│   │   └── scripts
│   │       ├── consume-topic.sh
│   │       ├── list-topics.sh
│   │       └── produce-topic.sh
│   ├── observability
│   │   ├── BUILD
│   │   ├── README.md
│   │   ├── deploy.sh
│   │   ├── forward-ui.sh
│   │   └── k8s
│   │       └── namespace.yaml
│   ├── vault
│   │   ├── BUILD
│   │   ├── README.md
│   │   ├── deploy.sh
│   │   ├── deployment
│   │   │   ├── base
│   │   │   │   └── kustomization.yaml
│   │   │   ├── overlays
│   │   │   │   ├── baremetal
│   │   │   │   │   ├── helm-values.yaml
│   │   │   │   │   ├── kustomization.yaml
│   │   │   │   │   ├── vault-pv.yaml
│   │   │   │   │   └── vault-ui-ingress.yaml
│   │   │   │   └── minikube
│   │   │   │       ├── helm-values.yaml
│   │   │   │       └── kustomization.yaml
│   │   │   └── vault-namespace.yaml
│   │   ├── forward-ui.sh
│   │   ├── sample
│   │   │   ├── agent-service-account.yaml
│   │   │   ├── deploy.sh
│   │   │   └── sample-deployment.yaml
│   │   └── setup
│   │       ├── agent-init.sh
│   │       ├── agent-policy.json
│   │       ├── vault-init.sh
│   │       └── vault-unseal.sh
│   └── yopass
│       ├── BUILD
│       ├── README.md
│       ├── deploy.sh
│       ├── deployment
│       │   ├── base
│       │   │   ├── deployment.yaml
│       │   │   ├── kustomization.yaml
│       │   │   └── service.yaml
│       │   ├── overlays
│       │   │   ├── baremetal
│       │   │   │   ├── ingress.yaml
│       │   │   │   └── kustomization.yaml
│       │   │   └── minikube
│       │   │       └── kustomization.yaml
│       │   └── yopass-namespace.yaml
│       └── forward-ui.sh
└── third_party
    ├── BUILD
    ├── docker
    │   └── BUILD
    └── python
        └── BUILD
/.editorconfig:
--------------------------------------------------------------------------------
1 | # EditorConfig is awesome: http://EditorConfig.org
2 |
3 | # top-most EditorConfig file
4 | root = true
5 |
6 | # Global Style
7 | [*]
8 | charset = utf-8
9 | indent_style = space
10 | end_of_line = lf
11 | insert_final_newline = true
12 | trim_trailing_whitespace = true
13 |
14 | # Python
15 | [{*.{py,build_defs},BUILD}]
16 | indent_size = 4
17 | max_line_length = 100
18 |
19 | # Frontend
20 | [*.{js,json,jsx,ts,tsx,html,scss,css}]
21 | indent_size = 2
22 | max_line_length = 120
23 |
24 | # YAML
25 | [*.{yaml,yml}]
26 | indent_size = 2
27 | max_line_length = 120
28 |
29 | # SH
30 | [*.sh]
31 | indent_size = 4
32 |
33 | # Java
34 | [*.java]
35 | indent_size = 2
36 | max_line_length = 120
37 |
38 | # Go
39 | [*.go]
40 | indent_style = tab
41 |
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: [marco-lancini]
4 | custom: ['https://www.buymeacoffee.com/marcolancini']
5 |
--------------------------------------------------------------------------------
/.github/components/alertmanager_ui.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/marco-lancini/k8s-lab-plz/91e64e4592be2d7260abac3de2abacb3ff00d1b5/.github/components/alertmanager_ui.png
--------------------------------------------------------------------------------
/.github/components/cartography_elk.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/marco-lancini/k8s-lab-plz/91e64e4592be2d7260abac3de2abacb3ff00d1b5/.github/components/cartography_elk.png
--------------------------------------------------------------------------------
/.github/components/cartography_setup.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/marco-lancini/k8s-lab-plz/91e64e4592be2d7260abac3de2abacb3ff00d1b5/.github/components/cartography_setup.png
--------------------------------------------------------------------------------
/.github/components/elk_kibana.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/marco-lancini/k8s-lab-plz/91e64e4592be2d7260abac3de2abacb3ff00d1b5/.github/components/elk_kibana.png
--------------------------------------------------------------------------------
/.github/components/grafana_ui.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/marco-lancini/k8s-lab-plz/91e64e4592be2d7260abac3de2abacb3ff00d1b5/.github/components/grafana_ui.png
--------------------------------------------------------------------------------
/.github/components/neo4j_ui.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/marco-lancini/k8s-lab-plz/91e64e4592be2d7260abac3de2abacb3ff00d1b5/.github/components/neo4j_ui.png
--------------------------------------------------------------------------------
/.github/components/prometheus_ui.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/marco-lancini/k8s-lab-plz/91e64e4592be2d7260abac3de2abacb3ff00d1b5/.github/components/prometheus_ui.png
--------------------------------------------------------------------------------
/.github/components/vault_ui.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/marco-lancini/k8s-lab-plz/91e64e4592be2d7260abac3de2abacb3ff00d1b5/.github/components/vault_ui.png
--------------------------------------------------------------------------------
/.github/components/yopass.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/marco-lancini/k8s-lab-plz/91e64e4592be2d7260abac3de2abacb3ff00d1b5/.github/components/yopass.png
--------------------------------------------------------------------------------
/.github/dependabot.yaml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | # Maintain dependencies for GitHub Actions
4 | - package-ecosystem: 'github-actions'
5 | directory: '/'
6 | schedule:
7 | interval: 'monthly'
8 |
--------------------------------------------------------------------------------
/.github/k8s_lab_plz_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/marco-lancini/k8s-lab-plz/91e64e4592be2d7260abac3de2abacb3ff00d1b5/.github/k8s_lab_plz_logo.png
--------------------------------------------------------------------------------
/.github/labeler.yml:
--------------------------------------------------------------------------------
1 | build-defs:
2 | - build_defs/**/*
3 |
4 | component:
5 | - components/**/*
6 | - third_party/**/*
7 |
8 | documentation:
9 | - components/**/*.md
10 | - README.md
11 |
12 | ci/cd:
13 | - .github/**/*
14 | - .github_changelog_generator
15 | - .plzconfig
16 | - .pre-commit-config.yaml
17 | - CHANGELOG.md
18 | - CODEOWNERS
19 |
--------------------------------------------------------------------------------
/.github/workflows/admin-labeler.yml:
--------------------------------------------------------------------------------
1 | name: '[ADMIN] Pull Request Labeler'
2 | on:
3 | - pull_request
4 |
5 | jobs:
6 | triage:
7 | runs-on: ubuntu-20.04
8 | steps:
9 | - name: 🏷 Apply labels
10 | uses: actions/labeler@v4
11 | with:
12 | repo-token: '${{ secrets.GITHUB_TOKEN }}'
13 |
--------------------------------------------------------------------------------
/.github_changelog_generator:
--------------------------------------------------------------------------------
1 | user=marco-lancini
2 | project=k8s-lab-plz
3 | add-sections={"components":{"prefix":"**COMPONENTS:**","labels":["component"]},"enhancement":{"prefix":"**ENHANCEMENTS:**","labels":["enhancement"]},"cicd":{"prefix":"**CI/CD:**","labels":["ci/cd"]},"documentation":{"prefix":"**DOCUMENTATION:**","labels":["documentation"]}}
4 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .env
3 | __temp
4 |
5 | # Please output directory
6 | plz-out
7 | pleasew
8 |
9 | # Vault
10 | cluster-keys*.json
11 |
--------------------------------------------------------------------------------
/.plzconfig:
--------------------------------------------------------------------------------
1 | ; Please config file
2 | ; Leaving this file as is is enough to use plz to build your project.
3 | ; Please will stay on whatever version you currently have until you run
4 | ; 'plz update', when it will download the latest available version.
5 | ;
6 | ; Or you can uncomment the following to pin everyone to a particular version;
7 | ; when you change it all users will automatically get updated.
8 | ; [please]
9 | ; version = 14.5.2
10 |
11 | [buildconfig]
12 | default-docker-repo = "docker.io"
13 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | # pre-commit install --hook-type pre-push
3 | - repo: https://github.com/pre-commit/pre-commit-hooks
4 | rev: v3.3.0
5 | hooks:
6 | - id: check-yaml
7 | args: [--allow-multiple-documents]
8 | - id: check-json
9 | exclude: 'components/vault/setup/agent-policy.json'
10 | - id: detect-aws-credentials
11 | args: ['--allow-missing-credentials']
12 | - id: detect-private-key
13 | - id: end-of-file-fixer
14 | - id: no-commit-to-branch
15 | args: ['--branch', 'main']
16 | - id: trailing-whitespace
17 | - repo: https://github.com/pre-commit/mirrors-prettier/
18 | rev: v2.2.0
19 | hooks:
20 | - id: prettier
21 |
--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------
1 | # Ignore MD for docs/
2 | *.md
3 | *.markdown
4 | components/vault/setup/*.json
5 |
--------------------------------------------------------------------------------
/.prettierrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "arrowParens": "always",
3 | "bracketSpacing": false,
4 | "jsxBracketSameLine": false,
5 | "printWidth": 120,
6 | "semi": true,
7 | "singleQuote": true,
8 | "tabWidth": 2,
9 | "trailingComma": "es5",
10 | "useTabs": false
11 | }
12 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | ## [v0.1.0](https://github.com/marco-lancini/k8s-lab-plz/tree/v0.1.0) (2021-03-29)
4 |
5 | [Full Changelog](https://github.com/marco-lancini/k8s-lab-plz/compare/5b3b315ec38f031ac029bbbfa66c7b89211ce5f8...v0.1.0)
6 |
7 | **Implemented enhancements:**
8 |
9 | - \[ELK\] Parametrize namespace [\#5](https://github.com/marco-lancini/k8s-lab-plz/pull/5) ([marco-lancini](https://github.com/marco-lancini))
10 | - \[Vault\] Cleanup scripts [\#3](https://github.com/marco-lancini/k8s-lab-plz/pull/3) ([marco-lancini](https://github.com/marco-lancini))
11 |
12 | **COMPONENTS:**
13 |
14 | - \[COMPONENT\] Add Baremetal Setup [\#24](https://github.com/marco-lancini/k8s-lab-plz/pull/24) ([marco-lancini](https://github.com/marco-lancini))
15 | - \[COMPONENT\] Introduce Kafka [\#7](https://github.com/marco-lancini/k8s-lab-plz/pull/7) ([marco-lancini](https://github.com/marco-lancini))
16 | - \[COMPONENT\] Introduce Prometheus [\#6](https://github.com/marco-lancini/k8s-lab-plz/pull/6) ([marco-lancini](https://github.com/marco-lancini))
17 | - \[COMPONENT\] Introduce ELK [\#2](https://github.com/marco-lancini/k8s-lab-plz/pull/2) ([marco-lancini](https://github.com/marco-lancini))
18 | - \[COMPONENT\] Introduce Vault [\#1](https://github.com/marco-lancini/k8s-lab-plz/pull/1) ([marco-lancini](https://github.com/marco-lancini))
19 |
20 | **CI/CD:**
21 |
22 | - \[ADMIN\] Add CODEOWNERS [\#22](https://github.com/marco-lancini/k8s-lab-plz/pull/22) ([marco-lancini](https://github.com/marco-lancini))
23 | - \[ADMIN\] Add pre-commit hooks and versioned Kafka [\#20](https://github.com/marco-lancini/k8s-lab-plz/pull/20) ([marco-lancini](https://github.com/marco-lancini))
24 | - Add Sponsors [\#18](https://github.com/marco-lancini/k8s-lab-plz/pull/18) ([marco-lancini](https://github.com/marco-lancini))
25 | - \[ADMIN\] Add .editorconfig, .prettierrc, and pre-commit hooks [\#17](https://github.com/marco-lancini/k8s-lab-plz/pull/17) ([marco-lancini](https://github.com/marco-lancini))
26 |
27 | **DOCUMENTATION:**
28 |
29 | - Update documentation [\#4](https://github.com/marco-lancini/k8s-lab-plz/pull/4) ([marco-lancini](https://github.com/marco-lancini))
30 |
31 |
32 |
33 | \* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)*
34 |
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @marco-lancini
2 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ![k8s-lab-plz logo](.github/k8s_lab_plz_logo.png)
2 |
3 | `k8s-lab-plz` is a modular Kubernetes lab, managed via [please.build](https://please.build/), which provides an easy and streamlined way to deploy a test cluster with support for different components.
4 |
5 | Each component can be deployed in a repeatable way with a single command, as shown below.
6 |
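For example (both commands are taken from the component docs linked below):

```bash
❯ plz run //components/baremetal:deploy
❯ plz run //components/cartography:deploy minikube
```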
7 | Usage for supported components:
8 |
9 | | Component | Usage | Namespace |
10 | | -------------------------------------------------------- | --------------------------------- | ------------------------------------------------ |
11 | | Baremetal Setup (Load Balancing, Volumes, etc.)          | [docs](components/baremetal/)     | `ingress-nginx`<br>`metallb-system`<br>`haproxy` |
12 | | Vault | [docs](components/vault/) | `vault` |
13 | | ELK (Elasticsearch, Kibana, Filebeats) | [docs](components/elk/) | `elastic-system` |
14 | | Observability (Prometheus, Grafana, Alertmanager) | [docs](components/observability/) | `observability` |
15 | | Kafka (Kafka, Zookeeper, KafkaExporter, Entity Operator) | [docs](components/kafka/) | `kafka` |
16 | | Cartography | [docs](components/cartography/) | `cartography` |
17 | | Yopass | [docs](components/yopass/) | `yopass` |
18 |
19 |
20 | ## Prerequisites
21 | * Minikube (see [official docs](https://kubernetes.io/docs/tasks/tools/install-minikube/) for your OS) or Baremetal Kubernetes (see [baremetal setup](components/baremetal/))
22 | * If using minikube, ensure it is up and running:
23 | ```bash
24 | ❯ minikube start --cpus 4 --memory 6098
25 | ```
26 | * Docker (see [official docs](https://docs.docker.com/get-docker/) for your OS)
27 | * Plz (see [official docs](https://please.build/quickstart.html) for your OS)
28 | * Helm 3 (see [official docs](https://helm.sh/docs/intro/install/) for your OS)
29 |
30 |
31 | ## Roadmap
32 | * [X] ~~Vault~~
33 | * [X] ~~ELK (Elasticsearch, Kibana, Filebeats)~~
34 | * [X] ~~Observability (Prometheus, Grafana, Alertmanager)~~
35 | * [X] ~~Kafka (Kafka, Zookeeper, KafkaExporter, Entity Operator)~~
36 | * [X] ~~Baremetal Setup (Load Balancing, Volumes, etc.)~~
37 | * [X] ~~Cartography~~
38 | * [X] ~~Yopass~~
39 | * [ ] Istio
40 | * [ ] Gatekeeper
41 | * [ ] Falco
42 | * [ ] Starboard
43 | * [ ] Audit logging
44 | * [ ] Private Registry
45 |
46 | Interested in having another component added? Raise an issue!
47 |
48 | For a more detailed view of what's coming up, please refer to the
49 | [Kanban board](https://github.com/marco-lancini/k8s-lab/projects/1).
50 |
--------------------------------------------------------------------------------
/build_defs/BUILD:
--------------------------------------------------------------------------------
1 | package(default_visibility = ["PUBLIC"])
2 |
3 | remote_file(
4 | name = "k8s",
5 | url = "https://raw.githubusercontent.com/thought-machine/pleasings/master/k8s/k8s.build_defs",
6 | )
7 |
8 | remote_file(
9 | name = "docker",
10 | url = "https://raw.githubusercontent.com/thought-machine/pleasings/master/docker/docker.build_defs"
11 | )
12 |
13 | filegroup(
14 | name = "docker_repository",
15 | srcs = ["docker_repository.build_defs"],
16 | )
17 |
18 | filegroup(
19 | name = "helm_chart",
20 | srcs = ["helm.build_defs"],
21 | )
22 |
23 | filegroup(
24 | name = "k8s_namespaced",
25 | srcs = ["k8s_namespaced.build_defs"],
26 | )
27 |
28 | filegroup(
29 | name = "kustomize",
30 | srcs = ["kustomize.build_defs"]
31 | )
32 |
--------------------------------------------------------------------------------
/build_defs/docker_repository.build_defs:
--------------------------------------------------------------------------------
1 | def docker_repository(name:str, srcs:list=[], image:str=None, version:str='',
2 | dockerfile:str='Dockerfile', repo:str='', labels:list=[],
3 | run_args:str='', test_only:bool=False, visibility:list=None):
4 | """docker_repository defines a build rule for a pulling a Docker repository/image.
5 |
6 | Args:
7 | name: Name of the rule.
8 | image: Name of the image to create.
9 | version: Version to tag the image with.
10 | repo: Repository to load the image from. If not given it will use
11 | default-docker-repo in the [buildconfig] section of your .plzconfig.
12 | visibility: Visibility of this rule.
13 | """
14 | # Image and repo names
15 | image = image or name
16 | if not repo:
17 | repo = check_config('DEFAULT_DOCKER_REPO', 'buildconfig', 'Docker', 'hub.docker.com')
18 |
19 | # The FQN defines a unique hash for the image.
20 | if len(repo) > 1:
21 | fqn = f"{repo}/{image}:{version}"
22 | else:
23 | fqn = f"{image}:{version}"
24 | build_rule(
25 | name = f'{name}_fqn',
26 | cmd = f'echo -n "{fqn}" >> $OUT',
27 | outs = [f'{name}_fqn'],
28 | visibility = visibility,
29 | )
30 |
31 | # PULL
32 | cmd = f'docker pull {fqn}'
33 | docker_pull = sh_cmd(
34 | name = f'{name}_pull',
35 | cmd = cmd,
36 | visibility = visibility,
37 | )
38 |
--------------------------------------------------------------------------------
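A minimal usage sketch for `docker_repository` (the target mirrors the `//third_party/docker:neo4j` reference used by the cartography component; the version tag is illustrative, not taken from the repo):

```python
subinclude("//build_defs:docker_repository")

# Pulls neo4j:<version> from the default repo configured in .plzconfig.
# This generates both :neo4j_pull and the :neo4j_fqn helper rule that the
# k8s/kustomize build defs consume when templating image references.
docker_repository(
    name = "neo4j",
    image = "neo4j",
    version = "4.2.3",  # illustrative tag
    visibility = ["PUBLIC"],
)
```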
/build_defs/helm.build_defs:
--------------------------------------------------------------------------------
1 | subinclude("//build_defs:k8s")
2 |
3 | def helm_chart(name:str, src:str, install_path:str, namespace:str,
4 | visibility:list=[], values_file:str=None, deps:list=[],
5 | containers:list=[]):
6 | values_files = f'--values $(location {values_file})' if values_file else ''
7 |
8 | template_rule = genrule(
9 | name = f"{name}#helm",
10 | cmd = [
11 | f'helm template {name} --namespace {namespace} {values_files} $SRC/{install_path} > $OUT'
12 | ],
13 | srcs = [src],
14 | outs = [f'{name}.yaml'],
15 | deps = deps,
16 | binary = True
17 | )
18 |
19 | k8s_config(
20 | name = name,
21 | srcs = [template_rule],
22 | visibility = visibility,
23 | containers = containers,
24 | )
25 |
--------------------------------------------------------------------------------
/build_defs/k8s_namespaced.build_defs:
--------------------------------------------------------------------------------
1 |
2 | def k8s_config_namespaced(name:str, srcs:list, namespace:str, containers:list=[], params:dict=None, visibility:list=None, labels:list=[], vendorised:bool=False, deps:list=[]):
3 | """Extends https://raw.githubusercontent.com/thought-machine/pleasings/master/k8s/k8s.build_defs to add support for namespaced resources
4 | """
5 | containers = [c for c in containers if c]
6 |
7 | # Make sure to canonicalise each container (they could be in the form of ':local_container').
8 | containers = [canonicalise(container) for container in containers]
9 |
10 | # This will template image tags into k8s resource files that reference docker images by build target.
11 | fqns = [f'{c}_fqn' for c in containers]
12 | # Tag with appropriate labels
13 | labels += ['k8s'] + ['container:' + c for c in containers]
14 |
15 | # Template each config YAML and collect them in the filegroup.
16 | rules = []
17 |
18 | # Now that we have a collection of files, each containing a container name + label, we can
19 | # create a multi-expression sed command to replace a build target with the actual containers.
20 | exports = [f'export {k}={v}' for k,v in params.items()] if params else []
21 | replacement_command = 'cat'
22 | envsubst_vars = ",".join(['${%s}' % k for k in params.keys()] if params else [])
23 | check_rule = None
24 | if containers:
25 | # Pseudo build rule to check that the specified containers exist in the k8s files.
26 | # Has to be a separate rule because containers might only occur in one of the files.
27 | check_rule = build_rule(
28 | name=name,
29 | tag='check',
30 | cmd='for IMG in %s; do grep "$IMG" $SRCS || (echo "Image $IMG not in k8s files"; exit 1); done' % ' '.join(containers),
31 | srcs=srcs,
32 | )
33 |
34 | # macos sed only supports posix regex expressions so escape sequences like \b don't work
35 | boundary_expr = "\\b" if CONFIG.OS != 'darwin' else ""
36 | subcommands = ' '.join([
37 | f'-e "s|{container}{boundary_expr}|$(cat $(location {fqn}))|g"'
38 | for container, fqn in sorted(zip(containers, fqns))
39 | ])
40 | replacement_command = f'sed {subcommands}'
41 |
42 | for src in srcs:
43 | cleaned_src = src.replace('/', '_').replace(':', '_')
44 | src_tag = cleaned_src.replace('.', '_')
45 | src_genrule_name = f'_{name}#{src_tag}'
46 | if cleaned_src.endswith('_yaml'):
47 | cleaned_src = cleaned_src[:-5] + '.yaml'
48 | rules.append(f':{src_genrule_name}')
49 | genrule(
50 | name = src_genrule_name,
51 | srcs = [src],
52 | outs = ['templated_' + cleaned_src],
53 | cmd = exports + [f"cat $SRCS | {replacement_command} | envsubst '{envsubst_vars}' > $OUT"],
54 | deps = fqns + [check_rule if check_rule else None],
55 | )
56 |
57 | files = filegroup(
58 | name = name,
59 | srcs = rules,
60 | visibility = visibility,
61 | labels = labels,
62 | )
63 |
64 | # Generate a rule to push the configs.
65 | sh_cmd(
66 | name = name + '_push',
67 | cmd = ' && '.join([f'kubectl apply -n {namespace} -f $(out_location {x})' for x in rules]),
68 | deps = rules,
69 | labels = ["k8s-push"],
70 | )
71 |
72 | # Generate a rule to cleanup the configs.
73 | sh_cmd(
74 | name = name + '_cleanup',
75 | cmd = ' && '.join([f'kubectl delete --ignore-not-found -f $(out_location {x})' for x in rules]),
76 | deps = rules,
77 | labels = ["k8s-cleanup"],
78 | )
79 | return files
80 |
--------------------------------------------------------------------------------
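A hedged usage sketch for `k8s_config_namespaced` (the target, file, and parameter names are illustrative, not taken from this repo):

```python
subinclude("//build_defs:k8s_namespaced")

# Substitutes ${ELK_NAMESPACE} via envsubst when templating the manifest,
# then generates :elastic_push and :elastic_cleanup rules that
# kubectl apply/delete the rendered YAML in the given namespace.
k8s_config_namespaced(
    name = "elastic",
    srcs = ["deployment/base/elasticsearch.yaml"],
    namespace = "elastic-system",
    params = {"ELK_NAMESPACE": "elastic-system"},
)
```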
/build_defs/kustomize.build_defs:
--------------------------------------------------------------------------------
1 | def kustomize_prep(name:str, srcs:list, containers:list=[], params:dict=None, visibility:list=None, labels:list=[], deps:list=[]):
2 | # Make sure to canonicalise each container (they could be in the form of ':local_container').
3 | containers = [c for c in containers if c]
4 | containers = [canonicalise(container) for container in containers]
5 |
6 | # This will template image tags into k8s resource files that reference docker images by build target.
7 | fqns = [f'{c}_fqn' for c in containers]
8 | # Tag with appropriate labels
9 | labels += ['k8s'] + ['container:' + c for c in containers]
10 |
11 | # Now that we have a collection of files, each containing a container name + label, we can
12 | # create a multi-expression sed command to replace a build target with the actual containers.
13 | exports = [f'export {k}={v}' for k,v in params.items()] if params else []
14 | replacement_command = 'cat'
15 | envsubst_vars = ",".join(['${%s}' % k for k in params.keys()] if params else [])
16 | check_rule = None
17 | if containers:
18 | # Pseudo build rule to check that the specified containers exist in the k8s files.
19 | # Has to be a separate rule because containers might only occur in one of the files.
20 | check_rule = build_rule(
21 | name=name,
22 | tag='check',
23 | cmd='for IMG in %s; do grep "$IMG" $SRCS || (echo "Image $IMG not in k8s files"; exit 1); done' % ' '.join(containers),
24 | srcs=srcs,
25 | )
26 |
27 | # macos sed only supports posix regex expressions so escape sequences like \b don't work
28 | boundary_expr = "\\b" if CONFIG.OS != 'darwin' else ""
29 | subcommands = ' '.join([
30 | f'-e "s|{container}{boundary_expr}|$(cat $(location {fqn}))|g"'
31 | for container, fqn in sorted(zip(containers, fqns))
32 | ])
33 | replacement_command = f'sed {subcommands}'
34 |
35 | # Template each config YAML and collect them in the filegroup.
36 | rules = []
37 | for src in srcs:
38 | # For src_genrule_name
39 | cleaned_src = src.replace('/', '_').replace(':', '_')
40 | src_tag = cleaned_src.replace('.', '_')
41 | src_genrule_name = f'_{name}#{src_tag}'
42 |
43 | rules.append(f':{src_genrule_name}')
44 | genrule(
45 | name = src_genrule_name,
46 | srcs = [src],
47 | outs = [src],
48 | cmd = exports + [f"cat $SRCS | {replacement_command} | envsubst '{envsubst_vars}' > $OUT"],
49 | deps = fqns + [check_rule if check_rule else None],
50 | )
51 |
52 | files = filegroup(
53 | name = name,
54 | srcs = rules,
55 | visibility = visibility,
56 | labels = labels,
57 | )
58 |
59 | return files
60 |
61 |
62 | def kustomize(name:str, srcs:list, namespace:str, overlay:str, kustomize_path:str, visibility:list=None, labels:list=[], deps:list=[]):
63 | # Generate a rule to build with kustomize
64 | package_name = package_name()
65 | genrule(
66 | name = f"{name}_build",
67 | cmd = [
68 | f'kustomize build {package_name}/{kustomize_path}/overlays/{overlay} > $OUT'
69 | ],
70 | srcs = srcs,
71 | outs = [f'{name}_{overlay}.yaml'],
72 | deps = deps,
73 | binary = True
74 | )
75 |
76 | # Generate a rule to push the configs.
77 | sh_cmd(
78 | name = name + '_push',
79 | cmd = [f'kubectl apply -n {namespace} -f plz-out/bin/$(location :{name}_build)'],
80 | deps = [f":{name}_build"],
81 | labels = ["kustomize-push"],
82 | )
83 |
84 | # Generate a rule to cleanup the configs.
85 | sh_cmd(
86 | name = name + '_cleanup',
87 | cmd = [f'kubectl delete -n {namespace} -f plz-out/bin/$(location :{name}_build)'],
88 | deps = [f":{name}_build"],
89 | labels = ["kustomize-cleanup"],
90 | )
91 |
--------------------------------------------------------------------------------
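A usage sketch showing how the two rules chain together, mirroring the neo4j targets in `components/cartography/BUILD` further down in this dump:

```python
subinclude("//build_defs:kustomize")

# First, template container FQNs into every manifest under the kustomize tree.
kustomize_prep(
    name = "kustomize-neo4j",
    srcs = glob(["deployment/neo4j/**/*.yaml"]),
    containers = ["//third_party/docker:neo4j"],
)

# Then build the chosen overlay, exposing :neo4j-baremetal_push / _cleanup.
kustomize(
    name = "neo4j-baremetal",
    namespace = "cartography",
    kustomize_path = "deployment/neo4j",
    overlay = "baremetal",
    srcs = [":kustomize-neo4j"],
)
```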
/common/BUILD:
--------------------------------------------------------------------------------
1 |
2 | sh_binary(
3 | name = "get_resource_from_selector",
4 | main = "utils/get_resource_from_selector.sh",
5 | )
6 |
7 | sh_binary(
8 | name = "wait_pod",
9 | main = "utils/wait_pod.sh",
10 | )
11 |
--------------------------------------------------------------------------------
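These helpers are invoked from component scripts with positional arguments (namespace, name/object, selector); a hedged example invocation via `plz run` (the namespace and selector values are illustrative):

```bash
# Block until the pod matching the selector reports phase "Running".
❯ plz run //common:wait_pod -- vault 'Vault' 'app.kubernetes.io/name=vault'

# Print the name of the resource matching a label selector.
❯ plz run //common:get_resource_from_selector -- vault pods 'app.kubernetes.io/name=vault'
```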
/common/utils/get_resource_from_selector.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE=$1
4 | OBJECT=$2
5 | SELECTOR=$3
6 |
7 | name=$(kubectl -n ${NAMESPACE} get ${OBJECT} --selector="${SELECTOR}" -o json | jq -r '.items[].metadata.name')
8 | echo $name
9 |
--------------------------------------------------------------------------------
/common/utils/wait_pod.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE=$1
4 | NAME=$2
5 | SELECTOR=$3
6 |
7 | wait_pod () {
8 | status=$(kubectl -n ${NAMESPACE} get pods --selector="${SELECTOR}" -o json | jq '.items[].status.phase')
9 | while [ -z "$status" ] || [ "$status" != '"Running"' ]
10 | do
11 | printf "\t[*] Waiting for ${NAME} to be ready...\n"
12 | sleep 5
13 | status=$(kubectl -n ${NAMESPACE} get pods --selector="${SELECTOR}" -o json | jq '.items[].status.phase')
14 | done
15 | printf "\t[*] ${NAME} is ready\n"
16 | }
17 |
18 | wait_pod
19 |
--------------------------------------------------------------------------------
/components/baremetal/BUILD:
--------------------------------------------------------------------------------
1 | subinclude("//build_defs:docker")
2 | subinclude("//build_defs:helm_chart")
3 | subinclude("//build_defs:k8s")
4 |
5 | sh_binary(
6 | name = "deploy",
7 | main = "deploy.sh",
8 | )
9 |
10 | # ==============================================================================
11 | # NGINX
12 | # ==============================================================================
13 | k8s_config(
14 | name = "nginx-operator",
15 | srcs = ["//third_party:nginx-operator"],
16 | )
17 |
18 | # ==============================================================================
19 | # METALLB
20 | # ==============================================================================
21 | k8s_config(
22 | name = "metallb-namespace",
23 | srcs = ["//third_party:metallb-namespace"],
24 | )
25 |
26 | k8s_config(
27 | name = "metallb-deployment",
28 | srcs = ["//third_party:metallb-deployment"],
29 | )
30 |
31 | k8s_config(
32 | name = "metallb-config",
33 | srcs = ["k8s/metallb-config.yaml"],
34 | )
35 |
36 | # ==============================================================================
37 | # HAPROXY
38 | # ==============================================================================
39 | helm_chart(
40 | name = "haproxy-helm",
41 | src = "//third_party:helm-haproxy-tar",
42 | install_path = "haproxy-ingress",
43 | namespace = "haproxy",
44 | visibility = ["//components/baremetal/..."],
45 | values_file = ":haproxy-ingress-values",
46 | deps = [
47 | ":haproxy-namespace",
48 | ":haproxy-ingress-values"
49 | ]
50 | )
51 |
52 | k8s_config(
53 | name = "haproxy-namespace",
54 | srcs = ["k8s/haproxy-namespace.yaml"],
55 | )
56 |
57 | filegroup(
58 | name = "haproxy-ingress-values",
59 | srcs = [
60 | "k8s/haproxy-ingress-values.yaml",
61 | ],
62 | )
63 |
64 | # ==============================================================================
65 | # SAMPLES
66 | # ==============================================================================
67 | k8s_config(
68 | name = "sample-ingress",
69 | srcs = ["k8s/sample-ingress.yaml"],
70 | )
71 |
72 | k8s_config(
73 | name = "sample-pvc",
74 | srcs = ["k8s/sample-pvc.yaml"],
75 | )
76 |
--------------------------------------------------------------------------------
/components/baremetal/README.md:
--------------------------------------------------------------------------------
1 | # Baremetal Setup
2 |
3 | Instructions for deploying a Kubernetes cluster on Baremetal,
4 | as described in the
5 | "[Kubernetes Lab on Baremetal](https://www.marcolancini.it/2021/blog-kubernetes-lab-baremetal/)"
6 | blog post.
7 |
8 |
9 | ## Kubernetes Installation (manual)
10 |
11 | 1. **Install CoreOS:** manual process, refer to the blog post.
12 | 2. **Install Kubernetes:** copy the set of scripts contained in the [scripts](scripts/) folder, and run them directly on Fedora CoreOS:
13 |
14 | ```bash
15 | [root@cluster core]$ ./1_crio_install.sh
16 | [root@cluster core]$ ./2_crio_config.sh
17 | [root@cluster core]$ ./3_tools_install.sh
18 | [root@cluster core]$ ./4_tools_config.sh
19 | [root@cluster core]$ ./5_cluster_install.sh
20 | ```
21 |
22 | ## Ingress Controllers and LoadBalancing on Baremetal
23 |
24 | ```bash
25 | ❯ plz run //components/baremetal:deploy
26 | ```
27 | * Creates the `ingress-nginx`, `metallb-system`, and `haproxy` namespaces
28 | * Fetches and deploys the NGINX Ingress Controller in the `ingress-nginx` namespace
29 | * Enables strict ARP mode, and deploys MetalLB in the `metallb-system` namespace
30 | * Deploys the MetalLB ConfigMap in the `metallb-system` namespace
31 | * Fetches and deploys the HAProxy Helm chart in the `haproxy` namespace
32 |
33 | 📝 **Note:** remember to edit the address pool in `components/baremetal/k8s/metallb-config.yaml`
34 | to suit your needs.
35 |
36 | Verify pods are healthy:
37 | ```bash
38 | ❯ kgpoall
39 | + kubectl get pods --all-namespaces
40 | NAMESPACE NAME READY STATUS RESTARTS AGE
41 | haproxy haproxy-helm-haproxy-ingress-5546d459cd-v9vz7 1/1 Running 0 9m1s
42 | ingress-nginx ingress-nginx-admission-create-95njm 0/1 Completed 0 9m35s
43 | ingress-nginx ingress-nginx-admission-patch-w4ljg 0/1 Completed 0 9m35s
44 | ingress-nginx ingress-nginx-controller-67897c9494-dgxxw 1/1 Running 0 9m35s
45 | kube-system coredns-74ff55c5b-2qdkf 1/1 Running 0 9d
46 | kube-system coredns-74ff55c5b-5blfn 1/1 Running 0 9d
47 | kube-system etcd-cluster 1/1 Running 0 9d
48 | kube-system kube-apiserver-cluster 1/1 Running 0 9d
49 | kube-system kube-controller-manager-cluster 1/1 Running 0 9d
50 | kube-system kube-flannel-ds-22ltx 1/1 Running 0 9d
51 | kube-system kube-proxy-2lbvn 1/1 Running 0 9d
52 | kube-system kube-scheduler-cluster 1/1 Running 0 9d
53 | metallb-system controller-65db86ddc6-p8pkz 1/1 Running 0 9m28s
54 | metallb-system speaker-kfkm6 1/1 Running 0 9m29s
55 | ```
56 |
57 | ## Tests
58 |
59 | ```bash
60 | ❯ plz run //components/baremetal:sample-ingress_push
61 | ```
62 | * Deploys a test Deployment, Service, and Ingress to leverage the new setup.
63 | * 📝 **Note:** remember to replace the host IP in `components/baremetal/k8s/sample-ingress.yaml` with the IP of your host.
64 |
65 | ```bash
66 | ❯ plz run //components/baremetal:sample-pvc_push
67 | ```
68 | * Deploys a `hostPath` PV, PVC, and a Pod which leverages them.
69 |
--------------------------------------------------------------------------------
/components/baremetal/deploy.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | wait_pod () {
4 | status=$(kubectl -n ${3} get pods --selector="${2}" -o json | jq '.items[].status.phase')
5 | while [ -z "$status" ] || [ "$status" != '"Running"' ]
6 | do
7 | printf "\t[*] Waiting for ${1} to be ready...\n"
8 | sleep 5
9 | status=$(kubectl -n ${3} get pods --selector="${2}" -o json | jq '.items[].status.phase')
10 | done
11 | printf "\t[*] ${1} is ready\n"
12 | }
13 |
14 | # Deploy NGINX
15 | # - Create `ingress-nginx` namespace
16 | # - Fetch and deploy the NGINX manifest
17 | printf "\n[+] Deploying NGINX Ingress Controller...\n"
18 | plz run //components/baremetal:nginx-operator_push
19 | wait_pod 'NGINX Operator' 'app.kubernetes.io/name=ingress-nginx,app.kubernetes.io/component=controller' 'ingress-nginx'
20 |
21 | # Deploy MetalLB
22 | # - Enable strict ARP mode
23 | # - Create `metallb-system` namespace
24 | # - Fetch and deploy the MetalLB manifest
25 | # - Create `memberlist` secret
26 | printf "\n[+] Deploying MetalLB...\n"
27 | kubectl get configmap kube-proxy -n kube-system -o yaml | \
28 | sed -e "s/strictARP: false/strictARP: true/" | \
29 | kubectl apply -f - -n kube-system
30 | plz run //components/baremetal:metallb-namespace_push
31 | plz run //components/baremetal:metallb-deployment_push
32 | kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
33 | wait_pod 'MetalLB Controller' 'app=metallb,component=controller' 'metallb-system'
34 | wait_pod 'MetalLB DaemonSet' 'app=metallb,component=speaker' 'metallb-system'
35 |
36 | # Deploy MetalLB ConfigMap
37 | printf "\n[+] Deploying MetalLB ConfigMap...\n"
38 | plz run //components/baremetal:metallb-config_push
39 |
40 | # Deploy HAProxy
41 | # - Create `haproxy` namespace
42 | # - Fetch and deploy the HAProxy Helm chart
43 | printf "\n[+] Deploying HAProxy...\n"
44 | plz run //components/baremetal:haproxy-namespace_push
45 | plz run //components/baremetal:haproxy-helm_push
46 |
--------------------------------------------------------------------------------
/components/baremetal/k8s/haproxy-ingress-values.yaml:
--------------------------------------------------------------------------------
1 | controller:
2 | hostNetwork: true
3 |
--------------------------------------------------------------------------------
/components/baremetal/k8s/haproxy-namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: haproxy
5 |
--------------------------------------------------------------------------------
/components/baremetal/k8s/metallb-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | namespace: metallb-system
5 | name: config
6 | data:
7 | config: |
8 | address-pools:
9 | - name: default
10 | protocol: layer2
11 | addresses:
12 | - 192.168.1.160-192.168.1.190
13 |
--------------------------------------------------------------------------------
/components/baremetal/k8s/sample-ingress.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: test
6 | ---
7 | apiVersion: networking.k8s.io/v1
8 | kind: Ingress
9 | metadata:
10 | name: bookinfo-ingress
11 | annotations:
12 | kubernetes.io/ingress.class: haproxy
13 | spec:
14 | rules:
15 | # Replace with IP of host
16 | - host: product.192.168.1.151.nip.io
17 | http:
18 | paths:
19 | - path: /
20 | pathType: Prefix
21 | backend:
22 | service:
23 | name: productpage
24 | port:
25 | number: 9080
26 | ---
27 | apiVersion: v1
28 | kind: Service
29 | metadata:
30 | name: productpage
31 | namespace: test
32 | labels:
33 | app: productpage
34 | service: productpage
35 | spec:
36 | type: LoadBalancer
37 | ports:
38 | - name: http
39 | port: 80
40 | targetPort: 9080
41 | selector:
42 | app: productpage
43 | ---
44 | apiVersion: v1
45 | kind: ServiceAccount
46 | metadata:
47 | name: bookinfo-productpage
48 | namespace: test
49 | ---
50 | apiVersion: apps/v1
51 | kind: Deployment
52 | metadata:
53 | name: productpage-v1
54 | namespace: test
55 | labels:
56 | app: productpage
57 | version: v1
58 | spec:
59 | replicas: 1
60 | selector:
61 | matchLabels:
62 | app: productpage
63 | version: v1
64 | template:
65 | metadata:
66 | labels:
67 | app: productpage
68 | version: v1
69 | spec:
70 | serviceAccountName: bookinfo-productpage
71 | containers:
72 | - name: productpage
73 | image: docker.io/istio/examples-bookinfo-productpage-v1:1.15.0
74 | imagePullPolicy: IfNotPresent
75 | ports:
76 | - containerPort: 9080
77 |
--------------------------------------------------------------------------------
/components/baremetal/k8s/sample-pvc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: PersistentVolume
4 | metadata:
5 | name: task-pv-volume
6 | labels:
7 | type: local
8 | spec:
9 | storageClassName: manual
10 | capacity:
11 | storage: 10Gi
12 | accessModes:
13 | - ReadWriteOnce
14 | hostPath:
15 | path: '/mnt/data'
16 | ---
17 | apiVersion: v1
18 | kind: PersistentVolumeClaim
19 | metadata:
20 | name: task-pv-claim
21 | spec:
22 | storageClassName: manual
23 | accessModes:
24 | - ReadWriteOnce
25 | resources:
26 | requests:
27 | storage: 3Gi
28 | ---
29 | apiVersion: v1
30 | kind: Pod
31 | metadata:
32 | name: task-pv-pod
33 | spec:
34 | volumes:
35 | - name: task-pv-storage
36 | persistentVolumeClaim:
37 | claimName: task-pv-claim
38 | containers:
39 | - name: task-pv-container
40 | image: nginx
41 | ports:
42 | - containerPort: 80
43 | name: 'http-server'
44 | volumeMounts:
45 | - mountPath: '/usr/share/nginx/html'
46 | name: task-pv-storage
47 |
--------------------------------------------------------------------------------
/components/baremetal/scripts/1_crio_install.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | # Activating Fedora module repositories
4 | sed -i -z s/enabled=0/enabled=1/ /etc/yum.repos.d/fedora-modular.repo
5 | sed -i -z s/enabled=0/enabled=1/ /etc/yum.repos.d/fedora-updates-modular.repo
6 | sed -i -z s/enabled=0/enabled=1/ /etc/yum.repos.d/fedora-updates-testing-modular.repo
7 |
8 | # Setting up the CRI-O module
9 | mkdir /etc/dnf/modules.d
10 | cat <<EOF > /etc/dnf/modules.d/cri-o.module
11 | [cri-o]
12 | name=cri-o
13 | stream=1.17
14 | profiles=
15 | state=enabled
16 | EOF
17 |
18 | # Installing CRI-O
19 | rpm-ostree install cri-o
20 | systemctl reboot
21 |
--------------------------------------------------------------------------------
/components/baremetal/scripts/2_crio_config.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | modprobe overlay && modprobe br_netfilter
4 | cat <<EOF > /etc/modules-load.d/crio-net.conf
5 | overlay
6 | br_netfilter
7 | EOF
8 |
9 | cat <<EOF > /etc/sysctl.d/99-kubernetes-cri.conf
10 | net.bridge.bridge-nf-call-iptables = 1
11 | net.ipv4.ip_forward = 1
12 | net.bridge.bridge-nf-call-ip6tables = 1
13 | EOF
14 |
15 | sysctl --system
16 | sed -i -z s+/usr/share/containers/oci/hooks.d+/etc/containers/oci/hooks.d+ /etc/crio/crio.conf
17 |
--------------------------------------------------------------------------------
/components/baremetal/scripts/3_tools_install.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | cat <<EOF > /etc/yum.repos.d/kubernetes.repo
4 | [kubernetes]
5 | name=Kubernetes
6 | baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
7 | enabled=1
8 | gpgcheck=1
9 | repo_gpgcheck=1
10 | gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
11 | EOF
12 |
13 | rpm-ostree install kubelet kubeadm kubectl
14 | systemctl reboot
15 |
--------------------------------------------------------------------------------
/components/baremetal/scripts/4_tools_config.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | setenforce 0
4 | sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
5 |
6 | systemctl enable --now cri-o && systemctl enable --now kubelet
7 | echo "KUBELET_EXTRA_ARGS=--cgroup-driver=systemd" | tee /etc/sysconfig/kubelet
8 |
--------------------------------------------------------------------------------
/components/baremetal/scripts/5_cluster_install.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | # Create config
4 | cat <<EOF > clusterconfig.yml
5 | apiVersion: kubeadm.k8s.io/v1beta2
6 | kind: ClusterConfiguration
7 | kubernetesVersion: v1.20.5
8 | controllerManager:
9 | extraArgs:
10 | flex-volume-plugin-dir: "/etc/kubernetes/kubelet-plugins/volume/exec"
11 | networking:
12 | podSubnet: 10.244.0.0/16
13 | ---
14 | apiVersion: kubeadm.k8s.io/v1beta2
15 | kind: InitConfiguration
16 | nodeRegistration:
17 | criSocket: /var/run/crio/crio.sock
18 | EOF
19 |
20 | # Install
21 | kubeadm init --config clusterconfig.yml
22 | kubectl taint nodes --all node-role.kubernetes.io/master-
23 |
24 | # Setup Flannel
25 | sudo sysctl net.bridge.bridge-nf-call-iptables=1
26 | kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
27 |
--------------------------------------------------------------------------------
/components/baremetal/scripts/config.fcc:
--------------------------------------------------------------------------------
1 | variant: fcos
2 | version: 1.3.0
3 | passwd:
4 | users:
5 | - name: core
6 | groups:
7 | - docker
8 | - wheel
9 | - sudo
10 | ssh_authorized_keys:
11 | - ssh-rsa AAAA...
12 |
--------------------------------------------------------------------------------
/components/cartography/BUILD:
--------------------------------------------------------------------------------
1 | subinclude("//build_defs:docker_repository")
2 | subinclude("//build_defs:docker")
3 | subinclude("//build_defs:helm_chart")
4 | subinclude("//build_defs:k8s")
5 | subinclude("//build_defs:kustomize")
6 |
7 | sh_binary(
8 | name = "deploy",
9 | main = "deploy.sh",
10 | )
11 |
12 | sh_binary(
13 | name = "ui",
14 | main = "forward-ui.sh",
15 | )
16 |
17 | sh_binary(
18 | name = "deploy-elastic-ingestor",
19 | main = "setup/elastic-ingestor.sh",
20 | )
21 |
22 | k8s_config(
23 | name = "cartography-namespace",
24 | srcs = ["deployment/cartography-namespace.yaml"],
25 | )
26 |
27 | # ==============================================================================
28 | # Deploy Neo4J
29 | # ==============================================================================
30 | sh_binary(
31 | name = "deploy-neo4j",
32 | main = "setup/neo4j.sh",
33 | )
34 |
35 | kustomize_prep(
36 | name = "kustomize-neo4j",
37 | srcs = glob(["deployment/neo4j/**/*.yaml"]),
38 | containers = [
39 | "//third_party/docker:neo4j",
40 | ],
41 | )
42 |
43 | kustomize(
44 | name = "neo4j-baremetal",
45 | namespace = "cartography",
46 | kustomize_path = "deployment/neo4j",
47 | overlay = "baremetal",
48 | srcs = [":kustomize-neo4j"],
49 | )
50 |
51 | kustomize(
52 | name = "neo4j-minikube",
53 | namespace = "cartography",
54 | kustomize_path = "deployment/neo4j",
55 | overlay = "minikube",
56 | srcs = [":kustomize-neo4j"],
57 | )
58 |
59 | # ==============================================================================
60 | # Deploy Cartography
61 | # ==============================================================================
62 | sh_binary(
63 | name = "deploy-cartography",
64 | main = "setup/cartography.sh",
65 | )
66 |
67 | docker_image(
68 | name = "cartography_docker",
69 | dockerfile = "deployment/cartography/docker/Dockerfile",
70 | srcs = glob(["deployment/cartography/docker/**/*"]),
71 | image = "cartography",
72 | version = "1.5",
73 | repo = "marcolancini",
74 | visibility = ["//components/cartography/..."],
75 | )
76 |
77 | kustomize_prep(
78 | name = "kustomize-cartography",
79 | srcs = glob(["deployment/cartography/**/*.yaml"]),
80 | containers = [
81 | ":cartography_docker",
82 | ],
83 | )
84 |
85 | kustomize(
86 | name = "cartography-baremetal",
87 | namespace = "cartography",
88 | kustomize_path = "deployment/cartography",
89 | overlay = "baremetal",
90 | srcs = [":kustomize-cartography"],
91 | )
92 |
93 | kustomize(
94 | name = "cartography-minikube",
95 | namespace = "cartography",
96 | kustomize_path = "deployment/cartography",
97 | overlay = "minikube",
98 | srcs = [":kustomize-cartography"],
99 | )
100 |
101 | # ==============================================================================
102 | # Deploy Elasticsearch Integration
103 | # ==============================================================================
104 | # Queries file
105 | remote_file(
106 | name = "cartography_queries",
107 | url = "https://raw.githubusercontent.com/marco-lancini/cartography-queries/master/queries/queries.json"
108 | )
109 |
110 | # Python Scripts
111 | python_library(
112 | name = "neo4j_connector",
113 | srcs = ["consumers/elasticsearch/py/neo4j_connector.py"],
114 | deps = [
115 | ":cartography_queries",
116 | "//third_party/python:neo4j",
117 | "//third_party/python:neobolt",
118 | "//third_party/python:neotime",
119 | ],
120 | )
121 |
122 | python_library(
123 | name = "elastic_connector",
124 | srcs = ["consumers/elasticsearch/py/elastic_connector.py"],
125 | )
126 |
127 | python_library(
128 | name = "elastic_ingestor",
129 | srcs = ["consumers/elasticsearch/py/elastic_ingestor.py"],
130 | deps = [
131 | ":neo4j_connector",
132 | ],
133 | )
134 |
135 | # Docker image
136 | filegroup(
137 | name = "docker-requirements",
138 | srcs = [
139 | "consumers/elasticsearch/deployment/docker/requirements.txt",
140 | ],
141 | )
142 |
143 | docker_image(
144 | name = "cartography_ingestor_image",
145 | srcs = [
146 | ":docker-requirements",
147 | ":cartography_queries",
148 | ":neo4j_connector",
149 | ":elastic_connector",
150 | ":elastic_ingestor",
151 | ],
152 | dockerfile = "consumers/elasticsearch/deployment/docker/Dockerfile",
153 | image = "cartography_elastic_ingestor",
154 | version = "0.1.4",
155 | repo = "docker.io/marcolancini",
156 | visibility = ["//components/cartography/..."],
157 | )
158 |
159 | # Kubernetes deployment
160 | kustomize_prep(
161 | name = "kustomize-elastic-ingestor",
162 | srcs = glob(["consumers/elasticsearch/deployment/**/*.*"]),
163 | containers = [
164 | ":cartography_ingestor_image",
165 | ],
166 | )
167 |
168 | kustomize(
169 | name = "elastic-ingestor-baremetal",
170 | namespace = "cartography",
171 | kustomize_path = "consumers/elasticsearch/deployment",
172 | overlay = "baremetal",
173 | srcs = [":kustomize-elastic-ingestor"],
174 | )
175 |
176 | kustomize(
177 | name = "elastic-ingestor-minikube",
178 | namespace = "cartography",
179 | kustomize_path = "consumers/elasticsearch/deployment",
180 | overlay = "minikube",
181 | srcs = [":kustomize-elastic-ingestor"],
182 | )
183 |
--------------------------------------------------------------------------------
/components/cartography/README.md:
--------------------------------------------------------------------------------
1 | # Cartography Setup
2 |
3 | [Cartography](https://github.com/lyft/cartography) is a Python tool that consolidates infrastructure assets and the relationships between them in an intuitive graph view powered by a Neo4j database.
4 |
5 | ## Prerequisites
6 |
7 | | Component | Instructions |
8 | | -------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
9 | | Vault | ⚠️ This module depends on a Vault installation. Please refer to [Vault Setup](../vault/) for more information. |
10 | | Elasticsearch (optional) | ⚠️ This module depends on an ELK installation. Please refer to [ELK Setup](../elk/) for more information. |
11 | | Cloud Provider Credentials | - You will need to generate access tokens for Cartography to use.<br>- For example, for AWS, you can use the [aws-security-reviewer](https://github.com/marco-lancini/utils/tree/main/terraform/aws-security-reviewer) Terraform module to automate the setup of the roles and users needed to perform a security audit of AWS accounts in a Hub and Spoke model.<br>- Then, generate access keys for the IAM user and keep them ready to use. |
12 |
13 |
14 | ---
15 |
16 |
17 | ## Deploy Cartography and Neo4j
18 |
19 |
20 | ![Cartography setup](../../.github/components/cartography_setup.png)
21 |
22 |
23 | * Deploy Cartography and Neo4j:
24 | ```bash
25 | ❯ plz run //components/cartography:deploy [minikube|baremetal]
26 | ```
27 | * This command will:
28 | * Setup namespace: creates a `cartography` namespace, and a Vault Agent service account
29 | * Setup and Deploy Neo4j:
30 | * Generates a random password for Neo4j and stores it into Vault
31 | * Generates TLS Certificates
32 | * Creates a StorageClass, PersistentVolume, and Ingress (baremetal only)
33 | * Deploys the Neo4j StatefulSet and Service
34 | * Setup and Deploy Cartography:
35 | * Creates a custom Docker image for Cartography
36 | * Prompts the user for the access key, secret key, and Account ID of the Hub
37 | * Setup Vault:
38 | * Enables the AWS secrets engine
39 | * Persists the credentials that Vault will use to communicate with AWS
40 | * Configures a Vault role that maps to a set of permissions in AWS
41 | * Deploys the Cartography CronJob, scheduled to run every day at 7am
42 |
43 | * Verify pods are healthy:
44 | ```bash
45 | ❯ kubectl -n cartography get po
46 | NAME READY STATUS RESTARTS AGE
47 | neo4j-statefulset-0 2/2 Running 0 5h56m
48 | ```
49 |
50 | * Manually trigger the execution of a Cartography Job:
51 | ```bash
52 | ❯ kubectl -n cartography create job --from=cronjob/cartography-run cartography-run
53 | ```
54 |
55 | * 📝 **NOTE FOR BAREMETAL**: before deploying, make sure to prepare
56 | the data folder on the host (and to remove the same folder to reset the installation):
57 | ```bash
58 | ❯ sudo mkdir -p /etc/plz-k8s-lab/cartography/neo4j/
59 | ❯ sudo chmod -R a+rw /etc/plz-k8s-lab/cartography/
60 | ```
61 |
62 |
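The Vault-related steps above roughly translate into the following CLI calls; this is a hedged sketch (the secrets-engine path, role name, and role ARN are assumptions, not taken from `setup/cartography.sh`):

```bash
# Enable the AWS secrets engine and store the credentials Vault
# uses to communicate with AWS.
❯ vault secrets enable aws
❯ vault write aws/config/root \
      access_key="${AWS_ACCESS_KEY_ID}" \
      secret_key="${AWS_SECRET_ACCESS_KEY}"

# Map a Vault role to the IAM role Cartography assumes in the Hub account
# (role name and ARN are illustrative).
❯ vault write aws/roles/cartography \
      credential_type=assumed_role \
      role_arns="arn:aws:iam::${HUB_ACCOUNT_ID}:role/security-reviewer"
```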
63 | ---
64 |
65 |
66 | ## Access the Neo4J UI
67 |
68 | ### Via Port-Forward
69 | * Forward the Neo4j UI to http://127.0.0.1:7474
70 | ```bash
71 | ❯ plz run //components/cartography:ui
72 | ```
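Under the hood, `forward-ui.sh` is equivalent to a plain `kubectl port-forward`; a hedged sketch (the Service name is an assumption based on `deployment/neo4j/base/neo4j-service.yaml`, the ports are Neo4j's defaults):

```bash
# 7474 = HTTP browser UI, 7687 = Bolt protocol
❯ kubectl -n cartography port-forward svc/neo4j-service 7474:7474 7687:7687
```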
73 |
74 | ### Via Ingress on Baremetal
75 | * Verify the Ingresses have been deployed:
76 | ```bash
77 | ❯ kubectl -n cartography get ingress
78 | NAME CLASS HOSTS ADDRESS PORTS AGE
79 | neo4j-ingress neo4j.192.168.1.151.nip.io 80, 443 6h7m
80 | neo4j-ingress-bolt bolt.192.168.1.151.nip.io 80, 443 6h7m
81 | ```
82 |
83 | * 📝 **NOTE**: before deploying, make sure to replace the host IP address in:
84 | * `//components/cartography/deployment/neo4j/overlays/baremetal/neo4j-ingress.yaml`
85 | * `//components/cartography/setup/neo4j.sh`
86 | * This assumes you followed the setup described at "[Kubernetes Lab on Baremetal](https://www.marcolancini.it/2021/blog-kubernetes-lab-baremetal/)".
87 | * To access the Neo4j web UI:
88 | * Browse to: https://neo4j.192.168.1.151.nip.io/browser/
89 | * Connect URL: `bolt://bolt.192.168.1.151.nip.io:443`
90 | * Username: `neo4j`
91 | * Password: stored in Vault at `secret/cartography/neo4j-password`
92 |
93 |
94 | ![Neo4j UI](../../.github/components/neo4j_ui.png)
95 |
96 |
97 |
98 | ---
99 |
100 |
101 | ## Elasticsearch Ingestor
102 |
103 | ![Cartography ELK integration](../../.github/components/cartography_elk.png)
104 |
105 |
106 |
107 | The Elasticsearch Ingestor is a CronJob which executes
108 | a set of [custom queries](https://github.com/marco-lancini/cartography-queries/tree/main/queries)
109 | against the Neo4j database, and pushes the results to Elasticsearch.
110 |
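A hedged sketch of the flow (not the actual `elastic_ingestor.py`; the connection details, index name, and structure of `queries.json` are assumptions):

```python
import json
import os

from elasticsearch import Elasticsearch, helpers
from neo4j import GraphDatabase

# Credentials come from Vault in the real deployment; hardcoded names here are illustrative.
driver = GraphDatabase.driver(
    "bolt://neo4j-service:7687",
    auth=("neo4j", os.environ["NEO4J_PASSWORD"]),
)
es = Elasticsearch("http://elasticsearch:9200")

# queries.json is fetched from the cartography-queries repo; assuming each
# entry exposes a Cypher statement under a "query" key.
with open("queries.json") as fh:
    queries = json.load(fh)

# Run every query against Neo4j and bulk-index the resulting rows.
with driver.session() as session:
    for entry in queries:
        rows = [record.data() for record in session.run(entry["query"])]
        helpers.bulk(es, ({"_index": "cartography", "_source": row} for row in rows))
```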
111 | * Deploy the CronJob:
112 | ```bash
113 | ❯ plz run //components/cartography:deploy-elastic-ingestor [minikube|baremetal]
114 | ```
115 | * 📝 **NOTE**: before deploying, make sure to have an Elasticsearch cluster deployed. Refer to [ELK Setup](../elk/) for more information.
116 | * You can then import the pre-populated [Kibana visualizations and dashboards](https://github.com/marco-lancini/cartography-queries/tree/main/consumers/elasticsearch) made available in the cartography-queries repository
117 |
118 |
119 | ---
120 |
121 |
122 | ## References
123 | * **[CODE]** [Cartography's source code](https://github.com/lyft/cartography)
124 | * **[CODE]** [cartography-queries](https://github.com/marco-lancini/cartography-queries)
125 | * **[CODE]** [Terraform AWS Security Reviewer](https://github.com/marco-lancini/utils/tree/main/terraform/aws-security-reviewer)
126 | * **[BLOG]** [Mapping Moving Clouds: How to stay on top of your ephemeral environments with Cartography](https://www.marcolancini.it/2020/blog-mapping-moving-clouds-with-cartography/)
127 | * **[BLOG]** [Tracking Moving Clouds: How to continuously track cloud assets with Cartography](https://www.marcolancini.it/2020/blog-tracking-moving-clouds-with-cartography/)
128 | * **[BLOG]** [Automating Cartography Deployments on Kubernetes](https://www.marcolancini.it/2021/blog-cartography-on-kubernetes/)
129 | * **[BLOG]** [Cross Account Auditing in AWS and GCP](https://www.marcolancini.it/2019/blog-cross-account-auditing/)
130 | * **[BLOG]** [Kubernetes Lab on Baremetal](https://www.marcolancini.it/2021/blog-kubernetes-lab-baremetal/)
131 | * **[TALK]** [Cartography: using graphs to improve and scale security decision-making](https://speakerdeck.com/marcolancini/cartography-using-graphs-to-improve-and-scale-security-decision-making)
132 |
--------------------------------------------------------------------------------
/components/cartography/consumers/elasticsearch/deployment/base/es-index.json:
--------------------------------------------------------------------------------
1 | {
2 | "mappings": {
3 | "properties": {
4 | "@timestamp": {
5 | "type": "date"
6 | },
7 | "a": {
8 | "properties": {
9 | "displayname": {
10 | "type": "keyword",
11 | "fields": {
12 | "search": {
13 | "type": "text",
14 | "fielddata": true
15 | }
16 | }
17 | },
18 | "id": {
19 | "type": "keyword",
20 | "fields": {
21 | "search": {
22 | "type": "text",
23 | "fielddata": true
24 | }
25 | }
26 | },
27 | "lifecyclestate": {
28 | "type": "keyword",
29 | "fields": {
30 | "search": {
31 | "type": "text",
32 | "fielddata": true
33 | }
34 | }
35 | },
36 | "name": {
37 | "type": "keyword",
38 | "fields": {
39 | "search": {
40 | "type": "text",
41 | "fielddata": true
42 | }
43 | }
44 | }
45 | }
46 | },
47 | "a1": {
48 | "properties": {
49 | "id": {
50 | "type": "keyword",
51 | "fields": {
52 | "search": {
53 | "type": "text",
54 | "fielddata": true
55 | }
56 | }
57 | },
58 | "name": {
59 | "type": "keyword",
60 | "fields": {
61 | "search": {
62 | "type": "text",
63 | "fielddata": true
64 | }
65 | }
66 | }
67 | }
68 | },
69 | "a2": {
70 | "properties": {
71 | "id": {
72 | "type": "keyword",
73 | "fields": {
74 | "search": {
75 | "type": "text",
76 | "fielddata": true
77 | }
78 | }
79 | },
80 | "name": {
81 | "type": "keyword",
82 | "fields": {
83 | "search": {
84 | "type": "text",
85 | "fielddata": true
86 | }
87 | }
88 | }
89 | }
90 | },
91 | "acl": {
92 | "properties": {
93 | "displayname": {
94 | "type": "keyword",
95 | "fields": {
96 | "search": {
97 | "type": "text",
98 | "fielddata": true
99 | }
100 | }
101 | },
102 | "id": {
103 | "type": "keyword",
104 | "fields": {
105 | "search": {
106 | "type": "text",
107 | "fielddata": true
108 | }
109 | }
110 | },
111 | "owner": {
112 | "type": "keyword",
113 | "fields": {
114 | "search": {
115 | "type": "text",
116 | "fielddata": true
117 | }
118 | }
119 | },
120 | "permission": {
121 | "type": "keyword",
122 | "fields": {
123 | "search": {
124 | "type": "text",
125 | "fielddata": true
126 | }
127 | }
128 | },
129 | "type": {
130 | "type": "keyword",
131 | "fields": {
132 | "search": {
133 | "type": "text",
134 | "fielddata": true
135 | }
136 | }
137 | }
138 | }
139 | },
140 | "b": {
141 | "properties": {
142 | "iam_config_bucket_policy_only": {
143 | "type": "boolean"
144 | },
145 | "id": {
146 | "type": "keyword",
147 | "fields": {
148 | "search": {
149 | "type": "text",
150 | "fielddata": true
151 | }
152 | }
153 | },
154 | "location": {
155 | "type": "keyword",
156 | "fields": {
157 | "search": {
158 | "type": "text",
159 | "fielddata": true
160 | }
161 | }
162 | },
163 | "retention_period": {
164 | "type": "keyword",
165 | "fields": {
166 | "search": {
167 | "type": "text",
168 | "fielddata": true
169 | }
170 | }
171 | },
172 | "storage_class": {
173 | "type": "keyword",
174 | "fields": {
175 | "search": {
176 | "type": "text",
177 | "fielddata": true
178 | }
179 | }
180 | }
181 | }
182 | },
183 | "b1": {
184 | "properties": {
185 | "cidr_block": {
186 | "type": "keyword",
187 | "fields": {
188 | "search": {
189 | "type": "text",
190 | "fielddata": true
191 | }
192 | }
193 | },
194 | "id": {
195 | "type": "keyword",
196 | "fields": {
197 | "search": {
198 | "type": "text",
199 | "fielddata": true
200 | }
201 | }
202 | }
203 | }
204 | },
205 | "b2": {
206 | "properties": {
207 | "cidr_block": {
208 | "type": "keyword",
209 | "fields": {
210 | "search": {
211 | "type": "text",
212 | "fielddata": true
213 | }
214 | }
215 | },
216 | "id": {
217 | "type": "keyword",
218 | "fields": {
219 | "search": {
220 | "type": "text",
221 | "fielddata": true
222 | }
223 | }
224 | }
225 | }
226 | },
227 | "c": {
228 | "properties": {
229 | "audit_logging": {
230 | "type": "boolean"
231 | },
232 | "cluster_ipv4cidr": {
233 | "type": "text",
234 | "fields": {
235 | "keyword": {
236 | "type": "keyword",
237 | "ignore_above": 256
238 | }
239 | }
240 | },
241 | "current_master_version": {
242 | "type": "text",
243 | "fields": {
244 | "keyword": {
245 | "type": "keyword",
246 | "ignore_above": 256
247 | }
248 | }
249 | },
250 | "database_encryption": {
251 | "type": "text",
252 | "fields": {
253 | "keyword": {
254 | "type": "keyword",
255 | "ignore_above": 256
256 | }
257 | }
258 | },
259 | "endpoint": {
260 | "type": "text",
261 | "fields": {
262 | "keyword": {
263 | "type": "keyword",
264 | "ignore_above": 256
265 | }
266 | }
267 | },
268 | "endpoint_public_access": {
269 | "type": "boolean"
270 | },
271 | "exposed_internet": {
272 | "type": "boolean",
273 | "null_value": false
274 | },
275 | "initial_version": {
276 | "type": "text",
277 | "fields": {
278 | "keyword": {
279 | "type": "keyword",
280 | "ignore_above": 256
281 | }
282 | }
283 | },
284 | "location": {
285 | "type": "text",
286 | "fields": {
287 | "keyword": {
288 | "type": "keyword",
289 | "ignore_above": 256
290 | }
291 | }
292 | },
293 | "logging_service": {
294 | "type": "text",
295 | "fields": {
296 | "keyword": {
297 | "type": "keyword",
298 | "ignore_above": 256
299 | }
300 | }
301 | },
302 | "master_authorized_networks": {
303 | "type": "boolean",
304 | "null_value": false
305 | },
306 | "monitoring_service": {
307 | "type": "text",
308 | "fields": {
309 | "keyword": {
310 | "type": "keyword",
311 | "ignore_above": 256
312 | }
313 | }
314 | },
315 | "name": {
316 | "type": "keyword",
317 | "fields": {
318 | "search": {
319 | "type": "text",
320 | "fielddata": true
321 | }
322 | }
323 | },
324 | "network": {
325 | "type": "text",
326 | "fields": {
327 | "keyword": {
328 | "type": "keyword",
329 | "ignore_above": 256
330 | }
331 | }
332 | },
333 | "network_policy": {
334 | "type": "text",
335 | "fields": {
336 | "keyword": {
337 | "type": "keyword",
338 | "ignore_above": 256
339 | }
340 | }
341 | },
342 | "platform_version": {
343 | "type": "text",
344 | "fields": {
345 | "keyword": {
346 | "type": "keyword",
347 | "ignore_above": 256
348 | }
349 | }
350 | },
351 | "private_endpoint": {
352 | "type": "text",
353 | "fields": {
354 | "keyword": {
355 | "type": "keyword",
356 | "ignore_above": 256
357 | }
358 | }
359 | },
360 | "private_endpoint_enabled": {
361 | "type": "boolean",
362 | "null_value": false
363 | },
364 | "private_nodes": {
365 | "type": "boolean",
366 | "null_value": false
367 | },
368 | "public_endpoint": {
369 | "type": "text",
370 | "fields": {
371 | "keyword": {
372 | "type": "keyword",
373 | "ignore_above": 256
374 | }
375 | }
376 | },
377 | "region": {
378 | "type": "text",
379 | "fields": {
380 | "keyword": {
381 | "type": "keyword",
382 | "ignore_above": 256
383 | }
384 | }
385 | },
386 | "rolearn": {
387 | "type": "text",
388 | "fields": {
389 | "keyword": {
390 | "type": "keyword",
391 | "ignore_above": 256
392 | }
393 | }
394 | },
395 | "services_ipv4cidr": {
396 | "type": "text",
397 | "fields": {
398 | "keyword": {
399 | "type": "keyword",
400 | "ignore_above": 256
401 | }
402 | }
403 | },
404 | "status": {
405 | "type": "text",
406 | "fields": {
407 | "keyword": {
408 | "type": "keyword",
409 | "ignore_above": 256
410 | }
411 | }
412 | },
413 | "subnetwork": {
414 | "type": "text",
415 | "fields": {
416 | "keyword": {
417 | "type": "keyword",
418 | "ignore_above": 256
419 | }
420 | }
421 | },
422 | "version": {
423 | "type": "text",
424 | "fields": {
425 | "keyword": {
426 | "type": "keyword",
427 | "ignore_above": 256
428 | }
429 | }
430 | }
431 | }
432 | },
433 | "d": {
434 | "properties": {
435 | "id": {
436 | "type": "keyword",
437 | "fields": {
438 | "search": {
439 | "type": "text",
440 | "fielddata": true
441 | }
442 | }
443 | },
444 | "name": {
445 | "type": "keyword",
446 | "fields": {
447 | "search": {
448 | "type": "text",
449 | "fielddata": true
450 | }
451 | }
452 | },
453 | "region": {
454 | "type": "keyword",
455 | "fields": {
456 | "search": {
457 | "type": "text",
458 | "fielddata": true
459 | }
460 | }
461 | }
462 | }
463 | },
464 | "f": {
465 | "properties": {
466 | "direction": {
467 | "type": "keyword",
468 | "fields": {
469 | "search": {
470 | "type": "text",
471 | "fielddata": true
472 | }
473 | }
474 | },
475 | "disabled": {
476 | "type": "boolean",
477 | "null_value": false
478 | },
479 | "displayname": {
480 | "type": "keyword",
481 | "fields": {
482 | "search": {
483 | "type": "text",
484 | "fielddata": true
485 | }
486 | }
487 | },
488 | "id": {
489 | "type": "keyword",
490 | "fields": {
491 | "search": {
492 | "type": "text",
493 | "fielddata": true
494 | }
495 | }
496 | },
497 | "name": {
498 | "type": "keyword",
499 | "fields": {
500 | "search": {
501 | "type": "text",
502 | "fielddata": true
503 | }
504 | }
505 | }
506 | }
507 | },
508 | "g": {
509 | "properties": {
510 | "arn": {
511 | "type": "keyword",
512 | "fields": {
513 | "search": {
514 | "type": "text",
515 | "fielddata": true
516 | }
517 | }
518 | },
519 | "name": {
520 | "type": "keyword",
521 | "fields": {
522 | "search": {
523 | "type": "text",
524 | "fielddata": true
525 | }
526 | }
527 | }
528 | }
529 | },
530 | "inbound_account": {
531 | "properties": {
532 | "name": {
533 | "type": "keyword",
534 | "fields": {
535 | "search": {
536 | "type": "text",
537 | "fielddata": true
538 | }
539 | }
540 | }
541 | }
542 | },
543 | "inbound_group": {
544 | "properties": {
545 | "name": {
546 | "type": "keyword",
547 | "fields": {
548 | "search": {
549 | "type": "text",
550 | "fielddata": true
551 | }
552 | }
553 | }
554 | }
555 | },
556 | "inbound_range": {
557 | "properties": {
558 | "range": {
559 | "type": "keyword",
560 | "fields": {
561 | "search": {
562 | "type": "text",
563 | "fielddata": true
564 | }
565 | }
566 | }
567 | }
568 | },
569 | "inbound_rule": {
570 | "properties": {
571 | "fromport": {
572 | "type": "long"
573 | },
574 | "protocol": {
575 | "type": "keyword",
576 | "fields": {
577 | "search": {
578 | "type": "text",
579 | "fielddata": true
580 | }
581 | }
582 | },
583 | "toport": {
584 | "type": "long"
585 | }
586 | }
587 | },
588 | "inbound_vpc": {
589 | "properties": {
590 | "id": {
591 | "type": "keyword",
592 | "fields": {
593 | "search": {
594 | "type": "text",
595 | "fielddata": true
596 | }
597 | }
598 | }
599 | }
600 | },
601 | "instance": {
602 | "properties": {
603 | "db_instance_identifier": {
604 | "type": "keyword",
605 | "fields": {
606 | "search": {
607 | "type": "text",
608 | "fielddata": true
609 | }
610 | }
611 | },
612 | "exposed_internet": {
613 | "type": "boolean"
614 | },
615 | "exposed_internet_type": {
616 | "type": "keyword",
617 | "fields": {
618 | "search": {
619 | "type": "text",
620 | "fielddata": true
621 | }
622 | }
623 | },
624 | "imageid": {
625 | "type": "keyword",
626 | "fields": {
627 | "search": {
628 | "type": "text",
629 | "fielddata": true
630 | }
631 | }
632 | },
633 | "instanceid": {
634 | "type": "keyword",
635 | "fields": {
636 | "search": {
637 | "type": "text",
638 | "fielddata": true
639 | }
640 | }
641 | },
642 | "instancename": {
643 | "type": "keyword",
644 | "fields": {
645 | "search": {
646 | "type": "text",
647 | "fielddata": true
648 | }
649 | }
650 | },
651 | "instancetype": {
652 | "type": "keyword",
653 | "fields": {
654 | "search": {
655 | "type": "text",
656 | "fielddata": true
657 | }
658 | }
659 | },
660 | "launchtime": {
661 | "type": "keyword",
662 | "fields": {
663 | "search": {
664 | "type": "text",
665 | "fielddata": true
666 | }
667 | }
668 | },
669 | "name": {
670 | "type": "keyword",
671 | "fields": {
672 | "search": {
673 | "type": "text",
674 | "fielddata": true
675 | }
676 | }
677 | },
678 | "privateipaddress": {
679 | "type": "keyword",
680 | "fields": {
681 | "search": {
682 | "type": "text",
683 | "fielddata": true
684 | }
685 | }
686 | },
687 | "publicdnsname": {
688 | "type": "keyword",
689 | "fields": {
690 | "search": {
691 | "type": "text",
692 | "fielddata": true
693 | }
694 | }
695 | },
696 | "region": {
697 | "type": "keyword",
698 | "fields": {
699 | "search": {
700 | "type": "text",
701 | "fielddata": true
702 | }
703 | }
704 | },
705 | "state": {
706 | "type": "keyword",
707 | "fields": {
708 | "search": {
709 | "type": "text",
710 | "fielddata": true
711 | }
712 | }
713 | },
714 | "status": {
715 | "type": "keyword",
716 | "fields": {
717 | "search": {
718 | "type": "text",
719 | "fielddata": true
720 | }
721 | }
722 | },
723 | "zone_name": {
724 | "type": "keyword",
725 | "fields": {
726 | "search": {
727 | "type": "text",
728 | "fielddata": true
729 | }
730 | }
731 | }
732 | }
733 | },
734 | "k": {
735 | "properties": {
736 | "id": {
737 | "type": "keyword",
738 | "fields": {
739 | "search": {
740 | "type": "text",
741 | "fielddata": true
742 | }
743 | }
744 | },
745 | "keyname": {
746 | "type": "keyword",
747 | "fields": {
748 | "search": {
749 | "type": "text",
750 | "fielddata": true
751 | }
752 | }
753 | },
754 | "region": {
755 | "type": "keyword",
756 | "fields": {
757 | "search": {
758 | "type": "text",
759 | "fielddata": true
760 | }
761 | }
762 | }
763 | }
764 | },
765 | "l": {
766 | "properties": {
767 | "createdtime": {
768 | "type": "keyword",
769 | "fields": {
770 | "search": {
771 | "type": "text",
772 | "fielddata": true
773 | }
774 | }
775 | },
776 | "dnsname": {
777 | "type": "keyword",
778 | "fields": {
779 | "search": {
780 | "type": "text",
781 | "fielddata": true
782 | }
783 | }
784 | },
785 | "exposed_internet": {
786 | "type": "boolean"
787 | },
788 | "name": {
789 | "type": "keyword",
790 | "fields": {
791 | "search": {
792 | "type": "text",
793 | "fielddata": true
794 | }
795 | }
796 | },
797 | "description": {
798 | "type": "keyword",
799 | "fields": {
800 | "search": {
801 | "type": "text",
802 | "fielddata": true
803 | }
804 | }
805 | },
806 | "runtime": {
807 | "type": "keyword",
808 | "fields": {
809 | "search": {
810 | "type": "text",
811 | "fielddata": true
812 | }
813 | }
814 | },
815 | "modifieddate": {
816 | "type": "keyword",
817 | "fields": {
818 | "search": {
819 | "type": "text",
820 | "fielddata": true
821 | }
822 | }
823 | },
824 | "scheme": {
825 | "type": "keyword",
826 | "fields": {
827 | "search": {
828 | "type": "text",
829 | "fielddata": true
830 | }
831 | }
832 | }
833 | }
834 | },
835 | "language": {
836 | "type": "keyword",
837 | "fields": {
838 | "search": {
839 | "type": "text",
840 | "fielddata": true
841 | }
842 | }
843 | },
844 | "metadata": {
845 | "properties": {
846 | "query_description": {
847 | "type": "keyword",
848 | "fields": {
849 | "search": {
850 | "type": "text",
851 | "fielddata": true
852 | }
853 | }
854 | },
855 | "query_headers": {
856 | "type": "keyword",
857 | "fields": {
858 | "search": {
859 | "type": "text",
860 | "fielddata": true
861 | }
862 | }
863 | },
864 | "query_id": {
865 | "type": "keyword",
866 | "fields": {
867 | "search": {
868 | "type": "text",
869 | "fielddata": true
870 | }
871 | }
872 | },
873 | "query_name": {
874 | "type": "keyword",
875 | "fields": {
876 | "search": {
877 | "type": "text",
878 | "fielddata": true
879 | }
880 | }
881 | }
882 | }
883 | },
884 | "n": {
885 | "properties": {
886 | "displayname": {
887 | "type": "keyword",
888 | "fields": {
889 | "search": {
890 | "type": "text",
891 | "fielddata": true
892 | }
893 | }
894 | },
895 | "id": {
896 | "type": "keyword",
897 | "fields": {
898 | "search": {
899 | "type": "text",
900 | "fielddata": true
901 | }
902 | }
903 | },
904 | "lifecyclestate": {
905 | "type": "keyword",
906 | "fields": {
907 | "search": {
908 | "type": "text",
909 | "fielddata": true
910 | }
911 | }
912 | },
913 | "name": {
914 | "type": "keyword",
915 | "fields": {
916 | "search": {
917 | "type": "text",
918 | "fielddata": true
919 | }
920 | }
921 | },
922 | "projectid": {
923 | "type": "keyword",
924 | "fields": {
925 | "search": {
926 | "type": "text",
927 | "fielddata": true
928 | }
929 | }
930 | },
931 | "projectnumber": {
932 | "type": "keyword",
933 | "fields": {
934 | "search": {
935 | "type": "text",
936 | "fielddata": true
937 | }
938 | }
939 | },
940 | "region": {
941 | "type": "keyword",
942 | "fields": {
943 | "search": {
944 | "type": "text",
945 | "fielddata": true
946 | }
947 | }
948 | }
949 | }
950 | },
951 | "perm": {
952 | "properties": {
953 | "protocol": {
954 | "type": "keyword",
955 | "fields": {
956 | "search": {
957 | "type": "text",
958 | "fielddata": true
959 | }
960 | }
961 | },
962 | "fromport": {
963 | "type": "keyword",
964 | "fields": {
965 | "search": {
966 | "type": "text",
967 | "fielddata": true
968 | }
969 | }
970 | },
971 | "toport": {
972 | "type": "keyword",
973 | "fields": {
974 | "search": {
975 | "type": "text",
976 | "fielddata": true
977 | }
978 | }
979 | }
980 | }
981 | },
982 | "rule": {
983 | "properties": {
984 | "range": {
985 | "type": "keyword",
986 | "fields": {
987 | "search": {
988 | "type": "text",
989 | "fielddata": true
990 | }
991 | }
992 | }
993 | }
994 | },
995 | "org": {
996 | "properties": {
997 | "id": {
998 | "type": "keyword",
999 | "fields": {
1000 | "search": {
1001 | "type": "text",
1002 | "fielddata": true
1003 | }
1004 | }
1005 | }
1006 | }
1007 | },
1008 | "outbound_account": {
1009 | "properties": {
1010 | "name": {
1011 | "type": "keyword",
1012 | "fields": {
1013 | "search": {
1014 | "type": "text",
1015 | "fielddata": true
1016 | }
1017 | }
1018 | }
1019 | }
1020 | },
1021 | "p": {
1022 | "properties": {
1023 | "arn": {
1024 | "type": "keyword",
1025 | "fields": {
1026 | "search": {
1027 | "type": "text",
1028 | "fielddata": true
1029 | }
1030 | }
1031 | },
1032 | "attachmentcount": {
1033 | "type": "long"
1034 | },
1035 | "fromport": {
1036 | "type": "long"
1037 | },
1038 | "isattachable": {
1039 | "type": "boolean"
1040 | },
1041 | "name": {
1042 | "type": "keyword",
1043 | "fields": {
1044 | "search": {
1045 | "type": "text",
1046 | "fielddata": true
1047 | }
1048 | }
1049 | },
1050 | "passwordlastused": {
1051 | "type": "keyword",
1052 | "fields": {
1053 | "search": {
1054 | "type": "text",
1055 | "fielddata": true
1056 | }
1057 | }
1058 | },
1059 | "protocol": {
1060 | "type": "keyword",
1061 | "fields": {
1062 | "search": {
1063 | "type": "text",
1064 | "fielddata": true
1065 | }
1066 | }
1067 | },
1068 | "toport": {
1069 | "type": "long"
1070 | },
1071 | "updatedate": {
1072 | "type": "keyword",
1073 | "fields": {
1074 | "search": {
1075 | "type": "text",
1076 | "fielddata": true
1077 | }
1078 | }
1079 | },
1080 | "userid": {
1081 | "type": "keyword",
1082 | "fields": {
1083 | "search": {
1084 | "type": "text",
1085 | "fielddata": true
1086 | }
1087 | }
1088 | }
1089 | }
1090 | },
1091 | "policy": {
1092 | "properties": {
1093 | "name": {
1094 | "type": "keyword",
1095 | "fields": {
1096 | "search": {
1097 | "type": "text",
1098 | "fielddata": true
1099 | }
1100 | }
1101 | }
1102 | }
1103 | },
1104 | "peer": {
1105 | "properties": {
1106 | "connection_id": {
1107 | "type": "keyword",
1108 | "fields": {
1109 | "search": {
1110 | "type": "text",
1111 | "fielddata": true
1112 | }
1113 | }
1114 | },
1115 | "status_code": {
1116 | "type": "keyword",
1117 | "fields": {
1118 | "search": {
1119 | "type": "text",
1120 | "fielddata": true
1121 | }
1122 | }
1123 | }
1124 | }
1125 | },
1126 | "r": {
1127 | "properties": {
1128 | "arn": {
1129 | "type": "keyword",
1130 | "fields": {
1131 | "search": {
1132 | "type": "text",
1133 | "fielddata": true
1134 | }
1135 | }
1136 | },
1137 | "createdate": {
1138 | "type": "keyword",
1139 | "fields": {
1140 | "search": {
1141 | "type": "text",
1142 | "fielddata": true
1143 | }
1144 | }
1145 | },
1146 | "name": {
1147 | "type": "keyword",
1148 | "fields": {
1149 | "search": {
1150 | "type": "text",
1151 | "fielddata": true
1152 | }
1153 | }
1154 | },
1155 | "roleid": {
1156 | "type": "keyword",
1157 | "fields": {
1158 | "search": {
1159 | "type": "text",
1160 | "fielddata": true
1161 | }
1162 | }
1163 | },
1164 | "type": {
1165 | "type": "keyword",
1166 | "fields": {
1167 | "search": {
1168 | "type": "text",
1169 | "fielddata": true
1170 | }
1171 | }
1172 | },
1173 | "value": {
1174 | "type": "keyword",
1175 | "fields": {
1176 | "search": {
1177 | "type": "text",
1178 | "fielddata": true
1179 | }
1180 | }
1181 | },
1182 | "cluster_status": {
1183 | "type": "keyword",
1184 | "fields": {
1185 | "search": {
1186 | "type": "text",
1187 | "fielddata": true
1188 | }
1189 | }
1190 | },
1191 | "endpoint_address": {
1192 | "type": "keyword",
1193 | "fields": {
1194 | "search": {
1195 | "type": "text",
1196 | "fielddata": true
1197 | }
1198 | }
1199 | },
1200 | "endpoint_port": {
1201 | "type": "long"
1202 | },
1203 | "number_of_nodes": {
1204 | "type": "long"
1205 | },
1206 | "db_name": {
1207 | "type": "keyword",
1208 | "fields": {
1209 | "search": {
1210 | "type": "text",
1211 | "fielddata": true
1212 | }
1213 | }
1214 | },
1215 | "master_username": {
1216 | "type": "keyword",
1217 | "fields": {
1218 | "search": {
1219 | "type": "text",
1220 | "fielddata": true
1221 | }
1222 | }
1223 | },
1224 | "publicly_accessible": {
1225 | "type": "boolean"
1226 | },
1227 | "region": {
1228 | "type": "keyword",
1229 | "fields": {
1230 | "search": {
1231 | "type": "text",
1232 | "fielddata": true
1233 | }
1234 | }
1235 | },
1236 | "created_at": {
1237 | "type": "keyword",
1238 | "fields": {
1239 | "search": {
1240 | "type": "text",
1241 | "fielddata": true
1242 | }
1243 | }
1244 | }
1245 | }
1246 | },
1247 | "r1": {
1248 | "properties": {
1249 | "arn": {
1250 | "type": "keyword",
1251 | "fields": {
1252 | "search": {
1253 | "type": "text",
1254 | "fielddata": true
1255 | }
1256 | }
1257 | },
1258 | "name": {
1259 | "type": "keyword",
1260 | "fields": {
1261 | "search": {
1262 | "type": "text",
1263 | "fielddata": true
1264 | }
1265 | }
1266 | }
1267 | }
1268 | },
1269 | "rds": {
1270 | "properties": {
1271 | "db_name": {
1272 | "type": "keyword",
1273 | "fields": {
1274 | "search": {
1275 | "type": "text",
1276 | "fielddata": true
1277 | }
1278 | }
1279 | },
1280 | "endpoint_address": {
1281 | "type": "keyword",
1282 | "fields": {
1283 | "search": {
1284 | "type": "text",
1285 | "fielddata": true
1286 | }
1287 | }
1288 | },
1289 | "endpoint_port": {
1290 | "type": "long"
1291 | },
1292 | "engine_version": {
1293 | "type": "text",
1294 | "fields": {
1295 | "keyword": {
1296 | "type": "keyword",
1297 | "ignore_above": 256
1298 | }
1299 | }
1300 | },
1301 | "id": {
1302 | "type": "keyword",
1303 | "fields": {
1304 | "search": {
1305 | "type": "text",
1306 | "fielddata": true
1307 | }
1308 | }
1309 | },
1310 | "instance_create_time": {
1311 | "type": "keyword",
1312 | "fields": {
1313 | "search": {
1314 | "type": "text",
1315 | "fielddata": true
1316 | }
1317 | }
1318 | },
1319 | "master_username": {
1320 | "type": "keyword",
1321 | "fields": {
1322 | "search": {
1323 | "type": "text",
1324 | "fielddata": true
1325 | }
1326 | }
1327 | },
1328 | "publicly_accessible": {
1329 | "type": "boolean"
1330 | },
1331 | "storage_encrypted": {
1332 | "type": "boolean"
1333 | }
1334 | }
1335 | },
1336 | "ru": {
1337 | "properties": {
1338 | "fromport": {
1339 | "type": "long"
1340 | },
1341 | "protocol": {
1342 | "type": "keyword",
1343 | "fields": {
1344 | "search": {
1345 | "type": "text",
1346 | "fielddata": true
1347 | }
1348 | }
1349 | },
1350 | "toport": {
1351 | "type": "long"
1352 | }
1353 | }
1354 | },
1355 | "s": {
1356 | "properties": {
1357 | "anonymous_access": {
1358 | "type": "boolean"
1359 | },
1360 | "anonymous_actions": {
1361 | "type": "keyword",
1362 | "fields": {
1363 | "search": {
1364 | "type": "text",
1365 | "fielddata": true
1366 | }
1367 | }
1368 | },
1369 | "creationdate": {
1370 | "type": "keyword",
1371 | "fields": {
1372 | "search": {
1373 | "type": "text",
1374 | "fielddata": true
1375 | }
1376 | }
1377 | },
1378 | "description": {
1379 | "type": "keyword",
1380 | "fields": {
1381 | "search": {
1382 | "type": "text",
1383 | "fielddata": true
1384 | }
1385 | }
1386 | },
1387 | "error_codes": {
1388 | "type": "keyword",
1389 | "fields": {
1390 | "search": {
1391 | "type": "text",
1392 | "fielddata": true
1393 | }
1394 | }
1395 | },
1396 | "gateway_address": {
1397 | "type": "keyword",
1398 | "fields": {
1399 | "search": {
1400 | "type": "text",
1401 | "fielddata": true
1402 | }
1403 | }
1404 | },
1405 | "ip_cidr_range": {
1406 | "type": "keyword",
1407 | "fields": {
1408 | "search": {
1409 | "type": "text",
1410 | "fielddata": true
1411 | }
1412 | }
1413 | },
1414 | "languages": {
1415 | "type": "keyword",
1416 | "fields": {
1417 | "search": {
1418 | "type": "text",
1419 | "fielddata": true
1420 | }
1421 | }
1422 | },
1423 | "name": {
1424 | "type": "keyword",
1425 | "fields": {
1426 | "search": {
1427 | "type": "text",
1428 | "fielddata": true
1429 | }
1430 | }
1431 | },
1432 | "region": {
1433 | "type": "keyword",
1434 | "fields": {
1435 | "search": {
1436 | "type": "text",
1437 | "fielddata": true
1438 | }
1439 | }
1440 | },
1441 | "target": {
1442 | "type": "keyword",
1443 | "fields": {
1444 | "search": {
1445 | "type": "text",
1446 | "fielddata": true
1447 | }
1448 | }
1449 | }
1450 | }
1451 | },
1452 | "s1": {
1453 | "properties": {
1454 | "description": {
1455 | "type": "keyword",
1456 | "fields": {
1457 | "search": {
1458 | "type": "text",
1459 | "fielddata": true
1460 | }
1461 | }
1462 | },
1463 | "name": {
1464 | "type": "keyword",
1465 | "fields": {
1466 | "search": {
1467 | "type": "text",
1468 | "fielddata": true
1469 | }
1470 | }
1471 | },
1472 | "target": {
1473 | "type": "keyword",
1474 | "fields": {
1475 | "search": {
1476 | "type": "text",
1477 | "fielddata": true
1478 | }
1479 | }
1480 | }
1481 | }
1482 | },
1483 | "s2": {
1484 | "properties": {
1485 | "description": {
1486 | "type": "keyword",
1487 | "fields": {
1488 | "search": {
1489 | "type": "text",
1490 | "fielddata": true
1491 | }
1492 | }
1493 | },
1494 | "name": {
1495 | "type": "keyword",
1496 | "fields": {
1497 | "search": {
1498 | "type": "text",
1499 | "fielddata": true
1500 | }
1501 | }
1502 | },
1503 | "target": {
1504 | "type": "keyword",
1505 | "fields": {
1506 | "search": {
1507 | "type": "text",
1508 | "fielddata": true
1509 | }
1510 | }
1511 | }
1512 | }
1513 | },
1514 | "s3": {
1515 | "properties": {
1516 | "name": {
1517 | "type": "keyword",
1518 | "fields": {
1519 | "search": {
1520 | "type": "text",
1521 | "fielddata": true
1522 | }
1523 | }
1524 | }
1525 | }
1526 | },
1527 | "sg": {
1528 | "properties": {
1529 | "description": {
1530 | "type": "keyword",
1531 | "fields": {
1532 | "search": {
1533 | "type": "text",
1534 | "fielddata": true
1535 | }
1536 | }
1537 | },
1538 | "name": {
1539 | "type": "keyword",
1540 | "fields": {
1541 | "search": {
1542 | "type": "text",
1543 | "fielddata": true
1544 | }
1545 | }
1546 | },
1547 | "id": {
1548 | "type": "keyword",
1549 | "fields": {
1550 | "search": {
1551 | "type": "text",
1552 | "fielddata": true
1553 | }
1554 | }
1555 | }
1556 | }
1557 | },
1558 | "t": {
1559 | "properties": {
1560 | "name": {
1561 | "type": "keyword",
1562 | "fields": {
1563 | "search": {
1564 | "type": "text",
1565 | "fielddata": true
1566 | }
1567 | }
1568 | },
1569 | "arn": {
1570 | "type": "keyword",
1571 | "fields": {
1572 | "search": {
1573 | "type": "text",
1574 | "fielddata": true
1575 | }
1576 | }
1577 | },
1578 | "description": {
1579 | "type": "keyword",
1580 | "fields": {
1581 | "search": {
1582 | "type": "text",
1583 | "fielddata": true
1584 | }
1585 | }
1586 | },
1587 | "state": {
1588 | "type": "keyword",
1589 | "fields": {
1590 | "search": {
1591 | "type": "text",
1592 | "fielddata": true
1593 | }
1594 | }
1595 | },
1596 | "owner_id": {
1597 | "type": "keyword",
1598 | "fields": {
1599 | "search": {
1600 | "type": "text",
1601 | "fielddata": true
1602 | }
1603 | }
1604 | }
1605 | }
1606 | },
1607 | "u": {
1608 | "properties": {
1609 | "name": {
1610 | "type": "keyword",
1611 | "fields": {
1612 | "search": {
1613 | "type": "text",
1614 | "fielddata": true
1615 | }
1616 | }
1617 | },
1618 | "userid": {
1619 | "type": "keyword",
1620 | "fields": {
1621 | "search": {
1622 | "type": "text",
1623 | "fielddata": true
1624 | }
1625 | }
1626 | }
1627 | }
1628 | },
1629 | "v": {
1630 | "properties": {
1631 | "description": {
1632 | "type": "keyword",
1633 | "fields": {
1634 | "search": {
1635 | "type": "text",
1636 | "fielddata": true
1637 | }
1638 | }
1639 | },
1640 | "id": {
1641 | "type": "keyword",
1642 | "fields": {
1643 | "search": {
1644 | "type": "text",
1645 | "fielddata": true
1646 | }
1647 | }
1648 | },
1649 | "is_default": {
1650 | "type": "boolean"
1651 | },
1652 | "name": {
1653 | "type": "keyword",
1654 | "fields": {
1655 | "search": {
1656 | "type": "text",
1657 | "fielddata": true
1658 | }
1659 | }
1660 | },
1661 | "primary_cidr_block": {
1662 | "type": "keyword",
1663 | "fields": {
1664 | "search": {
1665 | "type": "text",
1666 | "fielddata": true
1667 | }
1668 | }
1669 | },
1670 | "region": {
1671 | "type": "keyword",
1672 | "fields": {
1673 | "search": {
1674 | "type": "text",
1675 | "fielddata": true
1676 | }
1677 | }
1678 | },
1679 | "routing_config_routing_mode": {
1680 | "type": "keyword",
1681 | "fields": {
1682 | "search": {
1683 | "type": "text",
1684 | "fielddata": true
1685 | }
1686 | }
1687 | }
1688 | }
1689 | },
1690 | "z": {
1691 | "properties": {
1692 | "name": {
1693 | "type": "keyword",
1694 | "fields": {
1695 | "search": {
1696 | "type": "text",
1697 | "fielddata": true
1698 | }
1699 | }
1700 | }
1701 | }
1702 | }
1703 | }
1704 | }
1705 | }
1706 |
--------------------------------------------------------------------------------
/components/cartography/consumers/elasticsearch/deployment/base/ingestor-cronjob.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: batch/v1beta1
3 | kind: CronJob
4 | metadata:
5 | name: cartography-elastic-ingestor
6 | spec:
7 | schedule: '0 10 * * *' # Run every day at 10am
8 | concurrencyPolicy: Forbid
9 | jobTemplate:
10 | spec:
11 | backoffLimit: 5
12 | template:
13 | metadata:
14 | annotations:
15 | vault.hashicorp.com/agent-inject: 'true'
16 | vault.hashicorp.com/role: 'vault-agent'
17 | #
18 | # NEO4J PASSWORD FROM VAULT
19 | #
20 | vault.hashicorp.com/agent-inject-secret-neo4j-password: 'secret/data/cartography/neo4j-password'
21 | vault.hashicorp.com/agent-inject-template-neo4j-password: |
22 | {{ with secret "secret/data/cartography/neo4j-password" -}}
23 | NEO4J_SECRETS_PASSWORD="{{ .Data.data.NEO4J_SECRETS_PASSWORD }}"
24 | {{- end }}
25 | ## END OF CREDENTIALS SETUP
26 | spec:
27 | serviceAccountName: vault-agent
28 | restartPolicy: Never
29 | securityContext:
30 | fsGroup: 1000
31 | containers:
32 | - name: cartography-elastic-ingestor
33 | image: //components/cartography:cartography_ingestor_image
34 | securityContext:
35 | allowPrivilegeEscalation: false
36 | env:
37 | - name: NEO4J_URI
38 | value: 'bolt://neo4j-bolt-service:7687'
39 | - name: NEO4J_USER
40 | value: 'neo4j'
41 | - name: ELASTIC_URL
42 | value: 'elasticsearch-es-http.elastic-system.svc.cluster.local:9200'
43 | - name: ELASTIC_TLS_ENABLED
44 | value: 'False'
45 | - name: ELASTIC_INDEX
46 | value: 'cartography'
47 | - name: ELASTIC_DRY_RUN
48 | value: 'False'
49 | - name: ELASTIC_INDEX_SPEC
50 | value: '/opt/es-index/es-index.json'
51 | command:
52 | - '/bin/sh'
53 | - '-c'
54 | - |
55 | # Populate env vars from secrets:
56 | # NEO4J_SECRETS_PASSWORD
57 | # ELASTICSEARCH_USER
58 | # ELASTICSEARCH_PASSWORD
59 | export $(grep -v '#.*' /vault/secrets/neo4j-password | xargs)
60 | export ELASTICSEARCH_USER=$(cat /vault/secret/cartography-es-writer/username)
61 | export ELASTICSEARCH_PASSWORD=$(cat /vault/secret/cartography-es-writer/password)
62 |
63 | # Run the ingestor
64 | python3 /app/elastic_ingestor.py
65 | echo "Ingestion run completed"
66 | volumeMounts:
67 | - name: elasticsearch-credentials-volume
68 | mountPath: /vault/secret/cartography-es-writer
69 | readOnly: true
70 | - name: cartography-elastic-configmap-volume
71 | mountPath: /opt/es-index
72 | readOnly: true
73 | volumes:
74 | - name: elasticsearch-credentials-volume
75 | secret:
76 | secretName: elastic-credentials
77 | - name: cartography-elastic-configmap-volume
78 | configMap:
79 | name: cartography-elastic-index-configmap
80 |
--------------------------------------------------------------------------------
/components/cartography/consumers/elasticsearch/deployment/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | namespace: cartography
5 | commonLabels:
6 | app: cartography
7 | component: cartography-elastic-ingestor
8 |
9 | resources:
10 | - ingestor-cronjob.yaml
11 |
12 | configMapGenerator:
13 | - name: cartography-elastic-index-configmap
14 | files:
15 | - es-index.json
16 |
--------------------------------------------------------------------------------
/components/cartography/consumers/elasticsearch/deployment/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.9.5-slim-buster
2 |
3 | # Setup app user
4 | WORKDIR /app/
5 | RUN addgroup --gid 27100 --system app
6 | RUN adduser --shell /bin/false --uid 27100 --ingroup app --system app
7 |
8 | # Install dependencies
9 | COPY /requirements.txt /app/
10 | RUN python -m pip install --upgrade pip
11 | RUN pip3 install --upgrade -r /app/requirements.txt
12 |
13 | # Add libraries
14 | COPY /queries.json /
15 | COPY /neo4j_connector.py /elastic_connector.py /elastic_ingestor.py /app/
16 | RUN chmod +x /app/neo4j_connector.py /app/elastic_connector.py /app/elastic_ingestor.py
17 |
18 | # Entrypoint
19 | RUN chown -R app:app /app/
20 | USER app
21 | CMD ["python3", "/elastic_ingestor.py"]
22 |
--------------------------------------------------------------------------------
/components/cartography/consumers/elasticsearch/deployment/docker/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi
2 | elasticsearch
3 | neo4j
4 |
--------------------------------------------------------------------------------
/components/cartography/consumers/elasticsearch/deployment/overlays/baremetal/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | bases:
5 | - ../../base
6 |
--------------------------------------------------------------------------------
/components/cartography/consumers/elasticsearch/deployment/overlays/minikube/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | bases:
5 | - ../../base
6 |
--------------------------------------------------------------------------------
/components/cartography/consumers/elasticsearch/py/elastic_connector.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import logging
4 | from datetime import date
5 | 
6 | import certifi
7 | import elasticsearch
8 |
9 | logging.basicConfig()
10 | logger = logging.getLogger("elastic_connector")
11 | logger.setLevel(logging.INFO)
12 |
13 |
14 | class ElasticClient:
15 | """Wrapper around Elasticsearch client"""
16 |
17 | def __init__(self, url, ca, tls_enabled, dry_run=False):
18 | """
19 | url: URL of Elasticsearch cluster to connect to
20 | ca: CA certificate for Elasticsearch
21 | tls_enabled: Whether ES supports TLS
22 | dry_run: if True, it will not push to Elasticsearch
23 | """
24 | if not url:
25 | raise Exception('Elasticsearch URL must be specified')
26 | self.dry_run = dry_run
27 | verify_certs = True if tls_enabled else False
28 | self.es = elasticsearch.Elasticsearch(
29 | [url],
30 | verify_certs=verify_certs,
31 | ca_certs=ca)
32 |
33 | def send(self, index, doc_type, data):
34 | """Send data to Elasticsearch.
35 |
36 | index: Elasticsearch index to send to
37 | doc_type: Elasticsearch document type
38 | data: Data (dictionary) to send to Elasticsearch.
39 | """
40 | if self.dry_run:
41 | logger.info('Dry-run: index=%s doc_type=%s data=%s',
42 | index, doc_type, data)
43 | return
44 | res = self.es.index(index=index, doc_type=doc_type, body=data)
45 | if not (res.get('created') or res.get('result') == 'created'):
46 | raise Exception('Failed to submit to Elasticsearch: created:{} result:{}'.format(
47 | res.get('created'), res.get('result')))
48 |
49 |
50 | class ElasticsearchConsumer(object):
51 | """
52 | Main consumer which abstracts over the Elasticsearch APIs,
53 | and provides 2 functionalities:
54 |
55 | create_index: create an ES index for today's data
56 | send_to_es: push data to the index specified
57 | """
58 | DOC_TYPE = '_doc'
59 |
60 | def __init__(self, url, index, dry_run, elastic_user, elastic_password, tls_enabled):
61 | self._parse_config(url, index, dry_run,
62 | elastic_user,
63 | elastic_password,
64 | tls_enabled)
65 | self._connect()
66 |
67 | def _parse_config(self, url, index, dry_run, elastic_user, elastic_password, tls_enabled):
68 | """
69 | url: The domain name (no protocol, no port) of the Elasticsearch instance to send the results to
70 | index: The Elasticsearch index to use
71 | dry_run: If set, will not upload any data to Elasticsearch
72 | tls_enabled: Whether ES supports TLS
73 | """
74 | self.url = url
75 | self.index = index
76 | self.dry_run = dry_run
77 | self._elastic_user = elastic_user
78 | self._elastic_password = elastic_password
79 | self.tls_enabled = tls_enabled
80 |
81 | # Validate url and index have been specified
82 | if not self.url or not self.index:
83 | raise Exception("Elasticsearch config is incomplete")
84 |
85 | # Append current date to index: indexname-YYYY.MM.DD
86 | self.index = "{}-{}".format(self.index,
87 | date.today().strftime("%Y.%m.%d"))
88 | # dry_run and tls_enabled come from env vars, so they are strings
89 | self.dry_run = (self.dry_run == 'True')
90 | self.tls_enabled = (self.tls_enabled == 'True')
91 |
92 | def _connect(self):
93 | protocol = 'https' if self.tls_enabled else 'http'
94 | self.es_location = "{}://{}:{}@{}".format(
95 | protocol,
96 | self._elastic_user,
97 | self._elastic_password,
98 | self.url
99 | )
100 | self.es_client = ElasticClient(url=self.es_location,
101 | ca=certifi.where(),
102 | tls_enabled=self.tls_enabled,
103 | dry_run=self.dry_run)
104 | logger.info(
105 | 'ElasticSearch Client instantiated: {} / {}'.format(self.url, self.index))
106 |
107 | def create_index(self, data):
108 | logger.info('Creating index for: {}'.format(self.index))
109 | self.es_client.es.indices.create(
110 | index=self.index,
111 | body=data,
112 | ignore=400 # ignore 400 already exists code
113 | )
114 |
115 | def send_to_es(self, query_name, data):
116 | logger.info('Sending data to ES: {}'.format(query_name))
117 | self.es_client.send(index=self.index,
118 | doc_type=self.DOC_TYPE,
119 | data=data)
120 |
--------------------------------------------------------------------------------
/components/cartography/consumers/elasticsearch/py/elastic_ingestor.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 | import time
4 | import datetime
5 | import logging
6 |
7 | from elastic_connector import ElasticsearchConsumer
8 | from neo4j_connector import Neo4jConnector
9 |
10 | logging.basicConfig()
11 | logger = logging.getLogger("elastic_ingestor")
12 | logger.setLevel(logging.INFO)
13 |
14 |
15 | class Ingestor(object):
16 | def __init__(self):
17 | logger.info("Initialising ingestor")
18 |
19 | # Parse env vars
20 | self.elastic_url = os.environ['ELASTIC_URL']
21 | self.elastic_user = os.environ['ELASTICSEARCH_USER']
22 | self.elastic_password = os.environ['ELASTICSEARCH_PASSWORD']
23 | self.elastic_index_spec = os.environ['ELASTIC_INDEX_SPEC']
24 | self.elastic_dry_run = os.environ['ELASTIC_DRY_RUN']
25 | self.elastic_tls_enabled = os.environ['ELASTIC_TLS_ENABLED']
26 | self.index_standard = os.environ['ELASTIC_INDEX']
27 |
28 | # Compute tag to identify this run
29 | now = datetime.datetime.now()
30 | self.run_tag = now.strftime("%Y-%m-%d %H:%M:%S")
31 |
32 | # Define indexes
33 | self.index_short_term = f"short-term-{self.index_standard}"
34 |
35 | # Instantiate clients
36 | logger.info("Instantiating clients")
37 | self.db = Neo4jConnector()
38 | self._es_init_clients()
39 |
40 | # ===================================================================
41 | # ES INTEGRATION
42 | # ===================================================================
43 | def _es_init_clients(self):
44 | """
45 | Instantiate one ES client for each index to be used:
46 | cartography-
47 | short-term-cartography-
48 | """
49 | self.es_clients = []
50 | for index in [self.index_standard, self.index_short_term]:
51 | c = ElasticsearchConsumer(
52 | self.elastic_url,
53 | index,
54 | self.elastic_dry_run,
55 | self.elastic_user,
56 | self.elastic_password,
57 | self.elastic_tls_enabled
58 | )
59 | self.es_clients.append(c)
60 |
61 | def _es_push_indexes(self, content):
62 | """
63 | For each ES client, create an index for today's ingestion
64 | """
65 | for c in self.es_clients:
66 | c.create_index(content)
67 |
68 | def _es_push_results(self, query_name, records):
69 | """
70 | For each ES client, push the records provided
71 | """
72 | logger.debug(f"Pushing {query_name}: {records}")
73 | for c in self.es_clients:
74 | c.send_to_es(query_name, records)
75 |
76 | # ===================================================================
77 | # RECORD MANIPULATION
78 | # ===================================================================
79 | def _sanitise_fields(self, record):
80 | """
81 | ElasticSearch doesn't like parenthesis in the field names,
82 | so we have to replace them before ingesting the records.
83 | """
84 | sanitised = {}
85 | for k, v in record.items():
86 | new_key = k.replace('(', '_').replace(')', '_')
87 | sanitised[new_key] = v
88 | return sanitised
89 |
90 | def _enrich_results(self, record, query):
91 | """
92 | Enrich results from Neo4j with metadata needed by ES
93 | """
94 | record['metadata.query_name'] = query['name']
95 | record['metadata.query_id'] = '{}_{}'.format(
96 | query['name'], self.run_tag)
97 | record['metadata.query_description'] = query['description']
98 | record['metadata.query_headers'] = query['headers']
99 | record['@timestamp'] = int(round(time.time() * 1000))
100 | return record
101 |
102 | # ===================================================================
103 | # EXPOSED OPERATIONS
104 | # ===================================================================
105 | def push_indexes(self):
106 | with open(self.elastic_index_spec) as fp:
107 | content = fp.read()
108 | self._es_push_indexes(content)
109 |
110 | def query_by_tag(self, tags):
111 | logger.info("Querying Neo4J by tags: {}".format(tags))
112 | return self.db.query_by_tag(tags)
113 |
114 | def push_results(self, queries_results):
115 | logger.info("Pushing query results to ES")
116 | for query in queries_results:
117 | # query = {
118 | # 'name': 'gcp_project_list',
119 | # 'description': 'Full list of GCPProjects',
120 | # 'headers': ['project_id', ...],
121 | #     'result': [ {...}, ] }
122 | logger.debug(f"Processing query: {query}")
123 | for r in query['result']:
124 | # Sanitise fields
125 | sanitised = self._sanitise_fields(r)
126 | # Enrich data
127 | enriched = self._enrich_results(sanitised, query)
128 | # Send to elastic
129 | self._es_push_results(query['name'], enriched)
130 |
131 |
132 | def main():
133 | # Instantiate ingestor
134 | ingestor = Ingestor()
135 |
136 | # Define index
137 | logger.info("Pushing Elasticsearch indexes...")
138 | ingestor.push_indexes()
139 |
140 | logger.info("Starting ingesting data from Neo4j...")
141 |
142 | # Queries - AWS
143 | queries_results = ingestor.query_by_tag(['aws'])
144 | ingestor.push_results(queries_results)
145 |
146 | # Queries - GCP
147 | queries_results = ingestor.query_by_tag(['gcp'])
148 | ingestor.push_results(queries_results)
149 |
150 | logger.info("Ingestion completed successfully")
151 |
152 |
153 | if __name__ == '__main__':
154 | main()
155 |
--------------------------------------------------------------------------------
/components/cartography/consumers/elasticsearch/py/neo4j_connector.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import logging
4 | from neo4j import GraphDatabase
5 | from datetime import datetime, timedelta
6 |
7 | logging.basicConfig()
8 | logger = logging.getLogger("neo4j_connector")
9 | logger.setLevel(logging.INFO)
10 |
11 | NEO4J_QUERIES_FILES = [
12 | 'queries.json',
13 | ]
14 |
15 |
16 | class NeoDB(object):
17 | """
18 | Neo4j Wrapper around `neo4j.GraphDatabase`,
19 | which is in charge of establishing a connection with
20 | the backend Neo4j database.
21 | This should never be instantiated directly.
22 | """
23 |
24 | def __init__(self):
25 | self._parse_config()
26 | self._connect()
27 |
28 | def _parse_config(self):
29 | """
30 | uri: The URI of Neo4j (e.g., bolt://neo4j-bolt-service:7687)
31 | username: Username for Neo4j
32 | password: Password for Neo4j
33 |
34 | If no config has been passed to __init__,
35 | fetch the connection string from environment variables
36 | """
37 | self._neo4j_uri = os.environ['NEO4J_URI']
38 | self._neo4j_user = os.environ['NEO4J_USER']
39 | self._neo4j_password = os.environ['NEO4J_SECRETS_PASSWORD']
40 |
41 | def _connect(self):
42 | """
43 | Instantiate the Neo4j python driver
44 | """
45 | self._driver = GraphDatabase.driver(self._neo4j_uri,
46 | auth=(self._neo4j_user, self._neo4j_password))
47 | logger.info('Neo4J Client instantiated: {}'.format(self._neo4j_uri))
48 |
49 | @staticmethod
50 | def _exec_query(tx, query, kwargs):
51 | if kwargs:
52 | result = tx.run(query, **kwargs)
53 | else:
54 | result = tx.run(query)
55 | values = [record.data() for record in result]
56 | return values
57 |
58 | def query(self, q, kwargs=None):
59 | with self._driver.session() as session:
60 | return session.read_transaction(self._exec_query, q, kwargs)
61 |
62 | def close(self):
63 | self._driver.close()
64 |
65 |
66 | class Neo4jConnector(object):
67 | """
68 | Main connector which abstracts over the actual execution of queries,
69 | and provides an interface to run queries and obtain results
70 | """
71 |
72 | def __init__(self):
73 | # Initialize DB
74 | self.db = NeoDB()
75 | # Load the queries file into memory
76 | self._load_queries()
77 |
78 | def _load_queries(self):
79 | extracted = []
80 | for fname in NEO4J_QUERIES_FILES:
81 | path = os.path.join("/", fname)
82 | if not os.path.isfile(path):
83 | logger.warning('File "{}" not found. Skipping...'.format(path))
84 | continue
85 | with open(path, 'r') as fp:
86 | logger.debug('Loading queries file: {}'.format(path))
87 | body = fp.read()
88 | temp = body.strip()[1:-1]
89 | extracted.append(temp)
90 | queries_str = "[%s]" % (",".join(extracted))
91 | self.QUERIES = json.loads(queries_str)
92 | logger.info(f"{len(self.QUERIES)} queries loaded")
93 |
94 | #
95 | # UTILS
96 | #
97 | @staticmethod
98 | def _n_recent_days(N):
99 | return (datetime.utcnow() - timedelta(days=N))
100 |
101 | def _parse_dynamic_params(self, q):
102 | params = q.get('params', '')
103 | kwargs = ""
104 | if params:
105 | # Iterate through the parameters and verify if one matches the supported types
106 | for p in params.keys():
107 | kwargs = {}
108 | # The query has a parameter specifying to
109 | # retrieve the assets for the N most recent days
110 | if p == "n_recent_days":
111 | kwargs[params[p]["param_name"]] = \
112 | str(self._n_recent_days(params[p]["param_value"]))
113 | return kwargs
114 |
115 | #
116 | # FILTERS
117 | #
118 | def _filter_by_tags(self, queries, tags):
119 | """
120 | Returns all the queries which contain *all* the tags provided
121 | (it is an AND)
122 | """
123 | if type(tags) is not list:
124 | tags = list(tags)
125 | return [q for q in queries if all(elem in q['tags'] for elem in tags)]
126 |
127 | def _filter_by_account(self, cypher, account):
128 | if account:
129 | if 'WHERE' in cypher:
130 | cypher = cypher.replace(
131 | ' WHERE ', ' WHERE a.name = "{}" and '.format(account))
132 | else:
133 | cypher = cypher.replace(
134 | ' RETURN ', ' WHERE a.name = "{}" RETURN '.format(account))
135 | return cypher
136 |
137 | #
138 | # EXECUTE QUERIES
139 | #
140 | def query_raw(self, cypher):
141 | logger.info("Executing a raw query: {}".format(cypher))
142 | return self.db.query(cypher)
143 |
144 | def _execute_queries(self, queries, account):
145 | queries_result = []
146 | for q in queries:
147 | # Parse optional dynamic parameters
148 | kwargs = self._parse_dynamic_params(q)
149 | # If an account is provided, inject a WHERE clause to filter by account
150 | cypher = self._filter_by_account(q['query'], account)
151 | # Add return clause
152 | cypher = "{} {}".format(cypher, q['return'])
153 | # Execute the query and parse results as dictionaries
154 | logger.debug(f"Running query: {cypher}")
155 | records = self.db.query(cypher, kwargs)
156 | # Add records to result list
157 | temp = {}
158 | temp['name'] = q['name']
159 | temp['description'] = q['description']
160 | temp['headers'] = q['result_headers']
161 | temp['result'] = records
162 | logger.debug(f"Result: {len(records)} records")
163 | queries_result.append(temp)
164 | return queries_result
165 |
166 | def query_by_tag(self, tags, account=None):
167 | logger.info("Executing queries by tag: {}".format(tags))
168 | # Filter queries
169 | selected_queries = self._filter_by_tags(self.QUERIES, tags)
170 | # Run queries
171 | return self._execute_queries(selected_queries, account)
172 |
173 |
174 | if __name__ == '__main__':
175 | Neo4jConnector()
176 |
--------------------------------------------------------------------------------
/components/cartography/deploy.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE="cartography"
4 | SELECTOR_NEO4J="app=cartography,component=neo4j"
5 | SELECTOR_JOB="app=cartography,component=cartography"
6 | TARGET=$1
7 | if [[ $# -lt 1 ]] ; then
8 | TARGET="minikube"
9 | fi
10 |
11 | # Create `cartography` namespace
12 | printf "\n[+] Creating ${NAMESPACE} namespace...\n"
13 | plz run //components/cartography:cartography-namespace_push
14 |
15 | # Setup and Deploy Neo4j
16 | plz run //components/cartography:deploy-neo4j ${NAMESPACE} ${TARGET} ${SELECTOR_NEO4J}
17 |
18 | # Setup and Deploy Cartography
19 | plz run //components/cartography:deploy-cartography ${NAMESPACE} ${TARGET} ${SELECTOR_JOB}
20 |
--------------------------------------------------------------------------------
/components/cartography/deployment/cartography-namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: cartography
5 |
--------------------------------------------------------------------------------
/components/cartography/deployment/cartography/base/cartography-job.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: batch/v1beta1
3 | kind: CronJob
4 | metadata:
5 | name: cartography-run
6 | spec:
7 | schedule: '0 7 * * *' # Run every day at 7am
8 | concurrencyPolicy: Forbid
9 | successfulJobsHistoryLimit: 1
10 | failedJobsHistoryLimit: 1
11 | jobTemplate:
12 | spec:
13 | backoffLimit: 5
14 | template:
15 | metadata:
16 | annotations:
17 | vault.hashicorp.com/agent-inject: 'true'
18 | vault.hashicorp.com/role: 'vault-agent'
19 | #
20 | # NEO4J PASSWORD FROM VAULT
21 | #
22 | vault.hashicorp.com/agent-inject-secret-neo4j-password: 'secret/data/cartography/neo4j-password'
23 | vault.hashicorp.com/agent-inject-template-neo4j-password: |
24 | {{ with secret "secret/data/cartography/neo4j-password" -}}
25 | NEO4J_SECRETS_PASSWORD="{{ .Data.data.NEO4J_SECRETS_PASSWORD }}"
26 | {{- end }}
27 | #
28 | # AWS CREDENTIALS FROM VAULT
29 | #
30 | vault.hashicorp.com/agent-inject-secret-aws-user.txt: '/aws/sts/cartography'
31 | ## END OF CREDENTIALS SETUP
32 | spec:
33 | serviceAccountName: vault-agent
34 | restartPolicy: Never
35 | containers:
36 | - name: cartography-run
37 | image: //components/cartography:cartography_docker
38 | securityContext:
39 | allowPrivilegeEscalation: false
40 | env:
41 | - name: NEO4J_URI
42 | value: 'bolt://neo4j-bolt-service:7687'
43 | - name: NEO4J_USER
44 | value: 'neo4j'
45 | - name: AWS_DEFAULT_REGION
46 | value: 'eu-west-1'
47 | - name: AWS_DEFAULT_OUTPUT
48 | value: 'json'
49 | - name: AWS_DEFAULT_PROFILE
50 | value: 'default'
51 | # - name: GOOGLE_APPLICATION_CREDENTIALS
52 | # value: /vault/secrets/security-reviewer-sa.json
53 | command:
54 | - '/bin/bash'
55 | - '-c'
56 | - |
57 | # Populate env vars from secrets: NEO4J_SECRETS_PASSWORD
58 | export $(grep -v '#.*' /vault/secrets/neo4j-password | xargs)
59 |
60 | # Retrieve credentials for the security audit user,
61 | # and setup config for the Spoke (child) accounts
62 | /docker-entrypoint.sh
63 |
64 | # Run Cartography
65 | cartography --neo4j-uri ${NEO4J_URI} --neo4j-user ${NEO4J_USER} --neo4j-password-env-var NEO4J_SECRETS_PASSWORD --aws-sync-all-profiles
66 |
--------------------------------------------------------------------------------
/components/cartography/deployment/cartography/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | namespace: cartography
5 | commonLabels:
6 | app: cartography
7 | component: cartography
8 | resources:
9 | - cartography-job.yaml
10 |
--------------------------------------------------------------------------------
/components/cartography/deployment/cartography/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.7
2 |
3 | WORKDIR /src
4 |
5 | # Setup app user
6 | RUN addgroup --gid 27100 --system app
7 | RUN adduser --shell /bin/false --uid 27100 --ingroup app --system app
8 | RUN chown -R app:app /src
9 |
10 | # Install dependencies
11 | COPY requirements.txt /src
12 | RUN python -m pip install --upgrade pip
13 | RUN pip3 install --upgrade -r /src/requirements.txt
14 |
15 | # Add entrypoint
16 | COPY docker-entrypoint.sh /
17 | RUN chmod +x /docker-entrypoint.sh
18 |
19 | USER app
20 | ENTRYPOINT ["/docker-entrypoint.sh"]
21 |
--------------------------------------------------------------------------------
/components/cartography/deployment/cartography/docker/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | mkdir -p ~/.aws/
4 |
5 | # Retrieve credentials for the security-audit-user
6 | aws_access_key_id=$(grep access_key /vault/secrets/aws-user.txt | awk '{print $2}')
7 | aws_secret_access_key=$(grep secret_key /vault/secrets/aws-user.txt | awk '{print $2}')
8 | aws_session_token=$(grep security_token /vault/secrets/aws-user.txt | awk '{print $2}')
9 |
10 | # Populate the ~/.aws/credentials file
11 | # [default]
12 | # aws_access_key_id = X
13 | # aws_secret_access_key = X
14 | # aws_session_token = x
15 | cat <<EOF >> ~/.aws/credentials
16 | [${AWS_DEFAULT_PROFILE}]
17 | aws_access_key_id=$aws_access_key_id
18 | aws_secret_access_key=$aws_secret_access_key
19 | aws_session_token=$aws_session_token
20 | EOF
21 |
22 | # Populate the ~/.aws/config file
23 | # # SETUP CONFIG
24 | # [default]
25 | # region=eu-west-1
26 | # output=json
27 | #
28 | # # SETUP SPOKE ACCOUNTS
29 | # [profile <account_id>]
30 | # role_arn = arn:aws:iam::<account_id>:role/role_security_audit
31 | # region=eu-west-1
32 | # output=json
33 | # source_profile=default
34 | cat <<EOF >> ~/.aws/config
35 | [${AWS_DEFAULT_PROFILE}]
36 | region=${AWS_DEFAULT_REGION}
37 | output=json
38 | retry_mode=standard
39 | max_attempts=6
40 | EOF
41 |
42 | # Fetch accounts in the Org
43 | accounts=$(aws organizations list-accounts --query 'Accounts[?Status==`ACTIVE`]'.Id --output text)
44 |
45 | for account_id in $accounts; do
46 | cat <<EOF >> ~/.aws/config
47 | [profile ${account_id}]
48 | role_arn = arn:aws:iam::${account_id}:role/role_security_audit
49 | region=${AWS_DEFAULT_REGION}
50 | output=json
51 | source_profile=${AWS_DEFAULT_PROFILE}
52 | retry_mode=standard
53 | max_attempts=6
54 | EOF
55 |
56 | done
57 |
--------------------------------------------------------------------------------
/components/cartography/deployment/cartography/docker/requirements.txt:
--------------------------------------------------------------------------------
1 | awscli
2 | cartography==0.42.1
3 | python-dateutil==2.8.0
4 |
--------------------------------------------------------------------------------
/components/cartography/deployment/cartography/overlays/baremetal/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | bases:
5 | - ../../base
6 |
--------------------------------------------------------------------------------
/components/cartography/deployment/cartography/overlays/minikube/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | bases:
5 | - ../../base
6 |
--------------------------------------------------------------------------------
/components/cartography/deployment/neo4j/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | namespace: cartography
5 |
6 | commonLabels:
7 | app: cartography
8 | component: neo4j
9 |
10 | resources:
11 | - vault-agent-sa.yaml
12 | - neo4j-service.yaml
13 | - neo4j-statefulset.yaml
14 |
--------------------------------------------------------------------------------
/components/cartography/deployment/neo4j/base/neo4j-service.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | labels:
6 | service: http
7 | name: neo4j-service
8 | spec:
9 | type: ClusterIP
10 | ports:
11 | - name: https
12 | port: 7473
13 | targetPort: 7473
14 | - name: http
15 | port: 7474
16 | targetPort: 7474
17 | - name: discovery
18 | port: 5000
19 | targetPort: 5000
20 | - name: raft
21 | port: 7000
22 | targetPort: 7000
23 | - name: tx
24 | port: 6000
25 | targetPort: 6000
26 |
27 | ---
28 | apiVersion: v1
29 | kind: Service
30 | metadata:
31 | labels:
32 | service: bolt
33 | name: neo4j-bolt-service
34 | spec:
35 | type: LoadBalancer
36 | ports:
37 | - name: bolt
38 | port: 7687
39 | targetPort: 7687
40 | protocol: TCP
41 |
--------------------------------------------------------------------------------
/components/cartography/deployment/neo4j/base/neo4j-statefulset.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: 'apps/v1'
3 | kind: StatefulSet
4 | metadata:
5 | name: neo4j-statefulset
6 | spec:
7 | serviceName: neo4j-statefulset
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | app: cartography
12 | template:
13 | metadata:
14 | annotations:
15 | vault.hashicorp.com/agent-inject: 'true'
16 | vault.hashicorp.com/role: 'vault-agent'
17 | # NEO4J PASSWORD FROM VAULT
18 | vault.hashicorp.com/agent-inject-secret-neo4j-password: 'secret/data/cartography/neo4j-password'
19 | vault.hashicorp.com/agent-inject-template-neo4j-password: |
20 | {{ with secret "secret/data/cartography/neo4j-password" -}}
21 | NEO4J_SECRETS_PASSWORD="{{ .Data.data.NEO4J_SECRETS_PASSWORD }}"
22 | {{- end }}
23 | ## END OF NEO4J PASSWORD SETUP
24 | spec:
25 | serviceAccountName: vault-agent
26 | containers:
27 | - name: neo4j-statefulset
28 | image: //third_party/docker:neo4j
29 | securityContext:
30 | allowPrivilegeEscalation: false
31 | env:
32 | - name: NEO4J_ACCEPT_LICENSE_AGREEMENT
33 | value: 'true'
34 | - name: NUMBER_OF_CORES
35 | value: '1'
36 | - name: AUTH_ENABLED
37 | value: 'true'
38 | - name: NEO_HOSTNAME
39 | value: 'neo4j-bolt-service'
40 | - name: SSL_FOLDER
41 | value: '/var/lib/neo4j/ssl'
42 | command:
43 | - '/bin/bash'
44 | - '-c'
45 | - |
46 | # Populate env vars from secrets
47 | # - NEO4J_SECRETS_PASSWORD
48 | export $(grep -v '#.*' /vault/secrets/neo4j-password | xargs)
49 |
50 | # Setup folders
51 | mkdir -p ${SSL_FOLDER}/client_policy/trusted
52 | mkdir -p ${SSL_FOLDER}/client_policy/revoked
53 | chmod -R a+rw ${SSL_FOLDER}
54 |
55 | # Setup Neo4j config
56 | # - Prefix with NEO4J_.
57 | # - Underscores must be written twice: _ is written as __.
58 | # - Periods are converted to underscores: . is written as _.
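# - Example: dbms.connector.bolt.listen_address becomes NEO4J_dbms_connector_bolt_listen__address.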
59 | #
60 | # TODO: re-enable password auth
61 | #
62 | # export NEO4J_dbms_security_auth__enabled=true
63 | # export NEO4J_AUTH="neo4j/${NEO4J_SECRETS_PASSWORD}"
64 |
65 | #
66 | # Global Configs
67 | #
68 | export NEO4J_dbms_connector_bolt_listen__address=0.0.0.0:7687
69 | export NEO4J_dbms_connectors_default__advertised__address=${NEO_HOSTNAME}
70 |
71 | #
72 | # Setup SSL - Folders
73 | #
74 | export NEO4J_dbms_ssl_policy_client__policy_base__directory=${SSL_FOLDER}/client_policy
75 | export NEO4J_dbms_ssl_policy_client__policy_trusted__dir=${SSL_FOLDER}/client_policy/trusted
76 | export NEO4J_dbms_ssl_policy_client__policy_revoked__dir=${SSL_FOLDER}/client_policy/revoked
77 |
78 | #
79 | # Setup SSL - Certificates
80 | #
81 | export NEO4J_dbms_ssl_policy_client__policy_private__key=/etc/certs/server_key.pem
82 | export NEO4J_dbms_ssl_policy_client__policy_public__certificate=/etc/certs/server_chain.pem
83 | cp /etc/certs/server_chain.pem ${SSL_FOLDER}/client_policy/trusted/
84 |
85 | #
86 | # Setup SSL - Enable policy
87 | #
88 | export NEO4J_https_ssl__policy=client_policy
89 | export NEO4J_bolt_ssl__policy=client_policy
90 | export NEO4J_dbms_ssl_policy_client__policy_client__auth=NONE
91 | export NEO4J_dbms_connector_bolt_tls__level=OPTIONAL
92 |
93 | #
94 | # Performance
95 | #
96 | export NEO4J_dbms_memory_heap_initial__size=4G
97 | export NEO4J_dbms_memory_heap_max__size=4G
98 |
99 | #
100 | # Run Neo4j
101 | #
102 | echo "Starting Neo4j...."
103 | exec /docker-entrypoint.sh "neo4j"
104 | ports:
105 | - name: discovery
106 | containerPort: 5000
107 | - name: raft
108 | containerPort: 7000
109 | - name: tx
110 | containerPort: 6000
111 | - name: http
112 | containerPort: 7474
113 | - name: https
114 | containerPort: 7473
115 | - name: bolt
116 | containerPort: 7687
117 | resources:
118 | requests:
119 | memory: 2Gi
120 | cpu: '0.1'
121 | limits:
122 | memory: 5Gi
123 | cpu: '0.2'
124 | volumeMounts:
125 | - name: datadir
126 | mountPath: '/data'
127 | - name: neo4j-certs-volume
128 | mountPath: /etc/certs/
129 | readOnly: true
130 | volumes:
131 | - name: neo4j-certs-volume
132 | secret:
133 | secretName: neo4j-bolt-tls
134 | items:
135 | - key: tls.key
136 | path: server_key.pem
137 | - key: tls.crt
138 | path: server_chain.pem
139 |
--------------------------------------------------------------------------------
/components/cartography/deployment/neo4j/base/vault-agent-sa.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: vault-agent
5 |
--------------------------------------------------------------------------------
/components/cartography/deployment/neo4j/overlays/baremetal/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | bases:
5 | - ../../base
6 |
7 | resources:
8 | - neo4j-pv.yaml
9 | - neo4j-ingress.yaml
10 |
11 | patchesStrategicMerge:
12 | - neo4j-statefulset.yaml
13 |
--------------------------------------------------------------------------------
/components/cartography/deployment/neo4j/overlays/baremetal/neo4j-ingress.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.k8s.io/v1
3 | kind: Ingress
4 | metadata:
5 | name: neo4j-ingress
6 | annotations:
7 | kubernetes.io/ingress.class: haproxy
8 | labels:
9 | certmanager.k8s.io/provider: cluster
10 | cert-manager.io/provider: cluster
11 | spec:
12 | rules:
13 | # Replace with IP of host
14 | - host: neo4j.192.168.1.151.nip.io
15 | http:
16 | paths:
17 | - path: /
18 | pathType: Prefix
19 | backend:
20 | service:
21 | name: neo4j-service
22 | port:
23 | number: 7474
24 | tls:
25 | - hosts:
26 | - neo4j.192.168.1.151.nip.io
27 | secretName: neo4j-web-tls
28 |
29 | ---
30 | apiVersion: networking.k8s.io/v1
31 | kind: Ingress
32 | metadata:
33 | name: neo4j-ingress-bolt
34 | annotations:
35 | kubernetes.io/ingress.class: haproxy
36 | spec:
37 | rules:
38 | # Replace with IP of host
39 | - host: bolt.192.168.1.151.nip.io
40 | http:
41 | paths:
42 | - path: /
43 | pathType: Prefix
44 | backend:
45 | service:
46 | name: neo4j-bolt-service
47 | port:
48 | number: 7687
49 | tls:
50 | - hosts:
51 | - bolt.192.168.1.151.nip.io
52 | secretName: neo4j-web-tls
53 |
--------------------------------------------------------------------------------
/components/cartography/deployment/neo4j/overlays/baremetal/neo4j-pv.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: StorageClass
3 | apiVersion: storage.k8s.io/v1
4 | metadata:
5 | name: neo4j-storage
6 | provisioner: kubernetes.io/no-provisioner
7 | volumeBindingMode: WaitForFirstConsumer
8 |
9 | ---
10 | apiVersion: v1
11 | kind: PersistentVolume
12 | metadata:
13 | name: neo4j-pv-volume
14 | labels:
15 | type: local
16 | spec:
17 | storageClassName: neo4j-storage
18 | capacity:
19 | storage: 1Gi
20 | accessModes:
21 | - ReadWriteOnce
22 | hostPath:
23 | path: '/etc/plz-k8s-lab/cartography/neo4j/'
24 |
--------------------------------------------------------------------------------
/components/cartography/deployment/neo4j/overlays/baremetal/neo4j-statefulset.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: 'apps/v1'
3 | kind: StatefulSet
4 | metadata:
5 | name: neo4j-statefulset
6 | spec:
7 | volumeClaimTemplates:
8 | - metadata:
9 | name: datadir
10 | annotations:
11 | spec:
12 | accessModes:
13 | - ReadWriteOnce
14 | storageClassName: neo4j-storage
15 | resources:
16 | requests:
17 | storage: '1Gi'
18 |
--------------------------------------------------------------------------------
/components/cartography/deployment/neo4j/overlays/minikube/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | bases:
5 | - ../../base
6 |
7 | patchesStrategicMerge:
8 | - neo4j-statefulset.yaml
9 |
--------------------------------------------------------------------------------
/components/cartography/deployment/neo4j/overlays/minikube/neo4j-statefulset.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: 'apps/v1'
3 | kind: StatefulSet
4 | metadata:
5 | name: neo4j-statefulset
6 | spec:
7 | template:
8 | spec:
9 | volumes:
10 | - name: datadir
11 | emptyDir: {}
12 |
--------------------------------------------------------------------------------
/components/cartography/forward-ui.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE="cartography"
4 | SELECTOR="app=cartography,component=neo4j,service=http"
5 |
6 | SERVICE_NAME=$(plz run //common:get_resource_from_selector ${NAMESPACE} svc ${SELECTOR})
7 |
8 | printf "[+] Forwarding Neo4J UI to http://127.0.0.1:7474\n"
9 | kubectl -n ${NAMESPACE} port-forward svc/${SERVICE_NAME} 7474:7474 7473:7473 7687:7687
10 |
--------------------------------------------------------------------------------
/components/cartography/setup/cartography.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | NAMESPACE=$1
4 | TARGET=$2
5 | SELECTOR=$3
6 |
7 | # Vault References
8 | VAULT_NAMESPACE="vault"
9 | VAULT_SELECTOR="app.kubernetes.io/name=vault,component=server"
10 | VAULT_KEY_FILE="cluster-keys-${TARGET}.json"
11 | VAULT_POD_NAME=$(plz run //common:get_resource_from_selector ${VAULT_NAMESPACE} pod ${VAULT_SELECTOR})
12 |
13 | #
14 | # AWS References:
15 | # - Request user to provide access key, secret key, and Account ID of the Hub
16 | #
17 | ROLE_ASSUME="role_security_assume"
18 | printf "\n[+] Please provide ACCESS KEY for IAM user: "
19 | read ACCESS_KEY
20 | printf "\n[+] Please provide SECRET KEY for IAM user: "
21 | read SECRET_KEY
22 | printf "\n[+] Please provide Account ID of the Hub: "
23 | read HUB_ID
24 |
25 | #
26 | # Setup Vault:
27 | # - Enable the AWS secrets engine
28 | # - Persist the credentials that Vault will use to communicate with AWS
29 | # - Configure a Vault role that maps to a set of permissions in AWS
30 | #
31 | printf "[+] Enable the AWS secrets engine...\n"
32 | kubectl -n ${VAULT_NAMESPACE} exec ${VAULT_POD_NAME} -- vault secrets enable aws
33 | printf "[+] Configure the credentials that Vault will us to communicate with AWS...\n"
34 | kubectl -n ${VAULT_NAMESPACE} exec ${VAULT_POD_NAME} -- vault write aws/config/root \
35 | access_key=${ACCESS_KEY} \
36 | secret_key=${SECRET_KEY} \
37 | region=eu-west-1
38 | printf "[+] Configure Vault role...\n"
39 | kubectl -n ${VAULT_NAMESPACE} exec ${VAULT_POD_NAME} -- vault write aws/roles/cartography \
40 | role_arns=arn:aws:iam::${HUB_ID}:role/${ROLE_ASSUME} \
41 | credential_type=assumed_role \
42 | default_sts_ttl=21600 \
43 | max_sts_ttl=21600
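# Credentials for this role can now be requested at aws/sts/cartography,
# the same path the Vault Agent annotation in cartography-job.yaml injects
# into the Cartography pod.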
44 |
45 | #
46 | # Deploy Cartography:
47 | # - Deploy the Cartography CronJob
48 | #
49 | printf "\n[+] Deploying Cartography on ${TARGET}...\n"
50 | if [[ $TARGET == "baremetal" ]]
51 | then
52 | plz run //components/cartography:cartography-baremetal_push
53 | else
54 | plz run //components/cartography:cartography-minikube_push
55 | fi
56 |
--------------------------------------------------------------------------------
/components/cartography/setup/elastic-ingestor.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | NAMESPACE="cartography"
4 | TARGET=$1
5 | if [[ $# -lt 1 ]] ; then
6 | TARGET="minikube"
7 | fi
8 |
9 | #
10 | # Setup Elastic credentials
11 | #
12 | printf "\n[+] Setting up Elastic credentials on ${TARGET}...\n"
13 | NAMESPACE_ELK="elastic-system"
14 | ELASTIC_USER="elastic"
15 | ELASTIC_PASSWORD=$(kubectl -n ${NAMESPACE_ELK} get secrets elasticsearch-es-elastic-user -o=jsonpath='{.data.elastic}' | base64 --decode)
16 |
17 | kubectl -n ${NAMESPACE} create secret generic elastic-credentials \
18 | --from-literal=username=${ELASTIC_USER} \
19 | --from-literal=password=${ELASTIC_PASSWORD}
20 |
21 | #
22 | # Deploy Elastic Ingestor:
23 | # - Deploy the Elastic Ingestor CronJob
24 | #
25 | printf "\n[+] Deploying Elastic Ingestor on ${TARGET}...\n"
26 | if [[ $TARGET == "baremetal" ]]
27 | then
28 | plz run //components/cartography:elastic-ingestor-baremetal_push
29 | else
30 | plz run //components/cartography:elastic-ingestor-minikube_push
31 | fi
32 |
--------------------------------------------------------------------------------
/components/cartography/setup/neo4j.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | NAMESPACE=$1
4 | TARGET=$2
5 | SELECTOR=$3
6 |
7 | # Vault References
8 | VAULT_NAMESPACE="vault"
9 | VAULT_SELECTOR="app.kubernetes.io/name=vault,component=server"
10 | VAULT_POD_NAME=$(plz run //common:get_resource_from_selector ${VAULT_NAMESPACE} pod ${VAULT_SELECTOR})
11 |
12 | # Neo4J References
13 | NEO4J_BAREMETAL_HOST="neo4j.192.168.1.151.nip.io"
14 | NEO4J_PASSWORD_LOCATION="secret/cartography/neo4j-password"
15 | NEO4J_PASSWORD=$(openssl rand -base64 32)
16 |
17 | # Neo4j Password
18 | printf "\n[+] Generating Neo4j Password and persisting it into Vault at: ${NEO4J_PASSWORD_LOCATION}\n"
19 | # With -cas=0 a write will only be allowed if the key doesn't exist
20 | kubectl -n ${VAULT_NAMESPACE} exec ${VAULT_POD_NAME} -it -- vault kv put -cas=0 ${NEO4J_PASSWORD_LOCATION} NEO4J_SECRETS_PASSWORD=${NEO4J_PASSWORD}
21 |
22 | # TLS Certificates
23 | printf "\n[+] Generating TLS Certificates...\n"
24 | if [[ $TARGET == "baremetal" ]]
25 | then
26 | NEO4J_HOST=$NEO4J_BAREMETAL_HOST
27 | else
28 | NEO4J_HOST="neo4j-service"
29 | fi
30 | openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/neo4j-tls.key -out /tmp/neo4j-tls.crt -subj "/CN=${NEO4J_HOST}"
31 | kubectl -n ${NAMESPACE} create secret generic neo4j-bolt-tls --from-file=tls.crt=/tmp/neo4j-tls.crt --from-file=tls.key=/tmp/neo4j-tls.key
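# The Neo4j StatefulSet mounts this secret at /etc/certs/ as server_key.pem
# and server_chain.pem (see neo4j-statefulset.yaml).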
32 |
33 | # Deploy Neo4j
34 | # - Create Vault Agent service account
35 | # - Create StorageClass and PersistentVolume, and Ingress (baremetal only)
36 | # - Deploy the Neo4j StatefulSet, Service
37 | printf "\n[+] Deploying Neo4j on ${TARGET}...\n"
38 | if [[ $TARGET == "baremetal" ]]
39 | then
40 | plz run //components/cartography:neo4j-baremetal_push
41 | else
42 | plz run //components/cartography:neo4j-minikube_push
43 | fi
44 | plz run //common:wait_pod -- ${NAMESPACE} "Neo4j" ${SELECTOR}
45 |
--------------------------------------------------------------------------------
/components/elk/BUILD:
--------------------------------------------------------------------------------
1 | subinclude("//build_defs:docker")
2 | subinclude("//build_defs:helm_chart")
3 | subinclude("//build_defs:k8s")
4 | subinclude("//build_defs:kustomize")
5 |
6 | sh_binary(
7 | name = "deploy",
8 | main = "deploy.sh",
9 | )
10 |
11 | sh_binary(
12 | name = "ui",
13 | main = "forward-ui.sh",
14 | )
15 |
16 | k8s_config(
17 | name = "elk-namespace",
18 | srcs = ["deployment/elk-namespace.yaml"],
19 | )
20 |
21 | # ==============================================================================
22 | # OPERATOR
23 | # ==============================================================================
24 | k8s_config(
25 | name = "eck-crds",
26 | srcs = ["//third_party:eck-crds"],
27 | visibility = ["//components/elk/..."]
28 | )
29 |
30 | k8s_config(
31 | name = "eck-operator",
32 | srcs = ["//third_party:eck-operator"],
33 | visibility = ["//components/elk/..."]
34 | )
35 |
36 | # ==============================================================================
37 | # ELK
38 | # ==============================================================================
39 | kustomize_prep(
40 | name = "kustomize-elk",
41 | srcs = glob(["deployment/**/*.yaml"]),
42 | )
43 |
44 | kustomize(
45 | name = "elk-baremetal",
46 | namespace = "elastic-system",
47 | kustomize_path = "deployment/",
48 | overlay = "baremetal",
49 | srcs = [":kustomize-elk"],
50 | )
51 |
52 | kustomize(
53 | name = "elk-minikube",
54 | namespace = "elastic-system",
55 | kustomize_path = "deployment/",
56 | overlay = "minikube",
57 | srcs = [":kustomize-elk"],
58 | )
59 |
--------------------------------------------------------------------------------
/components/elk/README.md:
--------------------------------------------------------------------------------
1 | # ELK Setup
2 |
3 | ## Deploy ELK
4 | ```bash
5 | ❯ plz run //components/elk:deploy [minikube|baremetal]
6 | ```
7 | * Creates `elastic-system` namespace
8 | * Deploys the Elastic Operator
9 | * Deploys an Elasticsearch cluster
10 | * Deploys a Kibana instance
11 |
12 | Verify pods are healthy:
13 | ```bash
14 | ❯ kubectl -n elastic-system get pods
15 | NAME READY STATUS RESTARTS AGE
16 | elastic-operator-0 1/1 Running 1 106m
17 | elasticsearch-es-default-0 0/1 Running 1 105m
18 | kibana-kb-84645887d8-9fcsf 0/1 Running 1 105m
19 | ```
20 |
21 | 📝 **NOTE FOR BAREMETAL**: before deploying, make sure to prepare
22 | the data folder on the host (to reset the installation, remove this same folder):
23 | ```bash
24 | ❯ sudo mkdir -p /etc/plz-k8s-lab/elastic/data/
25 | ❯ sudo chmod -R a+rw /etc/plz-k8s-lab/elastic/
26 | ```
27 |
28 |
29 | ---
30 |
31 |
32 | ## Access Elasticsearch and Kibana
33 |
34 | ### Via Port-Forward
35 | * Forwards the Kibana UI to http://127.0.0.1:5601
36 | ```bash
37 | ❯ plz run //components/elk:ui
38 | ```
39 |
40 | 
41 |
42 | * Forwards the Elasticsearch Service:
43 | * From inside the Kubernetes cluster: `curl -u '${ELASTIC_USER}:${ELASTIC_PASSWORD}' -k 'http://elasticsearch-es-http:9200'`
44 | * From your local workstation: `curl -u '${ELASTIC_USER}:${ELASTIC_PASSWORD}' -k 'http://localhost:9200'`
45 |
46 | ```bash
47 | ❯ curl -u 'elastic:<password>' -k 'http://localhost:9200'
48 | {
49 | "name" : "elasticsearch-es-default-0",
50 | "cluster_name" : "elasticsearch",
51 | "cluster_uuid" : "WLFbN05xRKqYYA0H6RN_mg",
52 | "version" : {
53 | "number" : "7.8.0",
54 | "build_flavor" : "default",
55 | "build_type" : "docker",
56 | "build_hash" : "757314695644ea9a1dc2fecd26d1a43856725e65",
57 | "build_date" : "2020-06-14T19:35:50.234439Z",
58 | "build_snapshot" : false,
59 | "lucene_version" : "8.5.1",
60 | "minimum_wire_compatibility_version" : "6.8.0",
61 | "minimum_index_compatibility_version" : "6.0.0-beta1"
62 | },
63 | "tagline" : "You Know, for Search"
64 | }
65 | ```
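
The password for the `elastic` user used above can be retrieved from the secret managed by the operator (the same lookup `forward-ui.sh` performs):
```bash
❯ kubectl -n elastic-system get secrets elasticsearch-es-elastic-user -o=jsonpath='{.data.elastic}' | base64 --decode
```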
66 |
67 | ### Via Ingress on Baremetal
68 | * Verify the Ingresses have been deployed:
69 | ```bash
70 | ❯ kubectl -n elastic-system get ingress
71 | NAME CLASS HOSTS ADDRESS PORTS AGE
72 | kibana-ingress <none> kibana.192.168.1.151.nip.io 80 8m29s
73 | ```
74 |
75 | 📝 **NOTE**: before deploying, make sure to replace the host IP address in: `//components/elk/deployment/overlays/baremetal/kibana-ui-ingress.yaml`
76 |
77 | This assumes you followed the setup described at "[Kubernetes Lab on Baremetal](https://www.marcolancini.it/2021/blog-kubernetes-lab-baremetal/)".
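
To quickly verify the Ingress is serving (a sketch; assumes the `nip.io` host resolves to your node and HAProxy listens on port 80):
```bash
❯ curl -sI http://kibana.192.168.1.151.nip.io | head -n 1
```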
78 |
79 |
80 | ## References
81 | * [Getting started with Elastic Cloud on Kubernetes: Deployment](https://www.elastic.co/blog/getting-started-with-elastic-cloud-on-kubernetes-deployment)
82 | * [Deploy ECK in your Kubernetes cluster](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-deploy-eck.html)
83 | * [Run Filebeat on Kubernetes](https://www.elastic.co/guide/en/beats/filebeat/master/running-on-kubernetes.html)
84 | * [How To Set Up an Elasticsearch, Fluentd and Kibana (EFK) Logging Stack on Kubernetes](https://www.digitalocean.com/community/tutorials/how-to-set-up-an-elasticsearch-fluentd-and-kibana-efk-logging-stack-on-kubernetes)
85 |
--------------------------------------------------------------------------------
/components/elk/deploy.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE="elastic-system"
4 | SELECTOR="control-plane=elastic-operator"
5 | TARGET=$1
6 | if [[ $# -lt 1 ]] ; then
7 | TARGET="minikube"
8 | fi
9 |
10 | # Create `elastic` namespace
11 | printf "\n[+] Creating ${NAMESPACE} namespace...\n"
12 | plz run //components/elk:elk-namespace_push
13 |
14 | #
15 | # Deploying Elastic Operator
16 | #
17 | printf "[+] Deploying Elastic Operator...\n"
18 | plz run //components/elk:eck-crds_push
19 | plz run //components/elk:eck-operator_push
20 | plz run //common:wait_pod -- ${NAMESPACE} "Elastic Operator" ${SELECTOR}
21 |
22 | #
23 | # Deploying ELK:
24 | # - Elasticsearch cluster
25 | # - Kibana instance
26 | #
27 | printf "\n[+] Deploying ELK on ${TARGET}...\n"
28 | if [[ $TARGET == "baremetal" ]]
29 | then
30 | plz run //components/elk:elk-baremetal_push
31 | else
32 | plz run //components/elk:elk-minikube_push
33 | fi
34 |
--------------------------------------------------------------------------------
/components/elk/deployment/base/elasticsearch.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: elasticsearch.k8s.elastic.co/v1
2 | kind: Elasticsearch
3 | metadata:
4 | name: elasticsearch
5 | spec:
6 | version: 7.14.0
7 | nodeSets:
8 | - name: default
9 | count: 1
10 | http:
11 | tls:
12 | selfSignedCertificate:
13 | disabled: true
14 |
--------------------------------------------------------------------------------
/components/elk/deployment/base/kibana.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kibana.k8s.elastic.co/v1
2 | kind: Kibana
3 | metadata:
4 | name: kibana
5 | spec:
6 | version: 7.14.0
7 | count: 1
8 | elasticsearchRef:
9 | name: elasticsearch
10 | http:
11 | tls:
12 | selfSignedCertificate:
13 | disabled: true
14 |
--------------------------------------------------------------------------------
/components/elk/deployment/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | namespace: elastic-system
5 |
6 | resources:
7 | - elasticsearch.yaml
8 | - kibana.yaml
9 |
--------------------------------------------------------------------------------
/components/elk/deployment/elk-namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: elastic-system
5 |
--------------------------------------------------------------------------------
/components/elk/deployment/overlays/baremetal/elastic-pv.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: StorageClass
3 | apiVersion: storage.k8s.io/v1
4 | metadata:
5 | name: elastic-storage
6 | provisioner: kubernetes.io/no-provisioner
7 | volumeBindingMode: WaitForFirstConsumer
8 |
9 | ---
10 | apiVersion: v1
11 | kind: PersistentVolume
12 | metadata:
13 | name: elastic-pv-volume
14 | labels:
15 | type: local
16 | spec:
17 | storageClassName: elastic-storage
18 | capacity:
19 | storage: 1Gi
20 | accessModes:
21 | - ReadWriteOnce
22 | hostPath:
23 | path: '/etc/plz-k8s-lab/elastic/data/'
24 |
--------------------------------------------------------------------------------
/components/elk/deployment/overlays/baremetal/elasticsearch.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: elasticsearch.k8s.elastic.co/v1
2 | kind: Elasticsearch
3 | metadata:
4 | name: elasticsearch
5 | spec:
6 | nodeSets:
7 | - name: default
8 | count: 1
9 | config:
10 | node.master: true
11 | node.data: true
12 | node.ingest: true
13 | node.store.allow_mmap: false
14 | podTemplate:
15 | spec:
16 | initContainers:
17 | - name: sysctl
18 | securityContext:
19 | privileged: true
20 | command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
21 | containers:
22 | - name: elasticsearch
23 | securityContext:
24 | # Needed by Elasticsearch for chroot
25 | privileged: true
26 | runAsUser: 0
27 | volumeMounts:
28 | - name: elasticsearch-data
29 | mountPath: '/usr/share/elasticsearch/data'
30 | volumeClaimTemplates:
31 | - metadata:
32 | name: elasticsearch-data
33 | spec:
34 | accessModes:
35 | - ReadWriteOnce
36 | resources:
37 | requests:
38 | storage: 1Gi
39 | storageClassName: elastic-storage
40 |
--------------------------------------------------------------------------------
/components/elk/deployment/overlays/baremetal/kibana-ui-ingress.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.k8s.io/v1
3 | kind: Ingress
4 | metadata:
5 | name: kibana-ingress
6 | annotations:
7 | kubernetes.io/ingress.class: haproxy
8 | spec:
9 | rules:
10 | # Replace with IP of host
11 | - host: kibana.192.168.1.151.nip.io
12 | http:
13 | paths:
14 | - path: /
15 | pathType: Prefix
16 | backend:
17 | service:
18 | name: kibana-kb-http
19 | port:
20 | number: 5601
21 |
--------------------------------------------------------------------------------
/components/elk/deployment/overlays/baremetal/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | bases:
5 | - ../../base
6 |
7 | resources:
8 | - elastic-pv.yaml
9 | - kibana-ui-ingress.yaml
10 |
11 | patchesStrategicMerge:
12 | - elasticsearch.yaml
13 |
--------------------------------------------------------------------------------
/components/elk/deployment/overlays/minikube/elasticsearch.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: elasticsearch.k8s.elastic.co/v1
2 | kind: Elasticsearch
3 | metadata:
4 | name: elasticsearch
5 | spec:
6 | nodeSets:
7 | - name: default
8 | count: 1
9 | config:
10 | node.master: true
11 | node.data: true
12 | node.ingest: true
13 | node.store.allow_mmap: false
14 | podTemplate:
15 | spec:
16 | containers:
17 | - name: elasticsearch
18 | securityContext:
19 | # Needed by Elasticsearch for chroot
20 | privileged: true
21 | runAsUser: 0
22 | volumeMounts:
23 | - name: elasticsearch-data
24 | mountPath: '/usr/share/elasticsearch/data'
25 | volumes:
26 | - name: elasticsearch-data
27 | emptyDir: {}
28 |
--------------------------------------------------------------------------------
/components/elk/deployment/overlays/minikube/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | bases:
5 | - ../../base
6 |
7 | patchesStrategicMerge:
8 | - elasticsearch.yaml
9 |
--------------------------------------------------------------------------------
/components/elk/forward-ui.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE="elastic-system"
4 | ELASTIC_USER="elastic"
5 | ELASTIC_PASSWORD=$(kubectl -n ${NAMESPACE} get secrets elasticsearch-es-elastic-user -o=jsonpath='{.data.elastic}' | base64 --decode)
6 |
7 | #
8 | # Elasticsearch Endpoint
9 | #
10 | printf "[+] Forwarding Elasticsearch Service to http://127.0.0.1:9200\n"
11 | printf "\t[*] From inside the Kubernetes cluster: curl -u '${ELASTIC_USER}:${ELASTIC_PASSWORD}' -k 'http://elasticsearch-es-http:9200'\n"
12 | printf "\t[*] From your local workstation: curl -u '${ELASTIC_USER}:${ELASTIC_PASSWORD}' -k 'http://localhost:9200'\n"
13 | kubectl -n ${NAMESPACE} port-forward svc/elasticsearch-es-http 9200 &
14 |
15 | #
16 | # Kibana UI
17 | #
18 | printf "[+] Forwarding Kibana UI to http://127.0.0.1:5601\n"
19 | printf "\t[*] Username: ${ELASTIC_USER}\n"
20 | printf "\t[*] Password: ${ELASTIC_PASSWORD}\n"
21 | kubectl -n ${NAMESPACE} port-forward svc/kibana-kb-http 5601 &
22 |
--------------------------------------------------------------------------------
/components/kafka/BUILD:
--------------------------------------------------------------------------------
1 | subinclude("//build_defs:docker")
2 | subinclude("//build_defs:helm_chart")
3 | subinclude("//build_defs:k8s")
4 | subinclude("//build_defs:k8s_namespaced")
5 |
6 | sh_binary(
7 | name = "deploy",
8 | main = "deploy.sh",
9 | )
10 |
11 | # ==============================================================================
12 | # Deploy
13 | # ==============================================================================
14 | k8s_config(
15 | name = "kafka-namespace",
16 | srcs = ["k8s/kafka-namespace.yaml"],
17 | )
18 |
19 | genrule(
20 | # Needed to replace the placeholder namespace with ours
21 | name = "pull-operator",
22 | srcs = ["//third_party:kafka-operator"],
23 | outs = ["kafka-operator.yaml"],
24 | cmd = "sed 's/namespace: .*/namespace: kafka/' $SRCS > $OUT",
25 | )
26 |
27 | k8s_config_namespaced(
28 | name = "kafka-operator",
29 | srcs = [":pull-operator"],
30 | namespace = "kafka",
31 | visibility = ["//components/kafka/..."]
32 | )
33 |
34 | k8s_config(
35 | name = "kafka-cluster",
36 | srcs = ["k8s/kafka-cluster.yaml"],
37 | visibility = ["//components/kafka/..."]
38 | )
39 |
40 | # ==============================================================================
41 | # Interact
42 | # ==============================================================================
43 | sh_binary(
44 | name = "list-topics",
45 | main = "scripts/list-topics.sh",
46 | )
47 |
48 | sh_binary(
49 | name = "consume-topic",
50 | main = "scripts/consume-topic.sh",
51 | )
52 |
53 | sh_binary(
54 | name = "produce-topic",
55 | main = "scripts/produce-topic.sh",
56 | )
57 |
--------------------------------------------------------------------------------
/components/kafka/README.md:
--------------------------------------------------------------------------------
1 | # Kafka Setup
2 |
3 |
4 | ## Deploy Kafka/Zookeeper
5 | ```bash
6 | ❯ plz run //components/kafka:deploy
7 | ```
8 | * Creates `kafka` namespace
9 | * Deploys the Kafka Operator
10 | * Deploys the Kafka cluster (Kafka, Zookeeper, KafkaExporter, Entity Operator)
11 |
12 | Verify pods are healthy:
13 | ```bash
14 | ❯ kubectl -n kafka get pods
15 | NAME READY STATUS RESTARTS AGE
16 | kafka-cluster-entity-operator-77b9f56dd-625m7 3/3 Running 0 22s
17 | kafka-cluster-kafka-0 2/2 Running 0 47s
18 | kafka-cluster-kafka-exporter-795f5ccb5b-2hlff 1/1 Running 0 74s
19 | kafka-cluster-zookeeper-0 1/1 Running 0 2m59s
20 | strimzi-cluster-operator-54565f8c56-nf24m 1/1 Running 0 5m46s
21 | ```
22 |
23 |
24 | ## Interact with the brokers
25 | Once the cluster is running, you can run simple client pods to interact with Kafka
26 | and produce/consume messages to/from a topic.
27 |
28 | * List topics:
29 | ```bash
30 | ❯ plz run //components/kafka:list-topics
31 | [+] Starting container...
32 | If you don't see a command prompt, try pressing enter.
33 | __consumer_offsets
34 | my-topic
35 | test-topic
36 | test.topic
37 | pod "kafka-interact-list" deleted
38 | ```
39 |
40 | * Send messages:
41 | ```bash
42 | ❯ plz run //components/kafka:produce-topic -- test-topic
43 | [+] Starting container...
44 | If you don't see a command prompt, try pressing enter.
45 | >1
46 | >2
47 | >3
48 | >4
49 | >5
50 | >^C
51 | ```
52 |
53 | * Receive messages:
54 | ```bash
55 | ❯ plz run //components/kafka:consume-topic -- test-topic
56 | [+] Starting container...
57 | If you don't see a command prompt, try pressing enter.
58 | 1
59 | 2
60 | 3
61 | 4
62 | 5
63 | Processed a total of 5 messages
64 | ```
65 |
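Topics managed by the Entity Operator are also materialized as `KafkaTopic` custom resources, so (assuming the Strimzi CRDs deployed above) they can be inspected with kubectl:
```bash
❯ kubectl -n kafka get kafkatopics
```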
66 |
67 | ## References
68 | * [Kafka Operator](https://github.com/strimzi/strimzi-kafka-operator)
69 | * [Strimzi Overview guide](https://strimzi.io/docs/operators/latest/overview.html)
70 | * [Using Strimzi](https://strimzi.io/docs/operators/latest/using.html)
71 |
--------------------------------------------------------------------------------
/components/kafka/deploy.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE="kafka"
4 |
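# Poll the pods matching selector "$2" until they report phase "Running"; "$1" is a friendly name used in log output.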
5 | wait_pod () {
6 | status=$(kubectl -n ${NAMESPACE} get pods --selector="${2}" -o json | jq '.items[].status.phase')
7 | while [ -z "$status" ] || [ $status != '"Running"' ]
8 | do
9 | printf "\t[*] Waiting for ${1} to be ready...\n"
10 | sleep 5
11 | status=$(kubectl -n ${NAMESPACE} get pods --selector="${2}" -o json | jq '.items[].status.phase')
12 | done
13 | printf "\t[*] ${1} is ready\n"
14 | }
15 |
16 | #
17 | # Create namespace
18 | #
19 | printf "[+] Creating kafka namespace...\n"
20 | plz run //components/kafka:kafka-namespace_push
21 |
22 | #
23 | # Deploying Kafka Operator
24 | #
25 | printf "[+] Deploying Kafka Operator...\n"
26 | plz run //components/kafka:kafka-operator_push
27 | wait_pod 'Kafka Operator' 'name=strimzi-cluster-operator'
28 |
29 | #
30 | # Deploying Kafka cluster (Kafka, Zookeeper, KafkaExporter, Entity Operator)
31 | #
32 | printf "[+] Deploying Kafka cluster...\n"
33 | plz run //components/kafka:kafka-cluster_push
34 | wait_pod 'Zookeeper' 'app.kubernetes.io/name=zookeeper'
35 | wait_pod 'Kafka' 'app.kubernetes.io/name=kafka'
36 | wait_pod 'Entity Operator' 'app.kubernetes.io/name=entity-operator'
37 | wait_pod 'KafkaExporter' 'app.kubernetes.io/name=kafka-exporter'
38 |
--------------------------------------------------------------------------------
/components/kafka/k8s/kafka-cluster.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kafka.strimzi.io/v1beta1
2 | kind: Kafka
3 | metadata:
4 | name: kafka-cluster
5 | namespace: kafka
6 | spec:
7 | kafka:
8 | version: 2.5.0
9 | replicas: 1
10 | listeners:
11 | plain: {}
12 | tls: {}
13 | config:
14 | offsets.topic.replication.factor: 1
15 | transaction.state.log.replication.factor: 1
16 | transaction.state.log.min.isr: 1
17 | log.message.format.version: "2.5"
18 | storage:
19 | type: jbod
20 | volumes:
21 | - id: 0
22 | type: persistent-claim
23 | size: 1Gi
24 | deleteClaim: false
25 | zookeeper:
26 | replicas: 1
27 | storage:
28 | type: persistent-claim
29 | size: 1Gi
30 | deleteClaim: false
31 | entityOperator:
32 | topicOperator: {}
33 | userOperator: {}
34 | kafkaExporter:
35 | groupRegex: ".*"
36 | topicRegex: ".*"
37 | logging: info
38 | enableSaramaLogging: true
39 |
--------------------------------------------------------------------------------
/components/kafka/k8s/kafka-namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: kafka
5 |
--------------------------------------------------------------------------------
/components/kafka/scripts/consume-topic.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE="kafka"
4 | PODNAME="kafka-interact-consume"
5 | IMAGE="strimzi/kafka:0.18.0-kafka-2.5.0"
6 | SERVICE="kafka-cluster-kafka-bootstrap:9092"
7 |
8 | printf "[+] Starting container...\n"
9 | kubectl -n ${NAMESPACE} run ${PODNAME} -it --rm=true --restart=Never \
10 | --image=${IMAGE} -- bin/kafka-console-consumer.sh \
11 | --bootstrap-server ${SERVICE} --topic $1 --from-beginning
12 |
--------------------------------------------------------------------------------
/components/kafka/scripts/list-topics.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE="kafka"
4 | PODNAME="kafka-interact-list"
5 | IMAGE="strimzi/kafka:0.18.0-kafka-2.5.0"
6 | SERVICE="kafka-cluster-kafka-bootstrap:9092"
7 |
8 | printf "[+] Starting container...\n"
9 | kubectl -n ${NAMESPACE} run ${PODNAME} -it --rm=true --restart=Never \
10 | --image=${IMAGE} -- bin/kafka-topics.sh \
11 | --bootstrap-server ${SERVICE} --list
12 |
--------------------------------------------------------------------------------
/components/kafka/scripts/produce-topic.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE="kafka"
4 | PODNAME="kafka-interact-produce"
5 | IMAGE="strimzi/kafka:0.18.0-kafka-2.5.0"
6 | SERVICE="kafka-cluster-kafka-bootstrap:9092"
7 |
8 | printf "[+] Starting container...\n"
9 | kubectl -n ${NAMESPACE} run ${PODNAME} -it --rm=true --restart=Never \
10 | --image=${IMAGE} -- bin/kafka-console-producer.sh \
11 | --broker-list ${SERVICE} --topic $1
12 |
--------------------------------------------------------------------------------
/components/observability/BUILD:
--------------------------------------------------------------------------------
1 | subinclude("//build_defs:docker")
2 | subinclude("//build_defs:helm_chart")
3 | subinclude("//build_defs:k8s")
4 |
5 | sh_binary(
6 | name = "deploy",
7 | main = "deploy.sh",
8 | )
9 |
10 | sh_binary(
11 | name = "ui",
12 | main = "forward-ui.sh",
13 | )
14 |
15 | # ==============================================================================
16 | # Deploy via Helm
17 | # ==============================================================================
18 | helm_chart(
19 | name = "prometheus-helm",
20 | src = "//third_party:helm-prometheus-tar",
21 | install_path = "prometheus-operator",
22 | namespace = "observability",
23 | visibility = ["//components/prometheus/..."],
24 | deps = [
25 | ":observability-namespace",
26 | ]
27 | )
28 |
29 | k8s_config(
30 | name = "observability-namespace",
31 | srcs = ["k8s/namespace.yaml"],
32 | )
33 |
--------------------------------------------------------------------------------
/components/observability/README.md:
--------------------------------------------------------------------------------
1 | # Prometheus Setup
2 |
3 |
4 | ## Deploy Prometheus/Grafana/Alertmanager
5 | ```bash
6 | ❯ plz run //components/observability:deploy
7 | ```
8 | * Creates `observability` namespace
9 | * Deploys the Prometheus Operator
10 | * Deploys a Grafana instance
11 | * Deploys an Alertmanager instance
12 |
13 | Verify pods are healthy:
14 | ```bash
15 | ❯ kubectl -n observability get pods
16 | NAME READY STATUS RESTARTS AGE
17 | alertmanager-prometheus-helm-prometheus-alertmanager-0 2/2 Running 0 6s
18 | prometheus-helm-grafana-5fc87c9979-vszqg 2/2 Running 0 76s
19 | prometheus-helm-grafana-test 0/1 Completed 0 23s
20 | prometheus-helm-kube-state-metrics-6cbf48fd46-8qzff 1/1 Running 0 75s
21 | prometheus-helm-prometheus-admission-create-ldjmd 0/1 Completed 0 23s
22 | prometheus-helm-prometheus-admission-patch-kk4fx 0/1 Completed 1 23s
23 | prometheus-helm-prometheus-node-exporter-pppzv 1/1 Running 0 76s
24 | prometheus-helm-prometheus-operator-d5c9f5675-dh5kd 2/2 Running 0 75s
25 | ```
26 |
27 |
28 | ## Access UIs
29 | ```bash
30 | ❯ plz run //components/observability:ui
31 | ```
32 | * Forwards the Prometheus Service to http://127.0.0.1:9090
33 |
34 | 
35 |
36 | * Forwards the Grafana UI to http://127.0.0.1:9191
37 |
38 | 
39 |
40 | * Forwards the Alertmanager Service to http://127.0.0.1:9093
41 |
42 | 
43 |
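Once forwarded, each service exposes a health endpoint that can be used as a quick smoke test (standard endpoints for these tools; ports as forwarded above):
```bash
❯ curl -s http://127.0.0.1:9090/-/healthy    # Prometheus
❯ curl -s http://127.0.0.1:9093/-/healthy    # Alertmanager
❯ curl -s http://127.0.0.1:9191/api/health   # Grafana
```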
44 |
45 | ## References
46 | * [Prometheus Operator](https://github.com/coreos/prometheus-operator)
47 | * [Kubernetes monitoring with Prometheus in 15 minutes](https://itnext.io/kubernetes-monitoring-with-prometheus-in-15-minutes-8e54d1de2e13)
48 | * [Kubernetes Monitoring with Prometheus](https://sysdig.com/blog/kubernetes-monitoring-prometheus/)
49 |
--------------------------------------------------------------------------------
/components/observability/deploy.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE="observability"
4 |
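# Poll the pods matching selector "$2" until they report phase "Running"; "$1" is a friendly name used in log output.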
5 | wait_pod () {
6 | status=$(kubectl -n ${NAMESPACE} get pods --selector="${2}" -o json | jq '.items[].status.phase')
7 | while [ -z "$status" ] || [ $status != '"Running"' ]
8 | do
9 | printf "\t[*] Waiting for $1 to be ready...\n"
10 | sleep 5
11 | status=$(kubectl -n ${NAMESPACE} get pods --selector="${2}" -o json | jq '.items[].status.phase')
12 | done
13 | printf "\t[*] $1 is ready\n"
14 | }
15 |
16 | #
17 | # Create namespace
18 | #
19 | printf "[+] Creating observability namespace...\n"
20 | plz run //components/observability:observability-namespace_push
21 |
22 | #
23 | # Deploying Prometheus Operator
24 | #
25 | printf "[+] Deploying Prometheus Operator...\n"
26 | plz run //components/observability:prometheus-helm_push
27 | wait_pod 'Prometheus Operator' 'app=prometheus-operator-operator'
28 |
--------------------------------------------------------------------------------
/components/observability/forward-ui.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE="observability"
4 | GRAFANA_USER=$(kubectl -n ${NAMESPACE} get secrets prometheus-helm-grafana -o=jsonpath='{.data.admin-user}' | base64 --decode)
5 | GRAFANA_PASSWORD=$(kubectl -n ${NAMESPACE} get secrets prometheus-helm-grafana -o=jsonpath='{.data.admin-password}' | base64 --decode)
6 |
7 | #
8 | # Prometheus
9 | #
10 | printf "[+] Forwarding Prometheus Service to http://127.0.0.1:9090\n"
11 | kubectl -n ${NAMESPACE} port-forward svc/prometheus-helm-prometheus-prometheus 9090 &
12 |
13 | #
14 | # Grafana
15 | #
16 | printf "[+] Forwarding Grafana UI to http://127.0.0.1:9191\n"
17 | printf "\t[*] Username: ${GRAFANA_USER}\n"
18 | printf "\t[*] Password: ${GRAFANA_PASSWORD}\n"
19 | kubectl -n ${NAMESPACE} port-forward svc/prometheus-helm-grafana 9191:80 &
20 |
21 | #
22 | # Alertmanager
23 | #
24 | printf "[+] Forwarding Alertmanager Service to http://127.0.0.1:9093\n"
25 | kubectl -n ${NAMESPACE} port-forward svc/prometheus-helm-prometheus-alertmanager 9093 &
26 |
--------------------------------------------------------------------------------
/components/observability/k8s/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: observability
5 |
--------------------------------------------------------------------------------
/components/vault/BUILD:
--------------------------------------------------------------------------------
1 | subinclude("//build_defs:docker")
2 | subinclude("//build_defs:helm_chart")
3 | subinclude("//build_defs:k8s")
4 | subinclude("//build_defs:kustomize")
5 |
6 | sh_binary(
7 | name = "deploy",
8 | main = "deploy.sh",
9 | )
10 |
11 | sh_binary(
12 | name = "ui",
13 | main = "forward-ui.sh",
14 | )
15 |
16 | k8s_config(
17 | name = "vault-namespace",
18 | srcs = ["deployment/vault-namespace.yaml"],
19 | )
20 |
21 | # ==============================================================================
22 | # Deploy Vault
23 | # ==============================================================================
24 | kustomize_prep(
25 | name = "kustomize-vault",
26 | srcs = glob(["deployment/**/*.yaml"]),
27 | )
28 |
29 | #
30 | # BAREMETAL
31 | #
32 | kustomize(
33 | name = "vault-baremetal-components",
34 | namespace = "vault",
35 | kustomize_path = "deployment",
36 | overlay = "baremetal",
37 | srcs = [":kustomize-vault"],
38 | )
39 |
40 | filegroup(
41 | name = "vault-baremetal-helm-values",
42 | srcs = [
43 | "deployment/overlays/baremetal/helm-values.yaml",
44 | ],
45 | )
46 |
47 | helm_chart(
48 | name = "vault-baremetal-helm",
49 | src = "//third_party:helm-vault-tar",
50 | install_path = "vault-helm-0.13.0/",
51 | namespace = "vault",
52 | visibility = ["//components/vault/..."],
53 | values_file = ":vault-baremetal-helm-values",
54 | deps = [
55 | ":vault-namespace",
56 | ":vault-baremetal-helm-values"
57 | ]
58 | )
59 |
60 | #
61 | # MINIKUBE
62 | #
63 | filegroup(
64 | name = "vault-minikube-helm-values",
65 | srcs = [
66 | "deployment/overlays/minikube/helm-values.yaml",
67 | ],
68 | )
69 |
70 | helm_chart(
71 | name = "vault-minikube-helm",
72 | src = "//third_party:helm-vault-tar",
73 | install_path = "vault-helm-0.13.0/",
74 | namespace = "vault",
75 | visibility = ["//components/vault/..."],
76 | values_file = ":vault-minikube-helm-values",
77 | deps = [
78 | ":vault-namespace",
79 | ":vault-minikube-helm-values"
80 | ]
81 | )
82 |
83 | # ==============================================================================
84 | # Initialize Vault and enable Kubernetes backend
85 | # ==============================================================================
86 | sh_binary(
87 | name = "vault-init",
88 | main = "setup/vault-init.sh",
89 | )
90 |
91 | sh_binary(
92 | name = "vault-unseal",
93 | main = "setup/vault-unseal.sh"
94 | )
95 |
96 | # ==============================================================================
97 | # Setup sidecar Agent
98 | # ==============================================================================
99 | filegroup(
100 | name = "agent-policy",
101 | srcs = [
102 | "setup/agent-policy.json",
103 | ],
104 | )
105 |
106 | sh_binary(
107 | name = "agent-init",
108 | main = "setup/agent-init.sh",
109 | data = [
110 | ":agent-policy"
111 | ],
112 | deps = [
113 | ":agent-policy"
114 | ]
115 | )
116 |
117 | # ==============================================================================
118 | # Sample deployment
119 | # ==============================================================================
120 | k8s_config(
121 | name = "k8s-sample-deployment",
122 | srcs = [
123 | "sample/agent-service-account.yaml",
124 | "sample/sample-deployment.yaml"
125 | ],
126 | )
127 |
128 | sh_binary(
129 | name = "sample-deployment",
130 | main = "sample/deploy.sh",
131 | )
132 |
--------------------------------------------------------------------------------
/components/vault/README.md:
--------------------------------------------------------------------------------
1 | # Vault Setup
2 |
3 | ## Deploy Vault
4 | ```bash
5 | ❯ plz run //components/vault:deploy [minikube|baremetal]
6 | ```
7 | * Creates a `vault` namespace
8 | * Creates a StorageClass and a PersistentVolume (baremetal only)
9 | * Fetches and deploys the Vault Helm chart in the `vault` namespace
10 | * Initializes (unseals) Vault
11 | * Enables the Kubernetes backend (prints the root token to screen)
12 | * Sets up the sidecar Agent by creating a role/policy that allows it to read any secret in the kv-v2 `secret/` backend
13 |
14 | Verify pods are healthy:
15 | ```bash
16 | ❯ kubectl -n vault get po
17 | NAME READY STATUS RESTARTS AGE
18 | vault-helm-0 1/1 Running 0 2m2s
19 | vault-helm-agent-injector-5d7c4965d7-7dnp9 1/1 Running 0 2m3s
20 | ```
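
To confirm the server is initialized and unsealed, you can query its status (pod name as shown above):
```bash
❯ kubectl -n vault exec vault-helm-0 -- vault status
```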
21 |
22 | 📝 **NOTE FOR BAREMETAL**: before deploying, make sure to prepare
23 | the data folder on the host (to reset the installation, remove this same folder):
24 | ```bash
25 | ❯ sudo mkdir -p /etc/plz-k8s-lab/vault/data/
26 | ❯ sudo chmod -R a+rw /etc/plz-k8s-lab/vault/
27 | ```
28 |
29 |
30 | ### Subsequent Unseals
31 | If, for any reason, Vault goes into a sealed state, it can be unsealed with the following command:
32 | ```bash
33 | ❯ plz run //components/vault:vault-unseal [minikube|baremetal]
34 | ```
35 |
36 |
37 | ---
38 |
39 |
40 | ## Access the Vault UI
41 |
42 | ### Via Port-Forward
43 | ```bash
44 | ❯ plz run //components/vault:ui [minikube|baremetal]
45 | ```
46 | * Forwards the Vault UI to http://127.0.0.1:8200
47 |
48 | 
49 |
50 | ### Via Ingress on Baremetal
51 | ```bash
52 | ❯ kubectl -n vault get ingress
53 | NAME CLASS HOSTS ADDRESS PORTS AGE
54 | vault-ingress <none> vault.192.168.1.151.nip.io 80 49s
55 | ```
56 |
57 | 📝 **NOTE**: before deploying, make sure to replace the host IP address in
58 | `components/vault/deployment/overlays/baremetal/vault-ui-ingress.yaml`.
59 |
60 | This assumes you followed the setup described at "[Kubernetes Lab on Baremetal](https://www.marcolancini.it/2021/blog-kubernetes-lab-baremetal/)".
61 |
62 |
63 | ---
64 |
65 |
66 | ## Inject secrets into Pods
67 | For this setup, the Vault Agent has been given access to read any secret in the kv-v2 `secret/` backend, so any secret stored in that backend can be picked up by the Agent.
68 |
69 | In addition, the Vault Agent injector only modifies a deployment if it contains a specific set of annotations. An existing deployment may have its definition patched to include the necessary annotations.
70 | A sample is provided at `components/vault/sample/sample-deployment.yaml`, where the deployment requires the `secret/data/database/config` secret.
71 |
72 | To run the example (which will create a secret at `secret/database/config` before deploying the pod):
73 | ```bash
74 | ❯ plz run //components/vault:sample-deployment [minikube|baremetal]
75 | ```
76 |
77 | To check the pod is healthy:
78 | ```bash
79 | ❯ kubectl -n vault logs -f vault-agent-injector-demo-78d49f7c6-lx88q orgchart
80 | Listening on port 8000...
81 | ```
82 |
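The secret itself is rendered by the Vault Agent into the pod's shared `/vault/secrets/` directory, with the file name taken from the annotation suffix. A quick way to inspect it (a sketch; your pod name will differ):
```bash
❯ kubectl -n vault exec vault-agent-injector-demo-78d49f7c6-lx88q -c orgchart -- cat /vault/secrets/database-config.txt
```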
83 |
84 | ---
85 |
86 |
87 | ## References
88 | * [Vault Installation to Minikube via Helm](https://learn.hashicorp.com/vault/kubernetes/minikube)
89 | * [Injecting Secrets into Kubernetes Pods via Vault Sidecar](https://learn.hashicorp.com/vault/kubernetes/sidecar)
90 | * [Kubernetes authentication method](https://www.vaultproject.io/docs/auth/kubernetes.html)
91 | * [Kubernetes Lab on Baremetal](https://www.marcolancini.it/2021/blog-kubernetes-lab-baremetal/)
92 |
--------------------------------------------------------------------------------
/components/vault/deploy.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE="vault"
4 | SELECTOR="app.kubernetes.io/name=vault,component=server"
5 | TARGET=$1
6 | if [[ $# -lt 1 ]] ; then
7 | TARGET="minikube"
8 | fi
9 |
10 | # Create `vault` namespace
11 | printf "\n[+] Creating ${NAMESPACE} namespace...\n"
12 | plz run //components/vault:vault-namespace_push
13 |
14 | # Deploy Vault
15 | printf "\n[+] Deploying Vault on ${TARGET}...\n"
16 | if [[ $TARGET == "baremetal" ]]
17 | then
18 | # Create StorageClass and PersistentVolume (baremetal only)
19 | # Create Ingress
20 | plz run //components/vault:vault-baremetal-components_push
21 |
22 | # Fetch and deploy the Vault Helm chart
23 | plz run //components/vault:vault-baremetal-helm_push
24 | else
25 | # Fetch and deploy the Vault Helm chart
26 | plz run //components/vault:vault-minikube-helm_push
27 | fi
28 | plz run //common:wait_pod -- ${NAMESPACE} "Vault Operator" ${SELECTOR}
29 |
30 | # Initialize Vault and enable Kubernetes backend (will print root token)
31 | printf "\n[+] Initializing Vault...\n"
32 | plz run //components/vault:vault-init ${NAMESPACE} ${TARGET} ${SELECTOR}
33 |
34 | # Setup sidecar Agent (create role/policy)
35 | printf "\n[+] Setting up sidecar Agent...\n"
36 | plz run //components/vault:agent-init ${NAMESPACE} ${TARGET} ${SELECTOR}
37 |
--------------------------------------------------------------------------------
/components/vault/deployment/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | namespace: vault
5 |
--------------------------------------------------------------------------------
/components/vault/deployment/overlays/baremetal/helm-values.yaml:
--------------------------------------------------------------------------------
1 | storageClass: vault-storage
2 |
3 | server:
4 | affinity: ''
5 | ha:
6 | replicas: 1
7 | dataStorage:
8 | enabled: true
9 | size: 1Gi
10 | mountPath: '/etc/plz-k8s-lab/vault/data/'
11 | storageClass: vault-storage
12 | accessMode: ReadWriteOnce
13 | annotations: {}
14 |
15 | ui:
16 | enabled: true
17 |
--------------------------------------------------------------------------------
/components/vault/deployment/overlays/baremetal/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | bases:
5 | - ../../base
6 | resources:
7 | - vault-pv.yaml
8 | - vault-ui-ingress.yaml
9 |
--------------------------------------------------------------------------------
/components/vault/deployment/overlays/baremetal/vault-pv.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: StorageClass
3 | apiVersion: storage.k8s.io/v1
4 | metadata:
5 | name: vault-storage
6 | provisioner: kubernetes.io/no-provisioner
7 | volumeBindingMode: WaitForFirstConsumer
8 | ---
9 | apiVersion: v1
10 | kind: PersistentVolume
11 | metadata:
12 | name: vault-pv-volume
13 | labels:
14 | type: local
15 | spec:
16 | storageClassName: vault-storage
17 | capacity:
18 | storage: 1Gi
19 | accessModes:
20 | - ReadWriteOnce
21 | hostPath:
22 | path: '/etc/plz-k8s-lab/vault/data/'
23 |
--------------------------------------------------------------------------------
/components/vault/deployment/overlays/baremetal/vault-ui-ingress.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.k8s.io/v1
3 | kind: Ingress
4 | metadata:
5 | name: vault-ingress
6 | namespace: vault
7 | annotations:
8 | kubernetes.io/ingress.class: haproxy
9 | spec:
10 | rules:
11 | # Replace with IP of host
12 | - host: vault.192.168.1.151.nip.io
13 | http:
14 | paths:
15 | - path: /
16 | pathType: Prefix
17 | backend:
18 | service:
19 | name: vault-baremetal-helm-ui
20 | port:
21 | number: 8200
22 |
--------------------------------------------------------------------------------
/components/vault/deployment/overlays/minikube/helm-values.yaml:
--------------------------------------------------------------------------------
1 | server:
2 | affinity: ''
3 | ha:
4 | replicas: 1
5 | ui:
6 | enabled: true
7 |
--------------------------------------------------------------------------------
/components/vault/deployment/overlays/minikube/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | bases:
5 | - ../../base
6 |
--------------------------------------------------------------------------------
/components/vault/deployment/vault-namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: vault
5 |
--------------------------------------------------------------------------------
/components/vault/forward-ui.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE="vault"
4 | TARGET=$1
5 | if [[ $# -lt 1 ]] ; then
6 | TARGET="minikube"
7 | fi
8 | KEY_FILE="cluster-keys-${TARGET}.json"
9 | SELECTOR="app.kubernetes.io/name=vault-ui"
10 |
11 | # Should be:
12 | # - Baremetal: vault-helm-baremetal-ui
13 | # - Minikube: vault-helm-ui
14 | SERVICE_NAME=$(plz run //common:get_resource_from_selector ${NAMESPACE} svc ${SELECTOR})
15 |
16 | VAULT_ROOT_TOKEN=$(jq -r ".root_token" "${KEY_FILE}")
17 | printf "[+] Forwarding Vault UI to http://127.0.0.1:8200\n"
18 | printf "\t[*] Root Token: %s\n" "${VAULT_ROOT_TOKEN}"
19 | kubectl -n ${NAMESPACE} port-forward svc/${SERVICE_NAME} 8200
20 |
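> Once the forward is up, the local `vault` CLI can talk to the cluster directly; a small sketch, assuming the key file produced by `vault-init.sh` for the minikube target is in the working directory:

```bash
# Point the local CLI at the forwarded port and log in with the root token
# captured by vault-init.sh.
export VAULT_ADDR=http://127.0.0.1:8200
vault login "$(jq -r '.root_token' cluster-keys-minikube.json)"
vault secrets list
```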
--------------------------------------------------------------------------------
/components/vault/sample/agent-service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: vault-agent
5 | namespace: vault
6 |
--------------------------------------------------------------------------------
/components/vault/sample/deploy.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE="vault"
4 | TARGET=$1
5 | if [[ $# -lt 1 ]] ; then
6 | TARGET="minikube"
7 | fi
8 | SELECTOR="app.kubernetes.io/name=vault,component=server"
9 |
10 | # Should be:
11 | # - Baremetal: vault-helm-baremetal-0
12 | # - Minikube: vault-helm-0
13 | POD_NAME=$(plz run //common:get_resource_from_selector ${NAMESPACE} pod ${SELECTOR})
14 |
15 | printf "[+] Creating test secret at: secret/database/config\n"
16 | kubectl -n ${NAMESPACE} exec ${POD_NAME} -it -- vault kv put secret/database/config username="db-readonly-username" password="db-secret-password"
17 |
18 | printf "[+] Deploying sample deployment...\n"
19 | plz run //components/vault:k8s-sample-deployment_push
20 |
--------------------------------------------------------------------------------
/components/vault/sample/sample-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: vault-agent-injector-demo
5 | namespace: vault
6 | labels:
7 | app: vault-agent-injector-demo
8 | spec:
9 | selector:
10 | matchLabels:
11 | app: vault-agent-injector-demo
12 | replicas: 1
13 | template:
14 | metadata:
15 | annotations:
16 | vault.hashicorp.com/agent-inject: 'true'
17 | vault.hashicorp.com/role: 'vault-agent'
18 | vault.hashicorp.com/agent-inject-secret-database-config.txt: 'secret/data/database/config'
19 | labels:
20 | app: vault-agent-injector-demo
21 | spec:
22 | serviceAccountName: vault-agent
23 | containers:
24 | - name: orgchart
25 | image: jweissig/app:0.0.1
26 |
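> The injector mutates the pod based on the annotations: `agent-inject-secret-database-config.txt` asks for `secret/data/database/config` to be rendered into the app container at `/vault/secrets/database-config.txt` (the injector's documented default render path). To verify once the demo deployment rolls out:

```bash
# The agent injector writes each requested secret under /vault/secrets/,
# named after the annotation suffix; check it landed in the app container.
kubectl -n vault exec deploy/vault-agent-injector-demo -c orgchart -- \
  cat /vault/secrets/database-config.txt
```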
--------------------------------------------------------------------------------
/components/vault/setup/agent-init.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE=$1
4 | TARGET=$2
5 | SELECTOR=$3
6 | POLICY_FILE="components/vault/setup/agent-policy.json"
7 |
8 | # Should be:
9 | # - Baremetal: vault-helm-baremetal-0
10 | # - Minikube: vault-helm-0
11 | POD_NAME=$(plz run //common:get_resource_from_selector ${NAMESPACE} pod ${SELECTOR})
12 |
13 | # Enable secret engine
14 | kubectl -n ${NAMESPACE} exec ${POD_NAME} -- vault secrets enable -path=secret kv-v2
15 | kubectl -n ${NAMESPACE} exec ${POD_NAME} -- vault write secret/config max_versions=1
16 |
17 | # Create policy for the sidecar
18 | printf "[+] Creating policy for the Vault Sidecar...\n"
19 | POLICY=$(cat ${POLICY_FILE})
20 | kubectl -n ${NAMESPACE} exec ${POD_NAME} -- /bin/sh -c "echo '$POLICY' > /tmp/policy.json"
21 | kubectl -n ${NAMESPACE} exec ${POD_NAME} -- vault policy write vault-agent /tmp/policy.json
22 |
23 | # Create role for the sidecar
24 | printf "[+] Creating role for the Vault Sidecar...\n"
25 | kubectl -n ${NAMESPACE} exec ${POD_NAME} -- vault write \
26 | auth/kubernetes/role/vault-agent \
27 | bound_service_account_names=vault-agent \
28 |     bound_service_account_namespaces='*' \
29 | policies=vault-agent \
30 | ttl=24h
31 |
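> To confirm the role and policy landed as expected, run the read-backs inside the server pod, the same way the script issues its writes (`${POD_NAME}` is the pod the script resolved via the selector):

```bash
# Inspect the role binding and the policy created above.
kubectl -n vault exec "${POD_NAME}" -- vault read auth/kubernetes/role/vault-agent
kubectl -n vault exec "${POD_NAME}" -- vault policy read vault-agent
```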
--------------------------------------------------------------------------------
/components/vault/setup/agent-policy.json:
--------------------------------------------------------------------------------
1 | path "secret/data/*" {
2 | capabilities = [
3 | "read"
4 | ]
5 | }
6 |
7 | path "aws/sts/*" {
8 | capabilities = [
9 | "read"
10 | ]
11 | }
12 |
--------------------------------------------------------------------------------
/components/vault/setup/vault-init.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE=$1
4 | TARGET=$2
5 | SELECTOR=$3
6 | KEY_FILE="cluster-keys-${TARGET}.json"
7 |
8 | # Should be:
9 | # - Baremetal: vault-helm-baremetal-0
10 | # - Minikube: vault-helm-0
11 | POD_NAME=$(plz run //common:get_resource_from_selector ${NAMESPACE} pod ${SELECTOR})
12 |
13 | # Initialize Vault
14 | printf "[+] Initializing Vault...\n"
15 | kubectl -n ${NAMESPACE} exec ${POD_NAME} -- vault operator init -key-shares=1 -key-threshold=1 -format=json > ${KEY_FILE}
16 |
17 | VAULT_UNSEAL_KEY=$(jq -r ".unseal_keys_b64[]" "${KEY_FILE}")
18 | kubectl -n ${NAMESPACE} exec ${POD_NAME} -- vault operator unseal "${VAULT_UNSEAL_KEY}"
19 |
20 | VAULT_ROOT_TOKEN=$(jq -r ".root_token" "${KEY_FILE}")
21 | printf ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"
22 | printf "[!] ROOT TOKEN: %s\n" "${VAULT_ROOT_TOKEN}"
23 | printf ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"
24 |
25 | # Configure Kubernetes Authentication
26 | printf "[+] Configuring Kubernetes Authentication...\n"
27 | kubectl -n ${NAMESPACE} exec ${POD_NAME} -- vault login ${VAULT_ROOT_TOKEN}
28 | kubectl -n ${NAMESPACE} exec ${POD_NAME} -- vault auth enable kubernetes
29 |
30 | kubectl -n ${NAMESPACE} exec ${POD_NAME} -- vault write auth/kubernetes/config \
31 | token_reviewer_jwt=@/var/run/secrets/kubernetes.io/serviceaccount/token \
32 | kubernetes_host="https://kubernetes.default.svc.cluster.local:443" \
33 | kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
34 |
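> The final `auth/kubernetes/config` write uses the server pod's own service-account JWT and CA certificate, which is the standard pattern when Vault runs in-cluster. To confirm the method is enabled and the config stuck:

```bash
# Verify the auth backend picked up the reviewer JWT and cluster CA.
kubectl -n vault exec "${POD_NAME}" -- vault auth list
kubectl -n vault exec "${POD_NAME}" -- vault read auth/kubernetes/config
```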
--------------------------------------------------------------------------------
/components/vault/setup/vault-unseal.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | TARGET=$1
4 | if [[ $# -lt 1 ]] ; then
5 | TARGET="minikube"
6 | fi
7 | NAMESPACE="vault"
8 | SELECTOR="app.kubernetes.io/name=vault,component=server"
9 | KEY_FILE="cluster-keys-${TARGET}.json"
10 |
11 | POD_NAME=$(plz run //common:get_resource_from_selector ${NAMESPACE} pod ${SELECTOR})
12 | VAULT_UNSEAL_KEY=$(jq -r ".unseal_keys_b64[]" "${KEY_FILE}")
13 | kubectl -n ${NAMESPACE} exec ${POD_NAME} -- vault operator unseal "${VAULT_UNSEAL_KEY}"
14 |
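> Vault reseals whenever its pod restarts, so this is the script to reach for when the UI suddenly stops responding. To check the seal state first:

```bash
# A sealed Vault reports Sealed=true from 'vault status' (and exits non-zero),
# which is the usual symptom after a pod restart.
kubectl -n vault exec "${POD_NAME}" -- vault status -format=json | jq .sealed
```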
--------------------------------------------------------------------------------
/components/yopass/BUILD:
--------------------------------------------------------------------------------
1 | subinclude("//build_defs:docker")
2 | subinclude("//build_defs:helm_chart")
3 | subinclude("//build_defs:k8s")
4 | subinclude("//build_defs:kustomize")
5 |
6 | sh_binary(
7 | name = "deploy",
8 | main = "deploy.sh",
9 | )
10 |
11 | sh_binary(
12 | name = "ui",
13 | main = "forward-ui.sh",
14 | )
15 |
16 | k8s_config(
17 | name = "yopass-namespace",
18 | srcs = ["deployment/yopass-namespace.yaml"],
19 | )
20 |
21 | # ==============================================================================
22 | # YOPASS
23 | # ==============================================================================
24 | kustomize_prep(
25 | name = "kustomize-yopass",
26 | srcs = glob(["deployment/**/*.yaml"]),
27 | )
28 |
29 | kustomize(
30 | name = "yopass-baremetal",
31 | namespace = "yopass",
32 | kustomize_path = "deployment/",
33 | overlay = "baremetal",
34 | srcs = [":kustomize-yopass"],
35 | )
36 |
37 | kustomize(
38 | name = "yopass-minikube",
39 | namespace = "yopass",
40 | kustomize_path = "deployment/",
41 | overlay = "minikube",
42 | srcs = [":kustomize-yopass"],
43 | )
44 |
--------------------------------------------------------------------------------
/components/yopass/README.md:
--------------------------------------------------------------------------------
1 | # Yopass Setup
2 |
3 | [Yopass](https://github.com/jhaals/yopass) is a project for sharing secrets in a quick and secure manner.
4 |
5 |
6 | ## Deploy yopass
7 |
8 | ```bash
9 | ❯ plz run //components/yopass:deploy [minikube|baremetal]
10 | ```
11 | * Creates `yopass` namespace
12 | * Deploys Yopass
13 |
14 | Verify the pods become healthy; the pod may briefly show `0/2` ready while the containers start, so wait until it reports `2/2`:
15 | ```bash
16 | ❯ kubectl -n yopass get pods
17 | NAME READY STATUS RESTARTS AGE
18 | yopass-5b98b66b8d-j5jmr 0/2 Running 0 101s
19 | ```
20 |
21 |
22 | ---
23 |
24 |
25 | ## Access the Yopass UI
26 |
27 | ### Via Port-Forward
28 |
29 | * Forward the Yopass UI to http://127.0.0.1:1337
30 | ```bash
31 | ❯ plz run //components/yopass:ui
32 | ```
33 |
34 | 
35 |
36 |
37 | ### Via Ingress on Baremetal
38 |
39 | * Verify the Ingress has been deployed:
40 | ```bash
41 | ❯ kubectl -n yopass get ingress
42 | NAME CLASS HOSTS ADDRESS PORTS AGE
43 | yopass-ingress yopass.192.168.1.151.nip.io 80 8m45s
44 | ```
45 |
46 | 📝 **NOTE**: before deploying, make sure to replace the host IP address in: `//components/yopass/deployment/overlays/baremetal/ingress.yaml`
47 |
48 | This assumes you followed the setup described at "[Kubernetes Lab on Baremetal](https://www.marcolancini.it/2021/blog-kubernetes-lab-baremetal/)".
49 |
--------------------------------------------------------------------------------
/components/yopass/deploy.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE="yopass"
4 | TARGET=$1
5 | if [[ $# -lt 1 ]] ; then
6 | TARGET="minikube"
7 | fi
8 |
9 | # Create `yopass` namespace
10 | printf "\n[+] Creating ${NAMESPACE} namespace...\n"
11 | plz run //components/yopass:yopass-namespace_push
12 |
13 | # Deploying Yopass:
14 | printf "\n[+] Deploying Yopass on ${TARGET}...\n"
15 | if [[ $TARGET == "baremetal" ]]
16 | then
17 | plz run //components/yopass:yopass-baremetal_push
18 | else
19 | plz run //components/yopass:yopass-minikube_push
20 | fi
21 |
--------------------------------------------------------------------------------
/components/yopass/deployment/base/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: yopass
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app.kubernetes.io/name: yopass
10 | template:
11 | metadata:
12 | labels:
13 | app.kubernetes.io/name: yopass
14 | spec:
15 | securityContext:
16 | runAsUser: 31000
17 | runAsGroup: 31000
18 | fsGroup: 31000
19 | containers:
20 | - name: yopass
21 | image: jhaals/yopass
22 | args:
23 | - '--memcached=localhost:11211'
24 | ports:
25 | - name: http
26 | containerPort: 1337
27 | resources:
28 | limits:
29 | cpu: 100m
30 | memory: 50Mi
31 | requests:
32 | cpu: 100m
33 | memory: 50Mi
34 | securityContext:
35 | allowPrivilegeEscalation: false
36 | - name: yopass-memcached
37 | image: memcached
38 | resources:
39 | limits:
40 | cpu: 100m
41 | memory: 100Mi
42 | requests:
43 | cpu: 100m
44 | memory: 100Mi
45 | args:
46 | - '-m 64'
47 | ports:
48 | - name: memcached
49 | containerPort: 11211
50 |
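> The two containers share the pod's network namespace, which is why yopass can reach its memcached sidecar at `localhost:11211`. If secrets fail to save, connection errors would surface in the yopass container's logs:

```bash
# Check the app container for memcached connection errors.
kubectl -n yopass logs deploy/yopass -c yopass
```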
--------------------------------------------------------------------------------
/components/yopass/deployment/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | namespace: yopass
5 |
6 | resources:
7 | - deployment.yaml
8 | - service.yaml
9 |
--------------------------------------------------------------------------------
/components/yopass/deployment/base/service.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | labels:
6 | service: http
7 | name: yopass-service
8 | spec:
9 | selector:
10 | app.kubernetes.io/name: yopass
11 | type: NodePort
12 | ports:
13 | - name: http
14 | port: 1337
15 | targetPort: 1337
16 |
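> The service is of type `NodePort`, so on minikube it can also be reached without a port-forward:

```bash
# minikube can print a reachable URL for any NodePort service.
minikube service -n yopass yopass-service --url
```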
--------------------------------------------------------------------------------
/components/yopass/deployment/overlays/baremetal/ingress.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.k8s.io/v1
3 | kind: Ingress
4 | metadata:
5 | name: yopass-ingress
6 | annotations:
7 | kubernetes.io/ingress.class: haproxy
8 | spec:
9 | rules:
10 | # Replace with IP of host
11 | - host: yopass.192.168.1.151.nip.io
12 | http:
13 | paths:
14 | - path: /
15 | pathType: Prefix
16 | backend:
17 | service:
18 | name: yopass-service
19 | port:
20 | number: 1337
21 |
--------------------------------------------------------------------------------
/components/yopass/deployment/overlays/baremetal/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | bases:
5 | - ../../base
6 |
7 | resources:
8 | - ingress.yaml
9 |
--------------------------------------------------------------------------------
/components/yopass/deployment/overlays/minikube/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | bases:
5 | - ../../base
6 |
--------------------------------------------------------------------------------
/components/yopass/deployment/yopass-namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: yopass
5 |
--------------------------------------------------------------------------------
/components/yopass/forward-ui.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 |
3 | NAMESPACE="yopass"
4 |
5 | #
6 | # Yopass UI
7 | #
8 | printf "[+] Forwarding Yopass UI to http://127.0.0.1:1337\n"
9 | kubectl -n ${NAMESPACE} port-forward svc/yopass-service 1337
10 |
--------------------------------------------------------------------------------
/third_party/BUILD:
--------------------------------------------------------------------------------
1 | # ==============================================================================
2 | # VAULT
3 | # ==============================================================================
4 | VAULT_VERSION="0.13.0"
5 | VAULT_HASH="15f1d2fab0039b261882020473ec201a610dbfea0c306115438fae1d55cb7945"
6 |
7 | remote_file(
8 | name = "helm-vault-tar",
9 | out = "vault",
10 | url = "https://github.com/hashicorp/vault-helm/archive/v%s.tar.gz" % VAULT_VERSION,
11 | hashes = [VAULT_HASH],
12 | visibility = ["//components/vault/..."],
13 | extract = True
14 | )
15 |
16 | # ==============================================================================
17 | # ECK
18 | # ==============================================================================
19 | ECK_VERSION="1.7.1"
20 | ECK_CRDS_HASH="8e650017650d315bcb9a992a69d2a171c9e819cf"
21 | ECK_OPERATOR_HASH="edde881bdfbfdb78ba041e19fa59578f712b0926"
22 |
23 | remote_file(
24 | name = "eck-crds",
25 | out = "eck-crds.yaml",
26 | url = "https://download.elastic.co/downloads/eck/%s/crds-legacy.yaml" % ECK_VERSION,
27 | hashes = [ECK_CRDS_HASH],
28 | visibility = ["//components/elk/..."]
29 | )
30 |
31 | remote_file(
32 | name = "eck-operator",
33 | out = "eck-operator.yaml",
34 | url = "https://download.elastic.co/downloads/eck/%s/operator-legacy.yaml" % ECK_VERSION,
35 | hashes = [ECK_OPERATOR_HASH],
36 | visibility = ["//components/elk/..."]
37 | )
38 |
39 | # ==============================================================================
40 | # PROMETHEUS
41 | # ==============================================================================
42 | PROMETHEUS_VERSION="8.15.6"
43 | PROMETHEUS_HASH="3938f67923067b23c2859b6a7e07b8edb96a13b8eaa0806e3f460ed4d59ff4d0"
44 |
45 | remote_file(
46 | name = "helm-prometheus-tar",
47 | out = "prometheus",
48 | url = "https://kubernetes-charts.storage.googleapis.com/prometheus-operator-%s.tgz" % PROMETHEUS_VERSION,
49 | hashes = [PROMETHEUS_HASH],
50 | visibility = ["//components/prometheus/..."],
51 | extract = True
52 | )
53 |
54 | # ==============================================================================
55 | # KAFKA
56 | # ==============================================================================
57 | KAFKA_OPERATOR_VERSION="0.20.0"
58 | KAFKA_OPERATOR_HASH="c065c9b075b1bdfb59d9d2444e3cda86c14942d2a3da86e2e7b377b64cfa3d06"
59 |
60 | remote_file(
61 | name = "kafka-operator",
62 | out = "kafka-operator.yaml",
63 | url = "https://github.com/strimzi/strimzi-kafka-operator/releases/download/%s/strimzi-cluster-operator-%s.yaml" % (KAFKA_OPERATOR_VERSION, KAFKA_OPERATOR_VERSION),
64 | hashes = [KAFKA_OPERATOR_HASH],
65 | visibility = ["//components/kafka/..."],
66 | extract = False
67 | )
68 |
69 | # ==============================================================================
70 | # NGINX
71 | # ==============================================================================
72 | NGINX_OPERATOR_VERSION="0.44.0"
73 | NGINX_OPERATOR_HASH="d5071e1d983c130641bf6261cf4ef61f58bd422d80a888946db27c6660190bc0"
74 |
75 | remote_file(
76 | name = "nginx-operator",
77 | out = "nginx-operator.yaml",
78 | url = "https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v%s/deploy/static/provider/baremetal/deploy.yaml" % NGINX_OPERATOR_VERSION,
79 | hashes = [NGINX_OPERATOR_HASH],
80 | visibility = ["//components/..."],
81 | extract = False
82 | )
83 |
84 | # ==============================================================================
85 | # METALLB
86 | # ==============================================================================
87 | METALLB_NAMESPACE_VERSION="0.9.5"
88 | METALLB_NAMESPACE_HASH="51a1c68cd8f92e4653075ea978642689a7786099000f9017a616567bab76114f"
89 |
90 | remote_file(
91 | name = "metallb-namespace",
92 | out = "metallb-namespace.yaml",
93 | url = "https://raw.githubusercontent.com/metallb/metallb/v%s/manifests/namespace.yaml" % METALLB_NAMESPACE_VERSION,
94 | hashes = [METALLB_NAMESPACE_HASH],
95 | visibility = ["//components/baremetal/..."],
96 | extract = False
97 | )
98 |
99 | METALLB_DEPLOYMENT_VERSION="0.9.5"
100 | METALLB_DEPLOYMENT_HASH="15c30085c20bbc36c2ce81881180b75e503a5d95bacce45733e3b5131a7d866b"
101 |
102 | remote_file(
103 | name = "metallb-deployment",
104 | out = "metallb-deployment.yaml",
105 | url = "https://raw.githubusercontent.com/metallb/metallb/v%s/manifests/metallb.yaml" % METALLB_DEPLOYMENT_VERSION,
106 | hashes = [METALLB_DEPLOYMENT_HASH],
107 | visibility = ["//components/baremetal/..."],
108 | extract = False
109 | )
110 |
111 | # ==============================================================================
112 | # HAPROXY
113 | # ==============================================================================
114 | HAPROXY_VERSION="0.12.2"
115 | HAPROXY_HASH="819cb1df52060f35caa8cb81ace715bfe90dca2fed34344e00a670d25b0df941"
116 |
117 | remote_file(
118 | name = "helm-haproxy-tar",
119 | out = "haproxy",
120 | url = "https://github.com/haproxy-ingress/charts/releases/download/%s/haproxy-ingress-%s.tgz" % (HAPROXY_VERSION, HAPROXY_VERSION),
121 | hashes = [HAPROXY_HASH],
122 | visibility = ["//components/baremetal/..."],
123 | extract = True
124 | )
125 |
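> Every `remote_file` above is pinned to a digest, so bumping a version means recomputing the hash. A small helper sketch for the sha256-pinned artifacts (note the two ECK entries carry 40-character digests, so don't reuse this command blindly for those):

```bash
# Download the artifact once and print the sha256 to paste into the pin;
# the URL shown is the vault-helm tarball from the VAULT section above.
URL="https://github.com/hashicorp/vault-helm/archive/v0.13.0.tar.gz"
curl -fsSL "${URL}" | sha256sum
```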
--------------------------------------------------------------------------------
/third_party/docker/BUILD:
--------------------------------------------------------------------------------
1 | subinclude("//build_defs:docker_repository")
2 |
3 | docker_repository(
4 | name = "neo4j",
5 | image = "neo4j",
6 | version = "3.5.8",
7 | visibility = ["//components/cartography/..."]
8 | )
9 |
--------------------------------------------------------------------------------
/third_party/python/BUILD:
--------------------------------------------------------------------------------
1 | package(
2 | default_visibility = ["PUBLIC"],
3 | python_wheel_repo = "pypi.org",
4 | python_wheel_name_scheme = [
5 | "{url_base}/{package_name}-{version}-${{OS}}_${{ARCH}}.whl",
6 | "{url_base}/{package_name}-{version}.whl",
7 | "https://files.pythonhosted.org/packages/py3/{initial}/{package_name}/{package_name}-{version}-py3-none-any.whl",
8 | "https://files.pythonhosted.org/packages/py2.py3/{initial}/{package_name}/{package_name}-{version}-py2.py3-none-any.whl",
9 | ],
10 | )
11 |
12 | pip_library(
13 | name = "neo4j",
14 | version = "4.3.1",
15 | hashes = ["dd53a8ad3d26878ec48c2fdb7774bf5dc0634a024b65e62894db89324a99a65d"],
16 | )
17 |
18 | pip_library(
19 | name = "neobolt",
20 | version = "1.7.17",
21 | hashes = ["5b8cb9c155180bd1521e4df36da74cb75ba5dc7a5e36046d74bad4e6cedfc10f"],
22 | )
23 |
24 | pip_library(
25 | name = "neotime",
26 | version = "1.7.4",
27 | hashes = ["481df3ea864383c4cf30b80be04d16235d518f17886c5b6613a11590ad9ecadc"],
28 | )
29 |
30 | python_wheel(
31 | name = "botocore",
32 | hashes = ["3f719fed8184cda9f7ed84747cd5b344d42557712f225f93dd183721410ce01c"],
33 | version = "1.20.98",
34 | )
35 |
--------------------------------------------------------------------------------