├── k8s-test
├── tests
│ ├── kubernetes_role
│ │ ├── test-not-found-expected.json
│ │ ├── posttest-variables.tf
│ │ ├── test-list-query.sql
│ │ ├── test-get-query.sql
│ │ ├── test-not-found-query.sql
│ │ ├── variables.tf
│ │ ├── role.yaml
│ │ ├── test-get-expected.json
│ │ └── test-list-expected.json
│ ├── kubernetes_cronjob
│ │ ├── test-not-found-expected.json
│ │ ├── test-get-expected.json
│ │ ├── test-list-expected.json
│ │ ├── test-not-found-query.sql
│ │ ├── test-list-query.sql
│ │ ├── posttest-variables.tf
│ │ ├── test-get-query.sql
│ │ ├── cronjob.yaml
│ │ └── variables.tf
│ ├── kubernetes_daemonset
│ │ ├── test-not-found-expected.json
│ │ ├── test-get-expected.json
│ │ ├── test-list-expected.json
│ │ ├── test-list-query.sql
│ │ ├── test-get-query.sql
│ │ ├── test-not-found-query.sql
│ │ ├── posttest-variables.tf
│ │ ├── variables.tf
│ │ └── daemonset.yaml
│ ├── kubernetes_ingress
│ │ ├── test-not-found-expected.json
│ │ ├── test-list-query.sql
│ │ ├── test-not-found-query.sql
│ │ ├── test-get-query.sql
│ │ ├── minimal-ingress.yaml
│ │ ├── test-list-expected.json
│ │ ├── ingress-resource-backend.yaml
│ │ ├── posttest-variables.tf
│ │ ├── ingress-wildcard-host.yaml
│ │ ├── test-get-expected.json
│ │ └── variables.tf
│ ├── kubernetes_cluster_role
│ │ ├── test-not-found-expected.json
│ │ ├── test-list-query.sql
│ │ ├── test-get-query.sql
│ │ ├── posttest-variables.tf
│ │ ├── test-not-found-query.sql
│ │ ├── variables.tf
│ │ ├── cluster_role.yaml
│ │ ├── test-get-expected.json
│ │ └── test-list-expected.json
│ ├── kubernetes_config_map
│ │ ├── test-not-found-expected.json
│ │ ├── posttest-variables.tf
│ │ ├── test-list-query.sql
│ │ ├── test-get-query.sql
│ │ ├── test-not-found-query.sql
│ │ ├── config_map.yaml
│ │ ├── variables.tf
│ │ ├── test-get-expected.json
│ │ └── test-list-expected.json
│ ├── kubernetes_deployment
│ │ ├── test-not-found-expected.json
│ │ ├── test-get-expected.json
│ │ ├── test-list-expected.json
│ │ ├── test-list-query.sql
│ │ ├── test-get-query.sql
│ │ ├── test-not-found-query.sql
│ │ ├── nginx-deployment.yaml
│ │ ├── posttest-variables.tf
│ │ └── variables.tf
│ ├── kubernetes_limit_range
│ │ ├── test-not-found-expected.json
│ │ ├── test-get-expected.json
│ │ ├── test-list-expected.json
│ │ ├── posttest-variables.tf
│ │ ├── test-list-query.sql
│ │ ├── test-not-found-query.sql
│ │ ├── test-get-query.sql
│ │ ├── limit_range.yaml
│ │ └── variables.tf
│ ├── kubernetes_resource_quota
│ │ ├── test-not-found-expected.json
│ │ ├── test-get-expected.json
│ │ ├── test-list-expected.json
│ │ ├── posttest-variables.tf
│ │ ├── test-list-query.sql
│ │ ├── test-not-found-query.sql
│ │ ├── test-get-query.sql
│ │ ├── variables.tf
│ │ └── resource_quota.yaml
│ ├── kubernetes_role_binding
│ │ ├── test-not-found-expected.json
│ │ ├── posttest-variables.tf
│ │ ├── test-list-query.sql
│ │ ├── test-get-query.sql
│ │ ├── test-get-expected.json
│ │ ├── test-list-expected.json
│ │ ├── test-not-found-query.sql
│ │ ├── variables.tf
│ │ └── role_binding.yaml
│ ├── kubernetes_service
│ │ ├── test-not-found-expected.json
│ │ ├── test-list-expected.json
│ │ ├── posttest-variables.tf
│ │ ├── test-list-query.sql
│ │ ├── service.yaml
│ │ ├── test-get-query.sql
│ │ ├── test-not-found-query.sql
│ │ ├── test-get-expected.json
│ │ └── variables.tf
│ ├── kubernetes_service_account
│ │ ├── test-not-found-expected.json
│ │ ├── test-get-expected.json
│ │ ├── test-list-expected.json
│ │ ├── posttest-variables.tf
│ │ ├── test-list-query.sql
│ │ ├── test-get-query.sql
│ │ ├── test-not-found-query.sql
│ │ ├── variables.tf
│ │ └── service-account.yaml
│ ├── kubernetes_stateful_set
│ │ ├── test-not-found-expected.json
│ │ ├── test-list-expected.json
│ │ ├── posttest-variables.tf
│ │ ├── test-list-query.sql
│ │ ├── test-not-found-query.sql
│ │ ├── test-get-query.sql
│ │ ├── variables.tf
│ │ ├── test-get-expected.json
│ │ └── statefulset.yaml
│ ├── kubernetes_storage_class
│ │ ├── test-not-found-expected.json
│ │ ├── test-get-expected.json
│ │ ├── posttest-variables.tf
│ │ ├── test-not-found-query.sql
│ │ ├── test-list-expected.json
│ │ ├── test-get-query.sql
│ │ ├── test-list-query.sql
│ │ ├── storageclass.yaml
│ │ └── variables.tf
│ ├── kubernetes_cluster_role_binding
│ │ ├── test-not-found-expected.json
│ │ ├── posttest-variables.tf
│ │ ├── test-list-query.sql
│ │ ├── test-get-query.sql
│ │ ├── test-not-found-query.sql
│ │ ├── test-get-expected.json
│ │ ├── test-list-expected.json
│ │ ├── variables.tf
│ │ └── cluster_role.yaml
│ ├── kubernetes_pod_disruption_budget
│ │ ├── test-not-found-expected.json
│ │ ├── test-get-expected.json
│ │ ├── test-list-expected.json
│ │ ├── test-list-query.sql
│ │ ├── test-not-found-query.sql
│ │ ├── posttest-variables.tf
│ │ ├── pdb.yaml
│ │ ├── test-get-query.sql
│ │ └── variables.tf
│ ├── host_port
│ │ ├── test-host-port-expected.json
│ │ ├── test-host-port-query.sql
│ │ ├── nginx-app-PASSED.yaml
│ │ ├── posttest-variables.tf
│ │ ├── variables.tf
│ │ └── DS-node-exporter-FAILED.yaml
│ ├── kubernetes_replicaset
│ │ ├── posttest-variables.tf
│ │ ├── test-list-replicaset-query.sql
│ │ ├── test-get-replicaset-expected.json
│ │ ├── test-list-replicaset-expected.json
│ │ ├── test-get-replicaset-query.sql
│ │ ├── frontend.yaml
│ │ └── variables.tf
│ ├── kubernetes_pod
│ │ ├── test-list-unowned-pods-expected.json
│ │ ├── test-list-unowned-pods-query.sql
│ │ ├── naked-pod.yml
│ │ ├── test-get-naked-pod-expected.json
│ │ ├── pull-backoff.yml
│ │ ├── test-get-naked-pod-query.sql
│ │ ├── posttest-variables.tf
│ │ ├── privileged-pod.yml
│ │ └── variables.tf
│ ├── root_containers
│ │ ├── test-run-as-user-expected.json
│ │ ├── test-run-as-user-query.sql
│ │ ├── test-run-as-non-root-query.sql
│ │ ├── test-run-as-non-root-expected.json
│ │ ├── posttest-variables.tf
│ │ ├── variables.tf
│ │ └── rootContainersFAILED.yaml
│ └── docker_daemon_socket
│ │ └── variables.tf
├── package.json
├── posttest-naked-pod.yml
└── .gitignore
├── .github
├── PULL_REQUEST_TEMPLATE.md
├── workflows
│ ├── sync-labels.yml
│ ├── golangci-lint.yml
│ ├── registry-publish.yml
│ ├── add-issue-to-project.yml
│ ├── stale.yml
│ └── steampipe-anywhere.yml
├── ISSUE_TEMPLATE
│ ├── feature-request---new-table.md
│ ├── config.yml
│ ├── bug_report.md
│ └── feature_request.md
└── dependabot.yml
├── Makefile
├── test_files
├── network_policy
│ ├── default-deny-all-egress.yml
│ ├── default-allow-all-egress.yml
│ ├── nginx-policy.yaml
│ └── test-network-policy.yaml
├── pod
│ ├── hostpid.yml
│ ├── pull-backoff.yml
│ └── privileged-pod.yml
├── volume
│ ├── pv-claim.yaml
│ ├── pv-volume.yaml
│ └── pv-pod.yaml
├── job
│ ├── job.yaml
│ └── pi-with-timeout.yaml
├── application
│ └── guestbook
│ │ ├── mongo-service.yaml
│ │ ├── frontend-service.yaml
│ │ ├── mongo-deployment.yaml
│ │ └── frontend-deployment.yaml
├── controllers
│ └── replication.yaml
├── ingress
│ ├── minimal-ingress.yaml
│ ├── ingress-resource-backend.yaml
│ └── ingress-wildcard-host.yaml
└── pod_security_policy
│ ├── privileged-psp.yaml
│ └── restricted-psp.yaml
├── main.go
├── .gitignore
├── .goreleaser.yml
├── kubernetes
├── connection_config.go
├── table_helm_template.go
├── table_helm_template_rendered.go
├── table_helm_chart.go
└── table_helm_value.go
├── docs
└── tables
│ ├── helm_template.md
│ ├── kubernetes_namespace.md
│ ├── kubernetes_service.md
│ ├── kubernetes_config_map.md
│ ├── kubernetes_ingress.md
│ ├── kubernetes_persistent_volume_claim.md
│ ├── kubernetes_cronjob.md
│ ├── kubernetes_resource_quota.md
│ ├── kubernetes_endpoint.md
│ ├── kubernetes_role.md
│ ├── kubernetes_event.md
│ ├── kubernetes_endpoint_slice.md
│ ├── kubernetes_stateful_set.md
│ ├── kubernetes_secret.md
│ ├── kubernetes_persistent_volume.md
│ ├── kubernetes_pod_disruption_budget.md
│ ├── kubernetes_replication_controller.md
│ ├── helm_template_rendered.md
│ ├── kubernetes_limit_range.md
│ ├── helm_value.md
│ └── kubernetes_horizontal_pod_autoscaler.md
└── config
└── kubernetes.spc
/k8s-test/tests/kubernetes_role/test-not-found-expected.json:
--------------------------------------------------------------------------------
1 | null
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cronjob/test-not-found-expected.json:
--------------------------------------------------------------------------------
1 | null
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_daemonset/test-not-found-expected.json:
--------------------------------------------------------------------------------
1 | null
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_ingress/test-not-found-expected.json:
--------------------------------------------------------------------------------
1 | null
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role/test-not-found-expected.json:
--------------------------------------------------------------------------------
1 | null
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_config_map/test-not-found-expected.json:
--------------------------------------------------------------------------------
1 | null
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_deployment/test-not-found-expected.json:
--------------------------------------------------------------------------------
1 | null
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_limit_range/test-not-found-expected.json:
--------------------------------------------------------------------------------
1 | null
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_resource_quota/test-not-found-expected.json:
--------------------------------------------------------------------------------
1 | null
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_role_binding/test-not-found-expected.json:
--------------------------------------------------------------------------------
1 | null
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_service/test-not-found-expected.json:
--------------------------------------------------------------------------------
1 | null
2 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_service_account/test-not-found-expected.json:
--------------------------------------------------------------------------------
1 | null
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_stateful_set/test-not-found-expected.json:
--------------------------------------------------------------------------------
1 | null
2 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_storage_class/test-not-found-expected.json:
--------------------------------------------------------------------------------
1 | null
2 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role_binding/test-not-found-expected.json:
--------------------------------------------------------------------------------
1 | null
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod_disruption_budget/test-not-found-expected.json:
--------------------------------------------------------------------------------
1 | null
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_resource_quota/test-get-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "pods-medium",
4 | "namespace": "default"
5 | }
6 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_limit_range/test-get-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "cpu-limit-range",
4 | "namespace": "default"
5 | }
6 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_limit_range/test-list-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "cpu-limit-range",
4 | "namespace": "default"
5 | }
6 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_resource_quota/test-list-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "pods-medium",
4 | "namespace": "default"
5 | }
6 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cronjob/test-get-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "hello",
4 | "namespace": "default",
5 | "suspend": false
6 | }
7 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cronjob/test-list-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "hello",
4 | "namespace": "default",
5 | "suspend": false
6 | }
7 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_service_account/test-get-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "jenkins",
4 | "namespace": "default",
5 | "secrets": 1
6 | }
7 | ]
8 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_service_account/test-list-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "jenkins",
4 | "namespace": "default",
5 | "secrets": 1
6 | }
7 | ]
8 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cronjob/test-not-found-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace
4 | from
5 | kubernetes_cronjob
6 | where
7 | name = 'hello_123_123';
8 |
9 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod_disruption_budget/test-get-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "max_unavailable": "{Type:0 IntVal:1 StrVal:}",
4 | "name": "zk-pdb"
5 | }
6 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod_disruption_budget/test-list-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "max_unavailable": "{Type:0 IntVal:1 StrVal:}",
4 | "name": "zk-pdb"
5 | }
6 | ]
--------------------------------------------------------------------------------
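Note: the `max_unavailable` value in the two fixtures above is not plain JSON; it is the stringified form of Kubernetes' `intstr.IntOrString` union type, which is how the upstream PodDisruptionBudget spec models `maxUnavailable: 1` from pdb.yaml. A minimal Go sketch, assuming the column is backed by `k8s.io/apimachinery`'s `intstr` package (the print statement below is illustrative, not code from this repo), shows where the `{Type:0 IntVal:1 StrVal:}` shape comes from:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// maxUnavailable: 1 in pdb.yaml parses into the integer variant of IntOrString.
	v := intstr.FromInt(1)

	// Formatting the struct with %+v prints the literal seen in the expected JSON:
	// {Type:0 IntVal:1 StrVal:}  (Type 0 selects the IntVal field).
	fmt.Printf("%+v\n", v)
}
```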
/k8s-test/tests/kubernetes_service/test-list-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "cluster_ip": "10.108.144.78",
4 | "name": "jenkins",
5 | "namespace": "default"
6 | }
7 | ]
8 |
--------------------------------------------------------------------------------
/k8s-test/tests/host_port/test-host-port-expected.json:
--------------------------------------------------------------------------------
1 |
2 | [
3 | {
4 | "host_port": 9100,
5 | "name": "prometheus-node-exporter",
6 | "namespace": "monitoring"
7 | }
8 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role/test-list-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | rules
4 | from
5 | kubernetes.kubernetes_cluster_role
6 | where
7 | name like '%jenkins%';
8 |
9 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cronjob/test-list-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | suspend
5 | from
6 | kubernetes_cronjob
7 | where
8 | name = 'hello';
9 |
10 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_stateful_set/test-list-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "web",
4 | "namespace": "default",
5 | "replicas": 2,
6 | "service_name": "nginx"
7 | }
8 | ]
9 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod_disruption_budget/test-list-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | max_unavailable
4 | from
5 | kubernetes_pod_disruption_budget
6 | where
7 | name = 'zk-pdb';
8 |
9 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod_disruption_budget/test-not-found-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | max_unavailable
4 | from
5 | kubernetes_pod_disruption_budget
6 | where
7 | name = 'zk-pdb-aa';
8 |
9 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_role/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "delete-role" {
2 | provisioner "local-exec" {
3 | command = "kubectl delete -f ${path.cwd}/role.yaml"
4 | }
5 | }
6 |
7 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_service/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "delete-service" {
2 | provisioner "local-exec" {
3 | command = "kubectl delete -f ${path.cwd}/service.yaml"
4 | }
5 | }
6 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role/test-get-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | rules
4 | from
5 | kubernetes.kubernetes_cluster_role
6 | where
7 | name = 'jenkins'
8 | order by
9 | name;
10 |
11 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cronjob/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "delete-role" {
2 | provisioner "local-exec" {
3 | command = "kubectl delete -f ${path.cwd}/cronjob.yaml"
4 | }
5 | }
6 |
7 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cronjob/test-get-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | suspend
5 | from
6 | kubernetes_cronjob
7 | where
8 | name = 'hello'
9 | and namespace = 'default';
10 |
11 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | # Example query results
2 |
3 | Results
4 |
5 | ```
6 | Add example SQL query results here (please include the input queries as well)
7 | ```
8 |
9 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_config_map/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "delete_config_map" {
2 | provisioner "local-exec" {
3 | command = "kubectl delete -f ${path.cwd}/config_map.yaml"
4 | }
5 | }
6 |
7 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_limit_range/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "delete_limit" {
2 | provisioner "local-exec" {
3 | command = "kubectl delete -f ${path.cwd}/limit_range.yaml"
4 | }
5 | }
6 |
7 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod_disruption_budget/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "delete-pdb" {
2 | provisioner "local-exec" {
3 | command = "kubectl delete -f ${path.cwd}/pdb.yaml"
4 | }
5 | }
6 |
7 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_stateful_set/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "delete-statefulset" {
2 | provisioner "local-exec" {
3 | command = "kubectl delete -f ${path.cwd}/statefulset.yaml"
4 | }
5 | }
6 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_storage_class/test-get-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "allow_volume_expansion": true,
4 | "name": "mystorage",
5 | "reclaim_policy": "Retain",
6 | "volume_binding_mode": "Immediate"
7 | }
8 | ]
9 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "delete-cluster-role" {
2 | provisioner "local-exec" {
3 | command = "kubectl delete -f ${path.cwd}/cluster_role.yaml"
4 | }
5 | }
6 |
7 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_resource_quota/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "delete_quota" {
2 | provisioner "local-exec" {
3 | command = "kubectl delete -f ${path.cwd}/resource_quota.yaml"
4 | }
5 | }
6 |
7 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_role_binding/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "delete_role_binding" {
2 | provisioner "local-exec" {
3 | command = "kubectl delete -f ${path.cwd}/role_binding.yaml"
4 | }
5 | }
6 |
7 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_storage_class/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "delete-storageclass" {
2 | provisioner "local-exec" {
3 | command = "kubectl delete -f ${path.cwd}/storageclass.yaml"
4 | }
5 | }
6 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod_disruption_budget/pdb.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1
2 | kind: PodDisruptionBudget
3 | metadata:
4 | name: zk-pdb
5 | spec:
6 | maxUnavailable: 1
7 | selector:
8 | matchLabels:
9 | app: zookeeper
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_replicaset/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "delete-frontend-replicaset" {
2 | provisioner "local-exec" {
3 | command = "kubectl delete -f ${path.cwd}/frontend.yaml"
4 | }
5 | }
6 |
7 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_resource_quota/test-list-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace
4 | from
5 | kubernetes.kubernetes_resource_quota
6 | where
7 | name = 'pods-medium'
8 | order by
9 | namespace,
10 | name;
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_role/test-list-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | rules
5 | from
6 | kubernetes.kubernetes_role
7 | where
8 | name = 'jenkins'
9 | order by
10 | namespace,
11 | name;
12 |
13 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role_binding/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "delete-cluster-role" {
2 | provisioner "local-exec" {
3 | command = "kubectl delete -f ${path.cwd}/cluster_role.yaml"
4 | }
5 | }
6 |
7 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_ingress/test-list-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | rules,
5 | ingress_class_name as class
6 | from
7 | kubernetes.kubernetes_ingress
8 | where
9 | name = 'minimal-ingress';
10 |
11 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_limit_range/test-list-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace
4 | from
5 | kubernetes.kubernetes_limit_range
6 | where
7 | name = 'cpu-limit-range'
8 | order by
9 | namespace,
10 | name;
11 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod_disruption_budget/test-get-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | max_unavailable
4 | from
5 | kubernetes_pod_disruption_budget
6 | where
7 | name = 'zk-pdb'
8 | and namespace = 'default';
9 |
10 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_service/test-list-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | cluster_ip
5 | from
6 | kubernetes.kubernetes_service
7 | where
8 | name = 'jenkins'
9 | order by
10 | namespace,
11 | name;
12 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_service_account/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "delete-service-account" {
2 | provisioner "local-exec" {
3 | command = "kubectl delete -f ${path.cwd}/service-account.yaml"
4 | }
5 | }
6 |
7 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | STEAMPIPE_INSTALL_DIR ?= ~/.steampipe
2 | BUILD_TAGS = netgo
3 | install:
4 | go build -o $(STEAMPIPE_INSTALL_DIR)/plugins/hub.steampipe.io/plugins/turbot/kubernetes@latest/steampipe-plugin-kubernetes.plugin -tags "${BUILD_TAGS}" *.go
5 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role_binding/test-list-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | role_name,
4 | role_kind,
5 | subjects
6 | from
7 | kubernetes.kubernetes_cluster_role_binding
8 | where
9 | name = 'jenkins';
10 |
11 |
--------------------------------------------------------------------------------
/test_files/network_policy/default-deny-all-egress.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.k8s.io/v1
3 | kind: NetworkPolicy
4 | metadata:
5 | name: default-deny-egress
6 | spec:
7 | podSelector: {}
8 | policyTypes:
9 | - Egress
10 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_ingress/test-not-found-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | rules,
5 | ingress_class_name as class
6 | from
7 | kubernetes.kubernetes_ingress
8 | where
9 | name = '' and namespace = '';
10 |
11 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_config_map/test-list-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | data.key,
5 | data.value
6 | from
7 | kubernetes.kubernetes_config_map,
8 | jsonb_each(data) as data
9 | where
10 | name = 'game-demo';
11 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_limit_range/test-not-found-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace
4 | from
5 | kubernetes.kubernetes_limit_range
6 | where
7 | name = ''
8 | and namespace = ''
9 | order by
10 | namespace,
11 | name;
12 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_resource_quota/test-not-found-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace
4 | from
5 | kubernetes.kubernetes_resource_quota
6 | where
7 | name = ''
8 | and namespace = ''
9 | order by
10 | namespace,
11 | name;
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_storage_class/test-not-found-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | allow_volume_expansion,
4 | reclaim_policy,
5 | volume_binding_mode
6 | from
7 | kubernetes.kubernetes_storage_class
8 | where
9 | name = 'abc';
10 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod/test-list-unowned-pods-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "static-web",
4 | "namespace": "default",
5 | "node_name": "gke-dev-steampipe-morale-default-pool-03958ae1-a7mf",
6 | "phase": "Running"
7 | }
8 | ]
9 |
--------------------------------------------------------------------------------
/.github/workflows/sync-labels.yml:
--------------------------------------------------------------------------------
1 | name: Sync Labels
2 | on:
3 | schedule:
4 | - cron: "30 22 * * 1"
5 | workflow_dispatch:
6 |
7 | jobs:
8 | sync_labels_workflow:
9 | uses: turbot/steampipe-workflows/.github/workflows/sync-labels.yml@main
10 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role/test-not-found-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | rules,
4 | age(current_timestamp, creation_timestamp)
5 | from
6 | kubernetes.kubernetes_cluster_role
7 | where
8 | name = ''
9 | order by
10 | name;
11 |
12 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod/test-list-unowned-pods-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | phase,
5 | -- pod_ip,
6 | node_name
7 | from
8 | kubernetes_pod
9 | where
10 | name = 'static-web'
11 | and owner_references is null;
12 |
13 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_resource_quota/test-get-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace
4 | from
5 | kubernetes.kubernetes_resource_quota
6 | where
7 | name = 'pods-medium'
8 | and namespace = 'default'
9 | order by
10 | namespace,
11 | name;
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_stateful_set/test-list-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | service_name,
5 | replicas
6 | from
7 | kubernetes.kubernetes_stateful_set
8 | where
9 | name = 'web'
10 | order by
11 | namespace,
12 | name;
13 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_storage_class/test-list-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "allow_volume_expansion": true,
4 | "name": "mystorage",
5 | "reclaim_policy": "Retain",
6 | "title": "mystorage",
7 | "volume_binding_mode": "Immediate"
8 | }
9 | ]
10 |
--------------------------------------------------------------------------------
/test_files/pod/hostpid.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: view-pid
5 | spec:
6 | hostPID: true
7 | containers:
8 | - name: view-pid
9 | image: nginx:1.14.2
10 | ports:
11 | - containerPort: 80
12 |
13 |
14 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role_binding/test-get-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | role_name,
4 | role_kind,
5 | subjects
6 | from
7 | kubernetes.kubernetes_cluster_role_binding
8 | where
9 | name = 'jenkins'
10 | order by
11 | name;
12 |
13 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_limit_range/test-get-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace
4 | from
5 | kubernetes.kubernetes_limit_range
6 | where
7 | name = 'cpu-limit-range'
8 | and namespace = 'default'
9 | order by
10 | namespace,
11 | name;
12 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_role/test-get-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | rules
5 | from
6 | kubernetes.kubernetes_role
7 | where
8 | name = 'jenkins'
9 | and namespace = 'default'
10 | order by
11 | namespace,
12 | name;
13 |
14 |
--------------------------------------------------------------------------------
/test_files/network_policy/default-allow-all-egress.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.k8s.io/v1
3 | kind: NetworkPolicy
4 | metadata:
5 | name: allow-all-egress
6 | spec:
7 | podSelector: {}
8 | egress:
9 | - {}
10 | policyTypes:
11 | - Egress
12 |
--------------------------------------------------------------------------------
/test_files/volume/pv-claim.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: task-pv-claim
5 | spec:
6 | storageClassName: manual
7 | accessModes:
8 | - ReadWriteOnce
9 | resources:
10 | requests:
11 | storage: 3Gi
12 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_ingress/test-get-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | rules,
5 | ingress_class_name as class
6 | from
7 | kubernetes.kubernetes_ingress
8 | where
9 | name = 'ingress-wildcard-host'
10 | and namespace = 'default';
11 |
12 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_limit_range/limit_range.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: LimitRange
3 | metadata:
4 | name: cpu-limit-range
5 | spec:
6 | limits:
7 | - default:
8 | cpu: 1
9 | defaultRequest:
10 | cpu: 0.5
11 | type: Container
12 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_storage_class/test-get-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | allow_volume_expansion,
4 | reclaim_policy,
5 | volume_binding_mode
6 | from
7 | kubernetes.kubernetes_storage_class
8 | where
9 | name = 'mystorage'
10 | order by
11 | name;
12 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_daemonset/test-get-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "available": 0,
4 | "current": 0,
5 | "desired": 0,
6 | "name": "prometheus-node-exporter",
7 | "namespace": "monitoring",
8 | "ready": 0,
9 | "selector": null
10 | }
11 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_daemonset/test-list-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "available": 0,
4 | "current": 0,
5 | "desired": 0,
6 | "name": "prometheus-node-exporter",
7 | "namespace": "monitoring",
8 | "ready": 0,
9 | "selector": null
10 | }
11 | ]
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "github.com/turbot/steampipe-plugin-kubernetes/kubernetes"
5 | "github.com/turbot/steampipe-plugin-sdk/v5/plugin"
6 | )
7 |
8 | func main() {
9 | plugin.Serve(&plugin.ServeOpts{
10 | PluginFunc: kubernetes.Plugin})
11 | }
12 |
--------------------------------------------------------------------------------
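main.go only hands the plugin to the SDK's serve loop; the `kubernetes.Plugin` function it references is defined in the `kubernetes` package listed in the tree above (connection_config.go plus the table_*.go files). As a rough sketch of the shape `plugin.Serve` expects under steampipe-plugin-sdk v5 (the commented table constructor name is hypothetical, not taken from this repo):

```go
package kubernetes

import (
	"context"

	"github.com/turbot/steampipe-plugin-sdk/v5/plugin"
)

// Plugin builds the plugin definition that main.go passes to plugin.Serve.
func Plugin(ctx context.Context) *plugin.Plugin {
	return &plugin.Plugin{
		Name: "steampipe-plugin-kubernetes",
		TableMap: map[string]*plugin.Table{
			// Each table_*.go file registers one table here, e.g. (hypothetical):
			// "kubernetes_pod": tableKubernetesPod(ctx),
		},
	}
}
```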
/k8s-test/tests/kubernetes_role_binding/test-list-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | role_name,
5 | role_kind,
6 | subjects
7 | from
8 | kubernetes.kubernetes_role_binding
9 | where
10 | name = 'jenkins'
11 | order by
12 | namespace,
13 | name;
14 |
15 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_service_account/test-list-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | jsonb_array_length(secrets) as secrets
5 | from
6 | kubernetes.kubernetes_service_account
7 | where
8 | name = 'jenkins'
9 | order by
10 | namespace,
11 | name;
12 |
13 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role_binding/test-not-found-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | role_name,
4 | role_kind,
5 | subjects,
6 | age(current_timestamp, creation_timestamp)
7 | from
8 | kubernetes.kubernetes_cluster_role_binding
9 | where
10 | name = '';
11 |
12 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_config_map/test-get-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | data.key,
5 | data.value
6 | from
7 | kubernetes.kubernetes_config_map,
8 | jsonb_each(data) as data
9 | where
10 | name = 'game-demo'
11 | and namespace = 'default';
12 |
13 |
--------------------------------------------------------------------------------
/.github/workflows/golangci-lint.yml:
--------------------------------------------------------------------------------
1 | name: golangci-lint
2 | on:
3 | push:
4 | tags:
5 | - v*
6 | branches:
7 | - main
8 | pull_request:
9 |
10 | jobs:
11 | golangci_lint_workflow:
12 | uses: turbot/steampipe-workflows/.github/workflows/golangci-lint.yml@main
13 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_service/service.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: jenkins
6 | spec:
7 | clusterIP: "10.108.144.78"
8 | selector:
9 | app: MyApp
10 | ports:
11 | - protocol: TCP
12 | port: 80
13 | targetPort: 9376
14 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_storage_class/test-list-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | allow_volume_expansion,
4 | reclaim_policy,
5 | volume_binding_mode,
6 | title
7 | from
8 | kubernetes.kubernetes_storage_class
9 | where
10 | title = 'mystorage'
11 | order by
12 | title;
13 |
--------------------------------------------------------------------------------
/k8s-test/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "k8s-test",
3 | "dependencies": {
4 | "chalk": "^4.1.0",
5 | "custom-env": "^2.0.1",
6 | "diff": "^4.0.2",
7 | "fs-extra": "^9.0.1",
8 | "json-diff": "^0.5.4",
9 | "lodash": "^4.17.20",
10 | "micromatch": "^4.0.8"
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/k8s-test/posttest-naked-pod.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: static-web
5 | labels:
6 | role: myrole
7 | spec:
8 | containers:
9 | - name: web
10 | image: nginx
11 | ports:
12 | - name: web
13 | containerPort: 80
14 | protocol: TCP
15 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_deployment/test-get-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "available_replicas": 3,
4 | "name": "nginx-deployment-test",
5 | "namespace": "default",
6 | "ready_replicas": 3,
7 | "status_replicas": 3,
8 | "unavailable_replicas": 0,
9 | "updated_replicas": 3
10 | }
11 | ]
--------------------------------------------------------------------------------
/k8s-test/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 |
8 | # .tfvars files
9 | *.tfvars
10 |
11 | .lock/*
12 | !.lock/.gitkeep
13 |
14 | .DS_Store
15 |
16 | .*.swo
17 | .*.swp
18 |
19 |
20 | # Ignore node dependencies
21 | node_modules
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_config_map/test-not-found-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | data.key,
5 | data.value,
6 | age(current_timestamp, creation_timestamp)
7 | from
8 | kubernetes.kubernetes_config_map,
9 | jsonb_each(data) as data
10 | where
11 | namespace = '' and name = '';
12 |
13 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_deployment/test-list-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "available_replicas": 3,
4 | "name": "nginx-deployment-test",
5 | "namespace": "default",
6 | "ready_replicas": 3,
7 | "status_replicas": 3,
8 | "unavailable_replicas": 0,
9 | "updated_replicas": 3
10 | }
11 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_role/test-not-found-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | rules,
5 | age(current_timestamp, creation_timestamp)
6 | from
7 | kubernetes.kubernetes_role
8 | where
9 | name = 'jenkins_123_123'
10 | and namespace = ''
11 | order by
12 | namespace,
13 | name;
14 |
15 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_role_binding/test-get-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | role_name,
5 | role_kind,
6 | subjects
7 | from
8 | kubernetes.kubernetes_role_binding
9 | where
10 | name = 'jenkins'
11 | and namespace = 'default'
12 | order by
13 | namespace,
14 | name;
15 |
16 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_service_account/test-get-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | jsonb_array_length(secrets) as secrets
5 | from
6 | kubernetes.kubernetes_service_account
7 | where
8 | name = 'jenkins'
9 | and namespace = 'default'
10 | order by
11 | namespace,
12 | name;
13 |
14 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_deployment/test-list-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | status_replicas,
5 | ready_replicas,
6 | updated_replicas,
7 | available_replicas,
8 | unavailable_replicas
9 | from
10 | kubernetes.kubernetes_deployment
11 | where
12 | name = 'nginx-deployment-test';
13 |
14 |
--------------------------------------------------------------------------------
/test_files/volume/pv-volume.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: task-pv-volume
5 | labels:
6 | type: local
7 | spec:
8 | storageClassName: manual
9 | capacity:
10 | storage: 10Gi
11 | accessModes:
12 | - ReadWriteOnce
13 | hostPath:
14 | path: "/mnt/data"
15 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_service/test-get-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | cluster_ip,
5 | type,
6 | cluster_ips,
7 | ports,
8 | selector
9 | from
10 | kubernetes.kubernetes_service
11 | where
12 | name = 'jenkins'
13 | and namespace = 'default'
14 | order by
15 | namespace,
16 | name;
17 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_service/test-not-found-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | ports,
5 | age(current_timestamp, creation_timestamp)
6 | from
7 | kubernetes.kubernetes_service
8 | where
9 | name = 'jenkins_123_123'
10 | and namespace = 'default'
11 | order by
12 | namespace,
13 | name;
14 |
--------------------------------------------------------------------------------
/test_files/network_policy/nginx-policy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: access-nginx
5 | spec:
6 | podSelector:
7 | matchLabels:
8 | app: nginx
9 | ingress:
10 | - from:
11 | - podSelector:
12 | matchLabels:
13 | access: "true"
14 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod/naked-pod.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: static-web
5 | labels:
6 | role: myrole
7 | spec:
8 | containers:
9 | - name: web
10 | image: nginx
11 | ports:
12 | - name: web
13 | containerPort: 80
14 | protocol: TCP
15 |
16 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_role_binding/test-get-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "jenkins",
4 | "namespace": "default",
5 | "role_kind": "Role",
6 | "role_name": "jenkins",
7 | "subjects": [
8 | {
9 | "kind": "ServiceAccount",
10 | "name": "jenkins"
11 | }
12 | ]
13 | }
14 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_role_binding/test-list-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "jenkins",
4 | "namespace": "default",
5 | "role_kind": "Role",
6 | "role_name": "jenkins",
7 | "subjects": [
8 | {
9 | "kind": "ServiceAccount",
10 | "name": "jenkins"
11 | }
12 | ]
13 | }
14 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod_disruption_budget/variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "create-pdb" {
2 | provisioner "local-exec" {
3 | command = "kubectl apply -f ${path.cwd}/pdb.yaml"
4 | }
5 | }
6 |
7 | resource "null_resource" "delay" {
8 | provisioner "local-exec" {
9 | command = "sleep 45"
10 | }
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/test_files/job/job.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: pi
5 | spec:
6 | template:
7 | spec:
8 | containers:
9 | - name: pi
10 | image: perl
11 | command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
12 | restartPolicy: Never
13 | backoffLimit: 4
14 |
15 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_replicaset/test-list-replicaset-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | replicas as desired,
5 | ready_replicas as ready,
6 | available_replicas as available,
7 | selector,
8 | fully_labeled_replicas
9 | from
10 | kubernetes.kubernetes_replicaset
11 | where
12 | name = 'frontend';
13 |
14 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_stateful_set/test-not-found-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | service_name,
5 | age(current_timestamp, creation_timestamp)
6 | from
7 | kubernetes.kubernetes_stateful_set
8 | where
9 | name = 'jenkins_123_123'
10 | and namespace = 'default'
11 | order by
12 | namespace,
13 | name;
14 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_storage_class/storageclass.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | name: mystorage
5 | provisioner: kubernetes.io/aws-ebs
6 | parameters:
7 | type: gp2
8 | reclaimPolicy: Retain
9 | allowVolumeExpansion: true
10 | mountOptions:
11 | - debug
12 | volumeBindingMode: Immediate
--------------------------------------------------------------------------------
/.github/workflows/registry-publish.yml:
--------------------------------------------------------------------------------
1 | name: Build and Deploy OCI Image
2 |
3 | on:
4 | push:
5 | tags:
6 | - 'v*'
7 |
8 | jobs:
9 | registry_publish_workflow_ghcr:
10 | uses: turbot/steampipe-workflows/.github/workflows/registry-publish-ghcr.yml@main
11 | secrets: inherit
12 | with:
13 | releaseTimeout: 60m
14 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_deployment/test-get-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | status_replicas,
5 | ready_replicas,
6 | updated_replicas,
7 | available_replicas,
8 | unavailable_replicas
9 | from
10 | kubernetes.kubernetes_deployment
11 | where
12 | name = 'nginx-deployment-test'
13 | and namespace = 'default';
14 |
15 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_limit_range/variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "create_limit" {
2 | provisioner "local-exec" {
3 | command = "kubectl apply -f ${path.cwd}/limit_range.yaml --namespace=default"
4 | }
5 | }
6 |
7 | resource "null_resource" "delay" {
8 | provisioner "local-exec" {
9 | command = "sleep 45"
10 | }
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_role_binding/test-not-found-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | role_name,
5 | role_kind,
6 | subjects,
7 | age(current_timestamp, creation_timestamp)
8 | from
9 | kubernetes.kubernetes_role_binding
10 | where
11 | name = ''
12 | and namespace = ''
13 | order by
14 | namespace,
15 | name;
16 |
17 |
--------------------------------------------------------------------------------
/k8s-test/tests/host_port/test-host-port-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | port -> 'hostPort' as host_port
5 | from
6 | k8s_minikube.kubernetes_daemonset,
7 | jsonb_array_elements(template -> 'spec' -> 'containers') as container,
8 | jsonb_array_elements(container -> 'ports') as port
9 | where
10 | port::jsonb ? 'hostPort';
11 |
12 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod/test-get-naked-pod-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "container_count": 1,
4 | "ephemeral_container_count": null,
5 | "init_container_count": null,
6 | "name": "static-web",
7 | "namespace": "default",
8 | "node_name": "gke-dev-steampipe-morale-default-pool-03958ae1-a7mf",
9 | "phase": "Running"
10 | }
11 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_replicaset/test-get-replicaset-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "available": 3,
4 | "desired": 3,
5 | "fully_labeled_replicas": 3,
6 | "name": "frontend",
7 | "namespace": "default",
8 | "ready": 3,
9 | "selector": {
10 | "matchLabels": {
11 | "tier": "frontend"
12 | }
13 | }
14 | }
15 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role_binding/test-get-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "jenkins",
4 | "role_kind": "ClusterRole",
5 | "role_name": "jenkins",
6 | "subjects": [
7 | {
8 | "kind": "ServiceAccount",
9 | "name": "jenkins",
10 | "namespace": "kubernetes-plugin-test"
11 | }
12 | ]
13 | }
14 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role_binding/test-list-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "jenkins",
4 | "role_kind": "ClusterRole",
5 | "role_name": "jenkins",
6 | "subjects": [
7 | {
8 | "kind": "ServiceAccount",
9 | "name": "jenkins",
10 | "namespace": "kubernetes-plugin-test"
11 | }
12 | ]
13 | }
14 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_replicaset/test-list-replicaset-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "available": 3,
4 | "desired": 3,
5 | "fully_labeled_replicas": 3,
6 | "name": "frontend",
7 | "namespace": "default",
8 | "ready": 3,
9 | "selector": {
10 | "matchLabels": {
11 | "tier": "frontend"
12 | }
13 | }
14 | }
15 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_daemonset/test-list-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | desired_number_scheduled as desired,
5 | current_number_scheduled as current,
6 | number_ready as ready,
7 | number_available as available,
8 | selector
9 | from
10 | kubernetes.kubernetes_daemonset
11 | where
12 | name = 'prometheus-node-exporter';
13 |
14 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_replicaset/test-get-replicaset-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | replicas as desired,
5 | ready_replicas as ready,
6 | available_replicas as available,
7 | selector,
8 | fully_labeled_replicas
9 | from
10 | kubernetes.kubernetes_replicaset
11 | where
12 | name = 'frontend'
13 | and namespace = 'default';
14 |
15 |
--------------------------------------------------------------------------------
/test_files/pod/pull-backoff.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: image-that-doesnt-exist
5 | labels:
6 | role: myrole
7 | spec:
8 | containers:
9 | - name: image-that-doesnt-exist
10 | image: thisdoesntexist5930205867573
11 | ports:
12 | - name: web
13 | containerPort: 80
14 | protocol: TCP
15 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Binaries for programs and plugins
2 | *.exe
3 | *.exe~
4 | *.dll
5 | *.so
6 | *.dylib
7 |
8 | # Test binary, built with `go test -c`
9 | *.test
10 |
11 | # Output of the go coverage tool, specifically when used with LiteIDE
12 | *.out
13 |
14 | # Dependency directories (remove the comment below to include it)
15 | # vendor/
16 |
17 | .vscode/*
18 | .idea
19 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_deployment/test-not-found-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | status_replicas,
5 | ready_replicas,
6 | updated_replicas,
7 | available_replicas,
8 | unavailable_replicas,
9 | age(current_timestamp, creation_timestamp)
10 | from
11 | kubernetes.kubernetes_deployment
12 | where
13 | name = '' and namespace = '';
14 |
15 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_service_account/test-not-found-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | jsonb_array_length(secrets) as secrets,
5 | age(current_timestamp, creation_timestamp)
6 | from
7 | kubernetes.kubernetes_service_account
8 | where
9 | name = 'jenkins_123_123'
10 | and namespace = 'default'
11 | order by
12 | namespace,
13 | name;
14 |
15 |
--------------------------------------------------------------------------------
/test_files/job/pi-with-timeout.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: pi-with-timeout
5 | spec:
6 | backoffLimit: 5
7 | activeDeadlineSeconds: 100
8 | template:
9 | spec:
10 | containers:
11 | - name: pi
12 | image: perl
13 | command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
14 | restartPolicy: Never
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod/pull-backoff.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: image-that-doesnt-exist
5 | labels:
6 | role: myrole
7 | spec:
8 | containers:
9 | - name: image-that-doesnt-exist
10 | image: thisdoesntexist5930205867573
11 | ports:
12 | - name: web
13 | containerPort: 80
14 | protocol: TCP
15 |
--------------------------------------------------------------------------------
/test_files/application/guestbook/mongo-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: mongo
5 | labels:
6 | app.kubernetes.io/name: mongo
7 | app.kubernetes.io/component: backend
8 | spec:
9 | ports:
10 | - port: 27017
11 | targetPort: 27017
12 | selector:
13 | app.kubernetes.io/name: mongo
14 | app.kubernetes.io/component: backend
15 |
--------------------------------------------------------------------------------
/.github/workflows/add-issue-to-project.yml:
--------------------------------------------------------------------------------
1 | name: Assign Issue to Project
2 |
3 | on:
4 | issues:
5 | types: [opened]
6 |
7 | jobs:
8 | add-to-project:
9 | uses: turbot/steampipe-workflows/.github/workflows/assign-issue-to-project.yml@main
10 | with:
11 | issue_number: ${{ github.event.issue.number }}
12 | repository: ${{ github.repository }}
13 | secrets: inherit
14 |
--------------------------------------------------------------------------------
/k8s-test/tests/root_containers/test-run-as-user-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "pod4",
4 | "namespace": "default",
5 | "run_as_user": 1000,
6 | "security_context": {
7 | "runAsUser": 1000
8 | }
9 | },
10 | {
11 | "name": "pod9",
12 | "namespace": "default",
13 | "run_as_user": 1000,
14 | "security_context": {
15 | "runAsUser": 1000
16 | }
17 | }
18 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_daemonset/test-get-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | desired_number_scheduled as desired,
5 | current_number_scheduled as current,
6 | number_ready as ready,
7 | number_available as available,
8 | selector
9 | from
10 | kubernetes.kubernetes_daemonset
11 | where
12 | name = 'prometheus-node-exporter'
13 | and namespace = 'monitoring';
14 |
15 |
--------------------------------------------------------------------------------
/k8s-test/tests/root_containers/test-run-as-user-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | security_context,
5 | (security_context -> 'runAsUser') as run_as_user
6 | from
7 | kubernetes.kubernetes_pod
8 | where
9 | name like '%pod%'
10 | and security_context::jsonb ? 'runAsUser'
11 | and (security_context -> 'runAsUser')::int > 0
12 | order by
13 | name,
14 | namespace;
15 |
16 |
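17 | -- Notes: security_context::jsonb ? 'runAsUser' keeps only pods whose pod-level
18 | -- security context sets runAsUser explicitly, and the ::int cast filters for a
19 | -- non-root UID (greater than 0), matching pod4 and pod9 in the expected output.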
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_daemonset/test-not-found-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | desired_number_scheduled as desired,
5 | current_number_scheduled as current,
6 | number_ready as ready,
7 | number_available as available,
8 | selector,
9 | age(current_timestamp, creation_timestamp)
10 | from
11 | kubernetes.kubernetes_daemonset
12 | where
13 | name = '' and namespace ='';
14 |
15 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_service/test-get-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "cluster_ip": "10.108.144.78",
4 | "cluster_ips": ["10.108.144.78"],
5 | "name": "jenkins",
6 | "namespace": "default",
7 | "ports": [
8 | {
9 | "port": 80,
10 | "protocol": "TCP",
11 | "targetPort": 9376
12 | }
13 | ],
14 | "selector": {
15 | "app": "MyApp"
16 | },
17 | "type": "ClusterIP"
18 | }
19 | ]
20 |
--------------------------------------------------------------------------------
/k8s-test/tests/root_containers/test-run-as-non-root-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | security_context,
5 | (security_context -> 'runAsNonRoot') as run_as_non_root
6 | from
7 | kubernetes.kubernetes_pod
8 | where
9 | name like '%pod%'
10 | and security_context::jsonb ? 'runAsNonRoot'
11 | and (security_context -> 'runAsNonRoot')::bool
12 | order by
13 | name,
14 | namespace;
15 |
16 |
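17 | -- Notes: the ::bool cast on security_context -> 'runAsNonRoot' keeps only pods
18 | -- where the flag is set to true at the pod level (pod1 and pod7 in the expected output).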
--------------------------------------------------------------------------------
/test_files/controllers/replication.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ReplicationController
3 | metadata:
4 | name: nginx
5 | spec:
6 | replicas: 3
7 | selector:
8 | app: nginx
9 | template:
10 | metadata:
11 | name: nginx
12 | labels:
13 | app: nginx
14 | spec:
15 | containers:
16 | - name: nginx
17 | image: nginx
18 | ports:
19 | - containerPort: 80
20 |
--------------------------------------------------------------------------------
/k8s-test/tests/root_containers/test-run-as-non-root-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "pod1",
4 | "namespace": "default",
5 | "run_as_non_root": true,
6 | "security_context": {
7 | "runAsNonRoot": true
8 | }
9 | },
10 | {
11 | "name": "pod7",
12 | "namespace": "default",
13 | "run_as_non_root": true,
14 | "security_context": {
15 | "runAsNonRoot": true
16 | }
17 | }
18 | ]
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature-request---new-table.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request - New table
3 | about: Suggest a new table for this project
4 | title: Add table kubernetes_
5 | labels: enhancement, new table
6 | assignees: ''
7 |
8 | ---
9 |
10 | **References**
11 | Add any related links that will help us understand the resource, including vendor documentation, related Kubernetes issues, and Go SDK documentation.
12 |
--------------------------------------------------------------------------------
/test_files/ingress/minimal-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: minimal-ingress
5 | annotations:
6 | nginx.ingress.kubernetes.io/rewrite-target: /
7 | spec:
8 | rules:
9 | - http:
10 | paths:
11 | - path: /testpath
12 | pathType: Prefix
13 | backend:
14 | service:
15 | name: test
16 | port:
17 | number: 80
18 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_ingress/minimal-ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: minimal-ingress
5 | annotations:
6 | nginx.ingress.kubernetes.io/rewrite-target: /
7 | spec:
8 | rules:
9 | - http:
10 | paths:
11 | - path: /testpath
12 | pathType: Prefix
13 | backend:
14 | service:
15 | name: test
16 | port:
17 | number: 80
18 |
--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
1 | name: Stale Issues and PRs
2 | on:
3 | schedule:
4 | - cron: "30 23 * * *"
5 | workflow_dispatch:
6 | inputs:
7 | dryRun:
8 | description: Set to true for a dry run
9 | required: false
10 | default: "false"
11 | type: string
12 |
13 | jobs:
14 | stale_workflow:
15 | uses: turbot/steampipe-workflows/.github/workflows/stale.yml@main
16 | with:
17 | dryRun: ${{ github.event.inputs.dryRun }}
18 |
--------------------------------------------------------------------------------
/.github/workflows/steampipe-anywhere.yml:
--------------------------------------------------------------------------------
1 | name: Release Steampipe Anywhere Components
2 |
3 | on:
4 | push:
5 | tags:
6 | - 'v*'
7 |
8 | workflow_dispatch:
9 | inputs:
10 | version:
11 | description: "The version to release (must be prefixed with 'v')"
12 | required: true
13 |
14 | jobs:
15 | anywhere_publish_workflow:
16 | uses: turbot/steampipe-workflows/.github/workflows/steampipe-anywhere-kubernetes.yml@main
17 | secrets: inherit
18 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_deployment/nginx-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx-deployment-test
5 | labels:
6 | app: nginx
7 | spec:
8 | replicas: 3
9 | selector:
10 | matchLabels:
11 | app: nginx
12 | template:
13 | metadata:
14 | labels:
15 | app: nginx
16 | spec:
17 | containers:
18 | - name: nginx
19 | image: nginx:1.14.2
20 | ports:
21 | - containerPort: 80
22 |
--------------------------------------------------------------------------------
/k8s-test/tests/root_containers/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | # delete rootContainersFAILED.yaml
2 | resource "null_resource" "root_containers_failed" {
3 | provisioner "local-exec" {
4 | command = "kubectl delete -f ${path.cwd}/rootContainersFAILED.yaml"
5 | }
6 | }
7 |
8 | # delete rootContainersPASSED.yaml
9 | resource "null_resource" "root_containers_passed" {
10 | provisioner "local-exec" {
11 | command = "kubectl delete -f ${path.cwd}/rootContainersPASSED.yaml"
12 | }
13 | }
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/test_files/volume/pv-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: task-pv-pod
5 | spec:
6 | volumes:
7 | - name: task-pv-storage
8 | persistentVolumeClaim:
9 | claimName: task-pv-claim
10 | containers:
11 | - name: task-pv-container
12 | image: nginx
13 | ports:
14 | - containerPort: 80
15 | name: "http-server"
16 | volumeMounts:
17 | - mountPath: "/usr/share/nginx/html"
18 | name: task-pv-storage
19 |
20 |
21 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod/test-get-naked-pod-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | namespace,
3 | name,
4 | phase,
5 | -- age(current_timestamp, creation_timestamp),
6 | -- pod_ip,
7 | node_name,
8 | jsonb_array_length(containers) as container_count,
9 | jsonb_array_length(init_containers) as init_container_count,
10 | jsonb_array_length(ephemeral_containers) as ephemeral_container_count
11 | from
12 | kubernetes_pod
13 | where
14 | name = 'static-web'
15 | order by
16 | namespace,
17 | name;
18 |
19 |
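20 | -- Notes: jsonb_array_length returns null when the underlying column is null, which
21 | -- is why init_container_count and ephemeral_container_count are null in the
22 | -- expected output for this single-container pod.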
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_config_map/config_map.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: game-demo
5 | data:
6 | # property-like keys; each key maps to a simple value
7 | player_initial_lives: "3"
8 | ui_properties_file_name: "user-interface.properties"
9 |
10 | # file-like keys
11 | game.properties: |
12 | enemy.types=aliens,monsters
13 | player.maximum-lives=5
14 | user-interface.properties: |
15 | color.good=purple
16 | color.bad=yellow
17 | allow.textmode=true
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_replicaset/frontend.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: ReplicaSet
3 | metadata:
4 | name: frontend
5 | labels:
6 | app: guestbook
7 | tier: frontend
8 | spec:
9 | # modify replicas according to your case
10 | replicas: 3
11 | selector:
12 | matchLabels:
13 | tier: frontend
14 | template:
15 | metadata:
16 | labels:
17 | tier: frontend
18 | spec:
19 | containers:
20 | - name: php-redis
21 | image: gcr.io/google_samples/gb-frontend:v3
22 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cronjob/cronjob.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: CronJob
3 | metadata:
4 | name: hello
5 | spec:
6 | schedule: "* * * * *"
7 | jobTemplate:
8 | spec:
9 | template:
10 | spec:
11 | containers:
12 | - name: hello
13 | image: busybox
14 | imagePullPolicy: IfNotPresent
15 | command:
16 | - /bin/sh
17 | - -c
18 | - date; echo Hello from the Kubernetes cluster
19 | restartPolicy: OnFailure
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_stateful_set/test-get-query.sql:
--------------------------------------------------------------------------------
1 | select
2 | name,
3 | namespace,
4 | service_name,
5 | replicas,
6 | selector,
7 | selector_query,
8 | collision_count,
9 | current_replicas,
10 | observed_generation,
11 | pod_management_policy,
12 | ready_replicas,
13 | revision_history_limit,
14 | updated_replicas,
15 | update_strategy
16 | from
17 | kubernetes.kubernetes_stateful_set
18 | where
19 | name = 'web'
20 | and namespace = 'default'
21 | order by
22 | namespace,
23 | name;
24 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "naked-pod" {
2 | provisioner "local-exec" {
3 | command = "kubectl delete -f ${path.cwd}/naked-pod.yml"
4 | }
5 | }
6 |
7 | resource "null_resource" "privileged-pod" {
8 | provisioner "local-exec" {
9 | command = "kubectl delete -f ${path.cwd}/privileged-pod.yml"
10 | }
11 | }
12 |
13 | resource "null_resource" "pull-backoff" {
14 | provisioner "local-exec" {
15 | command = "kubectl delete -f ${path.cwd}/pull-backoff.yml"
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/test_files/application/guestbook/frontend-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: frontend
5 | labels:
6 | app.kubernetes.io/name: guestbook
7 | app.kubernetes.io/component: frontend
8 | spec:
9 | # if your cluster supports it, uncomment the following to automatically create
10 | # an external load-balanced IP for the frontend service.
11 | # type: LoadBalancer
12 | ports:
13 | - port: 80
14 | selector:
15 | app.kubernetes.io/name: guestbook
16 | app.kubernetes.io/component: frontend
17 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_ingress/test-list-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "class": null,
4 | "name": "minimal-ingress",
5 | "namespace": "default",
6 | "rules": [
7 | {
8 | "http": {
9 | "paths": [
10 | {
11 | "backend": {
12 | "serviceName": "test",
13 | "servicePort": 80
14 | },
15 | "path": "/testpath",
16 | "pathType": "Prefix"
17 | }
18 | ]
19 | }
20 | }
21 | ]
22 | }
23 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_role/variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "create-role" {
2 | provisioner "local-exec" {
3 | command = "kubectl apply -f ${path.cwd}/role.yaml"
4 | }
5 | }
6 |
7 | resource "null_resource" "delay" {
8 | provisioner "local-exec" {
9 | command = "sleep 45"
10 | }
11 | }
12 |
13 |
14 | # Delay to allow the resource creation to complete
15 | resource "null_resource" "get_role" {
16 | depends_on = [
17 | null_resource.delay
18 | ]
19 | provisioner "local-exec" {
20 | command = "kubectl get roles"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cronjob/variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "create-cronjob" {
2 | provisioner "local-exec" {
3 | command = "kubectl apply -f ${path.cwd}/cronjob.yaml"
4 | }
5 | }
6 |
7 | resource "null_resource" "delay" {
8 | provisioner "local-exec" {
9 | command = "sleep 45"
10 | }
11 | }
12 |
13 |
14 | # Delay to allow the resource creation to complete
15 | resource "null_resource" "get_cronjob" {
16 | depends_on = [
17 | null_resource.delay
18 | ]
19 | provisioner "local-exec" {
20 | command = "kubectl get cronjobs"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_deployment/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | # https://github.com/bridgecrewio/checkov/blob/master/checkov/kubernetes/checks/HostPort.py
2 | # https://github.com/bridgecrewio/checkov/tree/master/tests/kubernetes/checks/example_HostPort
3 | # https://github.com/bridgecrewio/checkov/blob/master/tests/kubernetes/checks/test_HostPort.py
4 |
5 | # delete nginx-deployment.yaml
6 | resource "null_resource" "delete_deployment" {
7 | provisioner "local-exec" {
8 | command = "kubectl delete -f ${path.cwd}/nginx-deployment.yaml"
9 | }
10 | }
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_service/variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "create-service" {
2 | provisioner "local-exec" {
3 | command = "kubectl apply -f ${path.cwd}/service.yaml"
4 | }
5 | }
6 |
7 | resource "null_resource" "delay" {
8 | provisioner "local-exec" {
9 | command = "sleep 45"
10 | }
11 | }
12 |
13 |
14 | # Delay to allow the resource creation to complete
15 | resource "null_resource" "get_services" {
16 | depends_on = [
17 | null_resource.delay
18 | ]
19 | provisioner "local-exec" {
20 | command = "kubectl get services"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/test_files/pod/privileged-pod.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
2 | kind: Deployment
3 | metadata:
4 | name: nginx-deployment
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: nginx
9 | replicas: 1 # tells deployment to run 1 pod matching the template
10 | template:
11 | metadata:
12 | labels:
13 | app: nginx
14 | spec:
15 | containers:
16 | - name: nginx
17 | image: nginx:1.14.2
18 | ports:
19 | - containerPort: 80
20 | securityContext:
21 | privileged: true
22 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_replicaset/variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "frontend_replicaset" {
2 | provisioner "local-exec" {
3 | command = "kubectl apply -f ${path.cwd}/frontend.yaml"
4 | }
5 | }
6 |
7 | resource "null_resource" "delay" {
8 | provisioner "local-exec" {
9 | command = "sleep 45"
10 | }
11 | }
12 |
13 |
14 | # Delay to allow the resource creation to complete
15 | resource "null_resource" "get_pods" {
16 | depends_on = [
17 | null_resource.delay
18 | ]
19 | provisioner "local-exec" {
20 | command = "kubectl get replicasets"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_config_map/variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "create_config_map" {
2 | provisioner "local-exec" {
3 | command = "kubectl apply -f ${path.cwd}/config_map.yaml"
4 | }
5 | }
6 |
7 | resource "null_resource" "delay" {
8 | provisioner "local-exec" {
9 | command = "sleep 45"
10 | }
11 | }
12 |
13 |
14 | # Delay to allow the resource creation to complete
15 | resource "null_resource" "get_config_map" {
16 | depends_on = [
17 | null_resource.delay
18 | ]
19 | provisioner "local-exec" {
20 | command = "kubectl get configmaps"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_resource_quota/variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "create_quota" {
2 | provisioner "local-exec" {
3 | command = "kubectl apply -f ${path.cwd}/resource_quota.yaml"
4 | }
5 | }
6 |
7 | resource "null_resource" "delay" {
8 | provisioner "local-exec" {
9 | command = "sleep 45"
10 | }
11 | }
12 |
13 |
14 | # Delay to allow the resource creation to complete
15 | resource "null_resource" "get_quota" {
16 | depends_on = [
17 | null_resource.delay
18 | ]
19 | provisioner "local-exec" {
20 | command = "kubectl describe quota"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod/privileged-pod.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
2 | kind: Deployment
3 | metadata:
4 | name: nginx-deployment
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: nginx
9 | replicas: 1 # tells deployment to run 1 pod matching the template
10 | template:
11 | metadata:
12 | labels:
13 | app: nginx
14 | spec:
15 | containers:
16 | - name: nginx
17 | image: nginx:1.14.2
18 | ports:
19 | - containerPort: 80
20 | securityContext:
21 | privileged: true
22 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_role_binding/variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "create_role_binding" {
2 | provisioner "local-exec" {
3 | command = "kubectl apply -f ${path.cwd}/role_binding.yaml"
4 | }
5 | }
6 |
7 | resource "null_resource" "delay" {
8 | provisioner "local-exec" {
9 | command = "sleep 45"
10 | }
11 | }
12 |
13 |
14 | # Delay to allow the resource creation to complete
15 | resource "null_resource" "get_role_binding" {
16 | depends_on = [
17 | null_resource.delay
18 | ]
19 | provisioner "local-exec" {
20 | command = "kubectl get rolebindings"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_stateful_set/variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "create-statefulset" {
2 | provisioner "local-exec" {
3 | command = "kubectl apply -f ${path.cwd}/statefulset.yaml"
4 | }
5 | }
6 |
7 | resource "null_resource" "delay" {
8 | provisioner "local-exec" {
9 | command = "sleep 45"
10 | }
11 | }
12 |
13 |
14 | # Delay to allow the resource creation to complete
15 | resource "null_resource" "get_statefulsets" {
16 | depends_on = [
17 | null_resource.delay
18 | ]
19 | provisioner "local-exec" {
20 | command = "kubectl get statefulsets"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/test_files/ingress/ingress-resource-backend.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: ingress-resource-backend
5 | spec:
6 | defaultBackend:
7 | resource:
8 | apiGroup: k8s.example.com
9 | kind: StorageBucket
10 | name: static-assets
11 | rules:
12 | - http:
13 | paths:
14 | - path: /icons
15 | pathType: ImplementationSpecific
16 | backend:
17 | resource:
18 | apiGroup: k8s.example.com
19 | kind: StorageBucket
20 | name: icon-assets
21 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_storage_class/variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "create-storageclass" {
2 | provisioner "local-exec" {
3 | command = "kubectl apply -f ${path.cwd}/storageclass.yaml"
4 | }
5 | }
6 |
7 | resource "null_resource" "delay" {
8 | provisioner "local-exec" {
9 | command = "sleep 45"
10 | }
11 | }
12 |
13 |
14 | # Delay to allow the resource creation to complete
15 | resource "null_resource" "get_storageclass" {
16 | depends_on = [
17 | null_resource.delay
18 | ]
19 | provisioner "local-exec" {
20 | command = "kubectl get storageclass"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role/variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "create-cluster-role" {
2 | provisioner "local-exec" {
3 | command = "kubectl apply -f ${path.cwd}/cluster_role.yaml"
4 | }
5 | }
6 |
7 | resource "null_resource" "delay" {
8 | provisioner "local-exec" {
9 | command = "sleep 45"
10 | }
11 | }
12 |
13 |
14 | # Delay to allow the resource creation to complete
15 | resource "null_resource" "get_role_jenkins" {
16 | depends_on = [
17 | null_resource.delay
18 | ]
19 | provisioner "local-exec" {
20 | command = "kubectl get clusterrole jenkins"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 | contact_links:
3 | - name: Questions
4 | url: https://turbot.com/community/join
5 | about: GitHub issues in this repository are only intended for bug reports and feature requests. Other issues will be closed. Please ask and answer questions through the Steampipe Slack community.
6 | - name: Steampipe CLI Bug Reports and Feature Requests
7 | url: https://github.com/turbot/steampipe/issues/new/choose
8 | about: Steampipe CLI has its own codebase. Bug reports and feature requests for those pieces of functionality should be directed to that repository.
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_ingress/ingress-resource-backend.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: ingress-resource-backend
5 | spec:
6 | defaultBackend:
7 | resource:
8 | apiGroup: k8s.example.com
9 | kind: StorageBucket
10 | name: static-assets
11 | rules:
12 | - http:
13 | paths:
14 | - path: /icons
15 | pathType: ImplementationSpecific
16 | backend:
17 | resource:
18 | apiGroup: k8s.example.com
19 | kind: StorageBucket
20 | name: icon-assets
21 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_service_account/variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "create-service-account" {
2 | provisioner "local-exec" {
3 | command = "kubectl apply -f ${path.cwd}/service-account.yaml"
4 | }
5 | }
6 |
7 | resource "null_resource" "delay" {
8 | provisioner "local-exec" {
9 | command = "sleep 45"
10 | }
11 | }
12 |
13 |
14 | # Delay to allow the resource creation to complete
15 | resource "null_resource" "get_serviceaccounts" {
16 | depends_on = [
17 | null_resource.delay
18 | ]
19 | provisioner "local-exec" {
20 | command = "kubectl get serviceaccounts"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_deployment/variables.tf:
--------------------------------------------------------------------------------
1 |
2 | resource "null_resource" "create_deployment" {
3 | provisioner "local-exec" {
4 | command = "kubectl apply -f ${path.cwd}/nginx-deployment.yaml"
5 | }
6 | }
7 |
8 | resource "null_resource" "delay" {
9 | provisioner "local-exec" {
10 | command = "sleep 60"
11 | }
12 | }
13 |
14 |
15 | # Delay to allow the resource creation to complete
16 | resource "null_resource" "get_deployments" {
17 | depends_on = [
18 | null_resource.delay
19 | ]
20 | provisioner "local-exec" {
21 | command = "kubectl get deployments"
22 | }
23 | }
24 |
25 |
26 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_ingress/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "delete_minimal_ingress" {
2 | provisioner "local-exec" {
3 | command = "kubectl delete -f ${path.cwd}/minimal-ingress.yaml"
4 | }
5 | }
6 |
7 | resource "null_resource" "delete_ingress_wildcard_host" {
8 | provisioner "local-exec" {
9 | command = "kubectl delete -f ${path.cwd}/ingress-wildcard-host.yaml"
10 | }
11 | }
12 |
13 | resource "null_resource" "delete_ingress_backend" {
14 | provisioner "local-exec" {
15 | command = "kubectl delete -f ${path.cwd}/ingress-resource-backend.yaml"
16 | }
17 | }
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role_binding/variables.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "create-cluster-role-binding" {
2 | provisioner "local-exec" {
3 | command = "kubectl apply -f ${path.cwd}/cluster_role.yaml"
4 | }
5 | }
6 |
7 | resource "null_resource" "delay" {
8 | provisioner "local-exec" {
9 | command = "sleep 45"
10 | }
11 | }
12 |
13 |
14 | # Delay in order to get te resource creation complete
15 | resource "null_resource" "get_cluster_role_binding_jenkins" {
16 | depends_on = [
17 | null_resource.delay
18 | ]
19 | provisioner "local-exec" {
20 | command = "kubectl get clusterrolebindings jenkins"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/test_files/ingress/ingress-wildcard-host.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: ingress-wildcard-host
5 | spec:
6 | rules:
7 | - host: "foo.bar.com"
8 | http:
9 | paths:
10 | - pathType: Prefix
11 | path: "/bar"
12 | backend:
13 | service:
14 | name: service1
15 | port:
16 | number: 80
17 | - host: "*.foo.com"
18 | http:
19 | paths:
20 | - pathType: Prefix
21 | path: "/foo"
22 | backend:
23 | service:
24 | name: service2
25 | port:
26 | number: 80
27 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_ingress/ingress-wildcard-host.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: ingress-wildcard-host
5 | spec:
6 | rules:
7 | - host: "foo.bar.com"
8 | http:
9 | paths:
10 | - pathType: Prefix
11 | path: "/bar"
12 | backend:
13 | service:
14 | name: service1
15 | port:
16 | number: 80
17 | - host: "*.foo.com"
18 | http:
19 | paths:
20 | - pathType: Prefix
21 | path: "/foo"
22 | backend:
23 | service:
24 | name: service2
25 | port:
26 | number: 80
27 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_stateful_set/test-get-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "collision_count": 0,
4 | "current_replicas": 2,
5 | "name": "web",
6 | "namespace": "default",
7 | "observed_generation": 1,
8 | "pod_management_policy": "OrderedReady",
9 | "ready_replicas": 2,
10 | "replicas": 2,
11 | "selector": {
12 | "matchLabels": {
13 | "app": "nginx"
14 | }
15 | },
16 | "selector_query": "app=nginx",
17 | "revision_history_limit": 10,
18 | "service_name": "nginx",
19 | "update_strategy": {
20 | "rollingUpdate": {
21 | "partition": 0
22 | },
23 | "type": "RollingUpdate"
24 | },
25 | "updated_replicas": 2
26 | }
27 | ]
28 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_pod/variables.tf:
--------------------------------------------------------------------------------
1 | # locals {
2 | # filepath = "${path.cwd}/naked-pod.yml"
3 | # }
4 |
5 | # output "filepath" {
6 | # value = local.filepath
7 | # }
8 |
9 | resource "null_resource" "naked-pod" {
10 | provisioner "local-exec" {
11 | command = "kubectl create -f ${path.cwd}/naked-pod.yml"
12 | }
13 | }
14 |
15 | resource "null_resource" "privileged-pod" {
16 | provisioner "local-exec" {
17 | command = "kubectl create -f ${path.cwd}/privileged-pod.yml"
18 | }
19 | }
20 |
21 | resource "null_resource" "pull-backoff" {
22 | provisioner "local-exec" {
23 | command = "kubectl create -f ${path.cwd}/pull-backoff.yml"
24 | }
25 | }
26 |
27 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **Steampipe version (`steampipe -v`)**
14 | Example: v0.3.0
15 |
16 | **Plugin version (`steampipe plugin list`)**
17 | Example: v0.5.0
18 |
19 | **To reproduce**
20 | Steps to reproduce the behavior (please include relevant code and/or commands).
21 |
22 | **Expected behavior**
23 | A clear and concise description of what you expected to happen.
24 |
25 | **Additional context**
26 | Add any other context about the problem here.
27 |
--------------------------------------------------------------------------------
/k8s-test/tests/host_port/nginx-app-PASSED.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: my-nginx-svc
5 | labels:
6 | app: nginx
7 | spec:
8 | type: LoadBalancer
9 | ports:
10 | - port: 80
11 | selector:
12 | app: nginx
13 |
14 | ---
15 | apiVersion: apps/v1
16 | kind: Deployment
17 | metadata:
18 | name: my-nginx
19 | labels:
20 | app: nginx
21 | spec:
22 | replicas: 3
23 | selector:
24 | matchLabels:
25 | app: nginx
26 | template:
27 | metadata:
28 | labels:
29 | app: nginx
30 | spec:
31 | containers:
32 | - name: nginx
33 | image: nginx:1.14.2
34 | ports:
35 | - containerPort: 80
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: enhancement
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # To get started with Dependabot version updates, you'll need to specify which
2 | # package ecosystems to update and where the package manifests are located.
3 | # Please see the documentation for all configuration options:
4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
5 |
6 | version: 2
7 | updates:
8 | - package-ecosystem: "gomod" # See documentation for possible values
9 | directory: "/" # Location of package manifests
10 | schedule:
11 | interval: "weekly"
12 | pull-request-branch-name:
13 | separator: "-"
14 | assignees:
15 | - "misraved"
16 | - "madhushreeray30"
17 | labels:
18 | - "dependencies"
19 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_config_map/test-get-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "key": "game.properties",
4 | "name": "game-demo",
5 | "namespace": "default",
6 | "value": "enemy.types=aliens,monsters\nplayer.maximum-lives=5\n"
7 | },
8 | {
9 | "key": "player_initial_lives",
10 | "name": "game-demo",
11 | "namespace": "default",
12 | "value": "3"
13 | },
14 | {
15 | "key": "ui_properties_file_name",
16 | "name": "game-demo",
17 | "namespace": "default",
18 | "value": "user-interface.properties"
19 | },
20 | {
21 | "key": "user-interface.properties",
22 | "name": "game-demo",
23 | "namespace": "default",
24 | "value": "color.good=purple\ncolor.bad=yellow\nallow.textmode=true\n"
25 | }
26 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_config_map/test-list-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "key": "game.properties",
4 | "name": "game-demo",
5 | "namespace": "default",
6 | "value": "enemy.types=aliens,monsters\nplayer.maximum-lives=5\n"
7 | },
8 | {
9 | "key": "player_initial_lives",
10 | "name": "game-demo",
11 | "namespace": "default",
12 | "value": "3"
13 | },
14 | {
15 | "key": "ui_properties_file_name",
16 | "name": "game-demo",
17 | "namespace": "default",
18 | "value": "user-interface.properties"
19 | },
20 | {
21 | "key": "user-interface.properties",
22 | "name": "game-demo",
23 | "namespace": "default",
24 | "value": "color.good=purple\ncolor.bad=yellow\nallow.textmode=true\n"
25 | }
26 | ]
--------------------------------------------------------------------------------
/test_files/pod_security_policy/privileged-psp.yaml:
--------------------------------------------------------------------------------
1 |
2 | # least restrictive policy you can create, equivalent to not using the pod security policy admission controller
3 | apiVersion: policy/v1beta1
4 | kind: PodSecurityPolicy
5 | metadata:
6 | name: privileged
7 | annotations:
8 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
9 | spec:
10 | privileged: true
11 | allowPrivilegeEscalation: true
12 | allowedCapabilities:
13 | - '*'
14 | volumes:
15 | - '*'
16 | hostNetwork: true
17 | hostPorts:
18 | - min: 0
19 | max: 65535
20 | hostIPC: true
21 | hostPID: true
22 | runAsUser:
23 | rule: 'RunAsAny'
24 | seLinux:
25 | rule: 'RunAsAny'
26 | supplementalGroups:
27 | rule: 'RunAsAny'
28 | fsGroup:
29 | rule: 'RunAsAny'
30 |
--------------------------------------------------------------------------------
/test_files/network_policy/test-network-policy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: test-network-policy
5 | namespace: default
6 | spec:
7 | podSelector:
8 | matchLabels:
9 | role: db
10 | policyTypes:
11 | - Ingress
12 | - Egress
13 | ingress:
14 | - from:
15 | - ipBlock:
16 | cidr: 172.17.0.0/16
17 | except:
18 | - 172.17.1.0/24
19 | - namespaceSelector:
20 | matchLabels:
21 | project: myproject
22 | - podSelector:
23 | matchLabels:
24 | role: frontend
25 | ports:
26 | - protocol: TCP
27 | port: 6379
28 | egress:
29 | - to:
30 | - ipBlock:
31 | cidr: 10.0.0.0/24
32 | ports:
33 | - protocol: TCP
34 | port: 5978
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_daemonset/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | # https://github.com/bridgecrewio/checkov/blob/master/checkov/kubernetes/checks/HostPort.py
2 | # https://github.com/bridgecrewio/checkov/tree/master/tests/kubernetes/checks/example_HostPort
3 | # https://github.com/bridgecrewio/checkov/blob/master/tests/kubernetes/checks/test_HostPort.py
4 |
5 | # delete daemonset.yaml
6 | resource "null_resource" "delete_daemonset" {
7 | provisioner "local-exec" {
8 | command = "kubectl delete -f ${path.cwd}/daemonset.yaml"
9 | }
10 | }
11 |
12 | resource "null_resource" "delete-namespace_monitoring" {
13 | depends_on = [
14 | null_resource.delete_daemonset
15 | ]
16 | provisioner "local-exec" {
17 | command = "kubectl delete namespace monitoring"
18 | }
19 | }
20 |
21 |
22 |
23 |
24 |
--------------------------------------------------------------------------------
/test_files/application/guestbook/mongo-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: mongo
5 | labels:
6 | app.kubernetes.io/name: mongo
7 | app.kubernetes.io/component: backend
8 | spec:
9 | selector:
10 | matchLabels:
11 | app.kubernetes.io/name: mongo
12 | app.kubernetes.io/component: backend
13 | replicas: 1
14 | template:
15 | metadata:
16 | labels:
17 | app.kubernetes.io/name: mongo
18 | app.kubernetes.io/component: backend
19 | spec:
20 | containers:
21 | - name: mongo
22 | image: mongo:4.2
23 | args:
24 | - --bind_ip
25 | - 0.0.0.0
26 | resources:
27 | requests:
28 | cpu: 100m
29 | memory: 100Mi
30 | ports:
31 | - containerPort: 27017
32 |
--------------------------------------------------------------------------------
/k8s-test/tests/root_containers/variables.tf:
--------------------------------------------------------------------------------
1 | # deploy rootContainersFAILED.yaml
2 | resource "null_resource" "root_containers_failed" {
3 | provisioner "local-exec" {
4 | command = "kubectl create -f ${path.cwd}/rootContainersFAILED.yaml"
5 | }
6 | }
7 |
8 | # deploy rootContainersPASSED.yaml
9 | resource "null_resource" "root_containers_passed" {
10 | provisioner "local-exec" {
11 | command = "kubectl create -f ${path.cwd}/rootContainersPASSED.yaml"
12 | }
13 | }
14 |
15 | resource "null_resource" "delay" {
16 | provisioner "local-exec" {
17 | command = "sleep 60"
18 | }
19 | }
20 |
21 |
22 | # Delay to allow the resource creation to complete
23 | resource "null_resource" "get_pods" {
24 | depends_on = [
25 | null_resource.delay
26 | ]
27 | provisioner "local-exec" {
28 | command = "kubectl get pods"
29 | }
30 | }
31 |
32 |
33 |
--------------------------------------------------------------------------------
/.goreleaser.yml:
--------------------------------------------------------------------------------
1 | # This is an example goreleaser.yaml file with some sane defaults.
2 | # Make sure to check the documentation at http://goreleaser.com
3 | before:
4 | hooks:
5 | - go mod tidy
6 | builds:
7 | - env:
8 | - CGO_ENABLED=0
9 | - GO111MODULE=on
10 | - GOPRIVATE=github.com/turbot
11 | goos:
12 | - linux
13 | - darwin
14 |
15 | goarch:
16 | - amd64
17 | - arm64
18 |
19 | id: "steampipe"
20 | binary: "{{ .ProjectName }}.plugin"
21 | flags:
22 | - -tags=netgo
23 |
24 | archives:
25 | - format: gz
26 | name_template: "{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}"
27 | files:
28 | - none*
29 | checksum:
30 | name_template: "{{ .ProjectName }}_{{ .Version }}_SHA256SUMS"
31 | algorithm: sha256
32 | changelog:
33 | sort: asc
34 | filters:
35 | exclude:
36 | - "^docs:"
37 | - "^test:"
38 |
--------------------------------------------------------------------------------
/test_files/application/guestbook/frontend-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: frontend
5 | labels:
6 | app.kubernetes.io/name: guestbook
7 | app.kubernetes.io/component: frontend
8 | spec:
9 | selector:
10 | matchLabels:
11 | app.kubernetes.io/name: guestbook
12 | app.kubernetes.io/component: frontend
13 | replicas: 3
14 | template:
15 | metadata:
16 | labels:
17 | app.kubernetes.io/name: guestbook
18 | app.kubernetes.io/component: frontend
19 | spec:
20 | containers:
21 | - name: guestbook
22 | image: paulczar/gb-frontend:v5
23 | # image: gcr.io/google-samples/gb-frontend:v4
24 | resources:
25 | requests:
26 | cpu: 100m
27 | memory: 100Mi
28 | env:
29 | - name: GET_HOSTS_FROM
30 | value: dns
31 | ports:
32 | - containerPort: 80
33 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_ingress/test-get-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "class": null,
4 | "name": "ingress-wildcard-host",
5 | "namespace": "default",
6 | "rules": [
7 | {
8 | "host": "foo.bar.com",
9 | "http": {
10 | "paths": [
11 | {
12 | "backend": {
13 | "serviceName": "service1",
14 | "servicePort": 80
15 | },
16 | "path": "/bar",
17 | "pathType": "Prefix"
18 | }
19 | ]
20 | }
21 | },
22 | {
23 | "host": "*.foo.com",
24 | "http": {
25 | "paths": [
26 | {
27 | "backend": {
28 | "serviceName": "service2",
29 | "servicePort": 80
30 | },
31 | "path": "/foo",
32 | "pathType": "Prefix"
33 | }
34 | ]
35 | }
36 | }
37 | ]
38 | }
39 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_ingress/variables.tf:
--------------------------------------------------------------------------------
1 |
2 | resource "null_resource" "create_minimal_ingress" {
3 | provisioner "local-exec" {
4 | command = "kubectl apply -f ${path.cwd}/minimal-ingress.yaml"
5 | }
6 | }
7 |
8 | resource "null_resource" "create_ingress_wildcard_host" {
9 | provisioner "local-exec" {
10 | command = "kubectl apply -f ${path.cwd}/ingress-wildcard-host.yaml"
11 | }
12 | }
13 |
14 | resource "null_resource" "create_ingress_backend" {
15 | provisioner "local-exec" {
16 | command = "kubectl apply -f ${path.cwd}/ingress-resource-backend.yaml"
17 | }
18 | }
19 |
20 | resource "null_resource" "delay" {
21 | provisioner "local-exec" {
22 | command = "sleep 60"
23 | }
24 | }
25 |
26 |
27 | # Delay to allow the resource creation to complete
28 | resource "null_resource" "get_deployments" {
29 | depends_on = [
30 | null_resource.delay
31 | ]
32 | provisioner "local-exec" {
33 | command = "kubectl get ingresses"
34 | }
35 | }
36 |
37 |
38 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_stateful_set/statefulset.yaml:
--------------------------------------------------------------------------------
1 |
2 | ---
3 | apiVersion: v1
4 | kind: Service
5 | metadata:
6 | name: nginx
7 | labels:
8 | app: nginx
9 | spec:
10 | ports:
11 | - port: 80
12 | name: web
13 | clusterIP: None
14 | selector:
15 | app: nginx
16 |
17 | ---
18 | apiVersion: apps/v1
19 | kind: StatefulSet
20 | metadata:
21 | name: web
22 | spec:
23 | serviceName: "nginx"
24 | replicas: 2
25 | selector:
26 | matchLabels:
27 | app: nginx
28 | template:
29 | metadata:
30 | labels:
31 | app: nginx
32 | spec:
33 | containers:
34 | - name: nginx
35 | image: k8s.gcr.io/nginx-slim:0.8
36 | ports:
37 | - containerPort: 80
38 | name: web
39 | volumeMounts:
40 | - name: www
41 | mountPath: /usr/share/nginx/html
42 | volumeClaimTemplates:
43 | - metadata:
44 | name: www
45 | spec:
46 | accessModes: [ "ReadWriteOnce" ]
47 | resources:
48 | requests:
49 | storage: 1Gi
50 |
--------------------------------------------------------------------------------
/k8s-test/tests/host_port/posttest-variables.tf:
--------------------------------------------------------------------------------
1 | # https://github.com/bridgecrewio/checkov/blob/master/checkov/kubernetes/checks/HostPort.py
2 | # https://github.com/bridgecrewio/checkov/tree/master/tests/kubernetes/checks/example_HostPort
3 | # https://github.com/bridgecrewio/checkov/blob/master/tests/kubernetes/checks/test_HostPort.py
4 |
5 | # delete DS-node-exporter-FAILED
6 | resource "null_resource" "delete-ds-node-exporter-failed" {
7 | provisioner "local-exec" {
8 | command = "kubectl delete -f ${path.cwd}/DS-node-exporter-FAILED.yaml"
9 | }
10 | }
11 |
12 | # delete nginx-app-PASSED
13 | resource "null_resource" "delete-nginx-app-passed" {
14 | provisioner "local-exec" {
15 | command = "kubectl delete -f ${path.cwd}/nginx-app-PASSED.yaml"
16 | }
17 | }
18 |
19 | resource "null_resource" "delete-namespace_monitoring" {
20 | depends_on = [
21 | null_resource.delete-ds-node-exporter-failed
22 | ]
23 | provisioner "local-exec" {
24 | command = "kubectl delete namespace monitoring"
25 | }
26 | }
27 |
28 |
29 |
30 |
31 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_resource_quota/resource_quota.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: List
3 | items:
4 | - apiVersion: v1
5 | kind: ResourceQuota
6 | metadata:
7 | name: pods-high
8 | spec:
9 | hard:
10 | cpu: "1000"
11 | memory: 200Gi
12 | pods: "10"
13 | scopeSelector:
14 | matchExpressions:
15 | - operator: In
16 | scopeName: PriorityClass
17 | values: ["high"]
18 | - apiVersion: v1
19 | kind: ResourceQuota
20 | metadata:
21 | name: pods-medium
22 | spec:
23 | hard:
24 | cpu: "10"
25 | memory: 20Gi
26 | pods: "10"
27 | scopeSelector:
28 | matchExpressions:
29 | - operator: In
30 | scopeName: PriorityClass
31 | values: ["medium"]
32 | - apiVersion: v1
33 | kind: ResourceQuota
34 | metadata:
35 | name: pods-low
36 | spec:
37 | hard:
38 | cpu: "5"
39 | memory: 10Gi
40 | pods: "10"
41 | scopeSelector:
42 | matchExpressions:
43 | - operator: In
44 | scopeName: PriorityClass
45 | values: ["low"]
46 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_role/role.yaml:
--------------------------------------------------------------------------------
1 | # In GKE, you need to grant RBAC permissions first with
2 | # kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin [--user=|--group=]
3 |
4 | ---
5 | apiVersion: v1
6 | kind: ServiceAccount
7 | metadata:
8 | name: jenkins
9 |
10 | ---
11 | kind: Role
12 | apiVersion: rbac.authorization.k8s.io/v1
13 | metadata:
14 | name: jenkins
15 | rules:
16 | - apiGroups: [""]
17 | resources: ["pods"]
18 | verbs: ["create","delete","get","list","patch","update","watch"]
19 | - apiGroups: [""]
20 | resources: ["pods/exec"]
21 | verbs: ["create","delete","get","list","patch","update","watch"]
22 | - apiGroups: [""]
23 | resources: ["pods/log"]
24 | verbs: ["get","list","watch"]
25 | - apiGroups: [""]
26 | resources: ["events"]
27 | verbs: ["watch"]
28 | - apiGroups: [""]
29 | resources: ["secrets"]
30 | verbs: ["get"]
31 |
32 | ---
33 | apiVersion: rbac.authorization.k8s.io/v1
34 | kind: RoleBinding
35 | metadata:
36 | name: jenkins
37 | roleRef:
38 | apiGroup: rbac.authorization.k8s.io
39 | kind: Role
40 | name: jenkins
41 | subjects:
42 | - kind: ServiceAccount
43 | name: jenkins
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_role_binding/role_binding.yaml:
--------------------------------------------------------------------------------
1 | # In GKE, you need to grant RBAC permissions first with
2 | # kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin [--user=|--group=]
3 |
4 | ---
5 | apiVersion: v1
6 | kind: ServiceAccount
7 | metadata:
8 | name: jenkins
9 |
10 | ---
11 | kind: Role
12 | apiVersion: rbac.authorization.k8s.io/v1
13 | metadata:
14 | name: jenkins
15 | rules:
16 | - apiGroups: [""]
17 | resources: ["pods"]
18 | verbs: ["create","delete","get","list","patch","update","watch"]
19 | - apiGroups: [""]
20 | resources: ["pods/exec"]
21 | verbs: ["create","delete","get","list","patch","update","watch"]
22 | - apiGroups: [""]
23 | resources: ["pods/log"]
24 | verbs: ["get","list","watch"]
25 | - apiGroups: [""]
26 | resources: ["events"]
27 | verbs: ["watch"]
28 | - apiGroups: [""]
29 | resources: ["secrets"]
30 | verbs: ["get"]
31 |
32 | ---
33 | apiVersion: rbac.authorization.k8s.io/v1
34 | kind: RoleBinding
35 | metadata:
36 | name: jenkins
37 | roleRef:
38 | apiGroup: rbac.authorization.k8s.io
39 | kind: Role
40 | name: jenkins
41 | subjects:
42 | - kind: ServiceAccount
43 | name: jenkins
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_service_account/service-account.yaml:
--------------------------------------------------------------------------------
1 | # In GKE, you need to grant RBAC permissions first with
2 | # kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin [--user=|--group=]
3 |
4 | ---
5 | apiVersion: v1
6 | kind: ServiceAccount
7 | metadata:
8 | name: jenkins
9 |
10 | ---
11 | kind: Role
12 | apiVersion: rbac.authorization.k8s.io/v1
13 | metadata:
14 | name: jenkins
15 | rules:
16 | - apiGroups: [""]
17 | resources: ["pods"]
18 | verbs: ["create","delete","get","list","patch","update","watch"]
19 | - apiGroups: [""]
20 | resources: ["pods/exec"]
21 | verbs: ["create","delete","get","list","patch","update","watch"]
22 | - apiGroups: [""]
23 | resources: ["pods/log"]
24 | verbs: ["get","list","watch"]
25 | - apiGroups: [""]
26 | resources: ["events"]
27 | verbs: ["watch"]
28 | - apiGroups: [""]
29 | resources: ["secrets"]
30 | verbs: ["get"]
31 |
32 | ---
33 | apiVersion: rbac.authorization.k8s.io/v1
34 | kind: RoleBinding
35 | metadata:
36 | name: jenkins
37 | roleRef:
38 | apiGroup: rbac.authorization.k8s.io
39 | kind: Role
40 | name: jenkins
41 | subjects:
42 | - kind: ServiceAccount
43 | name: jenkins
--------------------------------------------------------------------------------
/kubernetes/connection_config.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import (
4 | "github.com/turbot/steampipe-plugin-sdk/v5/plugin"
5 | )
6 |
7 | type kubernetesConfig struct {
8 | ConfigPaths []string `hcl:"config_paths,optional"`
9 | ConfigPath *string `hcl:"config_path"`
10 | ConfigContext *string `hcl:"config_context"`
11 | CustomResourceTables []string `hcl:"custom_resource_tables,optional"`
12 | ManifestFilePaths []string `hcl:"manifest_file_paths,optional" steampipe:"watch"`
13 | SourceType *string `hcl:"source_type"`
14 | SourceTypes []string `hcl:"source_types,optional"`
15 | HelmRenderedCharts map[string]chartConfig `hcl:"helm_rendered_charts,optional"`
16 | }
17 |
18 | type chartConfig struct {
19 | ChartPath string `hcl:"chart_path" cty:"chart_path"`
20 | ValuesFilePaths []string `hcl:"values_file_paths,optional" cty:"values_file_paths"`
21 | }
22 |
23 | // GetConfig :: retrieve and cast connection config from query data
24 | func GetConfig(connection *plugin.Connection) kubernetesConfig {
25 | if connection == nil || connection.Config == nil {
26 | return kubernetesConfig{}
27 | }
28 | config, _ := connection.Config.(kubernetesConfig)
29 | return config
30 | }
31 |
--------------------------------------------------------------------------------
/k8s-test/tests/root_containers/rootContainersFAILED.yaml:
--------------------------------------------------------------------------------
1 | # runAsNonRoot and runAsUser not set (pod or container)
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: pod6
6 | spec:
7 | containers:
8 | - name: main
9 | image: alpine
10 | command: ["/bin/sleep", "999999"]
11 | ---
12 | # runAsNonRoot set at pod, but overridden at container
13 | apiVersion: v1
14 | kind: Pod
15 | metadata:
16 | name: pod7
17 | spec:
18 | securityContext:
19 | runAsNonRoot: true
20 | containers:
21 | - name: main
22 | image: alpine
23 | command: ["/bin/sleep", "999999"]
24 | securityContext:
25 | runAsNonRoot: false
26 | ---
27 | # runAsNonRoot not set, runAsUser set to 0 at pod level
28 | apiVersion: v1
29 | kind: Pod
30 | metadata:
31 | name: pod8
32 | spec:
33 | securityContext:
34 | runAsUser: 0
35 | containers:
36 | - name: main
37 | image: alpine
38 | command: ["/bin/sleep", "999999"]
39 | ---
40 | # runAsNonRoot not set, runAsUser >1000 defined at pod, but overridden to 0 at container level
41 | apiVersion: v1
42 | kind: Pod
43 | metadata:
44 | name: pod9
45 | spec:
46 | securityContext:
47 | runAsUser: 1000
48 | containers:
49 | - name: main
50 | image: alpine
51 | command: ["/bin/sleep", "999999"]
52 | - name: main2
53 | image: alpine
54 | command: ["/bin/sleep", "999999"]
55 | securityContext:
56 | runAsUser: 0
57 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_daemonset/variables.tf:
--------------------------------------------------------------------------------
1 | # https://github.com/bridgecrewio/checkov/blob/master/checkov/kubernetes/checks/HostPort.py
2 | # https://github.com/bridgecrewio/checkov/tree/master/tests/kubernetes/checks/example_HostPort
3 | # https://github.com/bridgecrewio/checkov/blob/master/tests/kubernetes/checks/test_HostPort.py
4 |
5 | # https://kubernetes.io/docs/concepts/configuration/overview/
6 | # Don’t specify a hostPort for a Pod unless it is absolutely necessary.
7 | # When you bind a Pod to a hostPort, it limits the number of places the
8 | # Pod can be scheduled, because each hostIP, hostPort, and protocol
9 | # combination must be unique.
10 |
11 |
12 | resource "null_resource" "namespace_monitoring" {
13 | provisioner "local-exec" {
14 | command = "kubectl create namespace monitoring"
15 | }
16 | }
17 |
18 | # deploy DS-node-exporter-FAILED
19 | resource "null_resource" "create_daemonset" {
20 | depends_on = [
21 | null_resource.namespace_monitoring
22 | ]
23 | provisioner "local-exec" {
24 | command = "kubectl apply -f ${path.cwd}/daemonset.yaml"
25 | }
26 | }
27 |
28 | resource "null_resource" "delay" {
29 | provisioner "local-exec" {
30 | command = "sleep 60"
31 | }
32 | }
33 |
34 |
35 | # Delay in order to let the resource creation complete
36 | resource "null_resource" "get_daemonsets" {
37 | depends_on = [
38 | null_resource.delay
39 | ]
40 | provisioner "local-exec" {
41 | command = "kubectl get daemonsets -n monitoring"
42 | }
43 | }
44 |
45 |
46 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_role/test-get-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "jenkins",
4 | "namespace": "default",
5 | "rules": [
6 | {
7 | "apiGroups": [
8 | ""
9 | ],
10 | "resources": [
11 | "pods"
12 | ],
13 | "verbs": [
14 | "create",
15 | "delete",
16 | "get",
17 | "list",
18 | "patch",
19 | "update",
20 | "watch"
21 | ]
22 | },
23 | {
24 | "apiGroups": [
25 | ""
26 | ],
27 | "resources": [
28 | "pods/exec"
29 | ],
30 | "verbs": [
31 | "create",
32 | "delete",
33 | "get",
34 | "list",
35 | "patch",
36 | "update",
37 | "watch"
38 | ]
39 | },
40 | {
41 | "apiGroups": [
42 | ""
43 | ],
44 | "resources": [
45 | "pods/log"
46 | ],
47 | "verbs": [
48 | "get",
49 | "list",
50 | "watch"
51 | ]
52 | },
53 | {
54 | "apiGroups": [
55 | ""
56 | ],
57 | "resources": [
58 | "events"
59 | ],
60 | "verbs": [
61 | "watch"
62 | ]
63 | },
64 | {
65 | "apiGroups": [
66 | ""
67 | ],
68 | "resources": [
69 | "secrets"
70 | ],
71 | "verbs": [
72 | "get"
73 | ]
74 | }
75 | ]
76 | }
77 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_role/test-list-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "jenkins",
4 | "namespace": "default",
5 | "rules": [
6 | {
7 | "apiGroups": [
8 | ""
9 | ],
10 | "resources": [
11 | "pods"
12 | ],
13 | "verbs": [
14 | "create",
15 | "delete",
16 | "get",
17 | "list",
18 | "patch",
19 | "update",
20 | "watch"
21 | ]
22 | },
23 | {
24 | "apiGroups": [
25 | ""
26 | ],
27 | "resources": [
28 | "pods/exec"
29 | ],
30 | "verbs": [
31 | "create",
32 | "delete",
33 | "get",
34 | "list",
35 | "patch",
36 | "update",
37 | "watch"
38 | ]
39 | },
40 | {
41 | "apiGroups": [
42 | ""
43 | ],
44 | "resources": [
45 | "pods/log"
46 | ],
47 | "verbs": [
48 | "get",
49 | "list",
50 | "watch"
51 | ]
52 | },
53 | {
54 | "apiGroups": [
55 | ""
56 | ],
57 | "resources": [
58 | "events"
59 | ],
60 | "verbs": [
61 | "watch"
62 | ]
63 | },
64 | {
65 | "apiGroups": [
66 | ""
67 | ],
68 | "resources": [
69 | "secrets"
70 | ],
71 | "verbs": [
72 | "get"
73 | ]
74 | }
75 | ]
76 | }
77 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/host_port/variables.tf:
--------------------------------------------------------------------------------
1 | # https://github.com/bridgecrewio/checkov/blob/master/checkov/kubernetes/checks/HostPort.py
2 | # https://github.com/bridgecrewio/checkov/tree/master/tests/kubernetes/checks/example_HostPort
3 | # https://github.com/bridgecrewio/checkov/blob/master/tests/kubernetes/checks/test_HostPort.py
4 |
5 | # https://kubernetes.io/docs/concepts/configuration/overview/
6 | # Don’t specify a hostPort for a Pod unless it is absolutely necessary.
7 | # When you bind a Pod to a hostPort, it limits the number of places the
8 | # Pod can be scheduled, because each hostIP, hostPort, and protocol
9 | # combination must be unique.
10 |
11 |
12 | resource "null_resource" "namespace_monitoring" {
13 | provisioner "local-exec" {
14 | command = "kubectl create namespace monitoring"
15 | }
16 | }
17 |
18 | # deploy nginx-app-PASSED
19 | resource "null_resource" "nginx-app-passed" {
20 | provisioner "local-exec" {
21 | command = "kubectl create -f ${path.cwd}/nginx-app-PASSED.yaml"
22 | }
23 | }
24 |
25 | # deploy DS-node-exporter-FAILED
26 | resource "null_resource" "ds-node-exporter-failed" {
27 | depends_on = [
28 | null_resource.namespace_monitoring
29 | ]
30 | provisioner "local-exec" {
31 | command = "kubectl create -f ${path.cwd}/DS-node-exporter-FAILED.yaml"
32 | }
33 | }
34 |
35 | resource "null_resource" "delay" {
36 | provisioner "local-exec" {
37 | command = "sleep 60"
38 | }
39 | }
40 |
41 |
42 | # Delay in order to let the resource creation complete
43 | resource "null_resource" "get_pods" {
44 | depends_on = [
45 | null_resource.delay
46 | ]
47 | provisioner "local-exec" {
48 | command = "kubectl get pods"
49 | }
50 | }
51 |
52 |
53 |
--------------------------------------------------------------------------------
/test_files/pod_security_policy/restricted-psp.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1beta1
2 | kind: PodSecurityPolicy
3 | metadata:
4 | name: restricted
5 | annotations:
6 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
7 | apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
8 | seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default'
9 | apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
10 | spec:
11 | privileged: false
12 | # Required to prevent escalations to root.
13 | allowPrivilegeEscalation: false
14 | # This is redundant with non-root + disallow privilege escalation,
15 | # but we can provide it for defense in depth.
16 | requiredDropCapabilities:
17 | - ALL
18 | # Allow core volume types.
19 | volumes:
20 | - 'configMap'
21 | - 'emptyDir'
22 | - 'projected'
23 | - 'secret'
24 | - 'downwardAPI'
25 | # Assume that persistentVolumes set up by the cluster admin are safe to use.
26 | - 'persistentVolumeClaim'
27 | hostNetwork: false
28 | hostIPC: false
29 | hostPID: false
30 | runAsUser:
31 | # Require the container to run without root privileges.
32 | rule: 'MustRunAsNonRoot'
33 | seLinux:
34 | # This policy assumes the nodes are using AppArmor rather than SELinux.
35 | rule: 'RunAsAny'
36 | supplementalGroups:
37 | rule: 'MustRunAs'
38 | ranges:
39 | # Forbid adding the root group.
40 | - min: 1
41 | max: 65535
42 | fsGroup:
43 | rule: 'MustRunAs'
44 | ranges:
45 | # Forbid adding the root group.
46 | - min: 1
47 | max: 65535
48 | readOnlyRootFilesystem: false
49 |
--------------------------------------------------------------------------------
/kubernetes/table_helm_template.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import (
4 | "context"
5 | "path"
6 |
7 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
8 | "github.com/turbot/steampipe-plugin-sdk/v5/plugin"
9 | )
10 |
11 | //// TABLE DEFINITION
12 |
13 | func tableHelmTemplates(ctx context.Context) *plugin.Table {
14 | return &plugin.Table{
15 | Name: "helm_template",
16 | Description: "Lists the raw templates defined in the configured charts",
17 | List: &plugin.ListConfig{
18 | Hydrate: listHelmTemplates,
19 | },
20 | Columns: []*plugin.Column{
21 | {Name: "chart_name", Type: proto.ColumnType_STRING, Description: "The name of the chart."},
22 | {Name: "path", Type: proto.ColumnType_STRING, Description: "The path to the template file."},
23 | {Name: "raw", Type: proto.ColumnType_STRING, Description: "Raw is the template as byte data."},
24 | },
25 | }
26 | }
27 |
28 | type helmTemplateRaw struct {
29 | ChartName string
30 | Path string
31 | Raw string
32 | }
33 |
34 | //// LIST FUNCTION
35 |
36 | func listHelmTemplates(ctx context.Context, d *plugin.QueryData, _ *plugin.HydrateData) (interface{}, error) {
37 | charts, err := getUniqueHelmCharts(ctx, d)
38 | if err != nil {
39 | return nil, err
40 | }
41 |
42 | for _, chart := range charts {
43 | for _, template := range chart.Chart.Templates {
44 | d.StreamListItem(ctx, helmTemplateRaw{
45 | ChartName: chart.Chart.Metadata.Name,
46 | Raw: string(template.Data),
47 | Path: path.Join(chart.Path, template.Name),
48 | })
49 |
50 | // Context can be cancelled due to manual cancellation or because the row limit has been hit
51 | if d.RowsRemaining(ctx) == 0 {
52 | return nil, nil
53 | }
54 | }
55 | }
56 |
57 | return nil, nil
58 | }
59 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role/cluster_role.yaml:
--------------------------------------------------------------------------------
1 | kind: ClusterRole
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: jenkins
5 | rules:
6 | - apiGroups: [""]
7 | resources: ["namespaces"]
8 | verbs: ["create","delete","get","list","patch","update","watch"]
9 | - apiGroups: [""]
10 | resources: ["pods"]
11 | verbs: ["create","delete","get","list","patch","update","watch"]
12 | - apiGroups: [""]
13 | resources: ["pods/exec"]
14 | verbs: ["create","delete","get","list","patch","update","watch"]
15 | - apiGroups: [""]
16 | resources: ["pods/log"]
17 | verbs: ["get","list","watch"]
18 | - apiGroups: [""]
19 | resources: ["events"]
20 | verbs: ["watch"]
21 | - apiGroups: [""]
22 | resources: ["nodes"]
23 | verbs: ["list"]
24 | - apiGroups: [""]
25 | resources: ["secrets"]
26 | verbs: ["create","delete","get","list","patch","update","watch"]
27 | - apiGroups: ["apps"]
28 | resources: ["deployments"] # KubernetesPipelineTest#cascadingDelete
29 | verbs: ["create","delete","get","list","patch","update","watch"]
30 | - apiGroups: ["extensions"]
31 | resources: ["deployments"] # ditto
32 | verbs: ["create","delete","get","list","patch","update","watch"]
33 | - apiGroups: ["apps"]
34 | resources: ["replicasets"] # ditto
35 | verbs: ["create","delete","get","list","patch","update","watch"]
36 | - apiGroups: [""]
37 | resources: ["persistentvolumeclaims"] # KubernetesPipelineTest#dynamicPVC
38 | verbs: ["create","delete","get","list","patch","update","watch"]
39 | - apiGroups: ['policy']
40 | resources: ['podsecuritypolicies']
41 | verbs: ['use']
42 | resourceNames:
43 | - privileged
44 | ---
45 | apiVersion: rbac.authorization.k8s.io/v1
46 | kind: ClusterRoleBinding
47 | metadata:
48 | name: jenkins
49 | roleRef:
50 | apiGroup: rbac.authorization.k8s.io
51 | kind: ClusterRole
52 | name: jenkins
53 | subjects:
54 | - kind: ServiceAccount
55 | name: jenkins
56 | namespace: kubernetes-plugin-test
57 | ---
58 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role_binding/cluster_role.yaml:
--------------------------------------------------------------------------------
1 | kind: ClusterRole
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: jenkins
5 | rules:
6 | - apiGroups: [""]
7 | resources: ["namespaces"]
8 | verbs: ["create","delete","get","list","patch","update","watch"]
9 | - apiGroups: [""]
10 | resources: ["pods"]
11 | verbs: ["create","delete","get","list","patch","update","watch"]
12 | - apiGroups: [""]
13 | resources: ["pods/exec"]
14 | verbs: ["create","delete","get","list","patch","update","watch"]
15 | - apiGroups: [""]
16 | resources: ["pods/log"]
17 | verbs: ["get","list","watch"]
18 | - apiGroups: [""]
19 | resources: ["events"]
20 | verbs: ["watch"]
21 | - apiGroups: [""]
22 | resources: ["nodes"]
23 | verbs: ["list"]
24 | - apiGroups: [""]
25 | resources: ["secrets"]
26 | verbs: ["create","delete","get","list","patch","update","watch"]
27 | - apiGroups: ["apps"]
28 | resources: ["deployments"] # KubernetesPipelineTest#cascadingDelete
29 | verbs: ["create","delete","get","list","patch","update","watch"]
30 | - apiGroups: ["extensions"]
31 | resources: ["deployments"] # ditto
32 | verbs: ["create","delete","get","list","patch","update","watch"]
33 | - apiGroups: ["apps"]
34 | resources: ["replicasets"] # ditto
35 | verbs: ["create","delete","get","list","patch","update","watch"]
36 | - apiGroups: [""]
37 | resources: ["persistentvolumeclaims"] # KubernetesPipelineTest#dynamicPVC
38 | verbs: ["create","delete","get","list","patch","update","watch"]
39 | - apiGroups: ['policy']
40 | resources: ['podsecuritypolicies']
41 | verbs: ['use']
42 | resourceNames:
43 | - privileged
44 | ---
45 | apiVersion: rbac.authorization.k8s.io/v1
46 | kind: ClusterRoleBinding
47 | metadata:
48 | name: jenkins
49 | roleRef:
50 | apiGroup: rbac.authorization.k8s.io
51 | kind: ClusterRole
52 | name: jenkins
53 | subjects:
54 | - kind: ServiceAccount
55 | name: jenkins
56 | namespace: kubernetes-plugin-test
57 | ---
58 |
--------------------------------------------------------------------------------
/kubernetes/table_helm_template_rendered.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
8 | "github.com/turbot/steampipe-plugin-sdk/v5/plugin"
9 | )
10 |
11 | //// TABLE DEFINITION
12 |
13 | func tableHelmTemplateRendered(ctx context.Context) *plugin.Table {
14 | return &plugin.Table{
15 | Name: "helm_template_rendered",
16 | Description: "Lists the fully rendered templates using the values provided in the config",
17 | List: &plugin.ListConfig{
18 | Hydrate: listHelmRenderedTemplates,
19 | },
20 | Columns: []*plugin.Column{
21 | {Name: "path", Type: proto.ColumnType_STRING, Description: "The path to the template file."},
22 | {Name: "chart_name", Type: proto.ColumnType_STRING, Description: "The name of the chart."},
23 | {Name: "source_type", Type: proto.ColumnType_STRING, Description: "The source of the template."},
24 | {Name: "rendered", Type: proto.ColumnType_STRING, Description: "Rendered is the rendered template as byte data."},
25 | },
26 | }
27 | }
28 |
29 | type helmTemplate struct {
30 | ChartName string
31 | Path string
32 | Rendered string
33 | SourceType string
34 | }
35 |
36 | //// LIST FUNCTION
37 |
38 | func listHelmRenderedTemplates(ctx context.Context, d *plugin.QueryData, _ *plugin.HydrateData) (interface{}, error) {
39 | renderedTemplates, err := getHelmRenderedTemplates(ctx, d, nil)
40 | if err != nil {
41 | return nil, err
42 | }
43 |
44 | for _, template := range renderedTemplates {
45 | d.StreamListItem(ctx, helmTemplate{
46 | ChartName: template.Chart.Metadata.Name,
47 | Path: template.Path,
48 | Rendered: template.Data,
49 | SourceType: fmt.Sprintf("helm_rendered:%s", template.ConfigKey),
50 | })
51 |
52 | // Context can be cancelled due to manual cancellation or because the row limit has been hit
53 | if d.RowsRemaining(ctx) == 0 {
54 | return nil, nil
55 | }
56 | }
57 |
58 | return nil, nil
59 | }
60 |
--------------------------------------------------------------------------------
/k8s-test/tests/docker_daemon_socket/variables.tf:
--------------------------------------------------------------------------------
1 | # https://github.com/bridgecrewio/checkov/blob/master/checkov/kubernetes/checks/DockerSocketVolume.py
2 | # https://github.com/bridgecrewio/checkov/blob/master/tests/kubernetes/checks/example_DockerSocketVolume/scope-2PASSED-1FAILED.yaml
3 | # https://github.com/bridgecrewio/checkov/blob/master/tests/kubernetes/checks/example_DockerSocketVolume/cloudwatch-agent-1PASSED-1FAILED.yaml
4 | # https://github.com/bridgecrewio/checkov/blob/master/tests/kubernetes/checks/test_DockerSocketVolume.py
5 |
6 |
7 | # name = "Do not expose the docker daemon socket to containers"
8 | # Exposing the socket gives container information and increases risk of exploit
9 | # read-only is not a solution but only makes it harder to exploit.
10 | # Location: Pod.spec.volumes[].hostPath.path
11 | # Location: CronJob.spec.jobTemplate.spec.template.spec.volumes[].hostPath.path
12 | # Location: *.spec.template.spec.volumes[].hostPath.path
13 | # supported_kind = ['Pod', 'Deployment', 'DaemonSet', 'StatefulSet', 'ReplicaSet', 'ReplicationController', 'Job', 'CronJob']
14 |
15 |
16 | resource "null_resource" "namespace-amazon-cloudwatch" {
17 | provisioner "local-exec" {
18 | command = "kubectl create namespace amazon-cloudwatch"
19 | }
20 | }
21 |
22 | # deploy scope-2PASSED-1FAILED.yaml
23 | resource "null_resource" "scope_failed" {
24 | provisioner "local-exec" {
25 | command = "kubectl apply -f ${path.cwd}/scope-2PASSED-1FAILED.yaml"
26 | }
27 | }
28 |
29 | # deploy cloudwatch-agent-1PASSED-1FAILED.yaml
30 | resource "null_resource" "cloudwatch-agent" {
31 | depends_on = [
32 | null_resource.namespace-amazon-cloudwatch
33 | ]
34 | provisioner "local-exec" {
35 | command = "kubectl apply -f ${path.cwd}/cloudwatch-agent-1PASSED-1FAILED.yaml"
36 | }
37 | }
38 |
39 | resource "null_resource" "delay" {
40 | provisioner "local-exec" {
41 | command = "sleep 60"
42 | }
43 | }
44 |
45 |
46 | # Delay in order to let the resource creation complete
47 | resource "null_resource" "get_pods" {
48 | depends_on = [
49 | null_resource.delay
50 | ]
51 | provisioner "local-exec" {
52 | command = "kubectl get pods"
53 | }
54 | }
55 |
56 |
--------------------------------------------------------------------------------
/docs/tables/helm_template.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: helm_template - Query Kubernetes Helm Templates using SQL"
3 | description: "Allows users to query Helm Templates in Kubernetes, specifically providing details about the chart, metadata, and template files, offering insights into Kubernetes deployment configurations."
4 | folder: "Helm"
5 | ---
6 |
7 | # Table: helm_template - Query Kubernetes Helm Templates using SQL
8 |
9 | Helm templates are part of Helm, the package manager for Kubernetes, a platform for managing containerized applications across a cluster of nodes. Helm uses these templates to generate Kubernetes manifest files, which describe the resources needed for applications. These templates offer a way to manage complex applications and their dependencies in a standardized, repeatable, and efficient manner.
10 |
11 | ## Table Usage Guide
12 |
13 | The `helm_template` table provides insights into Helm Templates within Kubernetes. As a DevOps engineer, explore template-specific details through this table, including chart details, metadata, and template files. Utilize it to understand Kubernetes deployment configurations and to manage complex applications and their dependencies more efficiently.
14 |
15 | **Important Notes**
16 | - The table will show the raw template as defined in the file. To list the fully rendered templates, use table `helm_template_rendered`.
17 |
18 | ## Examples
19 |
20 | ### Basic info
21 | Explore the basic information of your Helm charts, including their names and paths. This can help you gain insights into your Helm configuration, understand its structure, and identify any potential issues.
22 |
23 | ```sql+postgres
24 | select
25 | chart_name,
26 | path,
27 | raw
28 | from
29 | helm_template;
30 | ```
31 |
32 | ```sql+sqlite
33 | select
34 | chart_name,
35 | path,
36 | raw
37 | from
38 | helm_template;
39 | ```
40 |
41 | ### List templates defined for a specific chart
42 | Explore which templates are defined for a specific chart in a Helm-based application deployment. This can be useful in understanding the configuration and setup of a specific application like 'redis'.
43 |
44 | ```sql+postgres
45 | select
46 | chart_name,
47 | path,
48 | raw
49 | from
50 | helm_template
51 | where
52 | chart_name = 'redis';
53 | ```
54 |
55 | ```sql+sqlite
56 | select
57 | chart_name,
58 | path,
59 | raw
60 | from
61 | helm_template
62 | where
63 | chart_name = 'redis';
64 | ```
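
### Count templates per chart
As an additional, illustrative query (using only the `chart_name` column shown in the examples above), count how many template files each configured chart defines.

```sql+postgres
select
  chart_name,
  count(*) as template_count
from
  helm_template
group by
  chart_name
order by
  template_count desc;
```

```sql+sqlite
select
  chart_name,
  count(*) as template_count
from
  helm_template
group by
  chart_name
order by
  template_count desc;
```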
--------------------------------------------------------------------------------
/docs/tables/kubernetes_namespace.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_namespace - Query Kubernetes Namespaces using SQL"
3 | description: "Allows users to query Kubernetes Namespaces, specifically the metadata and status of each namespace, providing insights into resource allocation and usage."
4 | folder: "Namespace"
5 | ---
6 |
7 | # Table: kubernetes_namespace - Query Kubernetes Namespaces using SQL
8 |
9 | Kubernetes Namespaces are an abstraction used by Kubernetes to support multiple virtual clusters on the same physical cluster. These namespaces provide a scope for names, and they are intended to be used in environments with many users spread across multiple teams, or projects. Namespaces are a way to divide cluster resources between multiple uses.
10 |
11 | ## Table Usage Guide
12 |
13 | The `kubernetes_namespace` table provides insights into Namespaces within Kubernetes. As a DevOps engineer, explore namespace-specific details through this table, including metadata, status, and associated resources. Utilize it to uncover information about namespaces, such as their status, the resources allocated to them, and their overall usage within the Kubernetes cluster.
14 |
15 | ## Examples
16 |
17 | ### Basic Info
18 | Explore the status and metadata of different segments within your Kubernetes environment. This allows you to gain insights into the current operational phase and additional details of each namespace, aiding in effective resource management and monitoring.
19 |
20 | ```sql+postgres
21 | select
22 | name,
23 | phase as status,
24 | annotations,
25 | labels
26 | from
27 | kubernetes_namespace;
28 | ```
29 |
30 | ```sql+sqlite
31 | select
32 | name,
33 | phase as status,
34 | annotations,
35 | labels
36 | from
37 | kubernetes_namespace;
38 | ```
39 |
40 | ### List manifest resources
41 | Uncover the details of each manifest resource within your Kubernetes namespace, including its status and associated annotations and labels. This is particularly useful for tracking resource utilization and identifying any potential issues or anomalies that may impact system performance.
42 |
43 | ```sql+postgres
44 | select
45 | name,
46 | phase as status,
47 | annotations,
48 | labels,
49 | path
50 | from
51 | kubernetes_namespace
52 | where
53 | path is not null;
54 | ```
55 |
56 | ```sql+sqlite
57 | select
58 | name,
59 | phase as status,
60 | annotations,
61 | labels,
62 | path
63 | from
64 | kubernetes_namespace
65 | where
66 | path is not null;
67 | ```
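
### List active namespaces
An illustrative filter built only on the columns used above (`name`, `phase`, `labels`); it narrows the results to namespaces whose status phase is `Active`.

```sql+postgres
select
  name,
  phase as status,
  labels
from
  kubernetes_namespace
where
  phase = 'Active'
order by
  name;
```

```sql+sqlite
select
  name,
  phase as status,
  labels
from
  kubernetes_namespace
where
  phase = 'Active'
order by
  name;
```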
--------------------------------------------------------------------------------
/k8s-test/tests/host_port/DS-node-exporter-FAILED.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: prometheus-node-exporter
5 | namespace: monitoring
6 | spec:
7 | selector:
8 | matchLabels:
9 | name: prometheus-node-exporter
10 | template:
11 | metadata:
12 | labels:
13 | name: prometheus-node-exporter
14 | spec:
15 | containers:
16 | - args:
17 | - --path.procfs=/host/proc
18 | - --path.sysfs=/host/sys
19 | - --web.listen-address=0.0.0.0:9100
20 | - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)
21 | - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
22 | image: quay.io/prometheus/node-exporter:v0.18.1
23 | imagePullPolicy: IfNotPresent
24 | livenessProbe:
25 | failureThreshold: 3
26 | httpGet:
27 | path: /
28 | port: 9100
29 | scheme: HTTP
30 | periodSeconds: 10
31 | successThreshold: 1
32 | timeoutSeconds: 1
33 | name: node-exporter
34 | ports:
35 | - containerPort: 9100
36 | hostPort: 9100
37 | name: metrics
38 | protocol: TCP
39 | readinessProbe:
40 | failureThreshold: 3
41 | httpGet:
42 | path: /
43 | port: 9100
44 | scheme: HTTP
45 | periodSeconds: 10
46 | successThreshold: 1
47 | timeoutSeconds: 1
48 | volumeMounts:
49 | - mountPath: /host/proc
50 | name: proc
51 | readOnly: true
52 | - mountPath: /host/sys
53 | name: sys
54 | readOnly: true
55 | dnsPolicy: ClusterFirst
56 | hostNetwork: true
57 | hostPID: true
58 | restartPolicy: Always
59 | schedulerName: default-scheduler
60 | securityContext:
61 | runAsNonRoot: true
62 | runAsUser: 65534
63 | serviceAccount: prometheus-node-exporter
64 | serviceAccountName: prometheus-node-exporter
65 | volumes:
66 | - hostPath:
67 | path: /proc
68 | type: ""
69 | name: proc
70 | - hostPath:
71 | path: /sys
72 | type: ""
73 | name: sys
74 | updateStrategy:
75 | rollingUpdate:
76 | maxUnavailable: 1
77 | type: RollingUpdate
78 |
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_daemonset/daemonset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: prometheus-node-exporter
5 | namespace: monitoring
6 | spec:
7 | selector:
8 | matchLabels:
9 | name: prometheus-node-exporter
10 | template:
11 | metadata:
12 | labels:
13 | name: prometheus-node-exporter
14 | spec:
15 | containers:
16 | - args:
17 | - --path.procfs=/host/proc
18 | - --path.sysfs=/host/sys
19 | - --web.listen-address=0.0.0.0:9100
20 | - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)
21 | - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
22 | image: quay.io/prometheus/node-exporter:v0.18.1
23 | imagePullPolicy: IfNotPresent
24 | livenessProbe:
25 | failureThreshold: 3
26 | httpGet:
27 | path: /
28 | port: 9100
29 | scheme: HTTP
30 | periodSeconds: 10
31 | successThreshold: 1
32 | timeoutSeconds: 1
33 | name: node-exporter
34 | ports:
35 | - containerPort: 9100
36 | hostPort: 9100
37 | name: metrics
38 | protocol: TCP
39 | readinessProbe:
40 | failureThreshold: 3
41 | httpGet:
42 | path: /
43 | port: 9100
44 | scheme: HTTP
45 | periodSeconds: 10
46 | successThreshold: 1
47 | timeoutSeconds: 1
48 | volumeMounts:
49 | - mountPath: /host/proc
50 | name: proc
51 | readOnly: true
52 | - mountPath: /host/sys
53 | name: sys
54 | readOnly: true
55 | dnsPolicy: ClusterFirst
56 | hostNetwork: true
57 | hostPID: true
58 | restartPolicy: Always
59 | schedulerName: default-scheduler
60 | securityContext:
61 | runAsNonRoot: true
62 | runAsUser: 65534
63 | serviceAccount: prometheus-node-exporter
64 | serviceAccountName: prometheus-node-exporter
65 | volumes:
66 | - hostPath:
67 | path: /proc
68 | type: ""
69 | name: proc
70 | - hostPath:
71 | path: /sys
72 | type: ""
73 | name: sys
74 | updateStrategy:
75 | rollingUpdate:
76 | maxUnavailable: 1
77 | type: RollingUpdate
78 |
--------------------------------------------------------------------------------
/docs/tables/kubernetes_service.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_service - Query Kubernetes Services using SQL"
3 | description: "Allows users to query Kubernetes Services, particularly the configuration and status of services within a Kubernetes cluster."
4 | folder: "Service"
5 | ---
6 |
7 | # Table: kubernetes_service - Query Kubernetes Services using SQL
8 |
9 | A Kubernetes Service is a resource used to expose an application running on a set of Pods. The set of Pods targeted by a Service is determined by a Label Selector. It provides the abstraction of a logical set of Pods and a policy by which to access them, a pattern often referred to as micro-services.
10 |
11 | ## Table Usage Guide
12 |
13 | The `kubernetes_service` table offers insights into the services within a Kubernetes cluster. As a DevOps engineer, you can probe service-specific details through this table, including service configurations, status, and associated metadata. Use it to discover information about services, such as those with specific selectors, the type of service, and the ports exposed by the service.
14 |
15 | ## Examples
16 |
17 | ### Basic Info - `kubectl describe service --all-namespaces` columns
18 | Analyze the settings of your Kubernetes services to understand their organization and longevity. This query is useful for gaining insights into how your services are distributed across namespaces, their types, and how long they have been active.
19 |
20 | ```sql+postgres
21 | select
22 | name,
23 | namespace,
24 | type,
25 | cluster_ip,
26 | age(current_timestamp, creation_timestamp)
27 | from
28 | kubernetes_service
29 | order by
30 | namespace,
31 | name;
32 | ```
33 |
34 | ```sql+sqlite
35 | select
36 | name,
37 | namespace,
38 | type,
39 | cluster_ip,
40 | strftime('%s', 'now') - strftime('%s', creation_timestamp) as age
41 | from
42 | kubernetes_service
43 | order by
44 | namespace,
45 | name;
46 | ```
47 |
48 | ### List manifest resources
49 | Analyze the settings to understand the distribution of resources within a Kubernetes cluster. This can help to identify instances where resources are not properly allocated, improving the efficiency of the cluster.
50 |
51 | ```sql+postgres
52 | select
53 | name,
54 | namespace,
55 | type,
56 | cluster_ip,
57 | path
58 | from
59 | kubernetes_service
60 | where
61 | path is not null
62 | order by
63 | namespace,
64 | name;
65 | ```
66 |
67 | ```sql+sqlite
68 | select
69 | name,
70 | namespace,
71 | type,
72 | cluster_ip,
73 | path
74 | from
75 | kubernetes_service
76 | where
77 | path is not null
78 | order by
79 | namespace,
80 | name;
81 | ```
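
### List services exposed through a load balancer
An illustrative filter on the `type` column shown above; it lists only services of type `LoadBalancer`, which are typically reachable from outside the cluster.

```sql+postgres
select
  name,
  namespace,
  type,
  cluster_ip
from
  kubernetes_service
where
  type = 'LoadBalancer'
order by
  namespace,
  name;
```

```sql+sqlite
select
  name,
  namespace,
  type,
  cluster_ip
from
  kubernetes_service
where
  type = 'LoadBalancer'
order by
  namespace,
  name;
```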
--------------------------------------------------------------------------------
/docs/tables/kubernetes_config_map.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_config_map - Query Kubernetes Config Maps using SQL"
3 | description: "Allows users to query Kubernetes Config Maps, providing insights into configuration data and application settings within a Kubernetes cluster."
4 | folder: "Config"
5 | ---
6 |
7 | # Table: kubernetes_config_map - Query Kubernetes Config Maps using SQL
8 |
9 | A Kubernetes Config Map is a resource that allows you to decouple configuration artifacts from image content to keep containerized applications portable. It stores non-confidential data in key-value pairs that can be consumed by pods, for example as environment variables or configuration files. Config Maps offer a centralized method to manage and deploy configuration data.
10 |
11 | ## Table Usage Guide
12 |
13 | The `kubernetes_config_map` table provides insights into Config Maps within Kubernetes. As a DevOps engineer, explore Config Map-specific details through this table, including data, creation timestamps, and associated metadata. Utilize it to uncover information about Config Maps, such as those used in specific namespaces, the configuration details they hold, and the pods that may be consuming them.
14 |
15 | ## Examples
16 |
17 | ### Basic Info
18 | Explore the age and details of Kubernetes configuration maps to understand their longevity and content. This can help you manage and optimize your Kubernetes resources over time.
19 |
20 | ```sql+postgres
21 | select
22 | name,
23 | namespace,
24 | data.key,
25 | data.value,
26 | age(current_timestamp, creation_timestamp)
27 | from
28 | kubernetes_config_map,
29 | jsonb_each(data) as data
30 | order by
31 | namespace,
32 | name;
33 | ```
34 |
35 | ```sql+sqlite
36 | select
37 | name,
38 | namespace,
39 | data.key,
40 | data.value,
41 | strftime('%s', 'now') - strftime('%s', creation_timestamp) as age
42 | from
43 | kubernetes_config_map,
44 | json_each(data) as data
45 | order by
46 | namespace,
47 | name;
48 | ```
49 |
50 | ### List manifest resources
51 | Analyze the settings to understand the distribution of resources across different namespaces within your Kubernetes environment. This can help in managing resources effectively and preventing any potential conflicts or overlaps.
52 |
53 | ```sql+postgres
54 | select
55 | name,
56 | namespace,
57 | data.key,
58 | data.value,
59 | path
60 | from
61 | kubernetes_config_map,
62 | jsonb_each(data) as data
63 | where
64 | path is not null
65 | order by
66 | namespace,
67 | name;
68 | ```
69 |
70 | ```sql+sqlite
71 | select
72 | name,
73 | namespace,
74 | data.key,
75 | data.value,
76 | path
77 | from
78 | kubernetes_config_map,
79 | json_each(data) as data
80 | where
81 | path is not null
82 | order by
83 | namespace,
84 | name;
85 | ```
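
### Count config maps per namespace
A simple, illustrative aggregation over the columns already used above to see how config maps are distributed across namespaces.

```sql+postgres
select
  namespace,
  count(*) as config_map_count
from
  kubernetes_config_map
group by
  namespace
order by
  config_map_count desc;
```

```sql+sqlite
select
  namespace,
  count(*) as config_map_count
from
  kubernetes_config_map
group by
  namespace
order by
  config_map_count desc;
```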
--------------------------------------------------------------------------------
/docs/tables/kubernetes_ingress.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_ingress - Query Kubernetes Ingresses using SQL"
3 | description: "Allows users to query Kubernetes Ingresses, specifically to obtain details about the network traffic routing rules, providing insights into application or service access patterns."
4 | folder: "Ingress"
5 | ---
6 |
7 | # Table: kubernetes_ingress - Query Kubernetes Ingresses using SQL
8 |
9 | Kubernetes Ingress is a collection of routing rules that govern how external users access services running in a Kubernetes cluster. Typically, these rules are used to expose services to external traffic coming from the internet. It provides a way to manage external access to the services in a cluster, typically over HTTP.
10 |
11 | ## Table Usage Guide
12 |
13 | The `kubernetes_ingress` table provides insights into Ingresses within Kubernetes. As a DevOps engineer, explore Ingress-specific details through this table, including host information, backend service details, and associated annotations. Utilize it to uncover information about Ingresses, such as those with specific routing rules, the services they expose, and their configurations.
14 |
15 | ## Examples
16 |
17 | ### Basic Info
18 | Explore which Kubernetes ingress resources are associated with specific namespaces and classes, and how long they have been created. This can help in tracking resource allocation and usage over time.
19 |
20 | ```sql+postgres
21 | select
22 | name,
23 | namespace,
24 | ingress_class_name as class,
25 | age(current_timestamp, creation_timestamp)
26 | from
27 | kubernetes_ingress
28 | order by
29 | namespace,
30 | name;
31 | ```
32 |
33 | ```sql+sqlite
34 | select
35 | name,
36 | namespace,
37 | ingress_class_name as class,
38 | (julianday('now') - julianday(creation_timestamp)) * 24 * 60 * 60 as age
39 | from
40 | kubernetes_ingress
41 | order by
42 | namespace,
43 | name;
44 | ```
45 |
46 | ### View rules for the ingress
47 | Explore which ingress rules are currently in place within your Kubernetes environment. This can help in understanding and managing traffic routing, ensuring efficient and secure communication between services.
48 |
49 | ```sql+postgres
50 | select
51 | name,
52 | namespace,
53 | jsonb_pretty(rules) as rules
54 | from
55 | kubernetes_ingress;
56 | ```
57 |
58 | ```sql+sqlite
59 | select
60 | name,
61 | namespace,
62 | rules
63 | from
64 | kubernetes_ingress;
65 | ```
66 |
67 | ### List manifest resources
68 | Explore which Kubernetes ingress resources are configured with a specific path. This can help identify areas where traffic routing rules have been established, which is essential for understanding and managing application traffic flow.
69 |
70 | ```sql+postgres
71 | select
72 | name,
73 | namespace,
74 | ingress_class_name as class,
75 | path
76 | from
77 | kubernetes_ingress
78 | where
79 | path is not null
80 | order by
81 | namespace,
82 | name;
83 | ```
84 |
85 | ```sql+sqlite
86 | select
87 | name,
88 | namespace,
89 | ingress_class_name as class,
90 | path
91 | from
92 | kubernetes_ingress
93 | where
94 | path is not null
95 | order by
96 | namespace,
97 | name;
98 | ```
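
### List hosts configured in ingress rules
An illustrative expansion of the `rules` column used above; it assumes each rule follows the standard Kubernetes `IngressRule` shape, where the `host` field is optional.

```sql+postgres
select
  name,
  namespace,
  rule ->> 'host' as host
from
  kubernetes_ingress,
  jsonb_array_elements(rules) as rule
where
  rule ->> 'host' is not null
order by
  namespace,
  name;
```

```sql+sqlite
select
  name,
  namespace,
  json_extract(rule.value, '$.host') as host
from
  kubernetes_ingress,
  json_each(rules) as rule
where
  json_extract(rule.value, '$.host') is not null
order by
  namespace,
  name;
```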
--------------------------------------------------------------------------------
/docs/tables/kubernetes_persistent_volume_claim.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_persistent_volume_claim - Query Kubernetes Persistent Volume Claims using SQL"
3 | description: "Allows users to query Kubernetes Persistent Volume Claims, specifically providing information about the status, capacity, and access modes of each claim."
4 | folder: "Persistent Volume"
5 | ---
6 |
7 | # Table: kubernetes_persistent_volume_claim - Query Kubernetes Persistent Volume Claims using SQL
8 |
9 | A Kubernetes Persistent Volume Claim (PVC) is a request for storage by a user. It is similar to a Pod: Pods consume node resources, while PVCs consume Persistent Volume (PV) resources. PVCs can request a specific size and access modes, such as read and write, for a PV.
10 |
11 | ## Table Usage Guide
12 |
13 | The `kubernetes_persistent_volume_claim` table provides insights into the Persistent Volume Claims within a Kubernetes cluster. As a DevOps engineer, you can use this table to explore details about each claim, including its current status, requested storage capacity, and access modes. This table is beneficial when you need to manage storage resources or troubleshoot storage-related issues in your Kubernetes environment.
14 |
15 | ## Examples
16 |
17 | ### Basic Info
18 | Explore the status and capacity of persistent storage volumes in a Kubernetes environment. This can help you manage resources effectively and ensure optimal allocation and usage.
19 |
20 | ```sql+postgres
21 | select
22 | name,
23 | namespace,
24 | volume_name as volume,
25 | volume_mode,
26 | access_modes,
27 | phase as status,
28 | capacity ->> 'storage' as capacity,
29 | creation_timestamp,
30 | data_source,
31 | selector,
32 | resources,
33 | age(current_timestamp, creation_timestamp)
34 | from
35 | kubernetes_persistent_volume_claim;
36 | ```
37 |
38 | ```sql+sqlite
39 | select
40 | name,
41 | namespace,
42 | volume_name as volume,
43 | volume_mode,
44 | access_modes,
45 | phase as status,
46 | json_extract(capacity, '$.storage') as capacity,
47 | creation_timestamp,
48 | data_source,
49 | selector,
50 | resources,
51 | (julianday('now') - julianday(creation_timestamp)) * 24 * 60 * 60 as age
52 | from
53 | kubernetes_persistent_volume_claim;
54 | ```
55 |
56 | ### List manifest resources
57 | Explore the various resources within a manifest by identifying their names, namespaces, and statuses. This is useful for understanding the capacity and configuration of your persistent storage volumes, particularly when you need to assess the availability and allocation of resources.
58 |
59 | ```sql+postgres
60 | select
61 | name,
62 | namespace,
63 | volume_name as volume,
64 | volume_mode,
65 | access_modes,
66 | phase as status,
67 | capacity ->> 'storage' as capacity,
68 | data_source,
69 | selector,
70 | resources,
71 | path
72 | from
73 | kubernetes_persistent_volume_claim
74 | where
75 | path is not null;
76 | ```
77 |
78 | ```sql+sqlite
79 | select
80 | name,
81 | namespace,
82 | volume_name as volume,
83 | volume_mode,
84 | access_modes,
85 | phase as status,
86 | json_extract(capacity, '$.storage') as capacity,
87 | data_source,
88 | selector,
89 | resources,
90 | path
91 | from
92 | kubernetes_persistent_volume_claim
93 | where
94 | path is not null;
95 | ```
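
### List pending claims
An illustrative filter on the `phase` column shown above to surface claims that have not yet been bound to a persistent volume.

```sql+postgres
select
  name,
  namespace,
  phase as status,
  creation_timestamp
from
  kubernetes_persistent_volume_claim
where
  phase = 'Pending';
```

```sql+sqlite
select
  name,
  namespace,
  phase as status,
  creation_timestamp
from
  kubernetes_persistent_volume_claim
where
  phase = 'Pending';
```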
--------------------------------------------------------------------------------
/docs/tables/kubernetes_cronjob.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_cronjob - Query Kubernetes CronJobs using SQL"
3 | description: "Allows users to query Kubernetes CronJobs, providing insights into scheduled tasks within the Kubernetes environment."
4 | folder: "CronJob"
5 | ---
6 |
7 | # Table: kubernetes_cronjob - Query Kubernetes CronJobs using SQL
8 |
9 | A Kubernetes CronJob creates Jobs on a repeating schedule, similar to cron job scheduling on Unix-like systems. It is a way to run automated tasks at regular, predetermined times. CronJobs use the Cron format to schedule tasks.
10 |
11 | ## Table Usage Guide
12 |
13 | The `kubernetes_cronjob` table provides insights into CronJobs within Kubernetes. As a DevOps engineer, explore CronJob-specific details through this table, including schedules, job histories, and associated metadata. Utilize it to monitor and manage your automated tasks, and ensure they are running as expected.
14 |
15 | ## Examples
16 |
17 | ### Basic Info
18 | Explore which scheduled tasks within your Kubernetes environment have failed. This allows for proactive troubleshooting and understanding of task scheduling and execution patterns.
19 |
20 | ```sql+postgres
21 | select
22 | name,
23 | namespace,
24 | uid,
25 | failed_jobs_history_limit,
26 | schedule,
27 | suspend
28 | from
29 | kubernetes_cronjob;
30 | ```
31 |
32 | ```sql+sqlite
33 | select
34 | name,
35 | namespace,
36 | uid,
37 | failed_jobs_history_limit,
38 | schedule,
39 | suspend
40 | from
41 | kubernetes_cronjob;
42 | ```
43 |
44 | ### Get list of containers and images for CronJobs
45 | Explore which CronJobs are running in your Kubernetes environment and identify the containers and images they use. This is useful for understanding the dependencies and configurations of your scheduled tasks, and can help in troubleshooting or optimizing resource usage.
46 |
47 | ```sql+postgres
48 | select
49 | name,
50 | namespace,
51 | jsonb_agg(elems.value -> 'name') as containers,
52 | jsonb_agg(elems.value -> 'image') as images
53 | from
54 | kubernetes_cronjob,
55 | jsonb_array_elements(job_template -> 'spec' -> 'template' -> 'spec' -> 'containers') as elems
56 | group by
57 | name,
58 | namespace;
59 | ```
60 |
61 | ```sql+sqlite
62 | select
63 | name,
64 | namespace,
65 | json_group_array(json_extract(elems.value, '$.name')) as containers,
66 | json_group_array(json_extract(elems.value, '$.image')) as images
67 | from
68 | kubernetes_cronjob,
69 | json_each(job_template, '$.spec.template.spec.containers') as elems
70 | group by
71 | name,
72 | namespace;
73 | ```
74 |
75 | ### List manifest resources
76 | Explore which scheduled tasks within your Kubernetes environment have a specified path. This can be useful to identify tasks that may be associated with certain applications or services, helping you to manage and monitor your resources more effectively.
77 |
78 | ```sql+postgres
79 | select
80 | name,
81 | namespace,
82 | uid,
83 | failed_jobs_history_limit,
84 | schedule,
85 | suspend,
86 | path
87 | from
88 | kubernetes_cronjob
89 | where
90 | path is not null;
91 | ```
92 |
93 | ```sql+sqlite
94 | select
95 | name,
96 | namespace,
97 | uid,
98 | failed_jobs_history_limit,
99 | schedule,
100 | suspend,
101 | path
102 | from
103 | kubernetes_cronjob
104 | where
105 | path is not null;
106 | ```
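
### List suspended CronJobs
An illustrative filter on the `suspend` column shown above; the SQLite form assumes the boolean is surfaced as 0/1.

```sql+postgres
select
  name,
  namespace,
  schedule,
  suspend
from
  kubernetes_cronjob
where
  suspend;
```

```sql+sqlite
select
  name,
  namespace,
  schedule,
  suspend
from
  kubernetes_cronjob
where
  suspend = 1;
```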
--------------------------------------------------------------------------------
/docs/tables/kubernetes_resource_quota.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_resource_quota - Query Kubernetes Resource Quotas using SQL"
3 | description: "Allows users to query Resource Quotas in Kubernetes, providing insights into resource usage and restrictions within a namespace."
4 | folder: "Resource Quota"
5 | ---
6 |
7 | # Table: kubernetes_resource_quota - Query Kubernetes Resource Quotas using SQL
8 |
9 | A Resource Quota in Kubernetes is a tool that administrators use to manage resources within a namespace. It sets hard limits on the amount of compute resources that can be used by a namespace in a Kubernetes cluster. This includes CPU and memory resources, the number of pods, services, volumes, and more.
10 |
11 | ## Table Usage Guide
12 |
13 | The `kubernetes_resource_quota` table provides insights into resource quotas within Kubernetes. As a Kubernetes administrator, you can use this table to explore quota-specific details, including resource usage and restrictions within a namespace. Utilize it to uncover information about resource quotas, such as those nearing their limit, and effectively manage resources within your Kubernetes cluster.
14 |
15 | ## Examples
16 |
17 | ### Basic Info
18 | Explore the basic information of your Kubernetes resource quotas to understand their allocation. This can help in managing and optimizing resource usage within your Kubernetes environment.
19 |
20 | ```sql+postgres
21 | select
22 | name,
23 | namespace,
24 | resource_version,
25 | creation_timestamp,
26 | jsonb_pretty(spec_hard) as spec_hard
27 | from
28 | kubernetes_resource_quota
29 | order by
30 | name;
31 | ```
32 |
33 | ```sql+sqlite
34 | select
35 | name,
36 | namespace,
37 | resource_version,
38 | creation_timestamp,
39 | spec_hard
40 | from
41 | kubernetes_resource_quota
42 | order by
43 | name;
44 | ```
45 |
46 | ### Get used pod details of namespaces
47 | Discover the segments that are consuming resources in your Kubernetes environment by identifying how many pods and services are currently being used within each namespace. This is beneficial for managing resource allocation and identifying potential areas of overuse or inefficiency.
48 |
49 | ```sql+postgres
50 | select
51 | name,
52 | namespace,
53 | status_used -> 'pods' as used_pods,
54 | status_used -> 'services' as used_services
55 | from
56 | kubernetes_resource_quota;
57 | ```
58 |
59 | ```sql+sqlite
60 | select
61 | name,
62 | namespace,
63 | json_extract(status_used, '$.pods') as used_pods,
64 | json_extract(status_used, '$.services') as used_services
65 | from
66 | kubernetes_resource_quota;
67 | ```
68 |
69 | ### List manifest resources
70 | Analyze the configuration of Kubernetes to identify resource quotas with specific paths. This is beneficial in managing resources efficiently by understanding their allocation and usage within your Kubernetes environment.
71 |
72 | ```sql+postgres
73 | select
74 | name,
75 | namespace,
76 | resource_version,
77 | jsonb_pretty(spec_hard) as spec_hard,
78 | path
79 | from
80 | kubernetes_resource_quota
81 | where
82 | path is not null
83 | order by
84 | name;
85 | ```
86 |
87 | ```sql+sqlite
88 | select
89 | name,
90 | namespace,
91 | resource_version,
92 | spec_hard,
93 | path
94 | from
95 | kubernetes_resource_quota
96 | where
97 | path is not null
98 | order by
99 | name;
100 | ```
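
### Compare hard pod limits with current usage
An illustrative query combining the `spec_hard` and `status_used` columns shown above to see how close each quota is to its pod limit.

```sql+postgres
select
  name,
  namespace,
  spec_hard ->> 'pods' as hard_pods,
  status_used ->> 'pods' as used_pods
from
  kubernetes_resource_quota
order by
  namespace,
  name;
```

```sql+sqlite
select
  name,
  namespace,
  json_extract(spec_hard, '$.pods') as hard_pods,
  json_extract(status_used, '$.pods') as used_pods
from
  kubernetes_resource_quota
order by
  namespace,
  name;
```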
--------------------------------------------------------------------------------
/docs/tables/kubernetes_endpoint.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_endpoint - Query Kubernetes Endpoints using SQL"
3 | description: "Allows users to query Kubernetes Endpoints, providing a detailed overview of each endpoint's configuration and status."
4 | folder: "Endpoint"
5 | ---
6 |
7 | # Table: kubernetes_endpoint - Query Kubernetes Endpoints using SQL
8 |
9 | Kubernetes Endpoints are part of the Kubernetes Service concept: each endpoint represents a real or virtual IP address and a port number that Kubernetes pods use to access services. An endpoint can be thought of as a network endpoint that can receive traffic. Endpoints are a fundamental part of the Kubernetes networking model, allowing pods to communicate with each other and with services outside the Kubernetes cluster.
10 |
11 | ## Table Usage Guide
12 |
13 | The `kubernetes_endpoint` table provides insights into endpoints within Kubernetes. As a DevOps engineer, you can explore details about each endpoint through this table, including its associated services, IP addresses, and ports. Use this table to understand the communication paths within your Kubernetes cluster, track the status of endpoints, and identify any potential networking issues.
14 |
15 | ## Examples
16 |
17 | ### Basic Info
18 | Explore which Kubernetes endpoints are currently active in your system. This can help you understand the communication points within your clusters and troubleshoot any networking issues.
19 |
20 | ```sql+postgres
21 | select
22 | name,
23 | namespace,
24 | subsets
25 | from
26 | kubernetes_endpoint;
27 | ```
28 |
29 | ```sql+sqlite
30 | select
31 | name,
32 | namespace,
33 | subsets
34 | from
35 | kubernetes_endpoint;
36 | ```
37 |
38 | ### Endpoint IP Info
39 | Determine the areas in which endpoint IP information, such as address, readiness status and protocol, is used in your Kubernetes environment. This can aid in network troubleshooting and enhancing security measures.
40 |
41 | ```sql+postgres
42 | select
43 | name,
44 | namespace,
45 | addr ->> 'ip' as address,
46 | nr_addr ->> 'ip' as not_ready_address,
47 | port -> 'port' as port,
48 | port ->> 'protocol' as protocol
49 | from
50 | kubernetes_endpoint,
51 | jsonb_array_elements(subsets) as subset
52 | left join jsonb_array_elements(subset -> 'addresses') as addr on true
53 | left join jsonb_array_elements(subset -> 'notReadyAddresses') as nr_addr on true
54 | left join jsonb_array_elements(subset -> 'ports') as port on true;
55 | ```
56 |
57 | ```sql+sqlite
58 | select
59 | kubernetes_endpoint.name,
60 | kubernetes_endpoint.namespace,
61 | json_extract(addr.value, '$.ip') as address,
62 | json_extract(nr_addr.value, '$.ip') as not_ready_address,
63 | json_extract(port.value, '$.port') as port,
64 | json_extract(port.value, '$.protocol') as protocol
65 | from
66 | kubernetes_endpoint,
67 | json_each(kubernetes_endpoint.subsets) as subset,
68 | json_each(json_extract(subset.value, '$.addresses')) as addr,
69 | json_each(json_extract(subset.value, '$.notReadyAddresses')) as nr_addr,
70 | json_each(json_extract(subset.value, '$.ports')) as port;
71 | ```
72 |
73 | ### List manifest resources
74 | Explore which Kubernetes endpoints have a specified path. This is useful to understand the distribution of resources within your Kubernetes environment.
75 |
76 | ```sql+postgres
77 | select
78 | name,
79 | namespace,
80 | subsets,
81 | path
82 | from
83 | kubernetes_endpoint
84 | where
85 | path is not null;
86 | ```
87 |
88 | ```sql+sqlite
89 | select
90 | name,
91 | namespace,
92 | subsets,
93 | path
94 | from
95 | kubernetes_endpoint
96 | where
97 | path is not null;
98 | ```
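
### List endpoints without any subsets
Illustrative only, using the `subsets` column shown above; a null `subsets` value often indicates that the backing service currently has no ready pods.

```sql+postgres
select
  name,
  namespace
from
  kubernetes_endpoint
where
  subsets is null;
```

```sql+sqlite
select
  name,
  namespace
from
  kubernetes_endpoint
where
  subsets is null;
```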
--------------------------------------------------------------------------------
/docs/tables/kubernetes_role.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_role - Query Kubernetes Roles using SQL"
3 | description: "Allows users to query Roles in Kubernetes, specifically the permissions and privileges assigned to a Role, providing insights into access control and security configurations."
4 | folder: "Role"
5 | ---
6 |
7 | # Table: kubernetes_role - Query Kubernetes Roles using SQL
8 |
9 | A Role in Kubernetes is a set of permissions that can be assigned to resources within a namespace. Roles dictate what actions are permitted and which resources those actions can be performed on. It is an integral part of Kubernetes' Role-Based Access Control (RBAC) system used to manage permissions and access to the Kubernetes API.
10 |
11 | ## Table Usage Guide
12 |
13 | The `kubernetes_role` table provides insights into Roles within Kubernetes RBAC. As a DevOps engineer, explore role-specific details through this table, including permissions, associated resources, and the namespaces they belong to. Utilize it to uncover information about roles, such as their access privileges, the resources they can interact with, and the namespaces they are active in.
14 |
15 | ## Examples
16 |
17 | ### Basic Info
18 | Explore the roles within your Kubernetes environment, including their creation time and associated rules, to gain insights into your system's configuration and organization. This can help you understand how roles are distributed and manage them effectively.
19 |
20 | ```sql+postgres
21 | select
22 | name,
23 | namespace,
24 | creation_timestamp,
25 | rules
26 | from
27 | kubernetes_role
28 | order by
29 | name;
30 | ```
31 |
32 | ```sql+sqlite
33 | select
34 | name,
35 | namespace,
36 | creation_timestamp,
37 | rules
38 | from
39 | kubernetes_role
40 | order by
41 | name;
42 | ```
43 |
44 | ### List rules and verbs for roles
45 | Explore which roles have specific permissions in your Kubernetes environment. This query helps in understanding the distribution of access rights, assisting in access control and security management.
46 |
47 | ```sql+postgres
48 | select
49 | name as role_name,
50 | rule ->> 'apiGroups' as api_groups,
51 | rule ->> 'resources' as resources,
52 | rule ->> 'nonResourceURLs' as non_resource_urls,
53 | rule ->> 'verbs' as verbs,
54 | rule ->> 'resourceNames' as resource_names
55 | from
56 | kubernetes_role,
57 | jsonb_array_elements(rules) as rule
58 | order by
59 | role_name,
60 | api_groups;
61 | ```
62 |
63 | ```sql+sqlite
64 | select
65 | name as role_name,
66 | json_extract(rule.value, '$.apiGroups') as api_groups,
67 | json_extract(rule.value, '$.resources') as resources,
68 | json_extract(rule.value, '$.nonResourceURLs') as non_resource_urls,
69 | json_extract(rule.value, '$.verbs') as verbs,
70 | json_extract(rule.value, '$.resourceNames') as resource_names
71 | from
72 | kubernetes_role,
73 | json_each(rules) as rule
74 | order by
75 | role_name,
76 | api_groups;
77 | ```
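
### List roles that grant all verbs
A `*` entry in a rule's `verbs` array grants every action on the matched resources, which is worth auditing. This is a minimal sketch built on the same `rules` expansion used above (the `*` convention comes from Kubernetes RBAC itself, not from an extra column of this table):

```sql+postgres
select
  name as role_name,
  namespace,
  rule ->> 'resources' as resources,
  rule ->> 'verbs' as verbs
from
  kubernetes_role,
  jsonb_array_elements(rules) as rule
where
  rule -> 'verbs' ? '*';
```

```sql+sqlite
select
  name as role_name,
  namespace,
  json_extract(rule.value, '$.resources') as resources,
  json_extract(rule.value, '$.verbs') as verbs
from
  kubernetes_role,
  json_each(rules) as rule
where
  json_extract(rule.value, '$.verbs') like '%"*"%';
```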
78 |
79 | ### List manifest resources
80 | Explore which Kubernetes roles have a defined path to better organize and manage your resources. This can help in identifying instances where roles may be improperly configured or misplaced.
81 |
82 | ```sql+postgres
83 | select
84 | name,
85 | namespace,
86 | rules,
87 | path
88 | from
89 | kubernetes_role
90 | where
91 | path is not null
92 | order by
93 | name;
94 | ```
95 |
96 | ```sql+sqlite
97 | select
98 | name,
99 | namespace,
100 | rules,
101 | path
102 | from
103 | kubernetes_role
104 | where
105 | path is not null
106 | order by
107 | name;
108 | ```
--------------------------------------------------------------------------------
/docs/tables/kubernetes_event.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_event - Query Kubernetes Events using SQL"
3 | description: "Allows users to query Kubernetes Events, specifically the details of events occurring within a Kubernetes system, providing insights into system behaviors and potential issues."
4 | folder: "Event"
5 | ---
6 |
7 | # Table: kubernetes_event - Query Kubernetes Events using SQL
8 |
9 | Kubernetes Events are objects that provide insight into what is happening inside a cluster, such as what decisions were made by the scheduler or why some pods were evicted from a node. Events are a Kubernetes resource type that is created automatically when certain situations occur, and they give developers a tool to understand the activity of the system.
10 |
11 | ## Table Usage Guide
12 |
13 | The `kubernetes_event` table provides insights into events within a Kubernetes system. As a DevOps engineer or system administrator, explore event-specific details through this table, including the involved object, source, message, and related metadata. Utilize it to monitor system behaviors, troubleshoot issues, and understand the state changes in the workloads running on the cluster.
14 |
15 | ## Examples
16 |
17 | ### Basic Info
18 | Explore the recent events in your Kubernetes environment to understand the status and health of your objects. This query can help you identify any issues or anomalies, providing valuable insights for troubleshooting and maintenance.
19 |
20 | ```sql+postgres
21 | select
22 | namespace,
23 | last_timestamp,
24 | type,
25 | reason,
26 | concat(involved_object ->> 'kind', '/', involved_object ->> 'name') as object,
27 | message
28 | from
29 | kubernetes_event;
30 | ```
31 |
32 | ```sql+sqlite
33 | select
34 | namespace,
35 | last_timestamp,
36 | type,
37 | reason,
38 |   json_extract(involved_object, '$.kind') || '/' || json_extract(involved_object, '$.name') as object,
39 | message
40 | from
41 | kubernetes_event;
42 | ```
43 |
44 | ### List warning events by last timestamp
45 | Identify instances where warning events have occurred in your Kubernetes environment. This query is useful for tracking and understanding the chronology of these events to manage and troubleshoot issues effectively.
46 |
47 | ```sql+postgres
48 | select
49 | namespace,
50 | last_timestamp,
51 | type,
52 | reason,
53 | concat(involved_object ->> 'kind', '/', involved_object ->> 'name') as object,
54 | message
55 | from
56 | kubernetes_event
57 | where
58 | type = 'Warning'
59 | order by
60 | namespace,
61 | last_timestamp;
62 | ```
63 |
64 | ```sql+sqlite
65 | select
66 | namespace,
67 | last_timestamp,
68 | type,
69 | reason,
70 |   json_extract(involved_object, '$.kind') || '/' || json_extract(involved_object, '$.name') as object,
71 | message
72 | from
73 | kubernetes_event
74 | where
75 | type = 'Warning'
76 | order by
77 | namespace,
78 | last_timestamp;
79 | ```
80 |
81 | ### List manifest resources
82 | Explore which Kubernetes events have a defined path to gain insights into the health and status of your Kubernetes resources. This can help identify any potential issues or anomalies within your system.
83 |
84 | ```sql+postgres
85 | select
86 | namespace,
87 | type,
88 | reason,
89 | concat(involved_object ->> 'kind', '/', involved_object ->> 'name') as object,
90 | message,
91 | path
92 | from
93 | kubernetes_event
94 | where
95 | path is not null;
96 | ```
97 |
98 | ```sql+sqlite
99 | select
100 | namespace,
101 | type,
102 | reason,
103 |   json_extract(involved_object, '$.kind') || '/' || json_extract(involved_object, '$.name') as object,
104 | message,
105 | path
106 | from
107 | kubernetes_event
108 | where
109 | path is not null;
110 | ```
--------------------------------------------------------------------------------
/docs/tables/kubernetes_endpoint_slice.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_endpoint_slice - Query Kubernetes Endpoint Slices using SQL"
3 | description: "Allows users to query Kubernetes Endpoint Slices, providing insights into the set of endpoints that a service may route traffic to."
4 | folder: "Endpoint"
5 | ---
6 |
7 | # Table: kubernetes_endpoint_slice - Query Kubernetes Endpoint Slices using SQL
8 |
9 | Kubernetes Endpoint Slices are a scalable and extensible approach to network traffic routing. They provide a simple way to track network endpoints within a Kubernetes cluster. Endpoint Slices group network endpoints together, allowing for efficient and flexible traffic routing.
10 |
11 | ## Table Usage Guide
12 |
13 | The `kubernetes_endpoint_slice` table provides insights into the Endpoint Slices within a Kubernetes cluster. As a network engineer or DevOps professional, explore Endpoint Slice-specific details through this table, including associated services, ports, and addresses. Utilize it to manage and optimize network traffic routing within your Kubernetes environment.
14 |
15 | ## Examples
16 |
17 | ### Basic Info
18 | Explore the configuration of your Kubernetes environment by identifying its various endpoints, their corresponding addresses and ports. This can provide valuable insights into the network architecture and communication within your Kubernetes cluster.
19 |
20 | ```sql+postgres
21 | select
22 | name,
23 | namespace,
24 | generate_name as endpoint_name,
25 | address_type,
26 | endpoints,
27 | ports
28 | from
29 | kubernetes_endpoint_slice;
30 | ```
31 |
32 | ```sql+sqlite
33 | select
34 | name,
35 | namespace,
36 | generate_name as endpoint_name,
37 | address_type,
38 | endpoints,
39 | ports
40 | from
41 | kubernetes_endpoint_slice;
42 | ```
43 |
44 | ### Endpoint Slice IP Information
45 | Analyze the settings to understand the IP information for endpoint slices in a Kubernetes environment. This can be beneficial in identifying potential networking issues or inconsistencies within your application's communication paths.
46 |
47 | ```sql+postgres
48 | select
49 | name,
50 | namespace,
51 | addr,
52 | port -> 'port' as port,
53 | port ->> 'protocol' as protocol
54 | from
55 | kubernetes_endpoint_slice,
56 | jsonb_array_elements(endpoints) as ep,
57 | jsonb_array_elements(ep -> 'addresses') as addr,
58 | jsonb_array_elements(ports) as port;
59 | ```
60 |
61 | ```sql+sqlite
62 | select
63 | name,
64 | namespace,
65 | addr.value as addr,
66 | json_extract(port.value, '$.port') as port,
67 | json_extract(port.value, '$.protocol') as protocol
68 | from
69 | kubernetes_endpoint_slice,
70 | json_each(endpoints) as ep,
71 | json_each(json_extract(ep.value, '$.addresses')) as addr,
72 | json_each(ports) as port;
73 | ```
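
### Filter endpoint slices by address type
Each endpoint slice carries a single address type. As a sketch, assuming the standard Kubernetes values of `IPv4`, `IPv6`, and `FQDN` in the `address_type` column used above, you can focus on IPv4 slices:

```sql+postgres
select
  name,
  namespace,
  address_type,
  endpoints,
  ports
from
  kubernetes_endpoint_slice
where
  address_type = 'IPv4';
```

```sql+sqlite
select
  name,
  namespace,
  address_type,
  endpoints,
  ports
from
  kubernetes_endpoint_slice
where
  address_type = 'IPv4';
```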
74 |
75 | ### List manifest resources
76 | Explore the various manifest resources within a Kubernetes cluster, specifically identifying those with a defined path. This can help in understanding the distribution and configuration of resources, which is vital for efficient cluster management and troubleshooting.
77 |
78 | ```sql+postgres
79 | select
80 | name,
81 | namespace,
82 | generate_name as endpoint_name,
83 | address_type,
84 | endpoints,
85 | ports,
86 | path
87 | from
88 | kubernetes_endpoint_slice
89 | where
90 | path is not null;
91 | ```
92 |
93 | ```sql+sqlite
94 | select
95 | name,
96 | namespace,
97 | generate_name as endpoint_name,
98 | address_type,
99 | endpoints,
100 | ports,
101 | path
102 | from
103 | kubernetes_endpoint_slice
104 | where
105 | path is not null;
106 | ```
--------------------------------------------------------------------------------
/kubernetes/table_helm_chart.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
7 | "github.com/turbot/steampipe-plugin-sdk/v5/plugin"
8 | "github.com/turbot/steampipe-plugin-sdk/v5/plugin/transform"
9 |
10 | "helm.sh/helm/v3/pkg/chart"
11 | )
12 |
13 | //// TABLE DEFINITION
14 |
15 | func tableHelmChart(ctx context.Context) *plugin.Table {
16 | return &plugin.Table{
17 | Name: "helm_chart",
18 | Description: "Lists the configuration settings from the configured charts",
19 | List: &plugin.ListConfig{
20 | Hydrate: listHelmCharts,
21 | },
22 | Columns: []*plugin.Column{
23 | {Name: "name", Type: proto.ColumnType_STRING, Description: "The name of the chart."},
24 | {Name: "api_version", Type: proto.ColumnType_STRING, Description: "The API Version of the chart.", Transform: transform.FromField("APIVersion")},
25 | {Name: "version", Type: proto.ColumnType_STRING, Description: "A SemVer 2 conformant version string of the chart."},
26 | {Name: "app_version", Type: proto.ColumnType_STRING, Description: "The version of the application enclosed inside of this chart."},
27 | {Name: "description", Type: proto.ColumnType_STRING, Description: "A one-sentence description of the chart."},
28 | {Name: "deprecated", Type: proto.ColumnType_BOOL, Description: "Indicates whether or not this chart is deprecated."},
29 | {Name: "home", Type: proto.ColumnType_STRING, Description: "The URL to a relevant project page, git repo, or contact person."},
30 | {Name: "icon", Type: proto.ColumnType_STRING, Description: "The URL to an icon file."},
31 | {Name: "condition", Type: proto.ColumnType_STRING, Description: "The condition to check to enable chart."},
32 | {Name: "tags", Type: proto.ColumnType_STRING, Description: "The tags to check to enable chart."},
33 | {Name: "kube_version", Type: proto.ColumnType_STRING, Description: "A SemVer constraint specifying the version of Kubernetes required."},
34 | {Name: "type", Type: proto.ColumnType_STRING, Description: "Specifies the chart type. Possible values: application, or library."},
35 | 			{Name: "sources", Type: proto.ColumnType_JSON, Description: "A list of URLs to the source code of this chart."},
36 | {Name: "keywords", Type: proto.ColumnType_JSON, Description: "A list of string keywords."},
37 | {Name: "maintainers", Type: proto.ColumnType_JSON, Description: "A list of name and URL/email address combinations for the maintainer(s)."},
38 | {Name: "annotations", Type: proto.ColumnType_JSON, Description: "Annotations are additional mappings uninterpreted by Helm, made available for inspection by other applications."},
39 | {Name: "dependencies", Type: proto.ColumnType_JSON, Description: "Dependencies are a list of dependencies for a chart."},
40 | {Name: "chart_path", Type: proto.ColumnType_STRING, Description: "The path to the directory where the chart is located."},
41 | },
42 | }
43 | }
44 |
45 | type HelmChartInfo struct {
46 | chart.Metadata
47 | ChartPath string
48 | }
49 |
50 | //// LIST FUNCTION
51 |
52 | func listHelmCharts(ctx context.Context, d *plugin.QueryData, _ *plugin.HydrateData) (interface{}, error) {
53 | // Get the list of unique helm charts from the charts provided in the config
54 | charts, err := getUniqueHelmCharts(ctx, d)
55 | if err != nil {
56 | plugin.Logger(ctx).Error("listHelmCharts", "failed to list charts", err)
57 | return nil, err
58 | }
59 |
60 | for _, chart := range charts {
61 | d.StreamListItem(ctx, HelmChartInfo{*chart.Chart.Metadata, chart.Path})
62 |
63 | // Context can be cancelled due to manual cancellation or the limit has been hit
64 | if d.RowsRemaining(ctx) == 0 {
65 | return nil, nil
66 | }
67 | }
68 |
69 | return nil, nil
70 | }
71 |
--------------------------------------------------------------------------------
/docs/tables/kubernetes_stateful_set.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_stateful_set - Query Kubernetes Stateful Sets using SQL"
3 | description: "Allows users to query Kubernetes Stateful Sets, specifically providing details about the stateful applications running in a Kubernetes environment."
4 | folder: "Stateful Set"
5 | ---
6 |
7 | # Table: kubernetes_stateful_set - Query Kubernetes Stateful Sets using SQL
8 |
9 | A Kubernetes Stateful Set is a workload API object that manages stateful applications. It is used to manage applications which require one or more of the following: stable, unique network identifiers, stable, persistent storage, and ordered, graceful deployment and scaling. Stateful Sets are valuable for applications that require stable network identity or stable storage, like databases.
10 |
11 | ## Table Usage Guide
12 |
13 | The `kubernetes_stateful_set` table provides insights into the stateful applications running in a Kubernetes environment. As a DevOps engineer, explore details of these applications through this table, including network identifiers, persistent storage, and deployment details. Utilize it to manage and monitor stateful applications, such as databases, that require stable network identity or persistent storage.
14 |
15 | ## Examples
16 |
17 | ### Basic Info - `kubectl get statefulsets --all-namespaces` columns
18 | Explore the organization and status of your Kubernetes stateful sets by identifying their names, associated services, and the number of replicas. This query also allows you to assess the age of these sets, helping you manage system resources and plan for updates or decommissioning.
19 |
20 | ```sql+postgres
21 | select
22 | name,
23 | namespace,
24 | service_name,
25 | replicas,
26 | age(current_timestamp, creation_timestamp)
27 | from
28 | kubernetes_stateful_set
29 | order by
30 | namespace,
31 | name;
32 | ```
33 |
34 | ```sql+sqlite
35 | select
36 | name,
37 | namespace,
38 | service_name,
39 | replicas,
40 | strftime('%s', 'now') - strftime('%s', creation_timestamp) as age
41 | from
42 | kubernetes_stateful_set
43 | order by
44 | namespace,
45 | name;
46 | ```
47 |
48 | ### List stateful sets that require manual update when the object's configuration is changed
49 | Explore which stateful sets in your Kubernetes environment require manual updates whenever there are changes in the object's configuration. This is useful for ensuring optimal management and timely updates of stateful sets, particularly those with an 'OnDelete' update strategy.
50 |
51 | ```sql+postgres
52 | select
53 | name,
54 | namespace,
55 | service_name,
56 | update_strategy ->> 'type' as update_strategy_type
57 | from
58 | kubernetes_stateful_set
59 | where
60 | update_strategy ->> 'type' = 'OnDelete';
61 | ```
62 |
63 | ```sql+sqlite
64 | select
65 | name,
66 | namespace,
67 | service_name,
68 | json_extract(update_strategy, '$.type') as update_strategy_type
69 | from
70 | kubernetes_stateful_set
71 | where
72 | json_extract(update_strategy, '$.type') = 'OnDelete';
73 | ```
74 |
75 | ### List manifest resources
76 | Explore which stateful sets defined in your Kubernetes manifest files have a specified path. This can help you track where stateful workloads are declared and identify any potential configuration issues.
77 |
78 | ```sql+postgres
79 | select
80 | name,
81 | namespace,
82 | service_name,
83 | replicas,
84 | path
85 | from
86 | kubernetes_stateful_set
87 | where
88 | path is not null
89 | order by
90 | namespace,
91 | name;
92 | ```
93 |
94 | ```sql+sqlite
95 | select
96 | name,
97 | namespace,
98 | service_name,
99 | replicas,
100 | path
101 | from
102 | kubernetes_stateful_set
103 | where
104 | path is not null
105 | order by
106 | namespace,
107 | name;
108 | ```
--------------------------------------------------------------------------------
/kubernetes/table_helm_value.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import (
4 | "context"
5 | "os"
6 | "path"
7 |
8 | "github.com/turbot/steampipe-plugin-sdk/v5/grpc/proto"
9 | "github.com/turbot/steampipe-plugin-sdk/v5/plugin"
10 | "github.com/turbot/steampipe-plugin-sdk/v5/plugin/transform"
11 | "gopkg.in/yaml.v3"
12 | )
13 |
14 | //// TABLE DEFINITION
15 |
16 | func tableHelmValue(ctx context.Context) *plugin.Table {
17 | return &plugin.Table{
18 | Name: "helm_value",
19 | 		Description: "Lists the values from the chart's values.yaml file, as well as the values listed in the configured values override files",
20 | List: &plugin.ListConfig{
21 | Hydrate: listHelmValues,
22 | },
23 | Columns: []*plugin.Column{
24 | 			{Name: "path", Type: proto.ColumnType_STRING, Description: "Specifies the path of the values file."},
25 | {Name: "key_path", Type: proto.ColumnType_LTREE, Transform: transform.FromField("Key").Transform(keysToSnakeCase), Description: "Specifies full path of a key in YML file."},
26 | {Name: "value", Type: proto.ColumnType_STRING, Description: "Specifies the value of the corresponding key."},
27 | {Name: "keys", Type: proto.ColumnType_JSON, Transform: transform.FromField("Key"), Description: "The array representation of path of a key."},
28 | {Name: "start_line", Type: proto.ColumnType_INT, Description: "Specifies the line number where the value is located."},
29 | {Name: "start_column", Type: proto.ColumnType_INT, Description: "Specifies the starting column of the value."},
30 | {Name: "pre_comments", Type: proto.ColumnType_JSON, Description: "Specifies the comments added above a key."},
31 | {Name: "head_comment", Type: proto.ColumnType_STRING, Description: "Specifies the comment in the lines preceding the node and not separated by an empty line."},
32 | {Name: "line_comment", Type: proto.ColumnType_STRING, Description: "Specifies the comment at the end of the line where the node is in."},
33 | {Name: "foot_comment", Type: proto.ColumnType_STRING, Description: "Specifies the comment following the node and before empty lines."},
34 | },
35 | }
36 | }
37 |
38 | //// LIST FUNCTION
39 |
40 | func listHelmValues(ctx context.Context, d *plugin.QueryData, _ *plugin.HydrateData) (interface{}, error) {
41 | // Stream all values from chart's values.yaml file
42 | charts, err := getUniqueHelmCharts(ctx, d)
43 | if err != nil {
44 | return nil, err
45 | }
46 |
47 | var allValues Rows
48 | for _, chart := range charts {
49 | defaultValues, err := getRows(ctx, chart.Chart.Values)
50 | if err != nil {
51 | plugin.Logger(ctx).Error("helm_value.listHelmValues", "parse_error", err, "path", chart.Path)
52 | return nil, err
53 | }
54 |
55 | for _, r := range defaultValues {
56 | r.Path = path.Join(chart.Path, "values.yaml")
57 | allValues = append(allValues, r)
58 | }
59 | }
60 |
61 | // Stream values from the unique set of override value files provided in the config
62 | overrideValueFiles := getUniqueValueFilesFromConfig(ctx, d)
63 | for _, path := range overrideValueFiles {
64 | content, err := os.ReadFile(path)
65 | if err != nil {
66 | return nil, err
67 | }
68 |
69 | var values map[string]interface{}
70 | err = yaml.Unmarshal(content, &values)
71 | if err != nil {
72 | return nil, err
73 | }
74 |
75 | overrideValues, err := getRows(ctx, values)
76 | if err != nil {
77 | return nil, err
78 | }
79 |
80 | for _, r := range overrideValues {
81 | r.Path = path
82 | allValues = append(allValues, r)
83 | }
84 | }
85 |
86 | // Stream all the values from chart's default values.yaml as well as the value override files configured in the config
87 | for _, v := range allValues {
88 | d.StreamListItem(ctx, v)
89 |
90 | // Context can be cancelled due to manual cancellation or the limit has been hit
91 | if d.RowsRemaining(ctx) == 0 {
92 | return nil, nil
93 | }
94 | }
95 |
96 | return nil, nil
97 | }
98 |
--------------------------------------------------------------------------------
/docs/tables/kubernetes_secret.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_secret - Query Kubernetes Secrets using SQL"
3 | description: "Allows users to query Kubernetes Secrets, providing insights into the sensitive information like passwords, OAuth tokens, and ssh keys that are stored."
4 | folder: "Secret"
5 | ---
6 |
7 | # Table: kubernetes_secret - Query Kubernetes Secrets using SQL
8 |
9 | A Kubernetes Secret is a resource that manages sensitive data such as passwords, OAuth tokens, and SSH keys. It provides a more secure and flexible way to manage sensitive data in a Kubernetes cluster than the alternative of putting this information directly into a pod specification or a Docker image. Kubernetes Secrets decouple sensitive content from the pod specification and isolate the visibility of such sensitive information to just the system components that require access to it.
10 |
11 | ## Table Usage Guide
12 |
13 | The `kubernetes_secret` table provides insights into Kubernetes Secrets within a Kubernetes cluster. As a DevOps engineer, explore secret-specific details through this table, including the type of secret, the namespace it belongs to, and associated metadata. Utilize it to uncover information about secrets, such as those that are not in use, those that are exposed, or those that are stored in a non-compliant manner.
14 |
15 | ## Examples
16 |
17 | ### Basic Info
18 | Explore the age and details of various Kubernetes secrets to understand their creation and configuration for better resource management and security. This could be particularly useful in identifying outdated or potentially vulnerable secrets that may need updating or removal.
19 |
20 | ```sql+postgres
21 | select
22 | name,
23 | namespace,
24 | data.key,
25 | data.value,
26 | age(current_timestamp, creation_timestamp)
27 | from
28 | kubernetes_secret,
29 | jsonb_each(data) as data
30 | order by
31 | namespace,
32 | name;
33 | ```
34 |
35 | ```sql+sqlite
36 | select
37 | name,
38 | namespace,
39 | data.key,
40 | data.value,
41 | (julianday('now') - julianday(creation_timestamp)) * 24 * 60 * 60
42 | from
43 | kubernetes_secret,
44 | json_each(data) as data
45 | order by
46 | namespace,
47 | name;
48 | ```
49 |
50 | ### List and base64 decode secret values
51 | Explore the decoded values of secrets in your Kubernetes environment to better understand the information they hold. This can be particularly useful for troubleshooting or auditing purposes.
52 |
53 | ```sql+postgres
54 | select
55 | name,
56 | namespace,
57 | data.key,
58 | decode(data.value, 'base64') as decoded_data,
59 | age(current_timestamp, creation_timestamp)
60 | from
61 | kubernetes_secret,
62 | jsonb_each_text(data) as data
63 | order by
64 | namespace,
65 | name;
66 | ```
67 |
68 | ```sql+sqlite
69 | select
70 | name,
71 | namespace,
72 | data.key,
73 |   data.value as decoded_data, -- SQLite has no built-in base64 decode; the value is returned as stored (base64-encoded)
74 | julianday('now') - julianday(creation_timestamp)
75 | from
76 | kubernetes_secret,
77 | json_each(data) as data
78 | order by
79 | namespace,
80 | name;
81 | ```
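
### Count secrets by type
The usage guide notes that each secret has a type (for example `Opaque` or `kubernetes.io/tls`). As a sketch, assuming the table exposes that field as a `type` column, you can get a quick inventory:

```sql+postgres
select
  type,
  count(*) as total
from
  kubernetes_secret
group by
  type
order by
  total desc;
```

```sql+sqlite
select
  type,
  count(*) as total
from
  kubernetes_secret
group by
  type
order by
  total desc;
```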
82 |
83 | ### List manifest resources
84 | Explore the secret data associated with each resource defined in your manifest files. This can help you assess the elements within your system configuration and identify potential areas of concern.
85 |
86 | ```sql+postgres
87 | select
88 | name,
89 | namespace,
90 | data.key,
91 | data.value,
92 | path
93 | from
94 | kubernetes_secret,
95 | jsonb_each(data) as data
96 | where
97 | path is not null
98 | order by
99 | namespace,
100 | name;
101 | ```
102 |
103 | ```sql+sqlite
104 | select
105 | name,
106 | namespace,
107 | data.key,
108 | data.value,
109 | path
110 | from
111 | kubernetes_secret,
112 | json_each(data) as data
113 | where
114 | path is not null
115 | order by
116 | namespace,
117 | name;
118 | ```
--------------------------------------------------------------------------------
/docs/tables/kubernetes_persistent_volume.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_persistent_volume - Query Kubernetes Persistent Volumes using SQL"
3 | description: "Allows users to query Kubernetes Persistent Volumes, providing insights into the storage resources available in a Kubernetes cluster."
4 | folder: "Persistent Volume"
5 | ---
6 |
7 | # Table: kubernetes_persistent_volume - Query Kubernetes Persistent Volumes using SQL
8 |
9 | A Kubernetes Persistent Volume (PV) is a piece of storage in the cluster that has been provisioned by an administrator or dynamically provisioned using Storage Classes. PVs are volume plugins like Volumes, but have a lifecycle independent of any individual Pod that uses the PV. These resources allow Pods to store data that can survive the lifecycle of a Pod.
10 |
11 | ## Table Usage Guide
12 |
13 | The `kubernetes_persistent_volume` table provides insights into persistent volumes within Kubernetes. As a DevOps engineer, explore volume-specific details through this table, including storage capacity, access modes, and associated metadata. Utilize it to uncover information about volumes, such as those with certain storage classes, the status of volumes, and the reclaim policy set for volumes.
14 |
15 | ## Examples
16 |
17 | ### Basic Info
18 | Explore the status and capacity of your persistent storage volumes within your Kubernetes environment. This allows you to manage your storage resources effectively and plan for future capacity needs.
19 |
20 | ```sql+postgres
21 | select
22 | name,
23 | access_modes,
24 | storage_class,
25 | capacity ->> 'storage' as storage_capacity,
26 | creation_timestamp,
27 | persistent_volume_reclaim_policy,
28 | phase as status,
29 | volume_mode,
30 | age(current_timestamp, creation_timestamp)
31 | from
32 | kubernetes_persistent_volume;
33 | ```
34 |
35 | ```sql+sqlite
36 | select
37 | name,
38 | access_modes,
39 | storage_class,
40 | json_extract(capacity, '$.storage') as storage_capacity,
41 | creation_timestamp,
42 | persistent_volume_reclaim_policy,
43 | phase as status,
44 | volume_mode,
45 | (julianday('now') - julianday(creation_timestamp)) * 24 * 60 * 60 as age
46 | from
47 | kubernetes_persistent_volume;
48 | ```
49 |
50 | ### Get hostpath details for the volume
51 | Explore the details of your persistent volume's hostpath in your Kubernetes setup. This can help in understanding the type and path associated with your volume, which is crucial for managing and troubleshooting your storage configuration.
52 |
53 | ```sql+postgres
54 | select
55 | name,
56 | persistent_volume_source -> 'hostPath' ->> 'path' as path,
57 | persistent_volume_source -> 'hostPath' ->> 'type' as type
58 | from
59 | kubernetes_persistent_volume;
60 | ```
61 |
62 | ```sql+sqlite
63 | select
64 | name,
65 | json_extract(persistent_volume_source, '$.hostPath.path') as path,
66 | json_extract(persistent_volume_source, '$.hostPath.type') as type
67 | from
68 | kubernetes_persistent_volume;
69 | ```
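
### List volumes that are not bound
Since the `phase` column reflects the volume lifecycle, filtering out bound volumes is a quick way to spot unused or problematic ones. This is a sketch that assumes the standard Kubernetes phases (`Available`, `Bound`, `Released`, `Failed`):

```sql+postgres
select
  name,
  storage_class,
  persistent_volume_reclaim_policy,
  phase as status
from
  kubernetes_persistent_volume
where
  phase <> 'Bound';
```

```sql+sqlite
select
  name,
  storage_class,
  persistent_volume_reclaim_policy,
  phase as status
from
  kubernetes_persistent_volume
where
  phase <> 'Bound';
```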
70 |
71 | ### List manifest resources
72 | Explore the various resources within your Kubernetes persistent volumes, focusing on those that have a specified path. This allows you to assess storage capacities, access modes, and reclaim policies to better manage your Kubernetes environment.
73 |
74 | ```sql+postgres
75 | select
76 | name,
77 | access_modes,
78 | storage_class,
79 | capacity ->> 'storage' as storage_capacity,
80 | persistent_volume_reclaim_policy,
81 | phase as status,
82 | volume_mode,
83 | path
84 | from
85 | kubernetes_persistent_volume
86 | where
87 | path is not null;
88 | ```
89 |
90 | ```sql+sqlite
91 | select
92 | name,
93 | access_modes,
94 | storage_class,
95 | json_extract(capacity, '$.storage') as storage_capacity,
96 | persistent_volume_reclaim_policy,
97 | phase as status,
98 | volume_mode,
99 | path
100 | from
101 | kubernetes_persistent_volume
102 | where
103 | path is not null;
104 | ```
--------------------------------------------------------------------------------
/docs/tables/kubernetes_pod_disruption_budget.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_pod_disruption_budget - Query Kubernetes Pod Disruption Budgets using SQL"
3 | description: "Allows users to query Kubernetes Pod Disruption Budgets, specifically providing information about the minimum available pods and selector details, offering insights into the disruption allowance of the pods."
4 | folder: "Pod"
5 | ---
6 |
7 | # Table: kubernetes_pod_disruption_budget - Query Kubernetes Pod Disruption Budgets using SQL
8 |
9 | A Kubernetes Pod Disruption Budget (PDB) is a feature that allows a Kubernetes user to specify how many replicas an application can tolerate losing, relative to how many it is intended to have. It defines the minimum number of pods that must remain available for an application during a voluntary disruption. A PDB also provides a way to limit disruptions to your application while the Kubernetes cluster manager balances the needs of your workloads.
10 |
11 | ## Table Usage Guide
12 |
13 | The `kubernetes_pod_disruption_budget` table provides insights into the Pod Disruption Budgets within Kubernetes. As a DevOps engineer, explore details through this table, including the minimum available pods, selector details, and associated metadata. Utilize it to uncover information about the disruption allowance of the pods, such as the minimum number of pods an application can have, and the details of the selectors.
14 |
15 | ## Examples
16 |
17 | ### Basic info
18 | Explore the minimum and maximum availability of resources within your Kubernetes environment. This query helps in managing resource allocation and ensuring smooth operation by identifying potential disruption areas.
19 |
20 | ```sql+postgres
21 | select
22 | name,
23 | namespace,
24 | min_available,
25 | max_unavailable,
26 | selector
27 | from
28 | kubernetes_pod_disruption_budget
29 | order by
30 | namespace,
31 | name;
32 | ```
33 |
34 | ```sql+sqlite
35 | select
36 | name,
37 | namespace,
38 | min_available,
39 | max_unavailable,
40 | selector
41 | from
42 | kubernetes_pod_disruption_budget
43 | order by
44 | namespace,
45 | name;
46 | ```
47 |
48 | ### List deployments and their matching PDB
49 | Analyze the settings to understand the relationship between different deployments and their corresponding Pod Disruption Budgets (PDB) in a Kubernetes environment. This could be useful to ensure that the deployments are properly configured to handle disruptions, thereby enhancing system resilience.
50 |
51 | ```sql+postgres
52 | select
53 | d.namespace,
54 | d.name,
55 | min_available,
56 | replicas
57 | from
58 | kubernetes_pod_disruption_budget pdb
59 | inner join
60 | kubernetes_deployment d
61 | on d.selector = pdb.selector
62 | and d.namespace = pdb.namespace
63 | order by
64 | d.namespace,
65 | d.name;
66 | ```
67 |
68 | ```sql+sqlite
69 | select
70 | d.namespace,
71 | d.name,
72 | min_available,
73 | replicas
74 | from
75 | kubernetes_pod_disruption_budget as pdb
76 | join
77 | kubernetes_deployment as d
78 | on d.selector = pdb.selector
79 | and d.namespace = pdb.namespace
80 | order by
81 | d.namespace,
82 | d.name;
83 | ```
84 |
85 | ### List manifest resources
86 | Explore which Kubernetes pod disruption budgets are available, focusing on those with a specified path. This helps in managing the application availability during voluntary disruptions.
87 |
88 | ```sql+postgres
89 | select
90 | name,
91 | namespace,
92 | min_available,
93 | max_unavailable,
94 | selector,
95 | path
96 | from
97 | kubernetes_pod_disruption_budget
98 | where
99 | path is not null
100 | order by
101 | namespace,
102 | name;
103 | ```
104 |
105 | ```sql+sqlite
106 | select
107 | name,
108 | namespace,
109 | min_available,
110 | max_unavailable,
111 | selector,
112 | path
113 | from
114 | kubernetes_pod_disruption_budget
115 | where
116 | path is not null
117 | order by
118 | namespace,
119 | name;
120 | ```
--------------------------------------------------------------------------------
/docs/tables/kubernetes_replication_controller.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_replication_controller - Query Kubernetes Replication Controllers using SQL"
3 | description: "Allows users to query Kubernetes Replication Controllers, providing insights into the status, configuration, and specifications of these controllers within a Kubernetes environment."
4 | folder: "Replication Controller"
5 | ---
6 |
7 | # Table: kubernetes_replication_controller - Query Kubernetes Replication Controllers using SQL
8 |
9 | Kubernetes Replication Controllers are a core component of Kubernetes that ensure a specified number of pod replicas are running at any given time. They are particularly useful for stateless applications where more instances can be easily created or destroyed. Replication Controllers build on bare Pods by adding life-cycle control, system introspection, and self-healing mechanisms.
10 |
11 | ## Table Usage Guide
12 |
13 | The `kubernetes_replication_controller` table provides insights into Replication Controllers within Kubernetes. As a DevOps engineer, you can explore controller-specific details through this table, including its status, configuration, and specifications. Utilize it to manage and monitor the state of your Kubernetes environment, ensuring the desired number of pod replicas are always running.
14 |
15 | ## Examples
16 |
17 | ### Basic Info
18 | Explore the status of your Kubernetes replication controllers to understand the current state of your system. This can help you assess the number of desired, current, and ready replicas, and determine the age and selector details of each controller.
19 |
20 | ```sql+postgres
21 | select
22 | name,
23 | namespace,
24 | replicas as desired,
25 | status_replicas as current,
26 | ready_replicas as ready,
27 | age(current_timestamp, creation_timestamp),
28 | selector
29 | from
30 | kubernetes_replication_controller;
31 | ```
32 |
33 | ```sql+sqlite
34 | select
35 | name,
36 | namespace,
37 | replicas as desired,
38 | status_replicas as current,
39 | ready_replicas as ready,
40 | (julianday('now') - julianday(creation_timestamp)) as age,
41 | selector
42 | from
43 | kubernetes_replication_controller;
44 | ```
45 |
46 | ### Get details of containers and image
47 | Explore the intricacies of your Kubernetes replication controllers by identifying the associated containers and images. This enables you to better understand the structure of your deployment, facilitating more effective management and troubleshooting.
48 |
49 | ```sql+postgres
50 | select
51 | name,
52 | namespace,
53 | jsonb_agg(container.value -> 'name') as containers,
54 | jsonb_agg(container.value -> 'image') as images
55 | from
56 | kubernetes_replication_controller,
57 | jsonb_array_elements(template -> 'spec' -> 'containers') as container
58 | group by
59 | name,
60 | namespace;
61 | ```
62 |
63 | ```sql+sqlite
64 | select
65 | name,
66 | namespace,
67 | json_group_array(json_extract(container.value, '$.name')) as containers,
68 | json_group_array(json_extract(container.value, '$.image')) as images
69 | from
70 | kubernetes_replication_controller,
71 | json_each(json_extract(template, '$.spec.containers')) as container
72 | group by
73 | name,
74 | namespace;
75 | ```
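
### List replication controllers with fewer ready replicas than desired
Because a Replication Controller's job is to keep the desired number of replicas running, comparing the desired and ready counts is a quick health check. A minimal sketch using the `replicas` and `ready_replicas` columns shown in the basic info example:

```sql+postgres
select
  name,
  namespace,
  replicas as desired,
  coalesce(ready_replicas, 0) as ready
from
  kubernetes_replication_controller
where
  coalesce(ready_replicas, 0) < replicas;
```

```sql+sqlite
select
  name,
  namespace,
  replicas as desired,
  coalesce(ready_replicas, 0) as ready
from
  kubernetes_replication_controller
where
  coalesce(ready_replicas, 0) < replicas;
```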
76 |
77 | ### List manifest resources
78 | Explore the Kubernetes replication controllers with a specified path to understand their names, namespaces, and desired replicas. This can help in managing and monitoring the distribution and replication of workloads in a Kubernetes environment.
79 |
80 | ```sql+postgres
81 | select
82 | name,
83 | namespace,
84 | replicas as desired,
85 | selector,
86 | path
87 | from
88 | kubernetes_replication_controller
89 | where
90 | path is not null;
91 | ```
92 |
93 | ```sql+sqlite
94 | select
95 | name,
96 | namespace,
97 | replicas as desired,
98 | selector,
99 | path
100 | from
101 | kubernetes_replication_controller
102 | where
103 | path is not null;
104 | ```
--------------------------------------------------------------------------------
/docs/tables/helm_template_rendered.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: helm_template_rendered - Query Kubernetes Helm Templates using SQL"
3 | description: "Allows users to query Helm Templates in Kubernetes, specifically the rendered templates, providing insights into the configuration and deployment of applications within Kubernetes clusters."
4 | folder: "Helm"
5 | ---
6 |
7 | # Table: helm_template_rendered - Query Kubernetes Helm Templates using SQL
8 |
9 | A Helm Template in Kubernetes is a powerful tool that generates Kubernetes manifest files. It is a part of Helm, the package manager for Kubernetes, and is used to streamline the installation and management of applications within Kubernetes clusters. Helm Templates allow users to define, install, and upgrade complex Kubernetes applications, effectively serving as a deployment blueprint.
10 |
11 | ## Table Usage Guide
12 |
13 | The `helm_template_rendered` table provides insights into Helm Templates within Kubernetes. As a DevOps engineer or a Kubernetes administrator, explore the details of rendered templates through this table, including the configuration and deployment of applications within Kubernetes clusters. Utilize it to verify the deployment specifications, understand the configuration of applications, and manage the lifecycle of Kubernetes applications.
14 |
15 | ## Examples
16 |
17 | ### List fully rendered kubernetes resource templates defined in a chart
18 | Explore the fully processed resource templates within a specific Kubernetes chart to understand its configuration. This is useful for assessing the elements within a given chart, such as 'redis', for effective management and troubleshooting.
19 |
20 | ```sql+postgres
21 | select
22 | path,
23 | source_type,
24 | rendered
25 | from
26 | helm_template_rendered
27 | where
28 | chart_name = 'redis';
29 | ```
30 |
31 | ```sql+sqlite
32 | select
33 | path,
34 | source_type,
35 | rendered
36 | from
37 | helm_template_rendered
38 | where
39 | chart_name = 'redis';
40 | ```
41 |
42 | ### List fully rendered kubernetes resource templates for different environments
43 | Explore the fully rendered resource templates for different environments in Kubernetes. This is useful to understand the configuration for specific applications in development and production environments.
44 | Let's say you have two different environments for your app, dev and prod, and a Helm chart with two different sets of values for those environments. For example:
45 |
46 | ```hcl
47 | connection "kubernetes" {
48 | plugin = "kubernetes"
49 |
50 | helm_rendered_charts = {
51 | "my-app-dev" = {
52 | chart_path = "~/charts/my-app"
53 | values_file_paths = ["~/value/file/for/dev.yaml"]
54 | }
55 | "my-app-prod" = {
56 | chart_path = "~/charts/my-app"
57 | values_file_paths = ["~/value/file/for/prod.yaml"]
58 | }
59 | }
60 | }
61 | ```
62 |
63 | In both cases, the same chart is used with a different set of values.
64 |
65 | To list the Kubernetes resource configurations defined for the dev environment, simply run the query below:
66 |
67 |
68 | ```sql+postgres
69 | select
70 | chart_name,
71 | path,
72 | source_type,
73 | rendered
74 | from
75 | helm_template_rendered
76 | where
77 | source_type = 'helm_rendered:my-app-dev';
78 | ```
79 |
80 | ```sql+sqlite
81 | select
82 | chart_name,
83 | path,
84 | source_type,
85 | rendered
86 | from
87 | helm_template_rendered
88 | where
89 | source_type = 'helm_rendered:my-app-dev';
90 | ```
91 |
92 | Similarly, to query the Kubernetes resource configurations for prod:
93 |
94 | ```sql+postgres
95 | select
96 | chart_name,
97 | path,
98 | source_type,
99 | rendered
100 | from
101 | helm_template_rendered
102 | where
103 | source_type = 'helm_rendered:my-app-prod';
104 | ```
105 |
106 | ```sql+sqlite
107 | select
108 | chart_name,
109 | path,
110 | source_type,
111 | rendered
112 | from
113 | helm_template_rendered
114 | where
115 | source_type = 'helm_rendered:my-app-prod';
116 | ```
--------------------------------------------------------------------------------
/docs/tables/kubernetes_limit_range.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_limit_range - Query Kubernetes Limit Ranges using SQL"
3 | description: "Allows users to query Kubernetes Limit Ranges, specifically the range of constraints for resources such as CPU and memory that can be consumed by containers in a namespace."
4 | folder: "Limit Range"
5 | ---
6 |
7 | # Table: kubernetes_limit_range - Query Kubernetes Limit Ranges using SQL
8 |
9 | Kubernetes Limit Range is a policy to constrain resource allocation (CPU, memory, etc.) in a namespace. It configures the minimum and maximum compute resources that are allowed for different types of Kubernetes objects (Pod, Container, PersistentVolumeClaim, etc.). It helps to control the resource consumption and ensure the efficient use of resources across all Pods and Containers in a namespace.
10 |
11 | A LimitRange provides constraints that can:
12 |
13 | - Enforce minimum and maximum compute resources usage per Pod or Container in a namespace.
14 | - Enforce minimum and maximum storage request per PersistentVolumeClaim in a namespace.
15 | - Enforce a ratio between request and limit for a resource in a namespace.
16 | - Set default requests/limits for compute resources in a namespace and automatically inject them into Containers at runtime.
17 |
18 | ## Table Usage Guide
19 |
20 | The `kubernetes_limit_range` table provides insights into the limit ranges within Kubernetes. As a DevOps engineer, explore limit range-specific details through this table, including the types of resources being constrained, their minimum and maximum values, and the namespace in which they are applied. Utilize it to manage and optimize resource allocation across all Pods and Containers in a namespace.
21 |
22 | ## Examples
23 |
24 | ### Basic Info
25 | Explore which resources have specific limits within your Kubernetes environment. This can help you manage resources effectively by understanding their configurations and creation times.
26 |
27 | ```sql+postgres
28 | select
29 | name,
30 | namespace,
31 | resource_version,
32 | creation_timestamp,
33 | jsonb_pretty(spec_limits) as spec_limits
34 | from
35 | kubernetes_limit_range
36 | order by
37 | namespace;
38 | ```
39 |
40 | ```sql+sqlite
41 | select
42 | name,
43 | namespace,
44 | resource_version,
45 | creation_timestamp,
46 | spec_limits
47 | from
48 | kubernetes_limit_range
49 | order by
50 | namespace;
51 | ```
52 |
53 | ### Get spec limits details of limit range
54 | Assess the elements within your Kubernetes limit range to understand the specifics of each limit type, including their default values and requests. This allows you to manage resource consumption effectively by identifying the parameters that define the minimum and maximum resource usage.
55 |
56 | ```sql+postgres
57 | select
58 | name,
59 | namespace,
60 | limits ->> 'type' as type,
61 | limits ->> 'default' as default,
62 | limits ->> 'defaultRequest' as default_request
63 | from
64 | kubernetes_limit_range,
65 | jsonb_array_elements(spec_limits) as limits;
66 | ```
67 |
68 | ```sql+sqlite
69 | select
70 | name,
71 | namespace,
72 | json_extract(limits.value, '$.type') as type,
73 | json_extract(limits.value, '$.default') as default,
74 | json_extract(limits.value, '$.defaultRequest') as default_request
75 | from
76 | kubernetes_limit_range,
77 | json_each(spec_limits) as limits;
78 | ```
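
### Get min, max, and ratio constraints per limit type
The constraint list in the overview also mentions minimum/maximum usage and request-to-limit ratios. This is a sketch that assumes your LimitRange items populate the standard `min`, `max`, and `maxLimitRequestRatio` fields:

```sql+postgres
select
  name,
  namespace,
  limits ->> 'type' as type,
  limits -> 'min' as min,
  limits -> 'max' as max,
  limits -> 'maxLimitRequestRatio' as max_limit_request_ratio
from
  kubernetes_limit_range,
  jsonb_array_elements(spec_limits) as limits;
```

```sql+sqlite
select
  name,
  namespace,
  json_extract(limits.value, '$.type') as type,
  json_extract(limits.value, '$.min') as min,
  json_extract(limits.value, '$.max') as max,
  json_extract(limits.value, '$.maxLimitRequestRatio') as max_limit_request_ratio
from
  kubernetes_limit_range,
  json_each(spec_limits) as limits;
```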
79 |
80 | ### List manifest resources
81 | Explore the specific limits set for resources in different namespaces of a Kubernetes cluster. This can help in managing resource allocation and ensuring optimal performance.
82 |
83 | ```sql+postgres
84 | select
85 | name,
86 | namespace,
87 | resource_version,
88 | jsonb_pretty(spec_limits) as spec_limits,
89 | path
90 | from
91 | kubernetes_limit_range
92 | where
93 | path is not null
94 | order by
95 | namespace;
96 | ```
97 |
98 | ```sql+sqlite
99 | select
100 | name,
101 | namespace,
102 | resource_version,
103 | spec_limits,
104 | path
105 | from
106 | kubernetes_limit_range
107 | where
108 | path is not null
109 | order by
110 | namespace;
111 | ```
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role/test-get-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "jenkins",
4 | "rules": [
5 | {
6 | "apiGroups": [
7 | ""
8 | ],
9 | "resources": [
10 | "namespaces"
11 | ],
12 | "verbs": [
13 | "create",
14 | "delete",
15 | "get",
16 | "list",
17 | "patch",
18 | "update",
19 | "watch"
20 | ]
21 | },
22 | {
23 | "apiGroups": [
24 | ""
25 | ],
26 | "resources": [
27 | "pods"
28 | ],
29 | "verbs": [
30 | "create",
31 | "delete",
32 | "get",
33 | "list",
34 | "patch",
35 | "update",
36 | "watch"
37 | ]
38 | },
39 | {
40 | "apiGroups": [
41 | ""
42 | ],
43 | "resources": [
44 | "pods/exec"
45 | ],
46 | "verbs": [
47 | "create",
48 | "delete",
49 | "get",
50 | "list",
51 | "patch",
52 | "update",
53 | "watch"
54 | ]
55 | },
56 | {
57 | "apiGroups": [
58 | ""
59 | ],
60 | "resources": [
61 | "pods/log"
62 | ],
63 | "verbs": [
64 | "get",
65 | "list",
66 | "watch"
67 | ]
68 | },
69 | {
70 | "apiGroups": [
71 | ""
72 | ],
73 | "resources": [
74 | "events"
75 | ],
76 | "verbs": [
77 | "watch"
78 | ]
79 | },
80 | {
81 | "apiGroups": [
82 | ""
83 | ],
84 | "resources": [
85 | "nodes"
86 | ],
87 | "verbs": [
88 | "list"
89 | ]
90 | },
91 | {
92 | "apiGroups": [
93 | ""
94 | ],
95 | "resources": [
96 | "secrets"
97 | ],
98 | "verbs": [
99 | "create",
100 | "delete",
101 | "get",
102 | "list",
103 | "patch",
104 | "update",
105 | "watch"
106 | ]
107 | },
108 | {
109 | "apiGroups": [
110 | "apps"
111 | ],
112 | "resources": [
113 | "deployments"
114 | ],
115 | "verbs": [
116 | "create",
117 | "delete",
118 | "get",
119 | "list",
120 | "patch",
121 | "update",
122 | "watch"
123 | ]
124 | },
125 | {
126 | "apiGroups": [
127 | "extensions"
128 | ],
129 | "resources": [
130 | "deployments"
131 | ],
132 | "verbs": [
133 | "create",
134 | "delete",
135 | "get",
136 | "list",
137 | "patch",
138 | "update",
139 | "watch"
140 | ]
141 | },
142 | {
143 | "apiGroups": [
144 | "apps"
145 | ],
146 | "resources": [
147 | "replicasets"
148 | ],
149 | "verbs": [
150 | "create",
151 | "delete",
152 | "get",
153 | "list",
154 | "patch",
155 | "update",
156 | "watch"
157 | ]
158 | },
159 | {
160 | "apiGroups": [
161 | ""
162 | ],
163 | "resources": [
164 | "persistentvolumeclaims"
165 | ],
166 | "verbs": [
167 | "create",
168 | "delete",
169 | "get",
170 | "list",
171 | "patch",
172 | "update",
173 | "watch"
174 | ]
175 | },
176 | {
177 | "apiGroups": [
178 | "policy"
179 | ],
180 | "resourceNames": [
181 | "privileged"
182 | ],
183 | "resources": [
184 | "podsecuritypolicies"
185 | ],
186 | "verbs": [
187 | "use"
188 | ]
189 | }
190 | ]
191 | }
192 | ]
--------------------------------------------------------------------------------
/k8s-test/tests/kubernetes_cluster_role/test-list-expected.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "jenkins",
4 | "rules": [
5 | {
6 | "apiGroups": [
7 | ""
8 | ],
9 | "resources": [
10 | "namespaces"
11 | ],
12 | "verbs": [
13 | "create",
14 | "delete",
15 | "get",
16 | "list",
17 | "patch",
18 | "update",
19 | "watch"
20 | ]
21 | },
22 | {
23 | "apiGroups": [
24 | ""
25 | ],
26 | "resources": [
27 | "pods"
28 | ],
29 | "verbs": [
30 | "create",
31 | "delete",
32 | "get",
33 | "list",
34 | "patch",
35 | "update",
36 | "watch"
37 | ]
38 | },
39 | {
40 | "apiGroups": [
41 | ""
42 | ],
43 | "resources": [
44 | "pods/exec"
45 | ],
46 | "verbs": [
47 | "create",
48 | "delete",
49 | "get",
50 | "list",
51 | "patch",
52 | "update",
53 | "watch"
54 | ]
55 | },
56 | {
57 | "apiGroups": [
58 | ""
59 | ],
60 | "resources": [
61 | "pods/log"
62 | ],
63 | "verbs": [
64 | "get",
65 | "list",
66 | "watch"
67 | ]
68 | },
69 | {
70 | "apiGroups": [
71 | ""
72 | ],
73 | "resources": [
74 | "events"
75 | ],
76 | "verbs": [
77 | "watch"
78 | ]
79 | },
80 | {
81 | "apiGroups": [
82 | ""
83 | ],
84 | "resources": [
85 | "nodes"
86 | ],
87 | "verbs": [
88 | "list"
89 | ]
90 | },
91 | {
92 | "apiGroups": [
93 | ""
94 | ],
95 | "resources": [
96 | "secrets"
97 | ],
98 | "verbs": [
99 | "create",
100 | "delete",
101 | "get",
102 | "list",
103 | "patch",
104 | "update",
105 | "watch"
106 | ]
107 | },
108 | {
109 | "apiGroups": [
110 | "apps"
111 | ],
112 | "resources": [
113 | "deployments"
114 | ],
115 | "verbs": [
116 | "create",
117 | "delete",
118 | "get",
119 | "list",
120 | "patch",
121 | "update",
122 | "watch"
123 | ]
124 | },
125 | {
126 | "apiGroups": [
127 | "extensions"
128 | ],
129 | "resources": [
130 | "deployments"
131 | ],
132 | "verbs": [
133 | "create",
134 | "delete",
135 | "get",
136 | "list",
137 | "patch",
138 | "update",
139 | "watch"
140 | ]
141 | },
142 | {
143 | "apiGroups": [
144 | "apps"
145 | ],
146 | "resources": [
147 | "replicasets"
148 | ],
149 | "verbs": [
150 | "create",
151 | "delete",
152 | "get",
153 | "list",
154 | "patch",
155 | "update",
156 | "watch"
157 | ]
158 | },
159 | {
160 | "apiGroups": [
161 | ""
162 | ],
163 | "resources": [
164 | "persistentvolumeclaims"
165 | ],
166 | "verbs": [
167 | "create",
168 | "delete",
169 | "get",
170 | "list",
171 | "patch",
172 | "update",
173 | "watch"
174 | ]
175 | },
176 | {
177 | "apiGroups": [
178 | "policy"
179 | ],
180 | "resourceNames": [
181 | "privileged"
182 | ],
183 | "resources": [
184 | "podsecuritypolicies"
185 | ],
186 | "verbs": [
187 | "use"
188 | ]
189 | }
190 | ]
191 | }
192 | ]
--------------------------------------------------------------------------------
/docs/tables/helm_value.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: helm_value - Query Kubernetes Helm Values using SQL"
3 | description: "Allows users to query Helm Values in Kubernetes, specifically the configuration values for Helm Charts, providing insights into the configurations of different Kubernetes applications."
4 | folder: "Helm"
5 | ---
6 |
7 | # Table: helm_value - Query Kubernetes Helm Values using SQL
8 |
9 | Kubernetes Helm is a package manager for Kubernetes that allows developers and operators to more easily package, configure, and deploy applications and services onto Kubernetes clusters. Helm uses a packaging format called charts, and a chart is a collection of files that describe a related set of Kubernetes resources. Helm Values are the specific configurations for a Helm Chart.
10 |
11 | ## Table Usage Guide
12 |
13 | The `helm_value` table provides insights into Helm Values within Kubernetes. As a DevOps engineer, explore Helm Value-specific details through this table, including the configurations of different Kubernetes applications and services. Utilize it to uncover information about Helm Values, such as those relating to specific Helm Charts, the configurations of different services, and the verification of configurations.
14 |
15 | By design, a chart can ship with a default values.yaml file tuned for production deployments, while different environments may need different configurations. To override the defaults, you do not need to edit the default values.yaml; instead, you can point the plugin at override value files from which the configuration is taken.
16 |
17 | Let's say you have two different environments for your app, dev and prod, and a Helm chart with two different sets of values for those environments. For example:
18 |
19 | ```hcl
20 | connection "kubernetes" {
21 | plugin = "kubernetes"
22 |
23 | helm_rendered_charts = {
24 | "my-app-dev" = {
25 | chart_path = "~/charts/my-app"
26 |       values_file_paths = ["~/value/file/for/dev.yaml"]
27 | }
28 | "my-app-prod" = {
29 | chart_path = "~/charts/my-app"
30 |       values_file_paths = ["~/value/file/for/prod.yaml"]
31 | }
32 | }
33 | }
34 | ```
35 |
36 | The table `helm_value` lists the values from the chart's default values.yaml file, as well as the values from any files provided to override the default configuration.
37 |
38 | **Important Notes**
39 |
40 | - You must specify the `path` column in the `where` clause to query this table.
41 |
42 | ## Examples
43 |
44 | ### List values configured in the default values.yaml file of a specific chart
45 | Analyze the settings to understand the default configurations set in a specific chart's values.yaml file in Helm, which is beneficial for auditing or modifying these configurations. This allows you to pinpoint the specific locations where changes have been made, enhancing your control over the chart's behavior.
46 |
47 | ```sql+postgres
48 | select
49 | path,
50 | key_path,
51 | value,
52 | start_line,
53 | start_column
54 | from
55 | helm_value
56 | where
57 | path = '~/charts/my-app/values.yaml'
58 | order by
59 | start_line;
60 | ```
61 |
62 | ```sql+sqlite
63 | select
64 | path,
65 | key_path,
66 | value,
67 | start_line,
68 | start_column
69 | from
70 | helm_value
71 | where
72 | path = '~/charts/my-app/values.yaml'
73 | order by
74 | start_line;
75 | ```
76 |
77 | ### List values from a specific override file
78 | Explore which values are being used from a specific file in your Helm configuration. This can be particularly useful to understand and manage your development environment settings.
79 |
80 | ```sql+postgres
81 | select
82 | path,
83 | key_path,
84 | value,
85 | start_line,
86 | start_column
87 | from
88 | helm_value
89 | where
90 | path = '~/value/file/for/dev.yaml'
91 | order by
92 | start_line;
93 | ```
94 |
95 | ```sql+sqlite
96 | select
97 | path,
98 | key_path,
99 | value,
100 | start_line,
101 | start_column
102 | from
103 | helm_value
104 | where
105 | path = '~/value/file/for/dev.yaml'
106 | order by
107 | start_line;
108 | ```
--------------------------------------------------------------------------------
/config/kubernetes.spc:
--------------------------------------------------------------------------------
1 | connection "kubernetes" {
2 | plugin = "kubernetes"
3 |
4 | # By default, the plugin will use credentials in "~/.kube/config" with the current context.
5 | # OpenID Connect (OIDC) authentication is supported without any extra configuration.
6 | # The kubeconfig path and context can also be specified with the following config arguments:
7 |
8 | # Specify the file path to the kubeconfig.
9 | # Can also be set with the "KUBECONFIG" or "KUBE_CONFIG_PATH" environment variables. Plugin will prioritize KUBECONFIG if both are available.
10 | # config_path = "~/.kube/config"
11 |
12 | # Specify a context other than the current one.
13 | # config_context = "minikube"
14 |
15 | # List of custom resources that will be created as dynamic tables.
16 | # No dynamic tables will be created if this arg is empty or not set.
17 | # Wildcard based searches are supported.
18 |
19 | # For example:
20 | # - "*" matches all custom resources available
21 | # - "*.storage.k8s.io" matches all custom resources in the storage.k8s.io group
22 | # - "certificates.cert-manager.io" matches a specific custom resource "certificates.cert-manager.io"
23 | # - "backendconfig" matches the singular name "backendconfig" in any group
24 |
25 | # Defaults to all custom resources
26 | custom_resource_tables = ["*"]
27 |
28 | # If no kubeconfig file can be found, the plugin will attempt to use the service account Kubernetes gives to pods.
29 | # This authentication method is intended for clients that expect to be running inside a pod running on Kubernetes.
30 |
31 | # Specify the source(s) of the resource(s). Possible values: `deployed`, `helm` and `manifest`.
32 | # Defaults to all possible values. Set the argument to override the default value.
33 | # If `deployed` is contained in the value, tables will show all the deployed resources.
34 | # If `helm` is contained in the value, tables will show resources from the configured helm charts.
35 | # If `manifest` is contained in the value, tables will show all the resources from the kubernetes manifest. Make sure that the `manifest_file_paths` arg is set.
36 | # source_types = ["deployed", "helm", "manifest"]
37 |
38 | # Manifest File Configuration
39 |
40 | # manifest_file_paths is a list of locations to search for Kubernetes manifest files
41 | # Manifest file paths can be configured with a local directory, a remote Git repository URL, or an S3 bucket URL
42 | # Refer to https://hub.steampipe.io/plugins/turbot/kubernetes#supported-path-formats for more information
43 | # Wildcard based searches are supported, including recursive searches
44 | # Local paths are resolved relative to the current working directory (CWD)
45 |
46 | # For example:
47 | # - "*.yml" or "*.yaml" or "*.json" matches all Kubernetes manifest files in the CWD
48 | # - "**/*.yml" or "**/*.yaml" or "**/*.json" matches all Kubernetes manifest files in the CWD and all sub-directories
49 | # - "../*.yml" or "../*.yaml" or "../*.json" matches all Kubernetes manifest files in the CWD's parent directory
50 | # - "steampipe*.yml" or "steampipe*.yaml" or "steampipe*.json" matches all Kubernetes manifest files starting with "steampipe" in the CWD
51 | # - "/path/to/dir/*.yml" or "/path/to/dir/*.yaml" or "/path/to/dir/*.json" matches all Kubernetes manifest files in a specific directory
52 | # - "/path/to/dir/main.yml" or "/path/to/dir/main.yaml" or "/path/to/dir/main.json" matches a specific file
53 |
54 | # If the given paths include "*", all files (including non-Kubernetes manifest files) in
55 | # the CWD will be matched, which may cause errors if incompatible file types exist
56 |
57 | # Defaults to CWD
58 | # manifest_file_paths = [ "*.yml", "*.yaml", "*.json" ]
59 |
60 | # Helm configuration
61 |
62 | # A map of Helm charts, each specifying the path to the chart directory and the paths of any value override files.
63 | # Every entry must have chart_path defined; values_file_paths is optional.
64 | # You can define multiple charts in the config.
65 | # helm_rendered_charts = {
66 | # "chart_name" = {
67 | # chart_path = "/path/to/chart/dir"
68 | # values_file_paths = ["/path/to/value/override/files.yaml"]
69 | # }
70 | # }
71 | }
72 |
--------------------------------------------------------------------------------
/docs/tables/kubernetes_horizontal_pod_autoscaler.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Steampipe Table: kubernetes_horizontal_pod_autoscaler - Query Kubernetes Horizontal Pod Autoscalers using SQL"
3 | description: "Allows users to query Kubernetes Horizontal Pod Autoscalers, providing insights into the configuration and current status of autoscalers in the Kubernetes cluster."
4 | folder: "Horizontal Pod Autoscaler"
5 | ---
6 |
7 | # Table: kubernetes_horizontal_pod_autoscaler - Query Kubernetes Horizontal Pod Autoscalers using SQL
8 |
9 | A Kubernetes Horizontal Pod Autoscaler automatically scales the number of pods in a replication controller, deployment, replica set, or stateful set based on observed CPU utilization. It is designed to maintain a target level of CPU utilization across the pods as the load varies. The Horizontal Pod Autoscaler is implemented as a Kubernetes API resource and a controller.
10 |
11 | ## Table Usage Guide
12 |
13 | The `kubernetes_horizontal_pod_autoscaler` table provides insights into Horizontal Pod Autoscalers within a Kubernetes cluster. As a Kubernetes administrator or DevOps engineer, explore autoscaler-specific details through this table, including the current and desired number of replicas, target CPU utilization, and associated metadata. Utilize it to monitor the performance and efficiency of the autoscalers, ensuring that your applications are scaling correctly and efficiently under varying load conditions.
14 |
15 | ## Examples
16 |
17 | ### Basic Info
18 | Explore the configuration of your Kubernetes horizontal pod autoscaler to determine its current and desired replica settings. This will help you understand how your system is scaling and whether it is operating within your set parameters.
19 |
20 | ```sql+postgres
21 | select
22 | name,
23 | namespace,
24 | min_replicas,
25 | max_replicas,
26 | current_replicas,
27 | desired_replicas
28 | from
29 | kubernetes_horizontal_pod_autoscaler;
30 | ```
31 |
32 | ```sql+sqlite
33 | select
34 | name,
35 | namespace,
36 | min_replicas,
37 | max_replicas,
38 | current_replicas,
39 | desired_replicas
40 | from
41 | kubernetes_horizontal_pod_autoscaler;
42 | ```
49 |
50 | ### Get list of HPA metrics configurations
51 | Explore the metrics configurations of your Horizontal Pod Autoscalers (HPAs), along with their current and desired replica settings. This can help you assess the efficiency of your current setup and identify areas for potential optimization.
52 |
53 | ```sql+postgres
54 | select
55 | name,
56 | namespace,
57 | min_replicas,
58 | max_replicas,
59 | current_replicas,
60 | desired_replicas,
61 | jsonb_array_elements(metrics) as metrics,
62 | jsonb_array_elements(current_metrics) as current_metrics,
63 | conditions
64 | from
65 | kubernetes_horizontal_pod_autoscaler;
66 | ```
67 |
68 | ```sql+sqlite
69 | select
70 | name,
71 | namespace,
72 | min_replicas,
73 | max_replicas,
74 | current_replicas,
75 | desired_replicas,
76 | m.value as metrics,
77 | cm.value as current_metrics,
78 | conditions
79 | from
80 | kubernetes_horizontal_pod_autoscaler,
81 | json_each(metrics) as m,
82 | json_each(current_metrics) as cm;
83 | ```
84 |
85 | ### List manifest resources
86 | Explore which Kubernetes horizontal pod autoscalers are defined in manifest files, as indicated by a non-null path. This helps you distinguish autoscaling configuration kept in manifests from resources deployed in the cluster.
87 |
88 | ```sql+postgres
89 | select
90 | name,
91 | namespace,
92 | min_replicas,
93 | max_replicas,
94 | current_replicas,
95 | desired_replicas,
96 | path
97 | from
98 | kubernetes_horizontal_pod_autoscaler
99 | where
100 | path is not null;
101 | ```
102 |
103 | ```sql+sqlite
104 | select
105 | name,
106 | namespace,
107 | min_replicas,
108 | max_replicas,
109 | current_replicas,
110 | desired_replicas,
111 | path
112 | from
113 | kubernetes_horizontal_pod_autoscaler
114 | where
115 | path is not null;
116 | ```
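117 |
118 | ### List autoscalers running at their maximum replica count
119 | As a follow-on to the examples above, and using only the columns already shown there, a query along these lines can surface autoscalers that have scaled out to their configured limit, which may indicate that `max_replicas` needs to be raised.
120 |
121 | ```sql+postgres
122 | select
123 | name,
124 | namespace,
125 | min_replicas,
126 | max_replicas,
127 | current_replicas,
128 | desired_replicas
129 | from
130 | kubernetes_horizontal_pod_autoscaler
131 | where
132 | current_replicas >= max_replicas;
133 | ```
134 |
135 | ```sql+sqlite
136 | select
137 | name,
138 | namespace,
139 | min_replicas,
140 | max_replicas,
141 | current_replicas,
142 | desired_replicas
143 | from
144 | kubernetes_horizontal_pod_autoscaler
145 | where
146 | current_replicas >= max_replicas;
147 | ```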
--------------------------------------------------------------------------------