├── gcloud
├── .gitignore
├── gcp-add-data-disk.sh
└── create_serviceaccount.sh
├── haproxy
├── .gitignore
├── ab
│ ├── ab.sh
│ ├── Dockerfile
│ └── Vagrantfile
├── selfsigned.sh
├── haproxytest.sh
├── server.js
└── ubuntu1604
│ ├── haproxy.service
│ └── switchhaproxy.sh
├── socat
├── .gitignore
├── toupload.txt
├── curl_client_calls.sh
└── start_socat_tls.sh
├── diagrams
├── README
└── ssh-x11forwarding.drawio
├── file-encoding
├── .gitignore
├── python_read_file.py
└── test-ascii-utf8-conversion.sh
├── py-zabbix
├── .gitignore
└── ZabbixSender.py
├── ansible
├── lineinfileyml
│ ├── .gitignore
│ ├── README.md
│ ├── my.yml.bak
│ ├── test-ansible-lineinfile.sh
│ └── test-lineinfile.yml
├── local-README.md
├── lineinfile
│ ├── .gitignore
│ ├── README.md
│ ├── key-value.cfg.bak
│ ├── test-ansible-lineinfile.sh
│ └── test-lineinfile.yml
├── roles
│ ├── echo
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── fileglobtest
│ │ ├── defaults
│ │ │ └── main.yaml
│ │ ├── templates
│ │ │ ├── greeting.html.j2
│ │ │ └── goodbye.html.j2
│ │ └── tasks
│ │ │ └── main.yaml
│ ├── filetreetest
│ │ ├── templates
│ │ │ ├── basefile1.txt
│ │ │ ├── basefile2.txt
│ │ │ └── subdir1
│ │ │ │ ├── leafdir
│ │ │ │ └── leaf.txt
│ │ │ │ └── subdirfile.txt
│ │ └── tasks
│ │ │ └── main.yaml
│ └── conditional-tasks
│ │ └── tasks
│ │ ├── import_task.yml
│ │ ├── include_task.yml
│ │ ├── delete.yml
│ │ └── main.yml
├── ansible_inventory
├── conditional_block.yml
├── ansible.cfg
├── playbook-role-for-group.yaml
├── playbook-filetree.yml
├── from-inside-cron.sh
├── playbook-fallback.yml
├── playbook-kernel-headers.yaml
├── playbook-find-files-iterate.yml
├── playbook-cached-facts.yaml
├── playbook-never-tag.yml
├── playbook-timestamp.yml
├── playbook-loop-exceptfor.yml
├── playbook-fileglob.yml
├── split-string-with-tertiary.yml
├── playbook-ubuntu-cron.yml
├── playbook-remove-symlink.yml
├── playbook-block-loop.yml
├── install_dependencies.yml
├── playbook-list-union.yml
├── playbook-git-withcreds.yml
├── playbook-try-url-first.yaml
├── playbook-ipaddr.yaml
├── playbook-local-remote-content.yaml
├── playbook-recursive-dirs.yml
├── playbook-pass-extra-vars.yml
├── playbook-only-for-missing-files.yml
└── playbook-211-213-changed.yml
├── podman
├── index.html
├── build-bud.sh
├── Dockerfile
├── run.sh
├── run2.sh
└── build.sh
├── python-jsonpath
├── .gitignore
└── squad.json
├── vagrant
├── aptcacherng
│ ├── .gitignore
│ ├── aptcacherng.sh
│ └── Vagrantfile
├── auditd
│ ├── .gitignore
│ ├── quicktest.sh
│ ├── usersetup.sh
│ └── Vagrantfile
├── .gitignore
├── awscli1604
│ ├── aws-python-sdk.sh
│ ├── list_aws_regions.py
│ └── Vagrantfile
├── gpg1604
│ ├── createsecret.sh
│ ├── Vagrantfile
│ ├── fordevelopers.sh
│ └── gpgsetup.sh
└── awscli1404
│ └── Vagrantfile
├── ymlcerts
├── .gitignore
└── parse_certs.sh
├── bash
├── yamlfiles
│ ├── three.yaml
│ ├── one.yaml
│ └── two.yaml
├── .gitignore
├── test_process_exit_code.sh
├── awk-using-variable.sh
├── test_yaml_aggregation_with_substitution.sh
├── generate_random_string.sh
├── tmux-new-shared-session.sh
├── create_temp_dir.sh
├── is_script_sourced.sh
├── openssl_key_cert_check.sh
├── test_awk_NF.sh
├── test_sed_between.sh
├── test_timeout.sh
├── test_cached_file.sh
├── logic-as-shorthand-control.sh
├── show-git-untracked-files.sh
├── lines_before_and_after_regex.sh
├── script_dir.sh
├── test_value_choices.sh
├── compare_values_on_line_regex_capture.sh
├── test_chain_sed.sh
├── test_string_padding.sh
├── tmux-view-shared-session.sh
├── awk_nth_match.sh
├── pull_and_parse_certs.sh
├── test_trap.sh
├── file_existence_and_size.sh
├── openssl_cert_days_till_expiration.sh
├── grep_lookahead_lookbehind.sh
├── test_generic_variables_template.sh
├── test_heredoc.sh
└── install-ansible-ubuntu.sh
├── terraform
├── hardcoded_resources
│ ├── .gitignore
│ └── main.tf
├── .gitignore
├── json_vars_file
│ ├── apply.sh
│ ├── variable-values.json
│ ├── local-values.json
│ └── main.tf
├── yaml_contribution_model
│ ├── external-dc.yaml
│ ├── external.yaml
│ ├── map-external.tf
│ └── main.tf
├── templatefile_test
│ ├── script.sh.tpl
│ └── main.tf
├── flatten_list
│ └── main.tf
└── for_each_list
│ └── main.tf
├── kubectl
├── ConfigMap-test1.yaml
├── kustomize-delete
│ ├── myns.yaml
│ ├── mysecret.yaml
│ ├── delete-ns.yaml
│ ├── delete-configmap.yaml
│ ├── delete-deployment.yaml
│ ├── myconfigmap.yaml
│ ├── mydeployment.yaml
│ └── kustomization.yaml
├── kustomize-secret
│ ├── README.md
│ ├── my-tls-secret-extfile.yaml
│ ├── kustomization.yaml
│ ├── my-tls.crt
│ └── my-tls.key
└── playbook-k8s-configmap-update.yml
├── pandas
├── flatten_relational
│ ├── .gitignore
│ └── fetch_relational_data.sh
└── mariadb
│ ├── fetch_sample_employees_database.sh
│ └── load_sample_employees_database.sh
├── vault
├── jwt
│ ├── .gitignore
│ └── UNUSED-sa-secret-template.yaml
├── vso
│ ├── .gitignore
│ ├── vaultconnection.yaml
│ ├── vaultstaticsecret-cert.yaml
│ ├── vaultauth-jwt.yaml
│ ├── vaultstaticsecret-hello.yaml
│ └── web-hello.yaml
├── tokenreview.yaml
└── tiny-tools-test.yaml
├── .gitignore
├── README.md
├── python
├── .gitignore
├── loopWithSleep.sh
├── print_color.py3
├── most_recent_file.py
├── json-to-dot2.py
├── json-to-dot.py
└── argParseTest.py
├── putty
├── template.reg
└── createPuttySessions.ps1
├── openwrt
├── listuserpackages.awk
├── README.md
└── listuserpackages.sh
├── MethodsAsBOFModules.zip
├── yq
├── update-filesection
│ ├── company-regions-new.yaml
│ ├── company-servers.yaml
│ ├── install-yq.sh
│ └── yq-mikefarah-merge-two-files.sh
├── update-deep
│ ├── company-servers.yaml
│ └── yq-mikefarah-update-deep-element.sh
├── multi-doc.yaml
└── yq-update-ds-annotation.sh
├── golang
├── zabbixhost
│ └── README.md
├── myarch
│ ├── README.md
│ └── myarch.go
├── zabbixsender
│ ├── README.md
│ └── zabbixsender.go
├── readjson
│ ├── config.json
│ └── readjson.go
├── echoservice
│ ├── systemd
│ │ └── echoservice.service
│ └── echoservice.go
├── sleepservice
│ ├── systemd
│ │ └── sleepservice.service
│ └── sleepservice.go
└── sleepservice19
│ ├── systemd
│ └── sleepservice.service
│ └── sleepservice.go
├── keycloak
├── keycloak-oauth2-entities.drawio.png
├── keycloak-subdomain-ingress.yaml
├── selfsigned-openssl.sh
├── keycloak-patch.yaml
├── apply.sh
└── keycloak.yaml
├── k8s
├── kyverno
│ ├── create-ns.yaml
│ └── kyv-require-ns-label.yaml
├── kustomization-svc-acct-patch
│ ├── sa.yaml
│ ├── sa-patch.yaml
│ └── kustomization.yaml
├── example-ingress.yaml
├── ndots-glibc.yaml
├── ndots-musl.yaml
├── keda
│ └── keda-scaledobject-pubsub.yaml
├── tiny-tools.yaml
└── tiny-tools-nodeselector-tolerations.yaml
├── cve
└── meltdown-candidate
│ ├── testMeltdown.sh
│ └── Vagrantfile
├── k8s-dashboard
├── limited-user.yaml
├── limited-ns-role.yaml
├── limited-clusterrole.yaml
├── dashboard-admin.yaml
└── limited-binding.yaml
├── OnDemand BOCS Customer Installation Guide v0.2.pdf
├── sysprep
└── w2k12
│ ├── dosysprep.bat
│ ├── test.ps1
│ ├── vagrant.pub
│ ├── Readme.md
│ ├── MakeDomainController.ps1
│ ├── Vagrantfile
│ └── Vagrantfile-powershell.ps1
├── windows
├── README
├── rename-host.ps1
├── install-dc.ps1
├── install-dc-2019.ps1
├── disable-iesc-uac.ps1
├── install-adfs-2019.ps1
└── make-ip-static.ps1
├── tensorflow
└── macm1
│ ├── requirements.txt
│ ├── tf-list-devices.py3
│ ├── tensorflow-sample.py3
│ └── README.md
├── saltstack-pillarcombine-etc-srv
├── salt
│ ├── logrotate
│ │ ├── defaults.yaml
│ │ ├── init.sls
│ │ └── map.jinja
│ └── top.sls
├── pillar
│ ├── logrotate-maya3ds.sls
│ ├── logrotate-matlab.sls
│ └── top.sls
└── README.md
├── jekyll
├── escape_liquid_directives.sh
└── install_jekyll_via_rvm.sh
├── gitlab
├── install-zaquestion-lab-cli.sh
└── install-glab-cli.sh
├── zabbixlld
└── produce.sh
├── CF
└── which-cf-buildpack.sh
├── LICENSE
├── winpscert
└── newSelfSignedCert.ps1
├── logstash-metadata.conf
├── make
└── Makefile.envfile.exists
├── systemd
└── ssh-agent.service
├── ruby
└── headless_chrome.rb
├── helm
└── helm_show_repo_for_releases.sh
└── ruby-logstash.conf
/gcloud/.gitignore:
--------------------------------------------------------------------------------
1 | *.json
2 |
--------------------------------------------------------------------------------
/haproxy/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
2 |
--------------------------------------------------------------------------------
/socat/.gitignore:
--------------------------------------------------------------------------------
1 | mysocat*
2 |
--------------------------------------------------------------------------------
/diagrams/README:
--------------------------------------------------------------------------------
1 | draw.io diagrams
2 |
--------------------------------------------------------------------------------
/file-encoding/.gitignore:
--------------------------------------------------------------------------------
1 | *.txt
2 |
--------------------------------------------------------------------------------
/py-zabbix/.gitignore:
--------------------------------------------------------------------------------
1 | py-zabbix/
2 |
--------------------------------------------------------------------------------
/ansible/lineinfileyml/.gitignore:
--------------------------------------------------------------------------------
1 | my.yml
2 |
--------------------------------------------------------------------------------
/ansible/local-README.md:
--------------------------------------------------------------------------------
1 | this was local
2 |
--------------------------------------------------------------------------------
/podman/index.html:
--------------------------------------------------------------------------------
1 | Hello world!
2 |
--------------------------------------------------------------------------------
/python-jsonpath/.gitignore:
--------------------------------------------------------------------------------
1 | jsonpath/
2 |
--------------------------------------------------------------------------------
/socat/toupload.txt:
--------------------------------------------------------------------------------
1 | this is going up
2 |
--------------------------------------------------------------------------------
/vagrant/aptcacherng/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
2 |
--------------------------------------------------------------------------------
/ansible/lineinfile/.gitignore:
--------------------------------------------------------------------------------
1 | key-value.cfg
2 |
--------------------------------------------------------------------------------
/vagrant/auditd/.gitignore:
--------------------------------------------------------------------------------
1 | *.log
2 | *.key
3 |
--------------------------------------------------------------------------------
/ymlcerts/.gitignore:
--------------------------------------------------------------------------------
1 | allcerts.pem
2 | *.crt
3 |
--------------------------------------------------------------------------------
/ansible/roles/echo/defaults/main.yml:
--------------------------------------------------------------------------------
1 | noun: World
2 |
--------------------------------------------------------------------------------
/bash/yamlfiles/three.yaml:
--------------------------------------------------------------------------------
1 | root:
2 | three: 3
3 |
--------------------------------------------------------------------------------
/terraform/hardcoded_resources/.gitignore:
--------------------------------------------------------------------------------
1 | *.txt
2 |
--------------------------------------------------------------------------------
/vagrant/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
2 | *.log
3 | bbl-aws
4 |
--------------------------------------------------------------------------------
/bash/.gitignore:
--------------------------------------------------------------------------------
1 | greptest.txt
2 | cert.pem
3 | key.pem
4 |
--------------------------------------------------------------------------------
/kubectl/ConfigMap-test1.yaml:
--------------------------------------------------------------------------------
1 | test1:
2 | foo: bar
3 |
--------------------------------------------------------------------------------
/ansible/roles/fileglobtest/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | noun: World
2 |
--------------------------------------------------------------------------------
/ansible/roles/filetreetest/templates/basefile1.txt:
--------------------------------------------------------------------------------
1 | basedir1
2 |
--------------------------------------------------------------------------------
/ansible/roles/filetreetest/templates/basefile2.txt:
--------------------------------------------------------------------------------
1 | basedir2
2 |
--------------------------------------------------------------------------------
/pandas/flatten_relational/.gitignore:
--------------------------------------------------------------------------------
1 | *.csv
2 | instawdb.sql
3 |
--------------------------------------------------------------------------------
/ansible/roles/filetreetest/templates/subdir1/leafdir/leaf.txt:
--------------------------------------------------------------------------------
1 | leaf
2 |
--------------------------------------------------------------------------------
/ansible/roles/filetreetest/templates/subdir1/subdirfile.txt:
--------------------------------------------------------------------------------
1 | subdir
2 |
--------------------------------------------------------------------------------
/podman/build-bud.sh:
--------------------------------------------------------------------------------
1 | buildah bud -f Dockerfile -t nginx-alpine-custom2
2 |
--------------------------------------------------------------------------------
/vault/jwt/.gitignore:
--------------------------------------------------------------------------------
1 | *.jwk
2 | *.pem
3 | *.jwt
4 | step
5 | jwker
6 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
2 | __pycache__
3 | ansible-roles-community/
4 | *.swp
5 |
--------------------------------------------------------------------------------
/bash/yamlfiles/one.yaml:
--------------------------------------------------------------------------------
1 | root:
2 | first: 1
3 | testsub_first: $first
4 |
--------------------------------------------------------------------------------
/bash/yamlfiles/two.yaml:
--------------------------------------------------------------------------------
1 | root:
2 | two: 2
3 | which_animal: $animal
4 |
--------------------------------------------------------------------------------
/ansible/roles/fileglobtest/templates/greeting.html.j2:
--------------------------------------------------------------------------------
1 | hello, {{noun}}
2 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # blogcode
2 |
3 | Supporting code for blog
4 | https://fabianlee.org/
5 |
--------------------------------------------------------------------------------
/ansible/roles/fileglobtest/templates/goodbye.html.j2:
--------------------------------------------------------------------------------
1 | goodbye, {{noun}}
2 |
--------------------------------------------------------------------------------
/python/.gitignore:
--------------------------------------------------------------------------------
1 | .vscode/
2 | credentials.json
3 | *.pickle
4 | __pycache__
5 |
6 |
--------------------------------------------------------------------------------
/terraform/.gitignore:
--------------------------------------------------------------------------------
1 | *.backup
2 | *tfstate*
3 | *.retry
4 | *.hcl
5 | .terraform
6 |
--------------------------------------------------------------------------------
/putty/template.reg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabianlee/blogcode/HEAD/putty/template.reg
--------------------------------------------------------------------------------
/vault/vso/.gitignore:
--------------------------------------------------------------------------------
1 | *.jwk
2 | *.pem
3 | *.jwt
4 | *.crt
5 | *.key
6 | jwker
7 | step
8 |
--------------------------------------------------------------------------------
/openwrt/listuserpackages.awk:
--------------------------------------------------------------------------------
1 | /^Package:/{PKG= $2}
2 | /^Status: .*user installed/{print PKG}
3 |
--------------------------------------------------------------------------------
/MethodsAsBOFModules.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabianlee/blogcode/HEAD/MethodsAsBOFModules.zip
--------------------------------------------------------------------------------
/kubectl/kustomize-delete/myns.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: myns
6 |
--------------------------------------------------------------------------------
/kubectl/kustomize-delete/mysecret.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: myns
6 |
--------------------------------------------------------------------------------
/podman/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM docker.io/nginx:1.23.1-alpine
2 | COPY index.html /usr/share/nginx/html
3 | EXPOSE 8080
4 |
--------------------------------------------------------------------------------
/podman/run.sh:
--------------------------------------------------------------------------------
1 | podman run -p 8080:80 --name nginxtest localhost/nginx-alpine-custom:latest
2 | podman rm nginxtest
3 |
--------------------------------------------------------------------------------
/yq/update-filesection/company-regions-new.yaml:
--------------------------------------------------------------------------------
1 | regions-new: ['us-east', 'us-central', 'us-west', 'eu-1', 'eu-2']
2 |
--------------------------------------------------------------------------------
/podman/run2.sh:
--------------------------------------------------------------------------------
1 | podman run -p 8080:80 --name nginxtest2 localhost/nginx-alpine-custom2:latest
2 | podman rm nginxtest2
3 |
--------------------------------------------------------------------------------
/python/loopWithSleep.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | for i in $(seq 1 5); do
4 | echo "iteration" $i
5 | sleep 1
6 | done
7 |
--------------------------------------------------------------------------------
/golang/zabbixhost/README.md:
--------------------------------------------------------------------------------
1 | supporting code for: https://fabianlee.org/2017/05/16/zabbix-zabbix-rest-api-using-a-go-client/
2 |
--------------------------------------------------------------------------------
/ansible/roles/conditional-tasks/tasks/import_task.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: this is from import_task.yml
4 | debug: msg="ok"
5 |
--------------------------------------------------------------------------------
/ansible/roles/conditional-tasks/tasks/include_task.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: this is from include_task.yml
4 | debug: msg="ok"
5 |
--------------------------------------------------------------------------------
/terraform/json_vars_file/apply.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | terraform init
3 | terraform apply -var-file=variable-values.json -auto-approve
4 |
--------------------------------------------------------------------------------
/golang/myarch/README.md:
--------------------------------------------------------------------------------
1 | supporting code for: https://fabianlee.org/2017/05/16/golang-cross-compiling-for-linux-and-windows-platforms/
2 |
--------------------------------------------------------------------------------
/kubectl/kustomize-delete/delete-ns.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | $patch: delete
3 | apiVersion: v1
4 | kind: Namespace
5 | metadata:
6 | name: myns
7 |
--------------------------------------------------------------------------------
/golang/zabbixsender/README.md:
--------------------------------------------------------------------------------
1 | Supporting blog post:
2 | https://fabianlee.org/2017/05/19/zabbix-sending-zabbix-metrics-using-a-go-client/
3 |
--------------------------------------------------------------------------------
/keycloak/keycloak-oauth2-entities.drawio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabianlee/blogcode/HEAD/keycloak/keycloak-oauth2-entities.drawio.png
--------------------------------------------------------------------------------
/ansible/roles/conditional-tasks/tasks/delete.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: this is from delete.yml
4 | debug: msg="deleted"
5 | #tags: delete
6 |
--------------------------------------------------------------------------------
/k8s/kyverno/create-ns.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: kyvtest
5 | labels:
6 | env: dev
7 | owner: me
8 |
--------------------------------------------------------------------------------
/cve/meltdown-candidate/testMeltdown.sh:
--------------------------------------------------------------------------------
1 | cd Am-I-affected-by-Meltdown
2 | sudo sh -c "echo 0 > /proc/sys/kernel/kptr_restrict"
3 | ./meltdown-checker
4 |
--------------------------------------------------------------------------------
/k8s-dashboard/limited-user.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: limited-user
5 | namespace: kubernetes-dashboard
6 |
--------------------------------------------------------------------------------
/k8s/kustomization-svc-acct-patch/sa.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: default
6 | namespace: default
7 |
--------------------------------------------------------------------------------
/OnDemand BOCS Customer Installation Guide v0.2.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fabianlee/blogcode/HEAD/OnDemand BOCS Customer Installation Guide v0.2.pdf
--------------------------------------------------------------------------------
/golang/readjson/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "foo",
3 | "version": "1.0",
4 | "props": {
5 | "key1" : "value1",
6 | "key2" : "value2"
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/ansible/lineinfileyml/README.md:
--------------------------------------------------------------------------------
1 | code supporting blog: https://fabianlee.org/2021/01/03/ansible-regex-capture-groups-with-lineinfile-to-preserve-yaml-indentation/
2 |
--------------------------------------------------------------------------------
/ansible/ansible_inventory:
--------------------------------------------------------------------------------
1 |
2 | localhost ansible_connection=local
3 |
4 | [myrole]
5 | localhost
6 |
7 | [all:vars]
8 | ansible_python_interpreter=/usr/bin/python3
9 |
--------------------------------------------------------------------------------
/ansible/lineinfile/README.md:
--------------------------------------------------------------------------------
1 | code supporting blog: https://fabianlee.org/2021/01/03/ansible-lineinfile-with-regex-to-robustly-populate-key-value-pairs-in-config-file/
2 |
--------------------------------------------------------------------------------
/kubectl/kustomize-delete/delete-configmap.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | $patch: delete
3 | apiVersion: v1
4 | kind: ConfigMap
5 | metadata:
6 | name: myconfigmap
7 | namespace: default
8 |
--------------------------------------------------------------------------------
/sysprep/w2k12/dosysprep.bat:
--------------------------------------------------------------------------------
1 | del /q panther\*.*
2 |
3 | c:\windows\system32\sysprep\sysprep /generalize /oobe /shutdown /unattend:c:\windows\system32\sysprep\unattend.xml
4 |
--------------------------------------------------------------------------------
/windows/README:
--------------------------------------------------------------------------------
1 |
2 | Configuring AD, DS, DNS with Powershell
3 | https://medium.com/@eeubanks/install-ad-ds-dns-and-dhcp-using-powershell-on-windows-server-2016-ac331e5988a7
4 |
--------------------------------------------------------------------------------
/kubectl/kustomize-delete/delete-deployment.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | $patch: delete
3 | apiVersion: apps/v1
4 | kind: Deployment
5 | metadata:
6 | name: mydeployment
7 | namespace: default
8 |
--------------------------------------------------------------------------------
/terraform/yaml_contribution_model/external-dc.yaml:
--------------------------------------------------------------------------------
1 | datacenters:
2 | neam1:
3 | name: Germany
4 | resources: 50
5 | neam2:
6 | name: France
7 | resources: 49
8 |
--------------------------------------------------------------------------------
/terraform/yaml_contribution_model/external.yaml:
--------------------------------------------------------------------------------
1 | files:
2 | - name: foo3
3 | content: |
4 | this is foo3
5 | - name: foo4
6 | content: |
7 | this is foo4
8 |
--------------------------------------------------------------------------------
/bash/test_process_exit_code.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # force a process failure exit code using exit in subshell
3 |
4 | $(exit 99)
5 |
6 | echo "last process exit code was $?"
7 |
8 |
9 |
--------------------------------------------------------------------------------
/tensorflow/macm1/requirements.txt:
--------------------------------------------------------------------------------
1 | # pyenv for 3.11.7
2 | # table: https://pypi.org/project/tensorflow-metal/
3 | tensorflow==2.15.1
4 | tensorflow-macos==2.15.1
5 | tensorflow-metal==1.1.0
6 |
--------------------------------------------------------------------------------
/openwrt/README.md:
--------------------------------------------------------------------------------
1 | # https://wiki.openwrt.org/doc/howto/generic.sysupgrade
2 |
3 | awk -f /tmp/listuserpackages.awk /usr/lib/opkg/status
4 |
5 | Diagram can be edited at:
6 | https://app.diagrams.net/
7 |
--------------------------------------------------------------------------------
/saltstack-pillarcombine-etc-srv/salt/logrotate/defaults.yaml:
--------------------------------------------------------------------------------
1 | # /srv/salt/logrotate/defaults.yaml
2 |
3 | logrotate:
4 | rotatejobs:
5 | # placeholder:
6 | # path: /var/log/placeholder/dummy.log
7 |
--------------------------------------------------------------------------------
/file-encoding/python_read_file.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import sys
3 |
4 | # read file line by line
5 | with open(sys.argv[1],"r") as file:
6 | for line in file:
7 | print(type(line))
8 |
9 |
--------------------------------------------------------------------------------
/podman/build.sh:
--------------------------------------------------------------------------------
1 | nginx_id=$(buildah from docker.io/nginx:1.23.1-alpine)
2 | buildah copy $nginx_id /tmp/index.html /usr/share/nginx/html
3 | buildah config --port 8080 $nginx_id
4 | buildah commit $nginx_id nginx-alpine-custom
5 |
--------------------------------------------------------------------------------
/saltstack-pillarcombine-etc-srv/pillar/logrotate-maya3ds.sls:
--------------------------------------------------------------------------------
1 | # /srv/pillar/logrotate-maya3ds.sls
2 |
3 | logrotate-maya3ds:
4 | lookup:
5 | rotatejobs:
6 | maya3ds:
7 | path: /var/log/3ds/3ds.log
8 |
--------------------------------------------------------------------------------
/golang/myarch/myarch.go:
--------------------------------------------------------------------------------
1 | // put at $GOPATH/src/myarch/myarch.go
2 | package main
3 |
4 | import "fmt"
5 | import "runtime"
6 |
7 | func main() {
8 | fmt.Printf("Hello from: %s %s",runtime.GOOS,runtime.GOARCH)
9 | }
10 |
--------------------------------------------------------------------------------
/saltstack-pillarcombine-etc-srv/README.md:
--------------------------------------------------------------------------------
1 | # blogcode
2 |
3 | Supporting code for blog entry:
4 | https://fabianlee.org/2017/05/12/saltstack-combine-multiple-pillar-files-under-a-single-key/
5 |
6 | Copy these files to: /srv
7 |
--------------------------------------------------------------------------------
/saltstack-pillarcombine-etc-srv/pillar/logrotate-matlab.sls:
--------------------------------------------------------------------------------
1 | # /srv/pillar/logrotate-matlab.sls
2 |
3 | logrotate-matlab:
4 | lookup:
5 | rotatejobs:
6 | matlab:
7 | path: /var/log/matlab/matlab.log
8 |
9 |
--------------------------------------------------------------------------------
/saltstack-pillarcombine-etc-srv/salt/top.sls:
--------------------------------------------------------------------------------
1 | # /srv/salt/top.sls
2 | base:
3 |
4 | 'G@role:matlab':
5 | - match: compound
6 | - logrotate
7 |
8 | 'G@role:maya3ds':
9 | - match: compound
10 | - logrotate
11 |
--------------------------------------------------------------------------------
/windows/rename-host.ps1:
--------------------------------------------------------------------------------
1 | param([String]$newname = "win2k19-dc1")
2 |
3 | rename-computer -NewName $newname
4 | write-output "name changed to $newname, going to reboot in 3 seconds..."
5 | start-sleep -seconds 3
6 |
7 | restart-computer
8 |
--------------------------------------------------------------------------------
/ansible/roles/echo/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: echo
4 | command: "echo 'Hello, {{noun}}! this is from {{inventory_hostname}} on dist {{ansible_distribution}}'"
5 | register: stdout
6 | - debug: msg="{{stdout.stdout_lines}}"
7 |
8 |
--------------------------------------------------------------------------------
/vagrant/awscli1604/aws-python-sdk.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # enter virtualenv created earlier
4 | source awscli/bin/activate
5 |
6 | # install boto3 python sdk for aws
7 | pip install boto3 --upgrade
8 |
9 | # exit virtualenv
10 | deactivate
11 |
--------------------------------------------------------------------------------
/bash/awk-using-variable.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # supports blog:
4 | #
5 | # Shows how awk can have use embedded bash variable
6 | #
7 |
8 | thedir="/tmp"
9 | ls $thedir | awk -v thedir=$thedir '{ printf "directory %s has file %s\n",thedir,$1 }'
10 |
--------------------------------------------------------------------------------
/python/print_color.py3:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # blog: https://fabianlee.org/2021/12/03/python-printing-in-color-using-ansi-color-codes/
4 | #
5 |
6 | print("\033[0;32mOK this is green\033[00m")
7 |
8 | print("\033[0;31mERROR this is red\033[00m")
9 |
--------------------------------------------------------------------------------
/saltstack-pillarcombine-etc-srv/pillar/top.sls:
--------------------------------------------------------------------------------
1 | # /srv/pillar/top.sls
2 | base:
3 |
4 | 'G@role:matlab':
5 | - match: compound
6 | - logrotate-matlab
7 |
8 | 'G@role:maya3ds':
9 | - match: compound
10 | - logrotate-maya3ds
11 |
12 |
--------------------------------------------------------------------------------
/terraform/json_vars_file/variable-values.json:
--------------------------------------------------------------------------------
1 | {
2 | "a":"123",
3 | "strlist": [ "a","b","c" ],
4 | "vms": {
5 | "host1": { "os":"ubuntu","cpu":2 },
6 | "host2": { "os":"rhel","cpu":1 },
7 | "host3": { "os":"windows","cpu":4 }
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/kubectl/kustomize-secret/README.md:
--------------------------------------------------------------------------------
1 | Example of how to create kubernetes TLS secret with kustomize
2 |
3 | blog entry:
4 |
  5 | The my-tls.crt and my-tls.key are quick self-signed certificates used for testing; they are not sensitive or used in any other context except this example.
6 |
--------------------------------------------------------------------------------
/terraform/hardcoded_resources/main.tf:
--------------------------------------------------------------------------------
1 |
2 | resource "local_file" "foo1" {
3 | content = "this is foo1"
4 | filename = "${path.module}/foo1.txt"
5 | }
6 | resource "local_file" "foo2" {
7 | content = "this is foo2"
8 | filename = "${path.module}/foo2.txt"
9 | }
10 |
--------------------------------------------------------------------------------
/terraform/json_vars_file/local-values.json:
--------------------------------------------------------------------------------
1 | {
2 | "mylocal": "is local",
3 | "mylocal_map" : {
4 | "one" : { "name":"first", "url":"first.com" },
5 | "two" : { "name":"second", "url":"second.net" },
6 | "three" : { "name":"third", "url":"thirde.org" }
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/k8s/kustomization-svc-acct-patch/sa-patch.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: default
6 | namespace: default
7 | annotations:
8 | foo: bar
9 | iam.gke.io/gcp-service-account: dummy-sa@dummyproject.iam.gserviceaccount.com
10 |
--------------------------------------------------------------------------------
/yq/update-deep/company-servers.yaml:
--------------------------------------------------------------------------------
1 | company:
2 | regions: [ 'us-east', 'us-central', 'us-west']
3 | inventory:
4 | machineA:
5 | region: us-east
6 | tags:
7 | - linux
8 | machineB:
9 | region: us-central
10 | tags:
11 | - windows
12 |
--------------------------------------------------------------------------------
/yq/update-filesection/company-servers.yaml:
--------------------------------------------------------------------------------
1 | company:
2 | regions: [ 'us-east', 'us-central', 'us-west']
3 | inventory:
4 | machineA:
5 | region: us-east
6 | tags:
7 | - linux
8 | machineB:
9 | region: us-central
10 | tags:
11 | - windows
12 |
--------------------------------------------------------------------------------
/ansible/lineinfileyml/my.yml.bak:
--------------------------------------------------------------------------------
1 | ---
2 | root:
3 | child1:
4 | key1: value1
5 | #key2: value2
6 | arrays:
7 | - animals
8 | - bear
9 | - cat
10 | - dog
11 | #- elephant
12 | - servers
13 | - www.google.com
14 | - www.ansible.com
15 |
--------------------------------------------------------------------------------
/vagrant/auditd/quicktest.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | echo "$0 invoked as user '${SUDO_USER}' which is member of groups:"
3 | groups
4 | echo "script being run as user id $EUID"
5 | if [[ $EUID -ne 0 ]] ; then echo "UNEXPECTED!!! this script was expectd to be run as root/sudo" ; exit 1 ; fi
6 | grep alice /etc/shadow
7 |
--------------------------------------------------------------------------------
/ansible/lineinfile/key-value.cfg.bak:
--------------------------------------------------------------------------------
1 | # Testing Ansible lineinfile with regex
2 |
3 | # spacing variations
4 | thiskey1=thisvalue1
5 | thiskey2 = thisvalue2
6 | thiskey3 =thisvalue3
7 |
8 | # comment variations
9 | #thiskey4 = thisvalue4
10 | # thiskey5 = thisvalue5
11 | ## thiskey6 = thisvalue6
12 |
13 |
--------------------------------------------------------------------------------
/openwrt/listuserpackages.sh:
--------------------------------------------------------------------------------
1 | #!/bin/ash
2 |
3 | packages="$(cat /usr/lib/opkg/status | grep -n 'user install' | cut -d ':' -f1)"
4 |
5 | printf %s "$packages" | while IFS= read -r nline; do
6 | echo -n "opkg install "
7 | sed -n 1,$nline' s/Package/&/p' /usr/lib/opkg/status | tail -n 1 | awk '{print $2}'
8 | done
9 |
--------------------------------------------------------------------------------
/vagrant/aptcacherng/aptcacherng.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sudo apt-get update -q
4 | sudo ufw allow 3142/tcp
5 | sudo apt-get install apt-cacher-ng -y
6 | echo "PassThroughPattern: .*" | sudo tee -a /etc/apt-cacher-ng/acng.conf
7 | echo "VerboseLog: 2" | sudo tee -a /etc/apt-cacher-ng/acng.conf
8 | sudo ufw allow 3142/tcp
9 |
--------------------------------------------------------------------------------
/sysprep/w2k12/test.ps1:
--------------------------------------------------------------------------------
1 | param($first,$second)
2 |
3 | write-host "hello world with params $first and $second" -foregroundcolor yellow
4 | $scriptDir = Split-Path -Path $MyInvocation.MyCommand.Definition -Parent
5 | write-host "script root at $scriptDir"
6 |
7 | set-content $scriptDir\test.log "hello world with params $first and $second"
8 |
--------------------------------------------------------------------------------
/kubectl/kustomize-secret/my-tls-secret-extfile.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: builtin
3 | kind: SecretGenerator
4 | metadata:
5 | name: my-tls-secret-extfile
6 | #namespace: default
7 | behavior: create
8 | files:
9 | - tls.crt=my-tls.crt
10 | - tls.key=my-tls.key
11 | type: kubernetes.io/tls
12 | options:
13 | disableNameSuffixHash: true
14 |
--------------------------------------------------------------------------------
/ansible/conditional_block.yml:
--------------------------------------------------------------------------------
1 | # called out from playbook-block-loop.yml, using include_tasks to create looping block
2 | ---
3 |
4 | - name: included block with conditional
5 | block:
6 | - name: included block task1
7 | debug: msg="hello"
8 | - name: included block task2
9 | debug: msg="{{item}}"
10 | when: do_block_logic|bool
11 |
12 |
--------------------------------------------------------------------------------
/vault/tokenreview.yaml:
--------------------------------------------------------------------------------
1 | # https://support.hashicorp.com/hc/en-us/articles/18712750429843-How-to-check-validity-of-JWT-token-in-kubernetes
2 | # export JWT=...
3 | # cat tokenreview.yaml | envsubst | kubectl apply -o json -f - | jq .status
4 | ---
5 | kind: TokenReview
6 | apiVersion: authentication.k8s.io/v1
7 | metadata:
8 | name: test
9 | spec:
10 | token: $JWT
11 |
--------------------------------------------------------------------------------
/k8s-dashboard/limited-ns-role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: Role
3 | metadata:
4 | annotations:
5 | rbac.authorization.kubernetes.io/autoupdate: "true"
6 | labels:
7 | name: limited-ns-role
8 | namespace: default
9 | rules:
10 | - apiGroups:
11 | - ""
12 | resources: ["secrets"]
13 | verbs:
14 | - get
15 | - list
16 | - watch
17 |
--------------------------------------------------------------------------------
/kubectl/kustomize-delete/myconfigmap.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 |
5 | configMapGenerator:
6 | - name: myconfigmap123
7 | namespace: default
8 | generatorOptions:
9 | disableNameSuffixHash: true
10 |
11 | #apiVersion: v1
12 | #kind: ConfigMap
13 | #metadata:
14 | # name: myconfigmap123
15 | #data:
16 | # foo: bar
17 |
--------------------------------------------------------------------------------
/bash/test_yaml_aggregation_with_substitution.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # use awk to create single aggregated yaml
4 | # use environment variable substitution
5 | #
6 | # blog:
7 | #
8 |
9 |
10 | # declare variables
11 | export first="1"
12 | export animal="dog"
13 |
14 | awk 'FNR==1 {print "---" "\n# source:" FILENAME }{print $0}' yamlfiles/*.yaml | envsubst '$first $animal'
15 |
16 |
--------------------------------------------------------------------------------
/saltstack-pillarcombine-etc-srv/salt/logrotate/init.sls:
--------------------------------------------------------------------------------
1 | # /srv/salt/logrotate/init.sls
2 | {% from "logrotate/map.jinja" import logrotate with context %}
3 |
4 | {% for key,value in logrotate['rotatejobs'].items() %}
5 | {% set thisjob = key %}
6 | {% set thislog = value.path %}
7 | logrotate-task-{{thisjob}}:
8 | cmd.run:
9 | - name: echo asked to logrotate {{thislog}}
10 | {% endfor %}
11 |
12 |
--------------------------------------------------------------------------------
/bash/generate_random_string.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # generates random string of alpha, num, special chars
4 | #
5 | # https://tecadmin.net/how-to-generate-random-string-in-bash/
6 | # https://stackoverflow.com/questions/61590006/generate-random-string-where-it-must-have-a-special-character-in-shell-script
7 |
8 | nchars=40
9 | cat /dev/urandom | tr -dc 'a-zA-Z0-9!"#$%&'\''()*+' | fold -w $nchars | head -n1
10 |
--------------------------------------------------------------------------------
/sysprep/w2k12/vagrant.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key
2 |
--------------------------------------------------------------------------------
/tensorflow/macm1/tf-list-devices.py3:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # suggests using pinned version of pip modules: https://forums.developer.apple.com/forums/thread/683757?page=2
3 | # https://gist.github.com/bhanukaManesha/0163a2d173593213424955a7c26bf8d5
4 | # python -c "import tensorflow as tf; print(tf.config.list_physical_devices())"
5 | import tensorflow as tf
6 | import pprint
7 |
8 | pprint.pprint(tf.config.list_physical_devices())
9 |
--------------------------------------------------------------------------------
/ansible/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory = ansible_inventory
3 | host_key_checking = False
4 |
5 | roles_path=./roles:../../ansible-roles-community
6 | #collections_path = ../../ansible-roles-community/galaxy-collections
7 |
8 | #interpreter_python = /usr/bin/python3
9 | #vault_password_file = .vault_pass
10 |
11 | # requires Ansible galaxy community module if included
12 | #stdout_callback = yaml
13 | #bin_ansible_callbacks = True
14 |
--------------------------------------------------------------------------------
/ansible/playbook-role-for-group.yaml:
--------------------------------------------------------------------------------
1 | # Example showing multiple 'hosts' sections to support applying role to specific group
2 | # blog: https://fabianlee.org/2021/05/24/ansible-applying-roles-to-certain-groups-in-a-single-playbook/
3 | ---
4 |
5 | - hosts: all
6 | gather_facts: yes
7 | roles:
8 | - echo
9 | - { role: echo, when: "'myrole' in group_names" }
10 |
11 | - hosts: myrole
12 | roles:
13 | - { role: echo }
14 |
--------------------------------------------------------------------------------
/k8s-dashboard/limited-clusterrole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | annotations:
5 | rbac.authorization.kubernetes.io/autoupdate: "true"
6 | labels:
7 | name: limited-clusterrole
8 | namespace: default
9 | rules:
10 | - apiGroups:
11 | - ""
12 | resources: ["namespaces","pods", "configmaps", "services", "pods/log"]
13 | verbs:
14 | - get
15 | - list
16 | - watch
17 |
--------------------------------------------------------------------------------
/ansible/playbook-filetree.yml:
--------------------------------------------------------------------------------
1 | # Example of using 'with_filetree' from role
2 | #
3 | # Supporting blog entry:
4 | # https://fabianlee.org/2023/04/15/ansible-generating-templates-while-maintaining-deep-directory-structure-using-with_filetree/
5 | #
6 | # example:
7 | # ansible-playbook playbook-filetree.yml --connection=local
8 | #
9 | ---
10 | - hosts: all
11 | become: no
12 | gather_facts: no
13 |
14 | roles:
15 | - roles/filetreetest
16 |
--------------------------------------------------------------------------------
/ansible/from-inside-cron.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Place at /tmp so it can be run by cron
4 | #
5 |
6 | # should be owned and writable by root (cron user)
7 | logfile=/tmp/from-inside-cron.log
8 | if [ ! -f $logfile ]; then
9 | touch $logfile
10 | fi
11 |
12 | echo "*********************** $(date) ***************************" >> $logfile
13 |
14 | # show environment variables where not all uppercase
15 | env | grep "^[A-Za-z][a-z]" >> $logfile
16 |
17 |
--------------------------------------------------------------------------------
/yq/multi-doc.yaml:
--------------------------------------------------------------------------------
1 | # multi-doc.yaml
2 | ---
3 | kind: namespace
4 | metadata:
5 | name: mynamespace
6 | ---
7 | kind: DaemonSet
8 | metadata:
9 | name: mydaemonset
10 | spec:
11 | template:
12 | metadata:
13 | annotations:
14 | my/annotation: "is-daemonset"
15 | ---
16 | kind: Deployment
17 | metadata:
18 | name: mydeployment
19 | spec:
20 | template:
21 | metadata:
22 | annotations:
23 | my/annotation: "is-deployment"
24 |
--------------------------------------------------------------------------------
/k8s/kustomization-svc-acct-patch/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 |
5 | # for testing:
6 | # kubectl kustomize . --enable-helm
7 | # kubectl get serviceaccount default -n default -o=yaml
8 | #
9 | # to apply:
10 | # kubectl apply -k .
11 | # OR
12 | # kubectl kustomize . --enable-helm | kubectl apply -f -
13 |
14 | resources:
15 | - sa.yaml
16 |
17 | #components:
18 |
19 | patches:
20 | - path: sa-patch.yaml
21 |
--------------------------------------------------------------------------------
/k8s-dashboard/dashboard-admin.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: dashboard-admin
5 | namespace: kubernetes-dashboard
6 | ---
7 | apiVersion: rbac.authorization.k8s.io/v1
8 | kind: ClusterRoleBinding
9 | metadata:
10 | name: cluster-admin-rolebinding
11 | roleRef:
12 | apiGroup: rbac.authorization.k8s.io
13 | kind: ClusterRole
14 | name: cluster-admin
15 | subjects:
16 | - kind: ServiceAccount
17 | name: dashboard-admin
18 | namespace: kubernetes-dashboard
19 |
--------------------------------------------------------------------------------
/sysprep/w2k12/Readme.md:
--------------------------------------------------------------------------------
1 |
2 | unattend.xml - sysprep for base windows 2012 R2
3 |
4 | unattend-vagrant.xml - sysprep for base windows 2012 R2 with vagrant user and winrm enabled
5 |
6 | openssh.ps1 - installs openssh on Windows, taken from the packer-windows github project
7 | https://github.com/joefitzgerald/packer-windows
8 |
9 | vagrant.pub - standard vagrant public key that allows vagrant to ssh into guest OS initially without password
10 | https://github.com/hashicorp/vagrant/tree/master/keys
11 |
12 |
--------------------------------------------------------------------------------
/bash/tmux-new-shared-session.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Creates a new tmux session capable of being shared
4 | #
5 |
6 | myid=$(whoami)
7 | sharedSocket=/tmp/${myid}_tmux_shared
8 |
9 | echo "===IMPORTANT==="
10 | echo "Be sure to open permissions to this socket for others when you get into your tmux session"
11 | echo ""
12 | echo "chmod 777 $sharedSocket"
13 | echo ""
14 | read -p "Press to continue" $dummy
15 |
16 | # create tmux session
17 | tmux -S $sharedSocket new-session -s ${myid}_tmux_shared
18 |
--------------------------------------------------------------------------------
/haproxy/ab/ab.sh:
--------------------------------------------------------------------------------
1 | sudo apt-get update -q
2 |
3 | cd /usr/src
4 | wget http://apache.mirrors.pair.com/httpd/httpd-2.4.28.tar.gz
5 | tar xfz httpd-2.4.28.tar.gz
6 | cd httpd-2.4.28
7 |
8 | cp support/ab.c support/ab.c.old
9 | wget https://raw.githubusercontent.com/fabianlee/blogcode/master/haproxy/ab.c -O support/ab.c
10 |
11 | sudo apt-get install libapr1-dev libaprutil1-dev libpcre3 libpcre3-dev -y
12 | ./configure
13 | make
14 |
15 | support/ab -V
16 |
17 | sudo cp support/ab /usr/sbin/ab
18 |
19 | ab -V
20 |
--------------------------------------------------------------------------------
/python/most_recent_file.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # How to get the latest modified file in a directory matching a pattern
4 | #
5 | import os
6 | import glob
7 |
8 | # get list of files that matches pattern
9 | pattern="/tmp/*"
10 | files = list(filter(os.path.isfile, glob.glob(pattern)))
11 |
12 | # sort by modified time
13 | files.sort(key=lambda x: os.path.getmtime(x))
14 |
15 | # get last item in list
16 | lastfile = files[-1]
17 |
18 | print("Most recent file matching {}: {}".format(pattern,lastfile))
19 |
--------------------------------------------------------------------------------
/windows/install-dc.ps1:
--------------------------------------------------------------------------------
1 | #
2 | # Windows PowerShell script for AD DS Deployment
3 | #
4 |
5 | Import-Module ADDSDeployment
6 | Install-ADDSForest `
7 | -CreateDnsDelegation:$false `
8 | -DatabasePath "C:\Windows\NTDS" `
9 | -DomainMode "Win2012R2" `
10 | -DomainName "FABIAN.LEE" `
11 | -DomainNetbiosName "FABIAN" `
12 | -ForestMode "Win2012R2" `
13 | -InstallDns:$true `
14 | -LogPath "C:\Windows\NTDS" `
15 | -NoRebootOnCompletion:$false `
16 | -SysvolPath "C:\Windows\SYSVOL" `
17 | -Force:$true
18 |
19 |
--------------------------------------------------------------------------------
/ansible/playbook-fallback.yml:
--------------------------------------------------------------------------------
1 | #
2 | # illustrates how multiple fallbacks can be chained together
3 | #
4 | ---
5 |
6 | - hosts: localhost
7 | connection: local
8 | become: no
9 | gather_facts: no
10 |
11 | vars:
12 | # commenting out any of these lines allows fallback values to be used
13 | mydict:
14 | mystring: foo
15 | fallback1: bar
16 | fallback2: final
17 |
18 | tasks:
19 |
20 | - debug:
21 | msg: "{{mydict.mystring|default(fallback1)|default(fallback2)}}"
22 |
23 |
24 |
--------------------------------------------------------------------------------
/vault/jwt/UNUSED-sa-secret-template.yaml:
--------------------------------------------------------------------------------
1 | # We are going to use short-lived JWT, so we do not want to create this secret
2 | # cat sa-secret-template.yaml | ns=sales name=sales-auth envsubst | kubectl apply -f -
3 | # cat sa-secret-template.yaml | ns=engineering name=eng-auth envsubst | kubectl apply -f -
4 | ---
5 | apiVersion: v1
6 | kind: Secret
7 | metadata:
8 | name: $name-with-token
9 | namespace: $ns
10 | annotations:
11 | kubernetes.io/service-account.name: $name
12 | type: kubernetes.io/service-account-token
13 |
--------------------------------------------------------------------------------
/bash/create_temp_dir.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Creates temp directory where temp files can be created
4 | # blog:
5 | #
6 |
7 | # create temporary directory
8 | tmp_dir=$(mktemp -d)
9 | echo "tmp_dir = $tmp_dir"
10 |
11 | # create temp file in the temp dir
12 | tmp_file=$(mktemp -p $tmp_dir)
13 | echo "tmp_file = $tmp_file"
14 |
15 | # create temp file with suffix in the temp dir
16 | tmp_file=$(mktemp -p $tmp_dir --suffix=.tgz)
17 | echo "tmp_file (tgz) = $tmp_file"
18 |
19 | # cleanup
20 | echo "deleting tmp_dir"
21 | rm -fr $tmp_dir
22 |
--------------------------------------------------------------------------------
/terraform/templatefile_test/script.sh.tpl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # will be replaced by Terraform templating
4 | default_args="${params_for_inline_command}"
5 |
6 | # double dollar sign is escape so Terraform does not replace
7 | final_args="$${@:-$default_args}"
8 | echo "final_args = $final_args"
9 |
10 | for myarg in $final_args; do
11 | echo "arg is $myarg"
12 | done
13 |
14 | echo "Illustrating a way to inject Terraform list variables via templating"
15 | %{ for param in params ~}
16 | echo "list item param is ${param}"
17 | %{ endfor ~}
18 |
--------------------------------------------------------------------------------
/kubectl/kustomize-delete/mydeployment.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: mydeployment
6 | namespace: default
7 | labels:
8 | app: mydeployment
9 | spec:
10 | foo: bar
11 | selector:
12 | matchLabels:
13 | app: mydeployment
14 | template:
15 | metadata:
16 | labels:
17 | app: mydeployment
18 | spec:
19 | containers:
20 | - name: mydeployment
21 | image: gcr.io/google-samples/hello-app:1.0
22 | ports:
23 | - containerPort: 8080
24 |
--------------------------------------------------------------------------------
/bash/is_script_sourced.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Tests whether script was invoked as source
4 | # blog: https://fabianlee.org/2022/05/07/bash-test-whether-script-is-invoked-directly-or-sourced/
5 | #
6 |
7 | # https://stackoverflow.com/questions/2683279/how-to-detect-if-a-script-is-being-sourced
8 | (return 0 2>/dev/null) && sourced=1 || sourced=0
9 | if [ $sourced -eq 0 ]; then
10 | echo "ERROR, this script is meant to be sourced. Try 'source ./is_script_sourced.sh'"
11 | exit 1
12 | fi
13 |
14 | export FOO=bar
15 | echo "'FOO' env var exported"
16 |
--------------------------------------------------------------------------------
/vault/vso/vaultconnection.yaml:
--------------------------------------------------------------------------------
1 | # https://developer.hashicorp.com/vault/docs/platform/k8s/vso/api-reference#vaultconnectionspec
2 | # cat vaultconnection.yaml | vso_ns=vault-secrets-operator vault_url=http://192.168.2.239:8200 envsubst | kubectl apply -f -
3 | apiVersion: secrets.hashicorp.com/v1beta1
4 | kind: VaultConnection
5 | metadata:
6 | namespace: $vso_ns
7 | name: vault-external
8 | spec:
9 | #address: http://vault.vault.svc.cluster.local:8200
10 | address: $vault_url
11 | skipTLSVerify: true
12 | #tlsServerName: ""
13 | #caCertSecretRef: ""
14 |
15 |
--------------------------------------------------------------------------------
/yq/update-filesection/install-yq.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sudo snap install yq
4 | yq --version
5 |
6 | exit 0
7 |
8 | #
9 | # manual steps for installing yq below
10 | #
11 |
12 | sudo apt get install curl jq wget -y
13 |
14 | # get latest release version
15 | latest_yq_linux=$(curl -sL https://api.github.com/repos/mikefarah/yq/releases/latest | jq -r ".assets[].browser_download_url" | grep linux_amd64.tar.gz)
16 |
17 | # download, extract, and put binary into PATH
18 | wget $latest_yq_linux
19 | tar xvfz yq_linux_amd64.tar.gz
20 | sudo cp yq_linux_amd64 /usr/local/bin/yq
21 |
--------------------------------------------------------------------------------
/tensorflow/macm1/tensorflow-sample.py3:
--------------------------------------------------------------------------------
1 | # https://developer.apple.com/metal/tensorflow-plugin/
2 | import tensorflow as tf
3 |
4 | cifar = tf.keras.datasets.cifar100
5 | (x_train, y_train), (x_test, y_test) = cifar.load_data()
6 | model = tf.keras.applications.ResNet50(
7 | include_top=True,
8 | weights=None,
9 | input_shape=(32, 32, 3),
10 | classes=100,)
11 |
12 | loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
13 | model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
14 | model.fit(x_train, y_train, epochs=5, batch_size=64)
15 |
--------------------------------------------------------------------------------
/yq/update-filesection/yq-mikefarah-merge-two-files.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # merging section of one file to another using yq (https://github.com/mikefarah/yq)
4 | #
5 |
6 | echo ""
7 | echo "--- company-servers.yaml --"
8 | cat company-servers.yaml
9 |
10 | echo ""
11 | echo "--- company-regions-new.yaml --"
12 | cat company-regions-new.yaml
13 |
14 | echo ""
15 | echo "--- merging 'regions-new' with eval-all and fileIndex ---"
16 | yq eval-all 'select(fileIndex==0).company.regions = select(fileIndex==1).regions-new | select(fileIndex==0)' company-servers.yaml company-regions-new.yaml
17 |
--------------------------------------------------------------------------------
/bash/openssl_key_cert_check.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # validates if key and cert are matched
4 | # can also check against custom CA
5 | #
6 |
7 | [[ -n "$1" && -n "$2" ]] || { echo "Usage: keyFile certFile [CACertFile]"; exit 1; }
8 |
9 | openssl rsa -noout -modulus -in $1 | openssl md5
10 | openssl x509 -noout -modulus -in $2 | openssl md5
11 | echo "if the md5 matches, the key matches the cert"
12 |
13 | if [ -n "$3" ]; then
14 | echo "Check if CA and cert are paired"
15 | openssl verify -CAfile $3 $2
16 | fi
17 |
18 | # check info in csr
19 | #openssl req -text -noout -verify -in my.csr
20 |
--------------------------------------------------------------------------------
/ansible/lineinfileyml/test-ansible-lineinfile.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # invokes local playbook that exercises Ansible lineinfile
3 |
4 | # reset file and show contents
5 | cp my.yml.bak my.yml
6 | echo "***ORIGINAL*******************************"
7 | cat my.yml
8 | echo "******************************************"
9 |
10 |
11 | # run transformation
12 | ansible-playbook --connection=local --inventory 127.0.0.1 test-lineinfile.yml
13 |
14 |
15 | # show results and then reset file
16 | echo "***UPDATED********************************"
17 | cat my.yml
18 | echo "******************************************"
19 | cp my.yml.bak my.yml
20 |
--------------------------------------------------------------------------------
/bash/test_awk_NF.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Example of using awk NF conditional to avoid error
4 | # blog: https://fabianlee.org/2024/10/18/bash-resolving-awk-run-time-error-negative-field-index/
5 | #
6 |
7 |
8 | read -r -d '' myheredoc1 <2 ? $(NF-2):"None"),(NF>1 ? $(NF-1):"None") }'
18 |
19 | echo ""
20 | echo ""
21 | echo "$myheredoc1" | awk -F/ '{printf "last 2 dir paths = %d %s/%s\n",NF,$(NF-2),$(NF-1) }'
22 |
23 |
--------------------------------------------------------------------------------
/keycloak/keycloak-subdomain-ingress.yaml:
--------------------------------------------------------------------------------
1 | # keycloak-subdomain-ingress.yaml
2 | ---
3 | apiVersion: networking.k8s.io/v1
4 | kind: Ingress
5 | metadata:
6 | annotations:
7 | kubernetes.io/ingress.class: nginx
8 | labels:
9 | app: keycloak
10 | name: keycloak
11 | spec:
12 | rules:
13 | - host: keycloak.kubeadm.local
14 | http:
15 | paths:
16 | - pathType: Prefix
17 | path: "/"
18 | backend:
19 | service:
20 | name: keycloak
21 | port:
22 | number: 8080
23 | tls:
24 | - hosts:
25 | - keycloak.kubeadm.local
26 | secretName: tls-credential
27 |
--------------------------------------------------------------------------------
/haproxy/ab/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM alpine:latest
2 |
3 | RUN apk update &&\
4 | apk add wget &&\
5 | mkdir /usr/src
6 | WORKDIR /usr/src
7 |
8 | ARG AB_VERSION=2.4.59
9 | RUN wget http://archive.apache.org/dist/httpd/httpd-${AB_VERSION}.tar.gz &&\
10 | tar xvfz httpd-*.tar.gz
11 | WORKDIR /usr/src/httpd-${AB_VERSION}
12 |
13 | RUN cp support/ab.c support/ab.c.old &&\
14 | wget https://raw.githubusercontent.com/fabianlee/blogcode/master/haproxy/ab.c -O support/ab.c &&\
15 | apk add build-base apr-dev apr apr-util apr-util-dev pcre pcre-dev &&\
16 | ./configure &&\
17 | make &&\
18 | cp support/ab /usr/sbin/ab
19 |
20 | ENTRYPOINT ["/usr/sbin/ab"]
21 |
--------------------------------------------------------------------------------
/keycloak/selfsigned-openssl.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | FQDN="$1"
4 | [ -n "$FQDN" ] || { echo "ERROR provide FQDN for self-signed cert"; exit 3; }
5 |
6 | # older was libssl1.0.0
7 | #sudo apt install libssl1.1 -y
8 |
9 | echo -------------------
10 | echo FQDN is $FQDN
11 | echo -------------------
12 |
13 | openssl req -x509 -nodes -days 3650 -newkey rsa:2048 \
14 | -keyout /tmp/$FQDN.key -out /tmp/$FQDN.pem \
15 | -subj "/C=US/ST=CA/L=SFO/O=myorg/CN=$FQDN"
16 |
17 | openssl x509 -in /tmp/$FQDN.pem -text -noout | grep -E "Subject:|Not After :|DNS:|Issuer:"
18 |
19 | echo ""
20 | echo "public cert and private key are located in /tmp directory"
21 |
22 |
--------------------------------------------------------------------------------
/ansible/lineinfile/test-ansible-lineinfile.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # invokes local playbook that exercises Ansible lineinfile
3 |
4 | # reset file and show contents
5 | cp key-value.cfg.bak key-value.cfg
6 | echo "***ORIGINAL*******************************"
7 | cat key-value.cfg
8 | echo "******************************************"
9 |
10 |
11 | # run transformation
12 | ansible-playbook --connection=local --inventory 127.0.0.1 test-lineinfile.yml
13 |
14 |
15 | # show results and then reset file
16 | echo "***UPDATED********************************"
17 | cat key-value.cfg
18 | echo "******************************************"
19 | cp key-value.cfg.bak key-value.cfg
20 |
--------------------------------------------------------------------------------
/k8s/example-ingress.yaml:
--------------------------------------------------------------------------------
1 | # from page: https://kubernetes.io/docs/tasks/access-application-cluster/ingress-minikube/
2 | # https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/service/networking/example-ingress.yaml
3 | apiVersion: networking.k8s.io/v1
4 | kind: Ingress
5 | metadata:
6 | name: example-ingress
7 | spec:
8 | ingressClassName: nginx
9 | rules:
10 | - host: hello-world.example
11 | http:
12 | paths:
13 | - path: /
14 | pathType: Prefix
15 | backend:
16 | service:
17 | name: web
18 | port:
19 | number: 8080
20 |
--------------------------------------------------------------------------------
/bash/test_sed_between.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # using sed begin and end range
4 | #
5 | # then using awk to get Nth match
6 | #
7 |
8 | echo "***get only certificate data..."
9 |
10 | openssl s_client -showcerts -servername www.google.com -connect www.google.com:443 < /dev/null 2>/dev/null | sed -n '/-----BEGIN CERTIFICATE-----/,/-----END CERTIFICATE-----/p'
11 |
12 | echo ""
13 | echo "***get Nth certificate data, awk starts at index 1..."
14 |
15 | openssl s_client -showcerts -servername www.google.com -connect www.google.com:443 < /dev/null 2>/dev/null | sed -n '/-----BEGIN CERTIFICATE-----/,/-----END CERTIFICATE-----/p' | awk "/-----BEGIN CERTIFICATE-----/{i++}i==2"
16 |
--------------------------------------------------------------------------------
/yq/update-deep/yq-mikefarah-update-deep-element.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # update deeply nested element using yq (https://github.com/mikefarah/yq)
4 | #
5 |
6 | echo ""
7 | echo "--- company-servers.yaml --"
8 | cat company-servers.yaml
9 |
10 | echo ""
11 | echo "-- replace all tags --"
12 | yq '(.. | select(has("tags")).tags) = ["coreos","arm64"]' company-servers.yaml
13 |
14 | echo ""
15 | echo "-- append to tags --"
16 | yq '(.. | select(has("tags")).tags) += ["amd64"]' company-servers.yaml
17 |
18 |
19 | echo ""
20 | echo "-- update tags where region=us-east --"
21 | yq '(.. | select(has("tags") and .region=="us-east").tags) += ["amd64"]' company-servers.yaml
22 |
--------------------------------------------------------------------------------
/ansible/playbook-kernel-headers.yaml:
--------------------------------------------------------------------------------
1 | # Example of installing matching linux kernel headers for Ubuntu
2 | #
3 | # Supporting blog entry: https://fabianlee.org/2021/05/19/ansible-installing-linux-headers-matching-kernel-for-ubuntu/
4 | #
5 | # example:
6 | # ansible-playbook playbook-kernel-headers.yaml
7 | #
8 | ---
9 | - hosts: all
10 | become: yes
11 | connection: local
12 | gather_facts: yes
13 |
14 | tasks:
15 |
16 | - name: install headers matching kernel
17 | apt:
18 | pkg:
19 | - linux-headers-{{ ansible_kernel }}
20 |
21 | - name: meta package that auto-matches kernel
22 | apt:
23 | pkg:
24 | - linux-headers-generic
25 |
26 |
--------------------------------------------------------------------------------
/ansible/playbook-find-files-iterate.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Playbook that uses 'find' to list all files on remote host (fileglob is for local)
3 | #
4 | # ansible-playbook playbook-find-files-iterate.yml --connection=local
5 | #
6 | ---
7 | - hosts: all
8 | become: no
9 | gather_facts: no
10 | connection: local
11 |
12 | tasks:
13 |
14 | - name: find remote files
15 | find:
16 | paths: /tmp
17 | recurse: no
18 | register: tmpfiles
19 | #- debug: msg="{{tmpfiles}}"
20 |
21 | - name: show each file
22 | debug:
23 | msg: "{{item.path}}"
24 | loop_control:
25 | label: "{{item.path}}"
26 | loop: "{{tmpfiles.files}}"
27 |
--------------------------------------------------------------------------------
/sysprep/w2k12/MakeDomainController.ps1:
--------------------------------------------------------------------------------
1 | param($netbiosName="contoso",$domainFQDN="contoso.com",$domainMode="Win2012R2")
2 |
3 | Import-Module servermanager
4 | Install-windowsfeature -name AD-Domain-Services -IncludeManagementTools
5 |
6 | $cred = ConvertTo-SecureString "ThisIsMyP4ss!" -AsPlainText -Force
7 |
8 | Import-Module ADDSDeployment
9 | Install-ADDSForest -CreateDNSDelegation:$false -DatabasePath "c:\windows\NTDS" -DomainMode $domainMode -DomainName $domainFQDN -DomainNetbiosName $netbiosName -ForestMode $domainMode -InstallDNS:$true -LogPath c:\Windows\NTDSLogs -NoRebootOnCompletion:$false -SysvolPath c:\Windows\SYSVOL -Force:$true -SafeModeAdministratorPassword $cred
10 |
11 |
--------------------------------------------------------------------------------
/bash/test_timeout.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Illustrates use of timeout to halt process after certain n seconds
4 | # https://ss64.com/bash/timeout.html
5 | #
6 |
7 | echo ""
8 | echo "--- 5 sec timeout not reached, normal exit code returned ---"
9 |
10 | timeout 5 sleep 1
11 | echo "exited uninterrupted short sleep with $? (expected 0)"
12 |
13 |
14 | echo ""
15 | echo "--- 5 sec timeout reached (124=timeout,127=cmd not found) ---"
16 |
17 | timeout 5 sleep 10
18 | echo "exited long sleep ending in true with $? (expected 124)"
19 |
20 |
21 | echo ""
22 | echo ""
23 | echo "---ping will die after 3 seconds ---"
24 | timeout 3 ping 127.0.0.1
25 | echo "exited ping with $? (expected 124 for timeout)"
26 |
--------------------------------------------------------------------------------
/k8s-dashboard/limited-binding.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRoleBinding
4 | metadata:
5 | name: limited-binding
6 | roleRef:
7 | kind: ClusterRole
8 | name: limited-clusterrole
9 | apiGroup: rbac.authorization.k8s.io
10 | subjects:
11 | - kind: ServiceAccount
12 | name: limited-user
13 | namespace: kubernetes-dashboard
14 | ---
15 | apiVersion: rbac.authorization.k8s.io/v1
16 | kind: RoleBinding
17 | metadata:
18 | name: limited-ns-binding
19 | roleRef:
20 | kind: Role
21 | name: limited-ns-role
22 | apiGroup: rbac.authorization.k8s.io
23 | subjects:
24 | - kind: ServiceAccount
25 | name: limited-user
26 | namespace: kubernetes-dashboard
27 |
--------------------------------------------------------------------------------
/pandas/mariadb/fetch_sample_employees_database.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Download Employees sample database for loading into MariaDB
4 | #
5 |
6 | echo "Fetching example Employees database to ~/Downloads/test_db"
7 |
8 | pushd . > /dev/null
9 | cd ~/Downloads
10 |
11 | # download and unzip
12 | if [ ! -f test_db-1.0.7.tar.gz ]; then
13 | wget https://github.com/datacharmer/test_db/releases/download/v1.0.7/test_db-1.0.7.tar.gz
14 | else
15 |   echo "test database archive already downloaded"
16 | fi
17 | if [ ! -d test_db ]; then
18 | tar xvfz test_db-1.0.7.tar.gz
19 | else
20 | echo "test database already unarchived"
21 | fi
22 |
23 | cd test_db
24 | ls -l *.sql
25 |
26 |
27 | popd > /dev/null
28 |
--------------------------------------------------------------------------------
/ansible/playbook-cached-facts.yaml:
--------------------------------------------------------------------------------
1 | # illustrates in-memory cache of playbook to pass variables from one host to another
2 | # https://docs.ansible.com/ansible/latest/user_guide/playbooks_vars_facts.html#caching-facts
3 | #
4 | # ansible-playbook playbook-cached-facts.yaml
5 |
6 | # save IP address of ansible orchestrator
7 | - hosts: localhost
8 | connection: local
9 | gather_facts: yes
10 | tasks:
11 | - set_fact:
12 | hostvm_ip: "{{ ansible_default_ipv4.address }}"
13 | - debug: msg="The localhost default IP is {{hostvm_ip}}"
14 |
15 | - hosts: all
16 | tasks:
17 | - debug: msg="The ansible orchestrator default IP was cached in memory as {{hostvars['localhost']['hostvm_ip']}}"
18 |
--------------------------------------------------------------------------------
/ansible/playbook-never-tag.yml:
--------------------------------------------------------------------------------
1 | # Example using 'never' tag to avoid action being executed when called without any tags
2 | # which falls back to 'all' behavior
3 | #
4 | # Supporting blog entry:
5 | # https://fabianlee.org/2021/04/05/ansible-action-only-executed-if-tag-set-avoiding-all-behavior/
6 | #
7 | # example:
8 | # ansible-playbook playbook-never-tag.yml --connection=local
9 | #
10 | ---
11 | - hosts: all
12 | become: no
13 | gather_facts: no
14 |
15 | #roles:
16 | # - roles/conditional-tasks
17 |
18 | tasks:
19 |
20 | - debug: msg="when tag 'run'"
21 | tags: run
22 |
23 |   - debug: msg="only when tag is explicitly set to 'delete'"
24 | tags: ['delete','never']
25 |
--------------------------------------------------------------------------------
/jekyll/escape_liquid_directives.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Escapes double curly brackets and curly bracket percent that
4 | # would be interpreted as Liquid template directives
5 |
6 | me=$(basename "$0")
7 |
8 | for pattern in '{{' '}}' '{%' '%}'; do
9 | echo "PATTERN $pattern"
10 | for file in $(grep -srl $pattern); do
11 | if [[ "$file" == "$me" ]]; then
12 | #echo "SKIPPING self"
13 | continue
14 | fi
15 |
16 | echo "CLEANING $file"
17 |
18 | # clean up {% ... %}
19 | sed -i 's/{%/{\\%/g' $file
20 | sed -i 's/%}/\\%}/g' $file
21 |
22 | # clean up {{ ... }}
23 | sed -i 's/{{/\\{\\{/g' $file
24 | sed -i 's/}}/\\}\\}/g' $file
25 | done
26 | done
27 |
--------------------------------------------------------------------------------
/bash/test_cached_file.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Test of file existence, size, and age
4 | #
5 | # blog: https://fabianlee.org/2023/05/10/bash-testing-if-a-file-exists-has-content-and-is-recently-modified/
6 | #
7 |
8 | # test for existence and content
9 | cachefile=/tmp/cached_file.html
10 | [ -s $cachefile ] || echo "File $cachefile does not exist or is 0 bytes"
11 |
12 | find $cachefile -mtime -1 -size +0b 2>/dev/null | grep .
13 | if [ $? -ne 0 ]; then
14 | echo "$cachefile needs to be downloaded"
15 | wget -q https://fabianlee.org/ -O $cachefile
16 | echo "DONE downloaded $cachefile"
17 | else
18 | echo "$cachefile already exists, has content size, and was last modified in the last day"
19 | fi
20 |
21 |
--------------------------------------------------------------------------------
/bash/logic-as-shorthand-control.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Using logic expressions as shortcut control statements
4 | #
5 | # blog: https://fabianlee.org/2020/10/14/bash-using-logic-expressions-as-a-shorthand-for-if-then-else-control
6 | #
7 |
8 | [ 1 -eq 1 ] && echo "correct, 1 does indeed equal 1" || echo "impossible!"
9 | [ 1 -eq 0 ] && echo "impossible!" || echo "correct, 1 does not equal 0"
10 |
11 | # if expression false, runs only third expression
12 | [ 1 -eq 1 ] && { echo "1 does indeed equal 1";false; } || echo "1 does not equal 1 !!!"
13 |
14 |
15 |
16 | # for assertions (file existence, variable population, etc)
17 | [ 1 -eq 1 ] || { echo "ERROR this test should have been true"; exit 3; }
18 |
19 |
20 |
--------------------------------------------------------------------------------
/tensorflow/macm1/README.md:
--------------------------------------------------------------------------------
1 | pyenv install --list | grep 3\.11
2 | pyenv install 3.11.10
3 |
4 | pyenv virtualenv 3.11.10 py3.11.10
5 | pyenv virtualenvs
6 |
7 | mkdir tflow-macm1-py311 && cd $_
8 | pyenv local py3.11.10
9 | cat .python-version
10 |
11 | python --version
12 | python -m pip install --upgrade pip
13 | pip install -r requirements.txt
14 |
15 | pip list | grep tensorflow-m
16 | tensorflow-macos 2.15.1
17 | tensorflow-metal 1.1.0
18 |
19 | python -c "import tensorflow as tf; print(tf.config.list_physical_devices())"
20 | [PhysicalDevice(name='/physical_device:CPU:0', device_type='CPU'),
21 | PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
22 |
23 |
24 |
25 |
26 |
--------------------------------------------------------------------------------
/kubectl/kustomize-secret/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # shows how tls secret can be embedded as base64 OR provided as file
2 | #
3 | # generating secret+key
4 | # openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout my-tls.key -out my-tls.crt -subj "/CN=my-tls.com"
5 | #
6 | # testing kustomize:
7 | # kustomize build --enable-helm
8 | # applying to cluster:
9 | # kubectl apply -k .
10 | ---
11 | apiVersion: kustomize.config.k8s.io/v1beta1
12 | kind: Kustomization
13 |
14 | # override if you want non-default
15 | #namespace: default
16 |
17 | resources:
18 | # load secret from embedded content
19 | - my-tls-secret-embedded.yaml
20 |
21 | generators:
22 | # load secret from file
23 | - my-tls-secret-extfile.yaml
24 |
--------------------------------------------------------------------------------
/vagrant/gpg1604/createsecret.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # create directory r/w only by root/sudo
4 | sudo mkdir /etc/deployerkeys
5 | sudo chmod 600 /etc/deployerkeys
6 |
7 | # define input used to generate key
8 | cat >genkeyinput < /tmp/test.crypt
24 |
25 |
--------------------------------------------------------------------------------
/ansible/playbook-timestamp.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Playbook that shows how a timestamp can be generated (for logs, archives, etc)
3 | #
4 | # ansible-playbook playbook-timestamp.yml -l localhost
5 | #
6 | # blog:
7 | #
8 | ---
9 | - hosts: all
10 | become: no
11 | gather_facts: yes # if 'no', then ansible_date_time would not be populated
12 | connection: local
13 |
14 | tasks:
15 |
16 | - set_fact:
17 | facts_timestamp: "{{ ansible_date_time.iso8601_basic_short }}" # requires gather_facts
18 | pipe_timestamp: "{{ lookup('pipe', 'date +%Y%m%dT%H%M%S') }}"
19 |
20 | - debug:
21 | msg: "from facts: {{ facts_timestamp }}"
22 | - debug:
23 | msg: "from pipe: {{pipe_timestamp}}"
24 |
25 |
26 |
--------------------------------------------------------------------------------
/bash/show-git-untracked-files.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Shows untracked files in git repo
4 | #
5 | # more typical would be to simply use 'git status --ignored -s' or 'git ls-files --others'
6 | # https://fabianlee.org/2020/11/22/git-identifying-files-that-gitignore-is-purposely-skipping/
7 | #
8 |
9 | global_allfiles=$(mktemp)
10 | global_gitfiles=$(mktemp)
11 |
12 | # all files in folder, no hidden folders
13 | find . -type f -not -path '*/\.*' | sort > $global_allfiles
14 |
15 | # all files in git already under source control
16 | git ls-files | xargs -d '\n' printf "./%s\n" | sort > $global_gitfiles
17 |
18 | comm $global_allfiles $global_gitfiles -2 | grep -v "^\s"
19 |
20 | rm $global_allfiles
21 | rm $global_gitfiles
22 |
--------------------------------------------------------------------------------
/gitlab/install-zaquestion-lab-cli.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Install 'lab' GitLab CLI
4 | # https://github.com/zaquestion/lab
5 | #
6 |
7 | latest="$(curl -sL 'https://api.github.com/repos/zaquestion/lab/releases/latest' | grep 'tag_name' | grep -o 'v[0-9\.]\+' | cut -c 2-)"
8 | echo "latest=$latest"
9 |
10 | [ -f /tmp/lab.tar.gz ] || wget "https://github.com/zaquestion/lab/releases/download/v${latest}/lab_${latest}_linux_amd64.tar.gz" -O /tmp/lab.tar.gz
11 |
12 | # place binary in current directory, then move to PATH
13 | tar xvfz /tmp/lab.tar.gz lab
14 | sudo chown root:root lab
15 | sudo mv lab /usr/local/bin/.
16 |
17 | which lab
18 | lab --version
19 |
20 | echo "Run 'lab' to create config ~/.config/lab/lab.toml"
21 |
22 |
--------------------------------------------------------------------------------
/ansible/playbook-loop-exceptfor.yml:
--------------------------------------------------------------------------------
1 | # Example of processing each home directory but a select list
2 | #
3 | ---
4 | - hosts: all
5 | become: no
6 | gather_facts: no
7 |
8 | tasks:
9 |
10 | - name: find all user home directories
11 | find:
12 | paths: /home
13 | pattern: "*"
14 | depth: 1
15 | file_type: directory
16 | register: user_dirs
17 |
18 |
19 | # processing every home directory that is not root or fabian
20 | - name: each file
21 | debug:
22 | msg: "Processing ... {{item.path}}"
23 | loop: "{{user_dirs.files}}"
24 | when: "not item.path in ['/home/root','/home/fabian']"
25 | loop_control:
26 | label: "{{item.path}}"
27 |
28 |
--------------------------------------------------------------------------------
/bash/lines_before_and_after_regex.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # supports blog: https://fabianlee.org/2020/09/06/bash-output-all-lines-before-after-line-identified-by-regex/
4 | #
5 | # example showing how to:
6 | # show all the lines before a regex match
7 | # show all the lines after a regex match
8 | #
9 |
10 | read -r -d '' lines < /dev/null && pwd)
8 | CURRENT_DIR=$(pwd)
9 |
10 | echo "Current directory: $CURRENT_DIR"
11 | echo "Relative script dir: $SCRIPT_DIR_REL"
12 | echo "Absolute script dir: $SCRIPT_DIR_ABS"
13 |
14 |
15 | echo ""
16 | SCRIPT_NAME=$(basename $0)
17 | echo "Script name: $SCRIPT_NAME"
18 |
19 |
20 | echo ""
21 | echo "Testing location using relative and absolute paths"
22 | set -x
23 | ls -l $SCRIPT_DIR_REL/$SCRIPT_NAME
24 | ls -l $SCRIPT_DIR_ABS/$SCRIPT_NAME
25 |
--------------------------------------------------------------------------------
/bash/test_value_choices.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Test multiple choices for value
4 | # https://
5 | #
6 | #
7 |
8 | myval="$1"
9 | [ -n "$myval" ] || { echo "ERROR please provide a parameter at the command line"; exit 1; }
10 | echo "myval = $myval"
11 |
12 | # one liner
13 | [[ "$myval" == @("on"|"off") ]] || { echo "ERROR does not match"; exit 4; }
14 |
15 | # if statement
16 | if [[ ! "$myval" == @(on|off) ]]; then
17 | echo "ERROR value must either be 'on' or 'off'"; exit 3;
18 | else
19 | echo "OK value matched"
20 | fi
21 |
22 | # use case statement
23 | #case $myval in
24 | #on|off)
25 | # ;;
26 | #*)
27 | # echo "ERROR does not match"
28 | # exit 5;
29 | # ;;
30 | #esac
31 |
32 |
33 | echo "SUCCESS value was '$myval'"
34 |
--------------------------------------------------------------------------------
/vagrant/awscli1604/list_aws_regions.py:
--------------------------------------------------------------------------------
1 | #
2 | # Runs smoke test on AWS SDK for Python
3 | # list regions
4 | #
5 |
6 | # boto3 library
7 | import boto3, botocore
8 | import logging
9 |
10 | # full debug logging so we can see any issues
11 | logging.basicConfig(level=logging.DEBUG,format=f'%(asctime)s %(levelname)s %(message)s')
12 | logger = logging.getLogger()
13 |
14 | # create client
15 | ec2 = boto3.client('ec2')
16 |
17 | # Retrieves all regions/endpoints that work with EC2
18 | response = ec2.describe_regions()
19 | #print('Regions:', response['Regions'])
20 |
21 | # prints each region details
22 | for region in response['Regions']:
23 | print("Name: {:16} Endpoint: {}".format(region['RegionName'],region['Endpoint']))
24 |
25 |
--------------------------------------------------------------------------------
/bash/compare_values_on_line_regex_capture.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Test values on same line using regex capture group
4 | #
5 |
6 | # lines are similar to kubectl output checking deployment desired replica count and true ready count
7 | # when 2nd and 3rd column match, the deployment is fully healthy
8 | # name,numberScheduled,numberReady
9 | read -r -d '' csvlines < allcerts.pem
8 |
9 | # count how many certs were pulled
10 | certcount=$(grep -e "-----BEGIN CERTIFICATE-----" allcerts.pem | wc -l)
11 |
12 | # pull each cert individually, use openssl to show critical properties
13 | for index in $(seq 1 $certcount); do
14 | echo "==== cert $index"
15 | awk "/-----BEGIN CERTIFICATE-----/{i++}i==$index" allcerts.pem > $index.crt
16 | openssl x509 -in $index.crt -text -noout | grep -E "Subject:|Not After :|DNS:"
17 | rm $index.crt
18 | done
19 |
20 | rm allcerts.pem
21 |
--------------------------------------------------------------------------------
/haproxy/selfsigned.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sudo mkdir -p /etc/pki/tls/certs
4 | sudo chmod 755 /etc/pki/tls/certs
5 |
6 | # https://askubuntu.com/questions/1424442/libssl1-1-is-deprecated-in-ubuntu-22-04-what-to-do-now
7 | #lsb_release -rs
8 | # package only valid for Ubuntu<22
9 | sudo apt-get install libssl1.0.0 -y
10 |
11 | cd /etc/pki/tls/certs
12 | if [[ -n "$1" ]]; then
13 | export FQDN="$1"
14 | else
15 | export FQDN=`hostname -f`
16 | fi
17 | echo -------------------
18 | echo FQDN is $FQDN
19 | echo -------------------
20 |
21 | sudo openssl req -x509 -nodes -days 3650 -newkey rsa:2048 \
22 | -keyout $FQDN.key -out $FQDN.crt \
23 | -subj "/C=US/ST=CA/L=SFO/O=myorg/CN=$FQDN"
24 |
25 | sudo cat $FQDN.crt $FQDN.key | sudo tee -a $FQDN.pem
26 | openssl x509 -noout -subject -in /etc/pki/tls/certs/$FQDN.crt
27 |
--------------------------------------------------------------------------------
/bash/tmux-view-shared-session.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Allows you to view tmux session shared by someone else
4 | #
5 |
6 | myid="$1"
7 | if [ -z "$myid" ]; then
8 | echo "ERROR you must supply the userid of the person whose tmux session you want to view"
9 | exit 3
10 | fi
11 |
12 | sharedSocket=/tmp/${myid}_tmux_shared
13 | ls -l $sharedSocket
14 | if [[ ! -r $sharedSocket || ! -w $sharedSocket || ! -x $sharedSocket ]]; then
15 | echo "Need full permission to $sharedSocket to use tmux sharing. Request that $myid runs:"
16 | echo "chmod 777 $sharedSocket"
17 | exit 9
18 | fi
19 |
20 | echo "==SESSIONS=="
21 | tmux -S $sharedSocket list-sessions
22 |
23 | echo ""
24 | read -p "press to start viewing session, to detach press CTRL-b d" dummy
25 | tmux -S $sharedSocket attach-session -t ${myid}_tmux_shared -r
26 |
--------------------------------------------------------------------------------
/vault/vso/vaultstaticsecret-hello.yaml:
--------------------------------------------------------------------------------
1 | # https://github.com/hashicorp/vault-secrets-operator/blob/main/chart/crds/secrets.hashicorp.com_vaultstaticsecrets.yaml
2 | # https://github.com/hashicorp/vault-secrets-operator/tree/main/config/samples
3 | # https://developer.hashicorp.com/vault/docs/platform/k8s/vso/api-reference#vaultstaticsecretspec
4 | # kubectl apply -f vaultstaticsecret-hello.yaml
5 | apiVersion: secrets.hashicorp.com/v1beta1
6 | kind: VaultStaticSecret
7 | metadata:
8 | name: vso-staticsecret-hello
9 | spec:
10 | vaultAuthRef: vso-jwt-auth #$vso_ns/vso-jwt-auth
11 | mount: secret
12 | type: kv-v2
13 | path: webapp/hello
14 | refreshAfter: 10s
15 | hmacSecretData: true
16 | rolloutRestartTargets:
17 | - kind: Deployment
18 | name: web-hello
19 | destination:
20 | create: true
21 | name: hello-secret
22 |
--------------------------------------------------------------------------------
/socat/curl_client_calls.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # uses curl to test socat HTTPS web server
4 | #
5 | # blog: https://fabianlee.org/2022/10/26/linux-socat-used-as-secure-https-web-server/
6 | #
7 |
8 | [[ -x $(which curl) ]] || sudo apt install curl -y
9 |
10 | FQDN="${1:-mysocat.local}"
11 | PORT="${2:-9443}"
12 |
13 | cacert="${FQDN}.crt"
14 | resolvestr="${FQDN}:${PORT}:127.0.0.1"
15 |
16 | set -x
17 | curl --cacert $cacert --resolve $resolvestr https://${FQDN}:${PORT}/
18 |
19 | # do POST with x-www-form-urlencoded parameters
20 | curl --cacert $cacert --resolve $resolvestr -X POST -d 'foo=bar&email=me@mydomain.com' https://${FQDN}:${PORT}/test
21 |
22 | # do file POST
23 | [[ -f toupload.txt ]] || echo "this is going up">toupload.txt
24 | curl --cacert $cacert --resolve $resolvestr -X POST -F 'image=@toupload.txt' https://${FQDN}:${PORT}/test
25 |
--------------------------------------------------------------------------------
/jekyll/install_jekyll_via_rvm.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sudo apt update
4 | sudo apt install -y curl gpg g++ gcc autoconf automake bison libc6-dev libffi-dev libgdbm-dev libncurses5-dev libsqlite3-dev libtool libyaml-dev make pkg-config sqlite3 zlib1g-dev libgmp-dev libreadline-dev libssl-dev
5 |
6 | # install GPG keys to packages
7 | curl -sSL https://rvm.io/mpapis.asc | gpg --import -
8 | curl -sSL https://rvm.io/pkuczynski.asc | gpg --import -
9 |
10 | # install rvm and source the env
11 | curl -sSL https://get.rvm.io | bash -s stable
12 | source ~/.rvm/scripts/rvm
13 |
14 | # use rvm to install ruby and set default ruby 3.x
15 | rvm install ruby-3.1.0
16 | rvm --default use ruby-3.1.0
17 |
18 | # check versions (should be 3.1.x+)
19 | ruby --version
20 | gem --version
21 |
22 | # install jekyll
23 | gem install jekyll
24 | jekyll --version
25 |
--------------------------------------------------------------------------------
/python/json-to-dot2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # minor modifications to make python3 safe
4 | # https://stackoverflow.com/questions/33489209/print-unique-json-keys-in-dot-notation-using-python
5 |
6 | import json
7 | import sys
8 |
9 | json_data = json.load(sys.stdin)
10 |
11 | def walk_keys(obj, path = ""):
12 | if isinstance(obj, dict):
13 | for k, v in obj.items(): # iteritems is py2 only
14 | for r in walk_keys(v, path + "." + k if path else k):
15 | yield r
16 | elif isinstance(obj, list):
17 | for i, v in enumerate(obj):
18 | s = ""
19 | for r in walk_keys(v, path if path else s):
20 | yield r
21 | else:
22 | yield path
23 |
24 | all_keys = list(set(walk_keys(json_data)))
25 |
26 | print('\n'.join([str(x) for x in sorted(all_keys)]))
27 |
28 |
--------------------------------------------------------------------------------
/terraform/yaml_contribution_model/map-external.tf:
--------------------------------------------------------------------------------
1 | #
2 | # example of pulling external map
3 | #
4 |
5 | locals {
6 | datacenter_base_map = {
7 | usne1 = { name="US northeast1",resources=99 },
8 | usw1 = { name="US west1",resources=32 }
9 | }
10 |
11 | datacenter_ext_yaml = fileexists("${path.module}/external-dc.yaml") ? yamldecode(file("${path.module}/external-dc.yaml")):{ datacenters={} }
12 |
13 | datacenter_data_merge = merge( local.datacenter_base_map, local.datacenter_ext_yaml.datacenters )
14 | }
15 |
16 | # for data-driven resources
17 | resource "local_file" "dc" {
18 | for_each = local.datacenter_data_merge
19 | content = "${each.value.name} with resources ${each.value.resources}"
20 | filename = "${path.module}/${each.key}.txt"
21 | }
22 |
23 | output "show_data_datacenter" {
24 | value = local.datacenter_data_merge
25 | }
26 |
--------------------------------------------------------------------------------
/vagrant/gpg1604/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | #
4 | #
5 | vmname = 'gpg1604'
6 | boxname = 'ubuntu/xenial64'
7 |
8 | Vagrant.configure(2) do |config|
9 | config.vm.hostname = "#{vmname}"
10 | config.vm.box = "#{boxname}"
11 | config.vm.network "private_network", type: "dhcp"
12 |
13 | config.vm.provider "virtualbox" do |v|
14 | v.name = "#{vmname}"
15 | v.customize ["modifyvm", :id, "--memory","1024" ]
16 | v.customize ["modifyvm", :id, "--cpus","1" ]
17 | end
18 |
19 | config.vm.provision "shell", path: "gpgsetup.sh", privileged: false
20 | config.vm.provision "shell", path: "createsecret.sh", privileged: false
21 | config.vm.provision "shell", path: "fordevelopers.sh", privileged: false
22 |
23 | config.vm.provision "shell", inline: <<-SHELL
24 |
25 | echo "SUCCESS!"
26 |
27 | SHELL
28 |
29 | end
30 |
--------------------------------------------------------------------------------
/golang/echoservice/systemd/echoservice.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Echo service
3 | ConditionPathExists=/home/ubuntu/work/src/echoservice/echoservice
4 | After=network.target
5 |
6 | [Service]
7 | Type=simple
8 | User=echoservice
9 | Group=echoservice
10 | LimitNOFILE=1024
11 |
12 | Restart=on-failure
13 | RestartSec=10
14 | StartLimitIntervalSec=60
15 |
16 | WorkingDirectory=/home/ubuntu/work/src/echoservice
17 | ExecStart=/home/ubuntu/work/src/echoservice/echoservice
18 |
19 | # make sure log directory exists and owned by syslog
20 | PermissionsStartOnly=true
21 | ExecStartPre=/bin/mkdir -p /var/log/echoservice
22 | ExecStartPre=/bin/chown syslog:adm /var/log/echoservice
23 | ExecStartPre=/bin/chmod 755 /var/log/echoservice
24 | StandardOutput=syslog
25 | StandardError=syslog
26 | SyslogIdentifier=echoservice
27 |
28 | [Install]
29 | WantedBy=multi-user.target
30 |
--------------------------------------------------------------------------------
/ansible/split-string-with-tertiary.yml:
--------------------------------------------------------------------------------
1 | #
# Shows how to do conditional (ternary) with string split
3 | #
4 | # ansible-playbook split-string-with-tertiary.yml --connection=local
5 | #
6 | ---
7 | - hosts: all
8 | become: no
9 | gather_facts: no
10 | connection: local
11 |
12 | vars:
13 | vmdk: "subfolder/my-asldj.vmdk"
14 | #vmdk: "/subfolder/my-asldj.vmdk"
15 |
16 | tasks:
17 |
18 | # can handle optional leading forward slash
19 | - set_fact:
20 | just_parent: "{{ vmdk.split('/')[1] if vmdk.startswith('/') else vmdk.split('/')[0] }}"
21 | just_name: "{{ vmdk.split('/')[2] if vmdk.startswith('/') else vmdk.split('/')[1] }}"
22 |
23 | # shows just the name without starting folder path
24 | - debug:
25 | msg: "folder path is: {{ just_parent }}"
26 | - debug:
27 | msg: "file name is: {{ just_name }}"
28 |
--------------------------------------------------------------------------------
/vagrant/aptcacherng/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | vmname = 'aptcacherng'
5 | boxname = 'ubuntu/xenial64'
6 |
7 | Vagrant.configure(2) do |config|
8 | config.vm.hostname = "#{vmname}"
9 | config.vm.box = "#{boxname}"
10 | config.vm.network "public_network", ip: "192.168.2.125", bridge: "eth0"
11 | config.vm.box_check_update = false
12 |
13 | config.vm.provider "virtualbox" do |v|
14 | v.name = "#{vmname}"
15 | v.customize ["modifyvm", :id, "--memory","1024" ]
16 | v.customize ["modifyvm", :id, "--cpus","1" ]
17 | end
18 |
19 | config.vm.synced_folder "/home/fabian/Documents", "/Documents"
20 |
21 | config.vm.provision "shell", path: "aptcacherng.sh", privileged: false
22 |
23 | config.vm.provision "shell", inline: <<-SHELL
24 | #apt-get update -q
25 | echo "DONE" >> /tmp/done.txt
26 | SHELL
27 |
28 | end
29 |
--------------------------------------------------------------------------------
/zabbixlld/produce.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Zabbix low-level discovery (LLD) example: prints the discovery JSON on
# stdout, then pushes one random trapper value per discovered item.

# send back discovery key, list of all available array keys
# for a discovery type of "Zabbix agent"
cat << EOF
{ "data": [
{ "{#ITEMNAME}":"carrot" },
{ "{#ITEMNAME}":"banana" },
{ "{#ITEMNAME}":"lettuce" },
{ "{#ITEMNAME}":"tomato" }
]}
EOF

# now take advantage of this invocation to send back values
# build up list of values in /tmp/zdata.txt
agenthost="$(hostname -f)"
zserver="myzabbix"
zport="10051"

# truncate the data file
: > /tmp/zdata.txt
for item in "carrot" "banana" "lettuce" "tomato"; do
  randNum="$(( (RANDOM % 30)+1 ))"
  # quoted: an unquoted instock[$item] is a glob character class and could
  # be expanded against files in the current directory
  echo "$agenthost instock[$item] $randNum" >> /tmp/zdata.txt
done

# push all these trapper values back to zabbix
zabbix_sender -vv -z "$zserver" -p "$zport" -i /tmp/zdata.txt >> /tmp/zsender.log 2>&1
--------------------------------------------------------------------------------
/golang/sleepservice/systemd/sleepservice.service:
--------------------------------------------------------------------------------
[Unit]
Description=Sleep service
# only start when the binary is actually present
ConditionPathExists=/home/ubuntu/work/src/sleepservice/sleepservice
After=network.target
# rate-limit restart attempts; the original 'startLimitIntervalSec=' was
# silently ignored because systemd directives are case-sensitive
# (systemd >= 230 expects this in [Unit])
StartLimitIntervalSec=60

[Service]
Type=simple
User=sleepservice
Group=sleepservice
LimitNOFILE=1024

Restart=on-failure
RestartSec=10

WorkingDirectory=/home/ubuntu/work/src/sleepservice
ExecStart=/home/ubuntu/work/src/sleepservice/sleepservice --name=foo

# make sure log directory exists and owned by syslog
PermissionsStartOnly=true
ExecStartPre=/bin/mkdir -p /var/log/sleepservice
ExecStartPre=/bin/chown syslog:adm /var/log/sleepservice
ExecStartPre=/bin/chmod 755 /var/log/sleepservice
StandardOutput=syslog
StandardError=syslog
SyslogIdentifier=sleepservice

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/ansible/playbook-ubuntu-cron.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Playbook that creates /etc/cron.d entry
3 | #
4 | # ansible-playbook playbook-ubuntu-cron.yml -l localhost
5 | #
6 | # blog: https://fabianlee.org/2021/06/17/ansible-creating-a-cron-d-file-for-periodic-system-jobs/
7 | #
8 | ---
9 | - hosts: all
10 | become: yes
11 | gather_facts: no
12 | connection: local
13 |
14 | tasks:
15 |
16 | - name: Places script for cron to run
17 | copy:
18 | src: "{{playbook_dir}}/from-inside-cron.sh"
19 | dest: /tmp/from-inside-cron.sh
20 | mode: 0755
21 |
22 | - name: Creates a cron file under /etc/cron.d
23 | cron:
24 | name: from-inside-cron
25 | day: "*"
26 | minute: "*"
27 | hour: "*"
28 | user: root
29 | job: "foo=bar test=this /tmp/from-inside-cron.sh"
30 | cron_file: from-inside-cron
31 | state: present
32 |
--------------------------------------------------------------------------------
/ansible/playbook-remove-symlink.yml:
--------------------------------------------------------------------------------
1 | # Example of checking for symlink and removing
2 | #
3 | # Supporting blog entry:
4 | # https://fabianlee.org/2021/03/02/ansible-deleting-a-file-path-but-only-if-a-symbolic-link/
5 | #
6 | # example:
7 | # ansible-playbook playbook-remove-symlink.yml --extra-vars "link_path=testlink"
8 | #
9 | ---
10 | - hosts: all
11 | become: no
12 | gather_facts: no
13 |
14 | vars:
15 | link_path: testlink
16 |
17 | tasks:
18 |
19 | - stat:
20 | path: "{{link_path}}"
21 | register: link
22 |
23 | - debug:
24 | msg: "does path {{link_path}} exist? {{link.stat.exists}} and is it link {{link.stat.islnk|default(false)}}"
25 |
26 | # only deletes if symbolic link
27 | - name: remove symlink
28 | file:
29 | path: "{{link_path}}"
30 | state: absent
31 | when: link.stat.islnk is defined and link.stat.islnk
32 |
33 |
--------------------------------------------------------------------------------
/vagrant/auditd/usersetup.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Creates the 'alice' demo user with full sudo rights and installs a
# root-owned helper script used to exercise sudo auditing.

# create alice user
sudo useradd -u 20001 -m alice -s /bin/bash

# set static password (MD5 crypt with a fixed salt -- demo only, not for
# production) and ensure group definition added to /etc/group
phash=$(openssl passwd -1 -salt mysalt alicepass)
sudo usermod -p "$phash" alice

# alice gets full sudo privileges
echo "alice ALL=(ALL) ALL" | sudo tee /etc/sudoers.d/alice

# create script that developers can use in sudo
# (quoted 'EOL' delimiter: the script text is written literally, unexpanded)
cat >/tmp/quicktest.sh <<'EOL'
#!/bin/bash
echo "$0 invoked as user '${SUDO_USER}' which is member of groups:"
groups
echo "script being run as user id $EUID"
if [[ $EUID -ne 0 ]] ; then echo "UNEXPECTED!!! this script was expected to be run as root/sudo" ; exit 1 ; fi
grep alice /etc/shadow
EOL

# owned by root but executable by all
# NOTE(review): /tmp is predictable and world-writable -- acceptable for a
# demo, but install under /usr/local/bin for real use
sudo chown root:root /tmp/quicktest.sh
sudo chmod ugo+r+x /tmp/quicktest.sh
--------------------------------------------------------------------------------
/python/json-to-dot.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # originally from https://techtldr.com/convert-json-to-dot-notation-with-python/
3 | # enhanced with ability to handle empty dict and array
4 | #
5 | # Takes json and produces dot notation paths
6 | #
7 | # If you need to convert YAML to json use 'yq eval - -o=json -P'
8 |
9 | import json
10 | import sys
11 |
def getKeys(val, old=""):
    """Recursively print every leaf of a JSON-like value as a dot-notation path.

    Each leaf is printed as '<path> : <value>'; empty dicts print '{}' and
    empty lists print '[]' so that structure is never silently dropped.

    Args:
        val: parsed JSON value (dict, list, or scalar).
        old: dot-path accumulated so far (paths start with a leading '.').
    """
    if isinstance(val, dict):
        if val:
            for key in val:
                getKeys(val[key], "{}.{}".format(old, key))
        else:
            # empty object leaf; literal braces escaped in the format string
            # (replaces the original's unused second .format() argument)
            print("{} : {{}}".format(old))
    elif isinstance(val, list):
        if val:
            for idx, element in enumerate(val):
                getKeys(element, "{}.{}".format(old, idx))
        else:
            # empty array leaf
            print("{} : []".format(old))
    else:
        # scalar leaf (str/int/float/bool/None)
        print("{} : {}".format(old, val))
27 |
# entry point: read a JSON document from stdin and print every leaf
# (and empty dict/list) as a dot-notation path
data=json.load(sys.stdin)
getKeys(data)
30 |
--------------------------------------------------------------------------------
/keycloak/keycloak-patch.yaml:
--------------------------------------------------------------------------------
1 | # patch for quickstart keycloak deployment so we have extra files mounted
2 | spec:
3 | template:
4 | spec:
5 | volumes:
6 | - name: keycloak-hookvolume
7 | configMap:
8 | name: keycloak-configmap
9 | defaultMode: 0755
10 | containers:
11 | - name: keycloak
12 | lifecycle:
13 | postStart:
14 | exec:
15 | # lifecycle hook called right after container created, bash script has built-in delay
16 | command: ["/bin/bash","-c","cd /opt/keycloak/bin; ./poststart.sh > /tmp/poststart.log"]
17 | volumeMounts:
18 | - mountPath: /opt/keycloak/bin/poststart.sh
19 | subPath: poststart.sh
20 | name: keycloak-hookvolume
21 | - mountPath: /tmp/myclient.exported.json
22 | subPath: myclient.exported.json
23 | name: keycloak-hookvolume
24 |
--------------------------------------------------------------------------------
/pandas/flatten_relational/fetch_relational_data.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# blog: https://fabianlee.org/2021/12/09/python-flattening-relational-data-with-pandas/
#

echo "Fetching relevant csv datafiles from Microsoft AdventureWorks relational database..."

base_url="https://github.com/microsoft/sql-server-samples/raw/master/samples/databases/adventure-works/oltp-install-script"
for f in StateProvince.csv SalesOrderHeader.csv Address.csv CreditCard.csv instawdb.sql; do
  # download only when not already cached locally; quote to be glob/space safe
  [ -f "$f" ] || wget -q "$base_url/$f"
done

# some files are in utf-16 with multi-char separator
# they would be cleaned like this
#iconv -f utf-16 -t utf-8 BusinessEntity.csv -o BusinessEntity8.csv
#cat BusinessEntity8.csv | tr -d "+" | sed 's/&\|$//' > BusinessEntity-clean.csv

# convert from utf-16 to utf-8 for proper parsing by pandas
iconv -f utf-16 -t utf-8 StateProvince.csv -o StateProvince8.csv

ls -l *.csv
--------------------------------------------------------------------------------
/ansible/playbook-block-loop.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Playbook that does loop on block (which is not natively supported syntax of Ansible)
3 | #
4 | # ansible-playbook playbook-block-loop.yml -l localhost
5 | #
6 | # blog: https://fabianlee.org/2021/06/18/ansible-implementing-a-looping-block-using-include_tasks/
7 | #
8 | ---
9 | - hosts: all
10 | become: no
11 | gather_facts: no
12 | connection: local
13 |
14 | vars_prompt:
15 | - name: do_block_logic
16 | prompt: "do_block_logic (true|false)"
17 | private: no
18 |
19 | tasks:
20 |
21 | - name: simple block with conditional
22 | block:
23 | - name: simple block task1
24 | debug: msg="hello"
25 | - name: simple block task2
26 | debug: msg="world"
27 | when: do_block_logic|bool
28 |
29 | - name: calling block with conditional on loop
30 | include_tasks: conditional_block.yml
31 | loop: ['world','universe']
32 |
33 |
34 |
--------------------------------------------------------------------------------
/k8s/ndots-glibc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | namespace: default
5 | name: glibc-test
6 | spec:
7 |
8 | containers:
9 | - name: glibc-test
10 | image: busybox:1.35.0-glibc
11 | args:
12 | - /bin/sh
13 | - -c
14 | - touch /tmp/healthy; date; echo initializing...; sleep 30d
15 | livenessProbe:
16 | exec:
17 | command:
18 | - cat
19 | - /tmp/healthy
20 | initialDelaySeconds: 5
21 | periodSeconds: 10
22 | readinessProbe:
23 | exec:
24 | command:
25 | - cat
26 | - /tmp/healthy
27 | initialDelaySeconds: 5
28 | periodSeconds: 10
29 | #dnsPolicy: ClusterFirst # this is the default if no values provided
30 | #dnsPolicy: Default # uses node upstream
31 | dnsConfig:
32 | options:
33 | - name: ndots
34 | value: "1" # will be tried as absolute if at least 1 period
35 |
--------------------------------------------------------------------------------
/haproxy/haproxytest.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Purpose: restarts HAProxy service on sysV and Systemd on Ubuntu,
# repeating forever with a fixed delay between actions.

if [ "$#" -ne 2 ]; then
  echo "Usage: restart|reload|reload-socket timeDelay"
  exit 1
fi

action="$1"
delay="$2"

dist=$(lsb_release -sc)
echo "dist is $dist"

echo "Going to $action every $delay seconds..."
while true; do
  sleep "$delay"
  if [ "$dist" == "trusty" ]; then
    # trusty still uses sysV init
    service haproxy "$action"
    echo "done with haproxy $action using sysv, again in $delay seconds..."
  elif [ "$dist" == "xenial" ]; then
    # we always do 'restart', have to change service file and haproxy.cfg to
    # change behavior to legacy reload or socket reload
    systemctl "$action" haproxy.service
    echo "done with haproxy.service $action using systemd, again in $delay seconds..."
  else
    echo "this script is meant for trusty/xenial to determine whether sysv or systemd is in place. Modify accordingly" >&2
  fi
done
--------------------------------------------------------------------------------
/k8s/ndots-musl.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | namespace: default
5 | name: musl-test
6 | spec:
7 |
8 | containers:
9 | - name: musl-test
10 | image: giantswarm/tiny-tools:3.12
11 | args:
12 | - /bin/sh
13 | - -c
14 | - touch /tmp/healthy; date; echo initializing...; sleep 30d
15 | livenessProbe:
16 | exec:
17 | command:
18 | - cat
19 | - /tmp/healthy
20 | initialDelaySeconds: 5
21 | periodSeconds: 10
22 | readinessProbe:
23 | exec:
24 | command:
25 | - cat
26 | - /tmp/healthy
27 | initialDelaySeconds: 5
28 | periodSeconds: 10
29 | #dnsPolicy: ClusterFirst # this is the default if no values provided
30 | #dnsPolicy: Default # uses node upstream
31 | dnsConfig:
32 | options:
33 | - name: ndots
34 | value: "1" # will be tried as absolute if at least 1 period
35 |
--------------------------------------------------------------------------------
/vault/vso/web-hello.yaml:
--------------------------------------------------------------------------------
1 | # kubectl create secret generic hello-secret --from-literal="greeting=My Secret"
2 | # kubectl apply -f web-hello.yaml
3 | # kubectl exec -it deployment/web-hello -n default -- wget -q http://localhost:8080 -O-
4 | ---
5 | apiVersion: apps/v1
6 | kind: Deployment
7 | metadata:
8 | labels:
9 | app: web-hello
10 | name: web-hello
11 | namespace: default
12 | spec:
13 | replicas: 1
14 | selector:
15 | matchLabels:
16 | app: web-hello
17 | template:
18 | metadata:
19 | labels:
20 | app: web-hello
21 | spec:
22 | containers:
23 | - image: ghcr.io/fabianlee/docker-golang-hello-world-web:1.0.2
24 | imagePullPolicy: IfNotPresent
25 | name: hello-app
26 | env:
27 | # create environment variable from secret
28 | - name: GREETING
29 | valueFrom:
30 | secretKeyRef:
31 | name: hello-secret
32 | key: greeting
33 |
--------------------------------------------------------------------------------
/ansible/install_dependencies.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # get latest ansible role for installing microk8s
4 | - hosts: localhost
5 | become: no
6 | tasks:
7 |
8 | # https://docs.ansible.com/ansible/latest/user_guide/playbooks_filters_ipaddr.html
9 | # still required for 'ansible.netcommon' collection to work
10 | - name: install local packages needed for ipaddr jinja2 filter
11 | delegate_to: localhost
12 | become: yes
13 | apt:
14 | pkg:
15 | - python-netaddr
16 | - python3-netaddr
17 | state: present
18 |
19 | # https://galaxy.ansible.com/ansible/netcommon
20 | - name: get ansible.netcommon collection from ansible galaxy, be patient can take 5 minutes
21 | command:
22 | cmd: ansible-galaxy collection install ansible.netcommon:2.2.0
23 | # -p .
24 | #creates: "{{playbook_dir}}/../collections/ansible_collections/ansible/netcommon"
25 | #chdir: "{{playbook_dir}}/../collections"
26 |
--------------------------------------------------------------------------------
/ansible/playbook-list-union.yml:
--------------------------------------------------------------------------------
#
# Playbook that tests the union of lists
#
# ansible-playbook playbook-list-union.yml -l localhost
#
# blog:
#
# https://www.tailored.cloud/devops/how-to-filter-and-map-lists-in-ansible/
# https://ansiblemaster.wordpress.com/2017/02/24/operating-loop-results-with-dict-or-array/
---
- hosts: all
  become: no
  gather_facts: no
  connection: local

  vars:
    list_mammals:
      - cow
      - dog
    list_mammals_also:
      - dolphin
    list_reptiles:
      - snake
      - crocodile

  tasks:

    # NOTE(review): the '"{{a}} + {{b}}"' string form relies on legacy
    # with_items/loop concatenation behavior; on current Ansible the single
    # expression "{{ list_mammals + list_mammals_also }}" is the documented
    # way to join lists -- confirm against the target Ansible version
    - name: two variable lists, union with plus sign
      command:
        cmd: echo {{item}}
      loop: "{{list_mammals}} + {{list_mammals_also}}"

    - name: one variable list, another hardcoded
      command:
        cmd: echo {{item}}
      loop: "{{list_mammals}} + ['squirrel']"
--------------------------------------------------------------------------------
/vagrant/gpg1604/fordevelopers.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # create script that developers can use in sudo
4 | cat >/tmp/developersDecrypt.sh <<'EOL'
5 | #!/bin/bash
6 | echo "$0 invoked as user '${SUDO_USER}' which is member of groups:"
7 | groups
8 | echo "script being run as user id $EUID"
9 | if [[ $EUID -ne 0 ]] ; then echo "EXPECT ERROR!!! this script must be run as root/sudo" ; exit 1 ; fi
10 | gpg --homedir /etc/deployerkeys --list-keys
11 | echo "NOTE: This decrypted secret would normally be silently passed to another process, and not ever shown or available to the user. But for purposes of example, here it is:"
12 | gpg --homedir /etc/deployerkeys -qd /tmp/test.crypt
13 | echo
14 | EOL
15 |
16 | # owned by root but executable by all
17 | sudo chown root:root /tmp/developersDecrypt.sh
18 | sudo chmod ugo+r+x /tmp/developersDecrypt.sh
19 |
20 | # now create another script that 'developers' cannot execute via sudo
21 | sudo cp /tmp/developersDecrypt.sh /tmp/developersNotAllowed.sh
22 |
23 |
--------------------------------------------------------------------------------
/terraform/templatefile_test/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | params = [ "a", "b", "c"]
3 | params_for_inline_command = join(" ",local.params)
4 | }
5 |
6 | # for local ssh connection
7 | variable user { default="" }
8 | variable password { default="" }
9 |
10 | resource "null_resource" "test" {
11 |
# local ssh connection, make sure to pass variables 'user' and 'password' on cli
13 | # terraform apply -var user=$USER -var password=xxxxxxx
14 | connection {
15 | type="ssh"
16 | agent="false"
17 | host="localhost"
18 | user=var.user
19 | password=var.password
20 | }
21 |
22 | provisioner "file" {
23 | destination = "/tmp/script.sh"
24 | content = templatefile(
25 | "${path.module}/script.sh.tpl",
26 | {
27 | "params": local.params
28 | "params_for_inline_command" : local.params_for_inline_command
29 | }
30 | )
31 | }
32 |
33 | }
34 |
35 | output "run_this_script" {
36 | value="/bin/bash /tmp/script.sh"
37 | }
38 |
39 |
--------------------------------------------------------------------------------
/CF/which-cf-buildpack.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Determines exact buildpack details for CloudFoundry app
#
# Supporting blog: https://fabianlee.org/2020/10/20/cloudfoundry-determining-buildpack-used-by-application/
#
app="$1"
if [ -z "$app" ]; then
  echo "ERROR You must provide a valid cf app name"
  exit 1
fi

# verify the app name appears in the first column of 'cf apps'
if ! cf apps | awk '{print $1}' | grep -q -- "$app"; then
  echo "ERROR the source app name $app is not valid"
  exit 1
fi

guid=$(cf app "$app" --guid)
echo "$app guid = $guid"

# jq -r emits raw strings, replacing the original's tr -d '"' quote stripping
bpackguid=$(cf curl "/v2/apps/$guid/summary" | jq -r .detected_buildpack_guid)
echo "buildpack guid = $bpackguid"

# list all buildpacks
# cf curl /v2/buildpacks
bpackname=$(cf curl "/v2/buildpacks/$bpackguid" | jq -r .entity.name)
bpackfile=$(cf curl "/v2/buildpacks/$bpackguid" | jq -r .entity.filename)
echo "buildpack used by $app name/file = $bpackname/$bpackfile"
--------------------------------------------------------------------------------
/bash/awk_nth_match.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Uses awk to pull content after Nth match
4 | #
5 | # blog: https://fabianlee.org/2023/01/05/bash-awk-to-extract-nth-match-from-file-based-on-line-separator/
6 | #
7 |
8 | # create multiline string
9 | read -r -d '' herecontent < %s\n",key,val)
44 | }
45 | }
46 |
47 |
--------------------------------------------------------------------------------
/ansible/playbook-ipaddr.yaml:
--------------------------------------------------------------------------------
1 | # shows how to use jinja2 ipaddr filter from ansible, needs 'netaddr' pip or package
2 | # https://docs.ansible.com/ansible/latest/user_guide/playbooks_filters_ipaddr.html
3 | #
4 | # prerequisite: install netaddr os package and galaxy collection:
5 | # ansible-playbook install_dependencies.yml
6 | ---
7 | - hosts: localhost
8 | gather_facts: true
9 |
10 | vars:
11 | - IP: 172.31.3.13/23
12 | - CIDR: 192.168.0.0/16
13 | - MAC: 1a:2b:3c:4d:5e:6f
14 | - PREFIX: 18
15 |
16 | tasks:
17 |
18 | - debug: msg="___ {{ IP }} ___ ADDRESS {{ IP | ipaddr('address') }}"
19 | - debug: msg="___ {{ IP }} ___ BROADCAST {{ IP | ipaddr('broadcast') }}"
20 | - debug: msg="___ {{ IP }} ___ NETMASK {{ IP | ipaddr('netmask') }}"
21 | - debug: msg="___ {{ IP }} ___ NETWORK {{ IP | ipaddr('network') }}"
22 | - debug: msg="___ {{ IP }} ___ PREFIX {{ IP | ipaddr('prefix') }}"
23 | - debug: msg="___ {{ IP }} ___ SIZE {{ IP | ipaddr('size') }}"
24 |
25 |
26 | - debug: msg="first 3 octets of 192.168.1.114 = {{ "192.168.1.114".split('.')[0:3] | join('.') }}.xx"
27 |
--------------------------------------------------------------------------------
/haproxy/ubuntu1604/switchhaproxy.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Purpose: Changes values in /etc/haproxy/haproxy.cfg and /etc/default/haproxy
# so that Systemd can do legacy reload versus seamless socket transfer

if [ "$#" -ne 1 ]; then
  echo "Usage: reload|reload-socket"
  exit 1
fi

# uncomment all relevant lines first so double commenting does not happen
sed -i '/mode 660 level admin/s/^#//' /etc/haproxy/haproxy.cfg
sed -i '/mode 777 level admin expose-fd/s/^#//' /etc/haproxy/haproxy.cfg
sed -i '/RELOADOPTS/s/^#//' /etc/default/haproxy

echo "Changing configs so that Systemd does $1"
if [ "$1" == "reload" ]; then
  # comment out the socket-transfer settings, leaving legacy reload active
  sed -i '/mode 777 level admin expose-fd/s/^/#/g' /etc/haproxy/haproxy.cfg
  sed -i '/RELOADOPTS/s/^/#/g' /etc/default/haproxy
elif [ "$1" == "reload-socket" ]; then
  # comment out the legacy-reload socket mode, leaving socket transfer active
  sed -i '/mode 660 level admin/s/^/#/g' /etc/haproxy/haproxy.cfg
else
  # quoted so the literal single quotes survive; fail so callers can detect it
  echo "Did not recognize mode. Only 'reload' and 'reload-socket' are valid values" >&2
  exit 1
fi
--------------------------------------------------------------------------------
/ansible/lineinfileyml/test-lineinfile.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Playbook for testing lineinfile with regex
3 | hosts: localhost
4 | tasks:
5 | - name: Replace values in yml file
6 | lineinfile:
7 | backup: no
8 | backrefs: yes
9 | state: present
10 | path: ./my.yml
11 | regexp: '^(\s*)[#]?{{ item.search }}(: )*'
12 | line: '\1{{ item.replace }}'
13 | with_items:
14 | - { search: 'key1', replace: 'key1: NEWvalue1' }
15 | - { search: 'key2', replace: 'key2: NEWvalue2' }
16 | - { search: '\- elephant', replace: '- heffalump' }
17 | - { search: '\- www.ansible.com', replace: '- www.redhat.com' }
18 |
19 | - name: add item to yml file at correct indentation level
20 | lineinfile:
21 | backup: no
22 | backrefs: yes
23 | state: present
24 | path: ./my.yml
25 | regexp: '^(\s*)[#]?{{ item.search }}(.*)'
26 | line: '\1{{ item.search }}\2\n\1{{ item.add }}'
27 | with_items:
28 | - { search: 'key2', add: 'key3: INSvalue3' }
29 | - { search: '- cat', add: '- deer' }
30 |
--------------------------------------------------------------------------------
/ansible/lineinfile/test-lineinfile.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Playbook for testing lineinfile with regex
3 | hosts: localhost
4 | tasks:
5 | - name: Replace values in config file
6 | lineinfile:
7 | backup: no
8 | state: present
9 | path: ./key-value.cfg
10 | regexp: '^[# ]*{{ item.search }}\s*=\s*'
11 | line: '{{ item.replace }}'
12 | with_items:
13 | - { search: 'thiskey1', replace: 'thiskey1 = NEWthisval1' }
14 | - { search: 'thiskey2', replace: 'thiskey2 = NEWthisval2' }
15 | - { search: 'thiskey3', replace: 'thiskey3 = NEWthisval3' }
16 | - { search: 'thiskey4', replace: 'thiskey4 = NEWthisval4' }
17 | - { search: 'thiskey5', replace: 'thiskey5 = NEWthisval5' }
18 | - { search: 'thiskey6', replace: 'thiskey6 = NEWthisval6' }
19 | # keys that do not exist get created
20 | - { search: 'thiskey7', replace: 'thiskey7 = INSthisnewval7' }
21 |
22 | # - name: Show new file contents
23 | # command: "/bin/cat ./key-value.cfg"
24 | # register: details
25 | #
26 | # - debug: msg="{{ details.stdout_lines }}"
27 |
--------------------------------------------------------------------------------
/kubectl/kustomize-delete/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # example showing how delete patches can remove resources
2 | # using delete patch from either file or inline
3 | ---
4 | apiVersion: kustomize.config.k8s.io/v1beta1
5 | kind: Kustomization
6 |
7 | resources:
8 | - myns.yaml
9 | - mydeployment.yaml
10 |
11 | configMapGenerator:
12 | - name: myconfigmap
13 | namespace: default
14 | literals:
15 | - foo=bar
16 | generatorOptions:
17 | disableNameSuffixHash: true
18 |
19 | patches:
20 |
21 | # OPTION #1 delete with patch from file
22 | # - delete-ns.yaml
23 | # - delete-configmap.yaml
24 | # - delete-deployment.yaml
25 |
26 | # OPTION #2 delete with inline patch
27 | # - |-
28 | # $patch: delete
29 | # apiVersion: v1
30 | # kind: Namespace
31 | # metadata:
32 | # name: myns
33 | # - |-
34 | # $patch: delete
35 | # apiVersion: v1
36 | # kind: ConfigMap
37 | # metadata:
38 | # name: myconfigmap
39 | # namespace: default
40 | # - |-
41 | # $patch: delete
42 | # apiVersion: apps/v1
43 | # kind: Deployment
44 | # metadata:
45 | # name: mydeployment
46 | # namespace: default
47 |
--------------------------------------------------------------------------------
/python-jsonpath/squad.json:
--------------------------------------------------------------------------------
1 | {
2 | "squadName": "Super hero squad",
3 | "homeTown": "Metro City",
4 | "formed": 2016,
5 | "secretBase": "Super tower",
6 | "active": true,
7 | "members": [
8 | {
9 | "name": "Molecule Man",
10 | "age": 29,
11 | "secretIdentity": "Dan Jukes",
12 | "powers": [
13 | "Radiation resistance",
14 | "Turning tiny",
15 | "Radiation blast"
16 | ]
17 | },
18 | {
19 | "name": "Madame Uppercut",
20 | "age": 39,
21 | "secretIdentity": "Jane Wilson",
22 | "powers": [
23 | "Million tonne punch",
24 | "Damage resistance",
25 | "Superhuman reflexes"
26 | ],
27 | "aliases": [ "Lady Destructo", "Little Red" ]
28 | },
29 | {
30 | "name": "Eternal Flame",
31 | "age": 1000000,
32 | "secretIdentity": "Unknown",
33 | "powers": [
34 | "Immortality",
35 | "Heat Immunity",
36 | "Inferno",
37 | "Teleportation",
38 | "Interdimensional travel"
39 | ],
40 | "aliases": [ "Power Flame", "Red Roaster" ]
41 | }
42 | ]
43 | }
44 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 fabianlee
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/bash/pull_and_parse_certs.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Pulls all PEM certs presented by a TLS server
# then examines properties using openssl

server_name="$1"
server_ip="$2"
if [[ -z "$server_name" ]]; then
  echo "usage: serverFQDN [serverIP]"
  exit 3
fi
[[ -n "$server_ip" ]] || server_ip="$server_name"

# fetch the full chain with SNI; use server_name consistently (was mixed $1)
echo | openssl s_client -showcerts -servername "$server_name" -connect "$server_ip:443" | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > "$server_name.crt"

# pull out multiline sections, remove leading spaces
sed -ne '/-----BEGIN CERTIFICATE-----/,/-----END CERTIFICATE-----/p' "$server_name.crt" | sed 's/^\s*//' > allcerts.pem

# count how many certs were pulled (grep -c replaces grep | wc -l)
certcount=$(grep -c -e "-----BEGIN CERTIFICATE-----" allcerts.pem)

# pull each cert individually, use openssl to show critical properties
for index in $(seq 1 "$certcount"); do
  echo "==== cert $index"
  # pass the index via awk -v instead of interpolating it into the program
  awk -v n="$index" '/-----BEGIN CERTIFICATE-----/{i++} i==n' allcerts.pem > "$index.crt"
  openssl x509 -in "$index.crt" -text -noout | grep -E "Subject:|Not After :|DNS:"
  rm "$index.crt"
done

rm allcerts.pem
--------------------------------------------------------------------------------
/haproxy/ab/Vagrantfile:
--------------------------------------------------------------------------------
# -*- mode: ruby -*-
# vi: set ft=ruby :
#
# Builds an Ubuntu 14.04 (trusty) VM and compiles Apache Bench (ab)
# from the httpd 2.4.37 source tree, substituting a patched ab.c
# pulled from github before the build.

Vagrant.configure(2) do |config|
  config.vm.box = "ubuntu/trusty64"
  config.vm.hostname = "ab"
  # bridged network with static IP so ab can reach hosts on the LAN
  config.vm.network "public_network", ip: "192.168.2.122", bridge: "eth0"

  # VirtualBox-specific VM sizing
  config.vm.provider "virtualbox" do |v|
    v.name = "ab"
    v.customize ["modifyvm", :id, "--memory","1024" ]
    v.customize ["modifyvm", :id, "--cpus","1" ]
  end

  #config.vm.synced_folder "/home/fabian/Documents", "/Documents"
  #config.vm.provision "shell", path: "ab.sh", privileged: true

  # inline provisioning: download httpd source, overlay patched ab.c,
  # build, install resulting 'ab' binary to /usr/sbin, and verify with -V
  config.vm.provision "shell", inline: <<-SHELL
    apt-get update -q

    cd /usr/src
    wget http://archive.apache.org/dist/httpd/httpd-2.4.37.tar.gz
    tar xfz httpd*.tar.gz
    cd httpd*

    cp support/ab.c support/ab.c.old
    wget https://raw.githubusercontent.com/fabianlee/blogcode/master/haproxy/ab.c -O support/ab.c

    apt-get install libapr1-dev libaprutil1-dev libpcre3 libpcre3-dev -y
    ./configure
    make

    support/ab -V

    cp support/ab /usr/sbin/ab

    ab -V

  SHELL

end
--------------------------------------------------------------------------------
/ansible/roles/filetreetest/tasks/main.yaml:
--------------------------------------------------------------------------------
# use 'with_filetree' lookup plugin (fully qualified is 'with_community.general.filetree')
# to create deep directory structure of templates
---

- set_fact:
    dest_dir: /tmp/filetreetest

# first pass: mirror every directory found under templates/
- name: Create directory structure
  file:
    path: '{{dest_dir}}/{{ item.path }}'
    state: directory
    mode: '{{ item.mode }}'
  #with_community.general.filetree:
  with_filetree: '{{role_path}}/templates/'
  loop_control:
    label: '{{item.path}}'
  when: item.state == 'directory'

- name: show Template files
  debug:
    msg: 'dest={{dest_dir}}/{{ item.path }}'
  with_filetree: '{{role_path}}/templates/'
  loop_control:
    # fixed: stray trailing double-quote inside the label removed
    label: '{{item.path}}'
  when: item.state == 'file'

# second pass: render each template file into the mirrored structure
- name: Generate Template files with directory structure
  template:
    src: '{{ item.src }}'
    dest: '{{dest_dir}}/{{ item.path }}'
    mode: '{{ item.mode }}'
  #with_community.general.filetree
  with_filetree: '{{role_path}}/templates/'
  loop_control:
    label: '{{item.path}}'
  when: item.state == 'file'
--------------------------------------------------------------------------------
/sysprep/w2k12/Vagrantfile:
--------------------------------------------------------------------------------
# Spins up a linked clone of a sysprep-ready Windows 2012 base box,
# communicating over WinRM instead of SSH.

# for linked clones
#Vagrant.require_version ">= 1.8"

vmname = 'clone1'
boxname = 'w2k12base-sysprep-ready'

Vagrant.configure(2) do |config|
  config.vm.hostname = "#{vmname}"
  config.vm.box = "#{boxname}"

  # must have for Windows to specify OS type
  config.vm.guest = :windows

  # if using host only network
  #config.vm.network 'private_network', type: 'dhcp'

  # winrm | ssh
  config.vm.communicator = "winrm"
  config.winrm.username = "vagrant"
  config.winrm.password = "vagrant"
  config.ssh.insert_key = false

  # virtualbox provider
  config.vm.provider "virtualbox" do |v|
    v.name = "#{vmname}"
    # show console GUI so boot/sysprep progress is visible
    v.gui = true
    # use linked clone for faster spinup
    v.linked_clone = true
    v.customize ["modifyvm", :id, "--memory","1024" ]
    v.customize ["modifyvm", :id, "--cpus","1" ]
    # dynamically set properties that can be fetched inside guestOS
    v.customize ["guestproperty", "set", :id, "myid", :id ]
    v.customize ["guestproperty", "set", :id, "myname", "#{vmname}" ]
  end

end
--------------------------------------------------------------------------------
/socat/start_socat_tls.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# uses socat to create HTTPS web server running on local port
#
# blog: https://fabianlee.org/2022/10/26/linux-socat-used-as-secure-https-web-server/
#
# Usage: start_socat_tls.sh [FQDN] [port]

FQDN="${1:-mysocat.local}"
PORT="${2:-9443}"

# install socat if missing.
# fixed: '[[ -x $(which socat) ]]' expanded to '[[ -x ]]' when socat was
# absent; a single-argument [[ ]] is a non-empty-string test that is TRUE,
# so the install never ran. 'command -v' is the portable existence check.
command -v socat >/dev/null || sudo apt install socat -y

set -x

# generate self-signed key/cert once; socat wants key+cert combined as PEM
[[ -f "$FQDN.key" ]] || openssl genrsa -out "$FQDN.key" 2048
[[ -f "$FQDN.crt" ]] || openssl req -new -key "$FQDN.key" -x509 -days 3653 -out "$FQDN.crt" -subj "/C=US/ST=CA/L=SFO/O=myorg/CN=$FQDN"
[[ -f "$FQDN.pem" ]] || cat "$FQDN.key" "$FQDN.crt" > "$FQDN.pem"

# private material kept user-only, public cert world-readable
chmod 600 "$FQDN.key" "$FQDN.pem"
chmod 644 "$FQDN.crt"

# shows HTTP protocol
socat -v -ls OPENSSL-LISTEN:${PORT},reuseaddr,cert=${FQDN}.pem,verify=0,crlf,fork SYSTEM:"echo HTTP/1.0 200; echo Content-Type\: text/plain; echo; echo \"hello from $(hostname) at \$(date)\""


# if we had wanted plain, insecure HTTP
#socat -v -ls TCP-LISTEN:${PORT},reuseaddr,crlf,fork SYSTEM:"echo HTTP/1.0 200; echo Content-Type\: text/plain; echo; echo \"hello from $(hostname) at \$(date)\""
--------------------------------------------------------------------------------
/kubectl/kustomize-secret/my-tls.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDCzCCAfOgAwIBAgIUYquvjom/2hBxtsReBxdTyXUlWBkwDQYJKoZIhvcNAQEL
3 | BQAwFTETMBEGA1UEAwwKbXktdGxzLmNvbTAeFw0yMzA4MDUyMzM3MTBaFw0yNDA4
4 | MDQyMzM3MTBaMBUxEzARBgNVBAMMCm15LXRscy5jb20wggEiMA0GCSqGSIb3DQEB
5 | AQUAA4IBDwAwggEKAoIBAQCddiXx92uXW7qKJI2P3AzbvcbXpcSHe7psoe6+nQwW
6 | S0QlVo1b2fKZGoegUkjdSZ2u+VpkQSeBG//UStKZP807Yg4eEsRUXrJcJMSGmyv2
7 | K0EPt0VQerLf1rKqkA8oL7AgVno/ft06Q6nbQrWoZQIwEKHlFOHGtQvyU/EdHiQw
8 | nvg3J5aw806hkfWw0wACjnXVGC5T5H4Pb6gXmU3J38IGnwSD3p8ZGBmgaj0ciuMQ
9 | DCXicdfBRqBkc24zYHOc1/CSAju9Vs8gn8j/lscBUrBcvRtx6HeB/Y/Kkv3JSbAH
10 | PMSnRJO5yJk0Rjv0b5kPgkD7rN8WkVl7eRZ2uX7UnaP7AgMBAAGjUzBRMB0GA1Ud
11 | DgQWBBSZiuOuDxvhcXZjByx/yL4tHNNDmzAfBgNVHSMEGDAWgBSZiuOuDxvhcXZj
12 | Byx/yL4tHNNDmzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAU
13 | Q6rSUGv+wMIDYP0p6tzvHbMmoEyS5b+meRdRC2x5coQHYX7iW6civCyKFB2N8DFt
14 | YSul+WMSxHPv2irvXHG8urK6yavIlz+g0ylr6BWGpbhIERrShme/TPMMpJYCjPwD
15 | O3yie+26dWaEoWfPzKkIjWse7gfxRfboiy2WC6fIokgvdtlKmDLmSRC2SJAzk3dp
16 | yUWyQe3ph2HapgZQjJb6xErYxX6y7N9jl2eVSGLbIzzdNb1/oodSjEXPXLGs/9t9
17 | tJ+Dw5lRHd9fEWhVdnzee3y+ct366FbGYk3itAoHJqS6oZBobkGkNE/1YxkPyXOP
18 | CEAN7EjX3gLtzDYQpQqo
19 | -----END CERTIFICATE-----
20 |
--------------------------------------------------------------------------------
/k8s/kyverno/kyv-require-ns-label.yaml:
--------------------------------------------------------------------------------
# originally from: https://medium.com/@glen.yu/why-i-prefer-kyverno-over-gatekeeper-for-native-kubernetes-policy-management-35a05bb94964
# https://gist.githubusercontent.com/Neutrollized/7e261ddea3d26cf774344edbfd20e0de/raw/c76c56b56f897a4c7ac8bf240b0fe2a7805a0344/kyv-require-ns-label.yaml
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-ns-label
spec:
  # reject (not merely audit) any Namespace that fails validation
  validationFailureAction: Enforce
  rules:
  - name: require-ns-owner-label
    match:
      any:
      - resources:
          kinds:
          - Namespace
    validate:
      # fixed: message previously read "with a value of set" (garbled grammar)
      message: "You must have label `owner` with a value set on all new namespaces."
      pattern:
        metadata:
          labels:
            # '?*' wildcard requires at least one character in the value
            owner: "?*"
  - name: require-ns-env-label
    match:
      any:
      - resources:
          kinds:
          - Namespace
    validate:
      # fixed: message said `stage`, but the pattern below actually allows `staging`
      message: "You must have label `env` with a value of `dev`, `staging`, or `prod` set on all new namespaces."
      pattern:
        metadata:
          labels:
            env: dev | staging | prod
--------------------------------------------------------------------------------
/winpscert/newSelfSignedCert.ps1:
--------------------------------------------------------------------------------
# https://fabianlee.org/2019/09/08/powershell-creating-a-self-signed-certificate-using-powershell-without-makecert-or-iis/
#requires -version 4
#requires -runasadministrator
#
# Creates a self-signed certificate in the LocalMachine\My store for the
# given common name, then exports it as a password-protected PFX to c:\temp.
param ([string] $cn=$null,[string] $pw=$null)

# check for mandatory parameters
if ( ! ($cn -and $pw)) {
    # fixed: usage line was empty and the example omitted the script name
    write-host "Usage: .\newSelfSignedCert.ps1 <commonName> <pfxPassword>"
    write-host "Example: .\newSelfSignedCert.ps1 myserver.com Certp4ss!"
    exit 1
}

# for old versions of powershell, exit if not using at least PS4
$pver=$PSVersionTable.PSVersion.Major
write-host "powershell major version is " $pver
if ($pver -lt 4) {
    write-host "ERROR, you need at least Powershell 4+ to run this script"
    exit 2
}

write-host "creating new self-signed cert for cn:" $cn
$cert = New-SelfSignedCertificate -certstorelocation cert:\localmachine\my -dnsname $cn

write-host "exporting cert"
# renamed from $pwd, which shadows the automatic $PWD (current directory) variable
$securePw = ConvertTo-SecureString -String $pw -Force -AsPlainText
$pspath = 'cert:\localMachine\my\'+$cert.thumbprint
Export-PfxCertificate -cert $pspath -FilePath "c:\temp\${cn}.pfx" -Password $securePw
write-host "`n`nexported cert: c:\temp\${cn}.pfx"
--------------------------------------------------------------------------------
/cve/meltdown-candidate/Vagrantfile:
--------------------------------------------------------------------------------
# -*- mode: ruby -*-
# vi: set ft=ruby :
#
# Builds an Ubuntu 16.04 (xenial) VM, installs the PTI-patched kernel
# 4.4.0-109 from the canonical-kernel-team/pti PPA, compiles the
# Am-I-affected-by-Meltdown checker, then reboots into the new kernel.

Vagrant.configure(2) do |config|
  config.vm.box = "ubuntu/xenial64"
  config.vm.hostname = "meltdown1"
  config.vm.network "private_network", type: "dhcp"

  # VirtualBox-specific VM sizing
  config.vm.provider "virtualbox" do |v|
    v.name = "meltdown1"
    v.customize ["modifyvm", :id, "--memory","1024" ]
    v.customize ["modifyvm", :id, "--cpus","1" ]
  end

  # copy meltdown test helper into the guest
  config.vm.provision "file", source: "testMeltdown.sh", destination: "/home/ubuntu/testMeltdown.sh"

  # records original kernel version, installs patched kernel + build tools,
  # builds the checker, and finishes with a reboot (init 6)
  config.vm.provision "shell", inline: <<-SHELL
    uname -r > /tmp/kernel-orig.txt
    sudo apt-get update -q

    sudo add-apt-repository ppa:canonical-kernel-team/pti -y
    sudo apt-get update -q

    sudo apt-cache search linux-headers-4.4.0-109-generic
    sudo apt-cache search linux-image-4.4.0-109-generic

    sudo apt-get install linux-headers-4.4.0-109-generic linux-image-4.4.0-109-generic -y

    sudo apt-get install git build-essential -y
    cd /home/vagrant
    git clone https://github.com/raphaelsc/Am-I-affected-by-Meltdown.git
    cd Am-I-affected-by-Meltdown
    make

    sudo init 6

  SHELL

end
--------------------------------------------------------------------------------
/pandas/mariadb/load_sample_employees_database.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Load Employees sample database into MariaDB
# (expects the sample data in ~/Downloads/test_db and a docker
#  container named 'mariadbtest' already running)
#

echo "Loading example Employees database from ~/Downloads/test_db"

pushd . > /dev/null
cd ~/Downloads

# sample data must have been fetched beforehand
if [ ! -d "test_db" ]; then
  echo "ERROR could not find ~/Downloads/test_db, have you run 'fetch_sample_employees_database.sh'?"
  exit 3
fi

cd test_db

# mysql client binary must be on the PATH
path=$(which mysql)
if [ -z "$path" ]; then
  echo "ERROR could not find mysql client binary. Use: sudo apt install mariadb-client"
  exit 3
fi

# resolve the IP of the 'mariadbtest' container
export dbIP=$(sudo docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' mariadbtest)
if [ -z "$dbIP" ]; then
  echo "ERROR could not find MariaDB docker image named 'mariadbtest'"
  exit 3
fi
echo "MariaDB listening on $dbIP:3306"

# shared command prefix for all mysql invocations below
mysql_prefix="mysql -h $dbIP -u root -pthepassword"

if $mysql_prefix -e "show databases;" | grep -q "employees"; then
  echo "employees database already exists"
else
  echo "need to create employees database"
  $mysql_prefix -e "create database employees;"
  echo ""
  echo "need to load employees data"
  $mysql_prefix -D employees < employees.sql
fi

popd > /dev/null
--------------------------------------------------------------------------------
/ansible/playbook-local-remote-content.yaml:
--------------------------------------------------------------------------------
# Example showing retrieval of local and remote file content
# blog: https://fabianlee.org/2021/05/25/ansible-creating-a-variable-from-a-remote-or-local-file-content/
---

- hosts: all
  gather_facts: yes

  vars:
    local_path: local-README.md
    remote_path: /tmp/remote-README.md

  tasks:

    # lookup() always executes on the controller, so this reads a LOCAL file
    - name: get content of local file
      set_fact:
        readme_contents: "{{ lookup('file',playbook_dir + '/' + local_path) }}"

    - debug:
        msg: "content of local file {{local_path}}: {{readme_contents}}"


    # NOTE(review): this creates the file on the controller (delegate_to),
    # while the slurp below runs on the target host -- these only refer to
    # the same file when the play targets localhost; verify for remote hosts
    - name: create emulated 'remote' file
      delegate_to: localhost
      copy:
        dest: "{{remote_path}}"
        mode: '0666'
        content: |
          This is remote content

    # slurp returns the remote file content base64-encoded
    - name: get content of remote file
      slurp:
        src: "{{remote_path}}"
      register: remote_content_encoded
    - name: decode remote content
      set_fact:
        remote_content: "{{remote_content_encoded.content | b64decode}}"

    - debug:
        msg: "content of remote file {{remote_path}}: {{remote_content}}"
--------------------------------------------------------------------------------
/keycloak/apply.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Deploys Keycloak as a Deployment in a kubernetes cluster
# (fixed: header previously said Daemonset; the quickstart manifest and
#  the patch below both operate on a Deployment)
#
# Starts by deploying manifest from KeyCloak QuickStart
# Then augments with volume mount of files and lifecycle hook that bootstraps at startup

# create configmap that holds file content
# --ignore-not-found avoids a spurious error on the very first run
kubectl delete configmap keycloak-configmap --ignore-not-found
kubectl create configmap keycloak-configmap --from-file=poststart.sh --from-file=myclient.exported.json

# either pull quickstart manifest remotely OR use the one I copied down locally
#curl -s https://raw.githubusercontent.com/keycloak/keycloak-quickstarts/latest/kubernetes-examples/keycloak.yaml | sed 's/type: LoadBalancer/type: ClusterIP/' | kubectl apply -f -
# (sed reads the file directly; no need for cat | sed)
sed 's/type: LoadBalancer/type: ClusterIP/' keycloak.yaml | kubectl apply -f -

echo "sleeping 3 seconds, then going to apply patch for volume bindings..."
sleep 3
kubectl patch deployment keycloak --type strategic --patch-file keycloak-patch.yaml

# show OAuth2 client_id and client_secret
# kubectl exec -it deployment/keycloak -n default -c keycloak -- cat /tmp/keycloak.properties

# restart of deployment
# kubectl rollout restart deployment/keycloak -n default
--------------------------------------------------------------------------------
/logstash-metadata.conf:
--------------------------------------------------------------------------------
# Logstash pipeline demonstrating use of [@metadata] fields as internal
# flags -- @metadata is available to filters but never emitted in output.

input {
  stdin { }
}

filter {

  # initialize metadata field used as flag
  mutate {
    add_field => { "[@metadata][foundtype]" => "" }
  }

  # try to match 'hello' looking messages
  # (add_field/add_tag only take effect if the grok match succeeds,
  #  which is what sets the foundtype flag)
  if "" == [@metadata][foundtype] {
    grok {
      match => { "message" => "hello %{GREEDYDATA:[@metadata][myname]}" }
      break_on_match => false
      add_field => { "[@metadata][foundtype]" => "hellotype" }
      add_tag => [ "didhello" ]
    }
  }

  # try to match 'bye' looking messages
  # (only reached while the flag is still empty, i.e. 'hello' did not match)
  if "" == [@metadata][foundtype] {
    grok {
      match => { "message" => "bye %{GREEDYDATA:[@metadata][myname]}" }
      break_on_match => false
      add_field => { "[@metadata][foundtype]" => "byetype" }
      add_tag => [ "didbye" ]
    }
  }

  # add description based on flag
  if !("" == [@metadata][foundtype]) {
    mutate {
      add_field => { "description" => "action performed by %{[@metadata][myname]}" }
    }
  }else {
    mutate {
      add_field => { "description" => "this was not a hello or bye message type" }
    }
  }


} # filter

# rubydebug codec prints the full event structure for inspection
output {
  stdout { codec => rubydebug }
}
--------------------------------------------------------------------------------
/ansible/playbook-recursive-dirs.yml:
--------------------------------------------------------------------------------
#
# Playbook that retrieves the recursive list of directories from a base path
# 'with_filetree' would be a cleaner solution, but requires the community Galaxy module
#
# ansible-playbook playbook-recursive-dirs.yml -l localhost
#
# blog:
#
---
- hosts: all
  become: no
  gather_facts: no
  connection: local

  tasks:

    - set_fact:
        base_path: /tmp

    # undefine if you want the full path returned
    - set_fact:
        base_path_to_remove: "{{ base_path }}/"

    # retrieve directory entries recursively
    - find:
        paths: "{{ base_path }}"
        file_type: directory
        recurse: yes
      register: dir_item_list
    #- debug: var=dir_item_list

    # transform from item list to string path list
    - set_fact:
        dir_path_list: "{{ dir_item_list.files|map(attribute='path') | sort }}"
    # remove prefixed base path from each entry.
    # fixed: applying regex_replace directly to a list stringifies it;
    # map() applies the filter per element and keeps the list type.
    # the '^' anchor guarantees only the leading base path is stripped.
    - set_fact:
        dir_path_list: "{{ dir_path_list | map('regex_replace', '^' + base_path_to_remove, '') | list }}"
      when: base_path_to_remove is defined
    # show final results
    - debug: var=dir_path_list
--------------------------------------------------------------------------------
/diagrams/ssh-x11forwarding.drawio:
--------------------------------------------------------------------------------
1 | 5Zhbb5swFMc/TR5bcQskj03arpNaqVMmrW+Rix2wajCynds+/Y7BEMBE3SXptvaFmL/v53eOfcjIn2e7TwIV6QPHhI08B+9G/vXI81zHmcKPVvaVEgVeJSSCYtPoICzod1L3NOqaYiI7DRXnTNGiK8Y8z0msOhoSgm+7zVacdWctUEIsYREjZqvfKFZppU7GzkG/IzRJ65lhx1VNhurGRpApwnzbkvybkT8XnKuqlO3mhGnj1Xap+t0eqW0WJkiufqZDGkTTLw9hGqX7i8f8/k7OpHdhRtkgtjYbNotV+9oCSBaVYVd0R2CsWUEEzYgiAjSYAkiQx4M0S1XGoMaFImI0yaEcwxLLusYGDrxgJFM9YPkiCxTTPPnKCxB8EGhWkql/r2mWwN4YfYYnihXdkCWmAlbGxR6khOSwhngpidgQcSk3CfQ1myNCkd1Rq7kNC3BiwmEbekDHdBjXpI3/NkNsW95gmqRtRwi8y0lo/ND4YNKMfsAEBUPqF6j574UaQ4XixTJmFOY6HTQ37EGr31vQpkPQ6uA9ObGxRezJhT04i9JdLXqwdaXNqwR/IXPOuKaW85y08DCy0m1WlLFeixZLbUYKp9mV6ZRRjPUks21KFVkAPj3jFs5u0ARf57hhu+K5MuexG9bvZonuKSB5r0MKByD552IUvZeoWsvmBOxDPAU3N+hym0YWNz96y+AKjwTXvDxWPmRwhU43uPy/HVyT14MLAqLQRQm+z8iVTt6O2LLF4AwO7vVvj6ltu8E7PzyX8epkuGO9kGkPxXQDxUSVW6+k51qQMsU69UYk43ldCwt4tnqIvgKtBkbuSLdExfCESaonXkIOvqLJHwwKYXvLxRYJDC6gAUDmf6TvPx7TzmlcMQh6rji2XdF13zKOXfuLofQA5+JppC/0EGXaMiUzfQ81SsUt6DUR4JpgVpMFdZp+EMReOOme1K59nbqRN3SfnguxZyEuP+61zUC+JvIFkvaPwsdKU4OBb4k3xWN//c3vP/8/3xIn4mJlOANp6FCG8xtc4PXwN01Z1/qzy7/5AQ==
--------------------------------------------------------------------------------
/ansible/playbook-pass-extra-vars.yml:
--------------------------------------------------------------------------------
# Example of overriding boolean variable using extra-vars
# Boolean MUST be passed as json
# because key/value pair gets interpreted as string that always evaluates to true
#
# blog: https://fabianlee.org/2021/07/28/ansible-overriding-boolean-values-using-extra-vars-at-runtime/
#
# will correctly say myflag is true, because that is the default
# ansible-playbook playbook-pass-extra-vars.yml
#
# will incorrectly say myflag is true
# ansible-playbook playbook-pass-extra-vars.yml --extra-vars "myflag=false"
#
# will correctly say myflag is false
# ansible-playbook playbook-pass-extra-vars.yml --extra-vars '{ "myflag": false }'
---
- hosts: localhost
  connection: local
  become: no
  gather_facts: no

  # true by default (the header comments above rely on this), override with json
  # (previous comment wrongly said "false by default")
  vars:
    myflag: true

  tasks:

    - debug:
        msg: "myflag is true."
      when: myflag

    - debug:
        msg: "myflag is false. You must have used a boolean sent as json or empty value"
      when: not myflag

    # |bool coerces a string value such as "false" into a real boolean
    - debug:
        msg: "myflag is false when using explicit boolean conversion"
      when: not myflag|bool
--------------------------------------------------------------------------------
/keycloak/keycloak.yaml:
--------------------------------------------------------------------------------
# from quickstart:
# https://raw.githubusercontent.com/keycloak/keycloak-quickstarts/latest/kubernetes-examples/keycloak.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: keycloak
  labels:
    app: keycloak
spec:
  ports:
  - name: http
    port: 8080
    targetPort: 8080
  selector:
    app: keycloak
  # apply.sh rewrites this to ClusterIP via sed before applying
  type: LoadBalancer
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: keycloak
  labels:
    app: keycloak
spec:
  replicas: 1
  selector:
    matchLabels:
      app: keycloak
  template:
    metadata:
      labels:
        app: keycloak
    spec:
      containers:
      - name: keycloak
        image: quay.io/keycloak/keycloak:19.0.1
        # dev mode: skips production TLS/hostname requirements
        args: ["start-dev"]
        env:
        - name: KEYCLOAK_ADMIN
          value: "admin"
        - name: KEYCLOAK_ADMIN_PASSWORD
          value: "admin"
        # trust X-Forwarded-* headers from a TLS-terminating proxy
        - name: KC_PROXY
          value: "edge"
        ports:
        - name: http
          containerPort: 8080
        # master realm endpoint responds once Keycloak has started
        readinessProbe:
          httpGet:
            path: /realms/master
            port: 8080
--------------------------------------------------------------------------------
/ansible/playbook-only-for-missing-files.yml:
--------------------------------------------------------------------------------
#
# Playbook that takes action for files not found in list
#
# ansible-playbook playbook-only-for-missing-files.yml --connection=local
#
---
- hosts: all
  become: no
  gather_facts: no
  connection: local

  vars:
    filelist: ['a.txt','b.txt','c.txt']

  tasks:

    # remove all target files, then add only first one
    # (establishes a known state: a.txt exists, b.txt/c.txt do not)
    - file:
        path: "{{item}}"
        state: absent
      loop: "{{filelist}}"
    - file:
        path: "{{ filelist | first }}"
        state: touch

    # stat every candidate; results accumulate in filestats.results in loop order
    - name: check for file existence
      stat:
        path: "{{item}}"
      loop: "{{filelist}}"
      register: filestats
    #- debug: msg="{{filestats}}"

    # item.item is the original loop value, item.stat the stat result for it
    - name: take actions on non-existent files (should be b and c)
      debug:
        msg: I have determined that {{item.item}} does not exist, do something!
      loop_control:
        label: "{{item.item}}"
      when: not item.stat.exists
      loop: "{{filestats.results}}"

    # remove all files to prepare for next run
    - file:
        path: "{{item}}"
        state: absent
      loop: "{{filelist}}"
--------------------------------------------------------------------------------
/make/Makefile.envfile.exists:
--------------------------------------------------------------------------------
# Example make file showing how env var and file existence
# can be tested using make constructs
#
# This can be used to set values conditionally,
# or preference for env var versus defaults
#
# run with neither env var or file existing:
# make -f Makefile.envfile.exists
#
# run with env var existing, but not file it points to:
# MY_FILE=fakefile.txt make -f Makefile.envfile.exists
#
# run with env var existing, and also file it points to
# echo "test me" >> /tmp/tempfile
# MY_FILE=/tmp/tempfile make -f Makefile.envfile.exists

ENV_VARS := -e FOO=bar -e name=me

# test for env var existence
ifeq ($(origin MY_FILE),undefined)
ENV_VAR_EXISTS = 0
else
ENV_VAR_EXISTS = 1
endif

# test for file existence
ifneq ("$(wildcard $(MY_FILE))","")
FILE_EXISTS = 1
else
FILE_EXISTS = 0
endif

# if file content exists, append to current var
# fixed: sed 's/\n/ /' never joins lines -- sed strips the trailing
# newline before processing each line, so the pattern never matches;
# tr is the correct tool for newline-to-space translation
ifeq ($(FILE_EXISTS),1)
ENV_VARS += -e FILE_CONTENTS="$(shell cat $$MY_FILE | tr '\n' ' ' )"
endif


init:
	@echo "MY_FILE env var exists? $(ENV_VAR_EXISTS)"
	@echo "MY_FILE file content exists? $(FILE_EXISTS)"
	true $(ENV_VARS)
--------------------------------------------------------------------------------
/file-encoding/test-ascii-utf8-conversion.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# creates a text file with ascii7 encoding and then utf-8 with embedded unicode character
#
# Supporting blog: https://fabianlee.org/2020/11/18/bash-fixing-an-ascii-text-file-changed-with-unicode-character-sequences/
#

# remove any files from last run
# fixed: also clean test-utf8-to-ascii.txt, which is produced at the bottom
# of this script but was never removed by the previous cleanup line
rm -f test-ascii.txt test-utf8.txt test-bom-utf8.txt test-utf8-to-ascii.txt

# create test files; \xE2\x98\xA0 is the UTF-8 encoding of U+2620
printf 'Hello, World!' > test-ascii.txt
printf 'Hello,\xE2\x98\xA0World!' > test-utf8.txt
# third file carries a UTF-8 byte-order-mark (EF BB BF) prefix
printf '\xEF\xBB\xBFHello,\xE2\x98\xA0World!' > test-bom-utf8.txt

# show file encoding types
echo "==file encoding==========================="
file -bi test-ascii.txt
file -bi test-utf8.txt
file -bi test-bom-utf8.txt

# show hex/ascii dump (hexdump reads files directly, no cat needed)
echo "==hex dumps==========================="
hexdump -C test-ascii.txt
hexdump -C test-utf8.txt
hexdump -C test-bom-utf8.txt

# do conversion from utf-8 to ascii, throw away unicode sequences
echo "==convert to ASCII==========================="
iconv -f UTF-8 -t ASCII//IGNORE test-utf8.txt 2>/dev/null > test-utf8-to-ascii.txt
file -bi test-utf8-to-ascii.txt
hexdump -C test-utf8-to-ascii.txt
33 |
--------------------------------------------------------------------------------
/k8s/keda/keda-scaledobject-pubsub.yaml:
--------------------------------------------------------------------------------
# authenticate to GCP using the pod's workload identity (no key file)
apiVersion: keda.sh/v1alpha1
kind: TriggerAuthentication
metadata:
  name: keda-workload-identity-auth
spec:
  podIdentity:
    provider: gcp
---
apiVersion: keda.sh/v1alpha1
kind: ScaledObject
metadata:
  name: pubsub-scaledobject
spec:
  pollingInterval: 10 # seconds, too fast for real deployments
  cooldownPeriod: 10 # seconds, too fast for real deployments
  maxReplicaCount: 5
  minReplicaCount: 0 # allows scale-to-zero
  scaleTargetRef:
    name: golang-hello-world-web-scaled-pubsub
  triggers:
  - type: gcp-pubsub
    authenticationRef:
      name: keda-workload-identity-auth
    metadata:
      subscriptionName: my-sub # short name, or full form "projects/<project_id>/subscriptions/<sub_name>"
      #topicName: my-topic
      mode: "SubscriptionSize" # Optional - Default is SubscriptionSize - SubscriptionSize or OldestUnackedMessageAge
      aggregation: "sum" # Optional - Only meaningful for distribution-valued metrics
      value: "5" # Optional - Default is 10
      valueIfNull: '1.0' # Optional - Default is ""
      activationValue: "10" # Optional - Default is 0
      timeHorizon: "1m" # Optional - Default is 2m and with aggregation 5m
--------------------------------------------------------------------------------
/terraform/flatten_list/main.tf:
--------------------------------------------------------------------------------
#
# blog:
#
# Demonstrates flattening a nested list-of-objects into plain lists
# and maps using flatten() and for expressions.

locals {
  # two parents with nested project lists, plus one with an empty list
  my_projects_list_of_obj = [
    {
      id="proj1",
      projects=[
        { project_id="proja" },
        { project_id="projb" },
      ]
    },
    {
      id="proj2",
      projects=[
        { project_id="projc" }
      ]
    },
    {
      id="proj3",
      projects=[
      ]
    }
  ]

  my_projects_simple_list = [ "proja","projb","projc" ]

}

# fixed: output block labels quoted -- unquoted labels are legacy HCL1
# syntax and are rejected by current Terraform versions
output "flatten_list_of_obj_to_list" {
  value = flatten([ for cp in local.my_projects_list_of_obj :
    [
      for p_obj in cp.projects: [ p_obj.project_id ]
    ]
  ])
}
output "flatten_list_of_obj_to_list_of_map" {
  value = flatten([ for cp in local.my_projects_list_of_obj :
    [
      for p_obj in cp.projects: { "proj"=p_obj.project_id }
    ]
  ])
}
output "flatten_list_of_obj_to_list_then_make_single_map" {
  value = { for v in
    flatten([ for cp in local.my_projects_list_of_obj :
      [
        for p_obj in cp.projects: [ p_obj.project_id ]
      ]
    ]) : "${v}"=>"" }
}


output "simple_list_to_map" {
  value = { for v in local.my_projects_simple_list: "${v}"=>"" }
}
--------------------------------------------------------------------------------
/systemd/ssh-agent.service:
--------------------------------------------------------------------------------
#
# Starts ssh-agent in user mode
# this per-user process is started when user logs in for first time,
# and stops when last session for user is closed
#
# supporting blog: https://fabianlee.org/2021/04/05/ubuntu-loading-a-key-into-ssh-agent-at-login-with-a-user-level-systemd-service/
#

[Unit]
Description=SSH key agent
DefaultDependencies=no
After=network.target

[Service]
Type=simple
# not required because running as user
#User=
#Group=
# %t = $XDG_RUNTIME_DIR for user units, %u = user name
Environment=SSH_AUTH_SOCK=%t/ssh-agent.socket
Environment=KEYFILE=/home/%u/.ssh/id_github

# start in foreground mode (-D), systemd takes care of running in background
ExecStartPre=/bin/sh -c "echo going to load key $KEYFILE"
ExecStart=/usr/bin/ssh-agent -D -a $SSH_AUTH_SOCK

# add key and then write identities to output
ExecStartPost=/bin/sleep 1
ExecStartPost=/bin/sh -c "/usr/bin/ssh-add $KEYFILE"
ExecStartPost=/bin/sh -c "/usr/bin/ssh-add -l"

# stop using $MAINPID, provided by systemd
ExecStop=/bin/sh -c "SSH_AGENT_PID=$MAINPID /usr/bin/ssh-agent -k"
#ExecStop=/bin/kill -9 $MAINPID

# fixed: StandardOutput/StandardError=syslog is deprecated and ignored
# (with a warning) by modern systemd; journal is the supported equivalent
StandardOutput=journal
StandardError=journal

[Install]
# note this is not multi-user.target
WantedBy=default.target
--------------------------------------------------------------------------------
/vagrant/awscli1604/Vagrantfile:
--------------------------------------------------------------------------------
# -*- mode: ruby -*-
# vi: set ft=ruby :
#
# Ubuntu 16.04 VM provisioned with the AWS CLI and python SDK;
# AWS credentials/region are passed through from the host environment.
vmname = 'awscli1604'
boxname = 'ubuntu/xenial64'

Vagrant.configure(2) do |config|
  config.vm.hostname = "#{vmname}"
  config.vm.box = "#{boxname}"
  config.vm.network "private_network", type: "dhcp"

  # VirtualBox-specific VM sizing
  config.vm.provider "virtualbox" do |v|
    v.name = "#{vmname}"
    v.customize ["modifyvm", :id, "--memory","1024" ]
    v.customize ["modifyvm", :id, "--cpus","1" ]
  end

  # optionally pass in AWS IAM id and secret key
  # AWS_ACCESS_KEY_ID=AK... AWS_SECRET_ACCESS_KEY="..." vagrant up
  config.vm.provision "shell", path: "awscli.sh", privileged: false, env: {AWS_ACCESS_KEY_ID:ENV['AWS_ACCESS_KEY_ID'],AWS_SECRET_ACCESS_KEY:ENV['AWS_SECRET_ACCESS_KEY'],AWS_REGION:ENV['AWS_REGION']}
  config.vm.provision "shell", path: "aws-python-sdk.sh", privileged: false
  config.vm.provision "file", source: "list_aws_regions.py", destination: "list_aws_regions.py"

  config.vm.provision "shell", inline: <<-SHELL

    echo "SUCCESS!"

    echo ==FINAL ENV======================================
    echo AWS_ACCESS_KEY_ID is #{ENV['AWS_ACCESS_KEY_ID']}
    echo Using AWS REGION/FORMAT: #{ENV['AWS_REGION']},#{ENV['AWS_FORMAT']}

  SHELL
  # fixed just above: ENV['$AWS_FORMAT'] looked up the literal key
  # "$AWS_FORMAT" (always nil); the correct env var name is AWS_FORMAT

end
--------------------------------------------------------------------------------
/vault/tiny-tools-test.yaml:
--------------------------------------------------------------------------------
#
# Used as example deployment to test cluster
#
# generate:
# kubectl apply -f tiny-tools-test.yaml
#
# test from inside container (previous comment omitted the command to run):
# kubectl exec -it deployment/tiny-tools-test -c tiny-tools -- sh
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tiny-tools-test
  labels:
    app: tiny-tools-test
spec:
  selector:
    matchLabels:
      app: tiny-tools-test
  replicas: 1
  template:
    metadata:
      labels:
        app: tiny-tools-test
    spec:
      #serviceAccountName: default
      containers:
      - name: tiny-tools
        image: ghcr.io/fabianlee/tiny-tools-multi-arch:2.0.3
        # /tmp/healthy satisfies both probes; long sleep keeps the pod alive
        args:
        - /bin/sh
        - -c
        - touch /tmp/healthy; date; echo "starting..."; sleep 30d
        livenessProbe:
          exec:
            command:
            - cat
            - /tmp/healthy
          initialDelaySeconds: 5
          periodSeconds: 5
        readinessProbe:
          exec:
            command:
            - cat
            - /tmp/healthy
          initialDelaySeconds: 5
          periodSeconds: 5
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
--------------------------------------------------------------------------------
/bash/test_trap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Illustrates use of SIGINT, SIGUSR1, SIGEXIT capture in BASH
4 | #
5 |
# custom user signal (SIGUSR1): count each delivery and report the running total
sigusr1_count=0
function sigusr1_capture() {
  sigusr1_count=$((sigusr1_count + 1))
  echo "SIGUSR1 called $sigusr1_count times"
}
12 |
# Ctrl-C (SIGINT) handler: count presses; after the third, flip the
# flag that the main loop watches so the script can exit cleanly
ctrl_c_count=0
function sigint_capture() {
  ctrl_c_count=$((ctrl_c_count + 1))
  echo "SIGINT CTRL-C sensed for $ctrl_c_count time"
  if (( ctrl_c_count > 2 )); then
    echo "SIGINT Ctrl-C pressed 3 times, flipping flag"
    continue_looping=0
  fi
}
23 |
# EXIT trap: print the final signal tallies when the main process terminates
function sigexit_capture() {
  printf '\n'
  printf '%s\n' "== FINAL EXIT COUNTS ==="
  printf '%s\n' "SIGUSR1 called $sigusr1_count times"
  printf '%s\n' "SIGINT CTRL-C sensed for $ctrl_c_count time"
}
31 |
32 |
########## MAIN #################

# trap signals to handler functions defined above
trap sigint_capture SIGINT
trap sigusr1_capture USR1
trap sigexit_capture EXIT

# send signals to self as a smoke test of the handlers
echo "current pid is $$ $BASHPID"
kill -s SIGINT $$
kill -s SIGUSR1 $$


# loop until sigint_capture flips the flag (3 Ctrl-C presses)
continue_looping=1
while [ $continue_looping -eq 1 ]; do
  # fixed garbled prompt: was "press -C 3 times", missing "Ctrl"
  echo "Waiting for you to press Ctrl-C 3 times..."

  # send SIGUSR1 every second
  kill -s SIGUSR1 $$
  sleep 1
done
55 |
--------------------------------------------------------------------------------
/terraform/yaml_contribution_model/main.tf:
--------------------------------------------------------------------------------
#
# blog: https://fabianlee.org/2024/09/01/terraform-external-yaml-file-as-a-contribution-model-for-outside-teams/
#

locals {
  # baseline entries owned by this module
  file_data_static = [
    { name="foo1", content="this is foo1" },
    { name="foo2", content="this is foo2" }
  ]

  # optional contributions from outside teams; falls back to an empty
  # 'files' list when external.yaml is absent so concat() below still works
  #file_data_ext_yaml = yamldecode(file("${path.module}/external.yaml"))
  file_data_ext_yaml = fileexists("${path.module}/external.yaml") ? yamldecode(file("${path.module}/external.yaml")):{ files= [] }

  # static + external entries, consumed by the for_each below
  file_data_merge = concat( local.file_data_static, local.file_data_ext_yaml.files )
}

# if hard-coding resources
#resource "local_file" "foo_test" {
#  for_each = { for entry in local.file_data : entry.name=>entry }
#  content = each.value.content
#  filename = "${path.module}/${each.value.name}.txt"
#}

# for data-driven resources: one local_file per merged entry, keyed by name
resource "local_file" "foo" {
  #for_each = { for entry in local.file_data_ext_yaml.files : entry.name=>entry }
  for_each = { for entry in local.file_data_merge : entry.name=>entry }
  content = each.value.content
  filename = "${path.module}/${each.value.name}.txt"
}

output "show_data" {
  value = local.file_data_merge
  #value = flatten([for file in local.file_data_merge: file.name ])
}
36 |
--------------------------------------------------------------------------------
/bash/file_existence_and_size.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Test to make sure file exists and has size greater than 0 bytes
#
# blog: https://fabianlee.org/2022/03/27/bash-test-both-file-existence-and-size-to-avoid-signalling-success/
#

# test for existence and content ([ -s ] is true only for an existing, non-empty file)
tempfile=$(mktemp)
[ -s "$tempfile" ] || echo "File $tempfile does not exist or is 0 bytes"

# now create, but it will have 0 bytes
touch "$tempfile"
[ -s "$tempfile" ] || echo "File $tempfile still does not pass test because it is 0 bytes"

# give the file content; use explicit if/else instead of the original
# 'test || echo && echo' chain, which ran the second echo unconditionally
echo testing > "$tempfile"
if [ -s "$tempfile" ]; then
  echo "File $tempfile passes test because it exists and has content"
else
  echo "This will not be invoked because file has content"
fi

# stat would error if file did not exist
filesize=$(stat -c%s "$tempfile")
if [[ -f "$tempfile" && $filesize -gt 0 ]]; then
  echo "longer form of validating file existence and size of $tempfile at $filesize bytes"
else
  # fixed typo: was "existence+sie"
  echo "file failed validation of existence+size using stat"
fi

# use find to implement file existence and content size
find "$tempfile" -size +0b 2>/dev/null | grep .
if [[ $? -eq 0 ]]; then
  echo "'find' validated both file existence and size of $tempfile"
else
  echo "file failed validation of existence+size using find"
fi

rm -f "$tempfile"
36 |
--------------------------------------------------------------------------------
/golang/sleepservice/sleepservice.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "time"
5 | "log"
6 | "flag"
7 | "math/rand"
8 | "os"
9 | "os/signal"
10 | //"syscall"
11 | )
12 |
// main runs an infinite hello-printer that can be interrupted by any OS
// signal; on a signal it logs the signal name, runs AppCleanup, and exits 1.
func main() {

	// load command line arguments
	name := flag.String("name","world","name to print")
	flag.Parse()

	log.Printf("Starting sleepservice for %s",*name)

	// setup signal catching
	sigs := make(chan os.Signal, 1)

	// catch all signals since not explicitly listing
	signal.Notify(sigs)
	//signal.Notify(sigs,syscall.SIGQUIT)

	// goroutine invoked upon seeing signal: log it, clean up, exit non-zero
	go func() {
		s := <-sigs
		log.Printf("RECEIVED SIGNAL: %s",s)
		AppCleanup()
		os.Exit(1)
	}()

	// infinite print loop; only the signal goroutine above terminates it
	for {
		log.Printf("hello %s",*name)

		// wait random number of milliseconds (0-2999) before looping again
		Nsecs := rand.Intn(3000)
		log.Printf("About to sleep %dms before looping again",Nsecs)
		time.Sleep(time.Millisecond * time.Duration(Nsecs))
	}

}
47 |
// AppCleanup performs teardown before the process exits;
// currently it only logs that cleanup ran.
func AppCleanup() {
	msg := "CLEANUP APP BEFORE EXIT!!!"
	log.Println(msg)
}
51 |
52 |
--------------------------------------------------------------------------------
/kubectl/playbook-k8s-configmap-update.yml:
--------------------------------------------------------------------------------
# Example of checking for ConfigMap existence so that create or replace can be used
#
# blog: https://fabianlee.org/2021/03/17/kubernetes-updating-an-existing-configmap-using-kubectl-replace/
#
# ansible-playbook playbook-k8s-configmap-update.yml --connection=local
#
---
- hosts: localhost
  connection: local

  tasks:

    # sanity check that kubectl can reach the cluster
    - command: kubectl get nodes

    # rc 0 = configmap exists, rc 1 = not found; any other rc is a real failure
    - name: does configmap exist?
      command: kubectl describe configmap test1
      failed_when: not configmap_res.rc in [0,1]
      register: configmap_res

    - name: show configmap results, 0=found,1=not found
      debug:
        msg: "{{configmap_res.rc}}"

    - name: kubectl to create new configmap
      command: kubectl create configmap test1 --from-file=ConfigMap-test1.yaml
      when: configmap_res.rc != 0

    # using 'shell' because we need to use a pipe
    # NOTE(review): newer kubectl expects '--dry-run=client' instead of bare
    # '--dry-run' -- confirm against the target cluster's kubectl version
    - name: kubectl to update existing configmap
      shell: 'kubectl create configmap test1 --from-file=ConfigMap-test1.yaml -o yaml --dry-run | kubectl replace -f -'
      when: configmap_res.rc == 0

    - name: show final configmap
      command: kubectl describe ConfigMap test1
      register: final_output

    - debug:
        msg: "{{final_output.stdout_lines}}"
39 |
40 |
41 |
--------------------------------------------------------------------------------
/ruby/headless_chrome.rb:
--------------------------------------------------------------------------------
1 | #!/usr/bin/ruby
2 | require 'selenium-webdriver'
3 |
# Builds a headless Chrome driver and stores it in @driver.
# Assumes the chromedriver binary sits in the current directory.
def setup

  # path to Chrome driver
  Selenium::WebDriver::Chrome.driver_path = "./chromedriver"

  # headless options; --disable-gpu historically required on some platforms
  options = Selenium::WebDriver::Chrome::Options.new
  options.add_argument('--headless')
  options.add_argument('--disable-gpu')
  #options.add_argument('--remote-debugging-port=9222')

  # capabilities
  # NOTE(review): 'desired_capabilities' was removed in selenium-webdriver 4.x
  # -- confirm the pinned gem version still accepts it
  caps = Selenium::WebDriver::Remote::Capabilities.chrome
  caps["screen_resolution"] = "600x768"
  caps["record_network"] = "true"

  # create driver using options and capabilities
  @driver = Selenium::WebDriver.for :chrome, options: options, desired_capabilities: caps

end
24 |
# Shut down the browser and end the chromedriver session.
def teardown
  @driver.quit
end

# Wraps a test body: browser setup, yield to the given block, then teardown.
# Note: teardown is skipped if the block raises (no ensure clause).
def run
  setup
  yield
  teardown
end
34 |
35 |
############ MAIN ###################

run do

  # go to google search page
  @driver.get 'http://www.google.com'

  # do search via the 'q' input field
  element = @driver.find_element(:name, 'q')
  element.send_keys "ruby selenium webdriver"
  element.submit

  # create screenshot of search results in the current directory
  print "RESULTS TITLE: #{@driver.title}\n"
  @driver.save_screenshot('headless.png')
end
52 |
--------------------------------------------------------------------------------
/vagrant/awscli1404/Vagrantfile:
--------------------------------------------------------------------------------
# -*- mode: ruby -*-
# vi: set ft=ruby :
#
# Ubuntu 14.04 VM provisioned with the AWS CLI and Python SDK
#
vmname = 'awscli1404'
boxname = 'ubuntu/trusty64'

Vagrant.configure(2) do |config|
  config.vm.hostname = "#{vmname}"
  config.vm.box = "#{boxname}"
  config.vm.network "private_network", type: "dhcp"

  config.vm.provider "virtualbox" do |v|
    v.name = "#{vmname}"
    v.customize ["modifyvm", :id, "--memory","1024" ]
    v.customize ["modifyvm", :id, "--cpus","1" ]
  end

  # optionally pass in AWS IAM id and secret key
  # (fixed comment: second var is the secret key, not the id again)
  # AWS_ACCESS_KEY_ID=AK... AWS_SECRET_ACCESS_KEY="..." vagrant up
  config.vm.provision "shell", path: "../awscli1604/awscli.sh", privileged: false, env: {AWS_ACCESS_KEY_ID:ENV['AWS_ACCESS_KEY_ID'],AWS_SECRET_ACCESS_KEY:ENV['AWS_SECRET_ACCESS_KEY'],AWS_REGION:ENV['AWS_REGION']}
  config.vm.provision "shell", path: "../awscli1604/aws-python-sdk.sh", privileged: false
  config.vm.provision "file", source: "../awscli1604/list_aws_regions.py", destination: "list_aws_regions.py"

  config.vm.provision "shell", inline: <<-SHELL

    echo "SUCCESS!"

    echo ==FINAL ENV======================================
    echo AWS_ACCESS_KEY_ID is #{ENV['AWS_ACCESS_KEY_ID']}
    echo Using AWS REGION/FORMAT: #{ENV['AWS_REGION']},#{ENV['AWS_FORMAT']}

  SHELL

end
36 |
--------------------------------------------------------------------------------
/vagrant/gpg1604/gpgsetup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sudo apt-get update
4 | sudo apt-get install python-gnupg rng-tools -y
5 | sudo rngd -r /dev/urandom
6 |
7 | # create groups
8 | # 'deployers' ability to decrypt production secrets for operational tasks
9 | # 'developers' no ability to decrypt production secrets
10 | sudo groupadd -g 11000 deployers
11 | sudo groupadd -g 11001 developers
12 |
13 | # make 2 users part of 'deployers' group, should be able to decrypt and sudo
14 | sudo useradd -u 20001 -g deployers -m alice
15 | sudo useradd -u 20002 -g deployers -m bob
16 |
17 | # set static password and ensure group definition added to /etc/group
18 | phash=$(openssl passwd -1 -salt mysalt alicepass)
19 | sudo usermod -a -G deployers -p "$phash" alice
20 | phash=$(openssl passwd -1 -salt mysalt bobpass)
21 | sudo usermod -a -G deployers -p "$phash" bob
22 |
23 | # make 1 user part of 'developers' group, should not be able to decrypt or sudo
24 | sudo useradd -u 20003 -g developers -m billy
25 | phash=$(openssl passwd -1 -salt mysalt billypass)
26 | sudo usermod -a -G developers -p "$phash" billy
27 |
28 | # deployers group gets full sudo privileges
29 | echo "%deployers ALL=(ALL) ALL" | sudo tee /etc/sudoers.d/deployers
30 |
31 | # developers group gets access to single shell script
32 | echo "%developers ALL=(ALL) /tmp/developersDecrypt.sh" | sudo tee /etc/sudoers.d/developers
33 |
--------------------------------------------------------------------------------
/bash/openssl_cert_days_till_expiration.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # shows number of days before expiration of certificate using openssl
4 | # https://fabianlee.org/2024/09/05/bash-calculating-number-of-days-till-certificate-expiration-using-openssl/
5 | #
6 | # https://stackoverflow.com/questions/10175812/how-to-generate-a-self-signed-ssl-certificate-using-openssl
7 | # test using self-signed cert:
8 | # openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -sha256 -days 365 -nodes -subj "/C=XX/ST=StateName/L=CityName/O=CompanyName/OU=CompanySectionName/CN=CommonNameOrHostname"
9 | #
10 |
11 | # need parameter and valid certificate file
12 | [[ -n "$1" && -f "$1" ]] || { echo "Usage: certFile"; exit 1; }
13 |
# calculate difference in days between two date strings ($1 minus $2)
# https://unix.stackexchange.com/questions/24626/quickly-calculate-date-differences/24636#24636
function datediff() {
  local epoch1 epoch2
  epoch1=$(date -d "$1" +%s)
  epoch2=$(date -d "$2" +%s)
  # whole days, plus one so a partial remaining day still counts
  echo $(( (epoch1 - epoch2) / 86400 + 1 ))
}
22 |
23 |
# pull expiration string from the certificate passed as $1
# (bugfix: was hard-coded to cert.pem, ignoring the validated argument)
cert_expiration_str=$(openssl x509 -in "$1" -text -noout | grep -Po "Not After : \K.*" | head -n1)
echo "cert_expiration_str=$cert_expiration_str"

# show number of days till expiration (relative to current UTC time)
echo "days till expiration:" $(datediff "$cert_expiration_str" "$(date -u)")
30 |
31 |
--------------------------------------------------------------------------------
/ansible/roles/conditional-tasks/tasks/main.yml:
--------------------------------------------------------------------------------
---

# always show which tags were passed on the command line
- name: show ansible tags
  debug: msg="{{ ansible_run_tags|default('') }}"
  tags: always

# problem: only includes file, but unless commands inside are tagged with 'include', will not execute them
# https://github.com/ansible/ansible/issues/30882
#
#- name: include_task test
#  include_tasks: include_task.yml
#  tags:
#    - include

# workaround1: include in block
#- block:
#  - name: include_task test
#    include_tasks: include_task.yml
#  tags:
#    - include

# solution: apply tag to all tasks inside the included file
#- name: include_task test
#  include_tasks:
#    file: include_task.yml
#    apply:
#      tags:
#        - include
#        - always
#  tags:
#    - include
#    - always

- debug: msg="(using when+tag) is tag 'import' set? {{'import' in ansible_run_tags}}"
  when: "'import' in ansible_run_tags"
  tags: import

- debug: msg="(using never tag) is tag 'import' set? {{'import' in ansible_run_tags}}"
  tags:
    - import
    - never

# import_tasks is static, so tasks in the imported file inherit the 'import' tag
- name: import_task test
  import_tasks: import_task.yml
  tags: import

- name: deletion task
  import_tasks: delete.yml
  # 'never' (special) do not invoke unless specified
  # 'delete' invoked if this tag explicitly set
  tags:
    - delete
    - never
54 |
--------------------------------------------------------------------------------
/yq/yq-update-ds-annotation.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# reads multi-document yaml, adds row to array of only targeted ones
# https://mikefarah.gitbook.io/yq/usage/tips-and-tricks
#
# blog: https://fabianlee.org/2022/07/02/kubernetes-targeting-the-addition-of-array-items-to-a-multi-document-yaml-manifest


# adds array row indiscriminately to all documents (not what we want)
#yq '.spec.template.metadata.annotations."prometheus.io/scrape"="true"' multi-doc.yaml
#exit 0

# adds array row to only DaemonSet, but does not show other docs (not what we want)
#yq 'select(.kind=="DaemonSet") | .spec.template.metadata.annotations."prometheus.io/scrape"="true"' multi-doc.yaml
#exit 0


# adds array row to both DaemonSet and Deployment, both of which have target path
# but also does not show other docs (not what we want)
#yq 'select(.spec.template.metadata.annotations) | .spec.template.metadata.annotations."prometheus.io/scrape"="true"' multi-doc.yaml
#exit 0

# final correct logic: modify only DaemonSet docs that lack the scrape
# annotation, while still emitting every document
# (passes the file to yq directly instead of the 'cat | yq' pipeline)
yq 'select (.spec.template.metadata.annotations) |= (
  select (.kind=="DaemonSet") |
  with(
    select(.spec.template.metadata.annotations."prometheus.io/scrape"==null);
    .spec.template.metadata.annotations."prometheus.io/scrape"="true" | .spec.template.metadata.annotations."prometheus.io/port"="10254"
  )
)' multi-doc.yaml
31 |
32 |
--------------------------------------------------------------------------------
/bash/grep_lookahead_lookbehind.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Tests LookAhead and LookBehind with grep to isolate output
#

testfile="greptest.txt"

# create test file of sample URLs
cat > $testfile << EOF
http://www.google.com?q=foo
https://images.google.com
https://wikipedia.org/wiki/Regular_expression
https://fr.wikipedia.org/wiki/Regular_expression
http://www.yahoo.com
EOF


echo "#############################"
echo "plain grep to pull out secure and insecure URL"
grep --color -E 'https?://([^ /?\"])*' $testfile

echo ""
echo "#############################"
echo "same regex, but with -o output flag"
grep --color -Eo 'https?://([^ /?\"])*' $testfile

echo ""
echo "#############################"
# fixed label typo: PCR -> PCRE
echo "PCRE Perl regex with \K which resets match to simulate LookBehind"
grep -Po 'https?://\K([^ /?\"])*' $testfile

echo ""
echo "#############################"
echo "PCRE Perl regex with static length LookBehind"
grep -Po "(?<=https://)([^ /?\"])*" $testfile

echo ""
echo "#############################"
echo "PCRE Perl regex with brute forced variable length LookBehind, use \K instead"
grep -Po "(?:(?<=http://)|(?<=https://))([^ /?\"])*" $testfile

echo ""
echo "#############################"
# fixed label: the (?=.com) lookahead targets .com domains, not .org
echo "PCRE Perl regex with LookAhead for .com domains"
grep -Po "https?://\K([^ /?\"])*(?=.com)" $testfile


# cleanup
rm $testfile
50 |
51 |
--------------------------------------------------------------------------------
/terraform/for_each_list/main.tf:
--------------------------------------------------------------------------------
#
# blog: https://fabianlee.org/2021/09/24/terraform-converting-ordered-lists-to-sets-to-avoid-errors-with-for_each/
#

locals {
  # map of objects (usable by for_each directly)
  my_set = {
    "a" = { "id":"a", "name":"first", },
    "b" = { "id":"b", "name":"second" },
    "c" = { "id":"c", "name":"third" }
  }
  # ordered list of objects (must be converted before use with for_each)
  my_list = [
    { "id":"a", "name":"first" },
    { "id":"b", "name":"second" },
    { "id":"c", "name":"third" }
  ]
  str_list = [ "a","b","c" ]
}

resource "null_resource" "show_set" {
  for_each = local.my_set
  provisioner "local-exec" {
    command = "echo my_set: the name for ${each.key} is ${each.value.name}"
  }
}

resource "null_resource" "show_str_list" {
  # convert string list to set
  for_each = toset( local.str_list )
  provisioner "local-exec" {
    command = "echo str_list: the value is ${each.key}"
  }
}

resource "null_resource" "show_list" {
  # convert ordered list to map keyed by the unique 'id' attribute
  for_each = { for entry in local.my_list: entry.id=>entry }
  provisioner "local-exec" {
    command = "echo my_list for_each: the name for ${each.value.id} is ${each.value.name}"
  }
}

resource "null_resource" "show_list_with_count" {
  # count-based alternative; resource addresses shift if list order changes
  count = length(local.my_list)
  provisioner "local-exec" {
    command = "echo my_list count: the name for ${local.my_list[count.index].id} is ${local.my_list[count.index].name}"
  }
}
48 |
49 |
--------------------------------------------------------------------------------
/gitlab/install-glab-cli.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Install 'glab' the GitLab official CLI
#

project="gitlab-org/cli"

# make sure jq is installed
sudo apt install jq -y

# resolve the most recent release name via the GitLab GraphQL API
# https://gitlab.com/gitlab-org/cli/-/releases
latest_version=$(curl -s \
  -X POST \
  -H 'Content-Type: application/json' \
  --data-raw '[{"operationName":"allReleases","variables":{"fullPath":"gitlab-org/cli","first":10,"sort":"RELEASED_AT_DESC"},"query":"query allReleases($fullPath: ID!, $first: Int, $last: Int, $before: String, $after: String, $sort: ReleaseSort) {\n project(fullPath: $fullPath) {\n id\n releases(\n first: $first\n last: $last\n before: $before\n after: $after\n sort: $sort\n ) {\n nodes {\n ...Release\n }\n }\n }\n}\n\nfragment Release on Release {\n id\n name\n }\n \n"}]' \
  https://gitlab.com/api/graphql | jq -r ".[].data.project.releases.nodes[0].name")
echo "latest_version=$latest_version"

# bail out early if the version lookup failed, rather than building a bad URL
[ -n "$latest_version" ] && [ "$latest_version" != "null" ] || { echo "ERROR: could not determine latest version"; exit 1; }

# strip off 'v' for filename
tar_file="glab_${latest_version//v}_Linux_x86_64.tar.gz"
tar_url="https://gitlab.com/$project/-/releases/$latest_version/downloads/$tar_file"
echo "downloading: $tar_url"
[ -f "/tmp/$tar_file" ] || wget "$tar_url" -O "/tmp/$tar_file"

# place binary in current directory, then move to PATH
# (--strip-components is the unabbreviated spelling of --strip)
tar xvfz "/tmp/$tar_file" bin/glab --strip-components=1
sudo chown root:root glab
sudo mv glab /usr/local/bin/

# 'command -v' is the portable replacement for 'which'
command -v glab
glab --version

echo "Run 'glab auth login' to authenticate"
34 |
35 |
--------------------------------------------------------------------------------
/terraform/json_vars_file/main.tf:
--------------------------------------------------------------------------------
#
# blog: https://fabianlee.org/2021/09/24/terraform-using-json-files-as-input-variables-and-local-variables/
#

locals {
  # can validate with: jq . values-local.json
  local_data = jsondecode(file("${path.module}/local-values.json"))

  # array of just keys from set
  my_map_keys = [ for k,v in local.local_data.mylocal_map: k ]

  # array of just values from set
  my_map_values = [ for k,v in local.local_data.mylocal_map: v ]

  # array of just specific attribute value from set
  my_map_values_url = [ for k,v in local.local_data.mylocal_map: v.url ]

  # set where we swap the key to be the url
  my_map_values_url_to_name = { for k,v in local.local_data.mylocal_map: v.url=>v.name }
}

# declare input variables and their default value
# ('vms' has no default, so it must be supplied via tfvars/-var-file)
variable a { default="n/a" }
variable strlist { default=[] }
variable vms {}

# show simple variable
output "show_var_a" {
  value = var.a
}
# show str list
output "show_var_strlist" {
  value = var.strlist
}


output "show_mylocal" {
  value = local.local_data.mylocal
}
output "show_my_map_keys" {
  value = local.my_map_keys
}
output "show_my_map_values" {
  value = local.my_map_values
}
output "show_my_map_values_url" {
  value = local.my_map_values_url
}
output "show_my_map_values_url_to_name" {
  value = local.my_map_values_url_to_name
}
53 |
--------------------------------------------------------------------------------
/bash/test_generic_variables_template.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # using sed or envsubst to replace multiple variables in template
4 | # blog: https://fabianlee.org/2021/05/29/bash-render-template-from-matching-bash-variables/
5 | #
6 |
7 | # declare variables
8 | first="1"
9 | animal="dog"
10 |
11 | echo ""
12 | echo "##########"
13 | echo "Dollar sign templating style"
14 | echo "##########"
15 |
16 | # create template, heredoc without var evaluation
17 | read -r -d '' mytemplate <<'EOF'
18 | this is $first
19 | the $animal goes woof
20 | EOF
21 |
22 | # create list of sed replacements
23 | sedcmd=''
24 | for var in first animal ; do
25 | printf -v sc 's/$%s/%s/;' $var "${!var//\//\\/}"
26 | sedcmd+="$sc"
27 | done
28 | sed -e "$sedcmd" <(echo "$mytemplate")
29 |
30 |
31 |
32 | echo ""
33 | echo "##########"
34 | echo "Dollar sign with curly bracket style"
35 | echo "##########"
36 |
37 | # allow var evaluation, so escape dollar sign
38 | read -r -d '' mytemplate <&2 echo -en "will search for installed charts in local repos (skipping 'stable'):\n$repo_list_local"
15 | >&2 echo ""
16 | >&2 echo ""
17 | >&2 echo "RELEASE,CHART,VERSION,REPO"
18 |
19 | # for each installed Release
20 | IFS=$'\n'
21 | for line in $(helm list -A 2>/dev/null | tail -n+2); do
22 | # release name
23 | name=$(echo $line | awk '{print $1}' | xargs)
24 |
25 | # skip 'stable' because it is central hub for too many charts
26 | [[ $name == "stable" ]] && continue
27 |
28 | # chart name is '-', split it
29 | chart_with_suffix=$(echo $line | awk -F' ' '{print $9}' | xargs)
30 | chart_name=${chart_with_suffix%-*}
31 | chart_version=${chart_with_suffix##*-}
32 |
33 | # brute-force check of each local repo
34 | while read -r repo; do
35 | helm show chart $repo/$chart_name >/dev/null 2>&1
36 | if [ $? -eq 0 ]; then
37 | echo "$name,$chart_name,$chart_version,$repo"
38 | fi
39 | done < <(echo "$repo_list_local")
40 |
41 | done
42 |
--------------------------------------------------------------------------------
/golang/zabbixsender/zabbixsender.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "flag"
6 | . "github.com/blacked/go-zabbix"
7 | )
8 |
// main sends two hard-coded trapper values ('myint1', 'mystr1') to a Zabbix
// server (-zabbix/-port) on behalf of the target host given via -host.
func main() {

	// load command line args
	zserver := flag.String("zabbix","","zabbix server e.g. '127.0.0.1'")
	targetHost := flag.String("host","","zabbix target host e.g. 'myhost'")
	zport := flag.Int("port",10051,"zabbix server port e.g. 10051")
	flag.Parse()

	// make sure required fields 'zabbix' and 'host' are populated
	if *zserver=="" || *targetHost=="" {
		flag.PrintDefaults()
		return
	}

	// debug
	fmt.Printf("Connecting to %s:%d to populate trapper items for host %s\n",*zserver,*zport,*targetHost)
	fmt.Printf("NOTE: If you do not have items of type trapper on host %s named 'mystr1' and 'myint1', then do not expect these keys to be successfuly processed\n\n",*targetHost)

	// prepare to send values to trapper items: myint1, mystr1
	var metrics []*Metric
	metrics = append(metrics, NewMetric(*targetHost, "myint1", "122"))
	metrics = append(metrics, NewMetric(*targetHost, "mystr1", "OK1"))

	// Create instance of Packet class wrapping the metrics
	packet := NewPacket(metrics)

	// Send packet to zabbix
	z := NewSender(*zserver, *zport)
	res,err := z.Send(packet)

	// check for error
	if err != nil {
		fmt.Println("ERROR: ",err)
		return
	}

	// show zabbix server reply
	fmt.Println(string(res))
}
48 |
49 |
--------------------------------------------------------------------------------
/saltstack-pillarcombine-etc-srv/salt/logrotate/map.jinja:
--------------------------------------------------------------------------------
# /srv/salt/logrotate/map.jinja
{# load role defaults shipped with the state tree #}
{% import_yaml 'logrotate/defaults.yaml' as default_settings %}

# standard OS family customization
{% set os_family_map = salt['grains.filter_by']({
    'RedHat': {
    },
    'Arch': {
    },
    'Debian': {
    },
    'Suse': {
    },
  }, merge=salt['pillar.get']('logrotate:lookup'))
%}
{% do default_settings.logrotate.update(os_family_map) %}

# merge pillar lookup on top of default settings
{% set logrotate = salt['pillar.get'](
    'logrotate:lookup',
    default=default_settings.logrotate,
    merge=True
  )
%}


# inner loop variables don't have scope, so have to use hash trick
# https://fabianlee.org/2016/10/18/saltstack-setting-a-jinja2-variable-from-an-inner-block-scope/
#
{% set mergedresult = { 'logrotate': {} } %}
{% for key in pillar.keys() %}
  {% if "logrotate-" in key %}

    {# accumulate each 'logrotate-*' pillar's ':lookup' subtree into mergedresult #}
    {% set val = pillar.get(key) %}
    {% set keylookup = key~":lookup" %}
    {% set custjobs = salt['pillar.get'](keylookup,{}) %}

    {% set merged = salt['slsutil.merge'](
        mergedresult['logrotate'],
        custjobs,
        merge_lists=True
      )
    %}
    {% do mergedresult.update({'logrotate': merged}) %}

  {% endif %}
{% endfor %}


# do final merge, adding to main variable in outer scope
{% set logrotate = salt['slsutil.merge'](
    logrotate,
    mergedresult.logrotate,
    merge_lists=True
  )
%}
57 |
58 |
--------------------------------------------------------------------------------
/gcloud/gcp-add-data-disk.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# makes sure data disk at /dev/sdb is formatted and mounted
# https://cloud.google.com/compute/docs/disks/add-persistent-disk#formatting
#
# blog: https://fabianlee.org/2022/05/01/gcp-moving-a-vm-instance-to-a-different-region-using-snapshots/
#
# meant to be run inside GCP VM instance using startup metadata
# gcloud compute instances create .... --metadata-from-file=startup-script=gcp-add-data-disk.sh
#

set -x

# is /dev/sdb formatted to ext4?
if lsblk -f /dev/sdb | grep -q 'ext4' ; then
  echo "/dev/sdb already formatted"
else
  sudo mkfs.ext4 -m 0 -E lazy_itable_init=0,lazy_journal_init=0,discard /dev/sdb
fi

# is disk mounted? (test the command directly instead of checking $?)
if ! findmnt /datadisk1 ; then
  sudo mkdir -p /datadisk1
  sudo mount -o discard,defaults /dev/sdb /datadisk1
  sudo chmod a+rw /datadisk1
fi

# is disk entry in fstab? (grep the file directly instead of 'cat | grep')
UUID=$(sudo blkid -s UUID -o value /dev/sdb)
if grep -q '/datadisk1' /etc/fstab ; then
  echo "/datadisk1 already added to fstab, ensuring UUID is correct"
  sudo sed -i "s#^UUID=.* /datadisk1#UUID=$UUID /datadisk1#" /etc/fstab
else
  echo "UUID=$UUID /datadisk1 ext4 discard,defaults,noatime,nofail 0 2" | sudo tee -a /etc/fstab
fi

# writes timestamp and current zone to file on the data disk
file=/datadisk1/hello.txt
sudo touch "$file"
sudo chmod a+rw "$file"
date "+%F %T" | tee -a "$file"
curl -s http://metadata.google.internal/computeMetadata/v1/instance/zone -H "Metadata-Flavor: Google" | tee -a "$file"
echo "" | tee -a "$file"
45 |
--------------------------------------------------------------------------------
/python/argParseTest.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Example usage of argparse library
4 |
5 | https://fabianlee.org/2019/09/14/python-parsing-command-line-arguments-with-argparse/
6 |
7 | USAGE
8 | ./argParseTest2.py 3 4
9 | ./argParseTest2.py 3 4 --op=mul
10 | """
11 | import sys
12 | import argparse
13 |
14 |
def show_math_result(a, b, op="add", upper=False):
    """Print the result of a binary math operation (default: addition).

    a, b  -- integer operands
    op    -- "add" or "mul"
    upper -- when True, display the operation name in uppercase
    """
    result = a * b if op == "mul" else a + b
    label = op.upper() if upper else op
    print("{} {} {} = {}".format(a, label, b, result))
24 |
25 |
def main(argv):
    """Define the CLI arguments, parse them, and print the math result."""

    examples = '''usage:
 3 5
 4 6 --op add
 5 6 --op mul
'''

    # declare positional and optional arguments
    parser = argparse.ArgumentParser(
        description="Example using ArgParse library",
        epilog=examples,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('a', type=int, help="first integer")
    parser.add_argument('b', type=int, help="second integer")
    # use nargs for optional positional args or var args
    #parser.add_argument('c', type=int, nargs='?', help="third integer")
    parser.add_argument('-o', '--op', default="add",
                        choices=['add', 'mul'], help="add or multiply")
    parser.add_argument(
        '-u', '--to-upper', action="store_true", help="show uppercase")

    # parse args (argparse reads sys.argv itself)
    parsed = parser.parse_args()

    # print results of math operation
    show_math_result(parsed.a, parsed.b, parsed.op, parsed.to_upper)


if __name__ == '__main__':
    main(sys.argv)
54 |
--------------------------------------------------------------------------------
/bash/test_heredoc.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Examples with heredoc
4 | #
5 | # https://tldp.org/LDP/abs/html/here-docs.html
6 | # https://linuxize.com/post/bash-heredoc/
7 | # https://stackoverflow.com/questions/2500436/how-does-cat-eof-work-in-bash
8 | # https://stackoverflow.com/questions/7316107/how-to-split-strings-over-multiple-lines-in-bash
9 | # escaping characters in heredoc, https://www.baeldung.com/linux/heredoc-herestring
10 | # http://www.linuxcommand.org/lc3_man_pages/readh.html
11 |
12 | echo ""
13 | echo "*** do not strip tabs ***"
14 | cat <> /tmp/appendint.txt
69 | appended at $datestr
70 | EOF
71 |
72 |
73 |
74 |
75 |
--------------------------------------------------------------------------------
/windows/disable-iesc-uac.ps1:
--------------------------------------------------------------------------------
1 |
2 | # https://stackoverflow.com/questions/9368305/disable-ie-security-on-windows-server-via-powershell
3 |
function Disable-InternetExplorerESC {
    # Registry components controlling IE Enhanced Security, for admins
    # ({...A7...}) and for standard users ({...A8...})
    $escKeys = @(
        "HKLM:\SOFTWARE\Microsoft\Active Setup\Installed Components\{A509B1A7-37EF-4b3f-8CFC-4F3A74704073}",
        "HKLM:\SOFTWARE\Microsoft\Active Setup\Installed Components\{A509B1A8-37EF-4b3f-8CFC-4F3A74704073}"
    )
    foreach ($escKey in $escKeys) {
        Set-ItemProperty -Path $escKey -Name "IsInstalled" -Value 0 -Force
    }
    # restart explorer so the change takes effect
    Stop-Process -Name Explorer -Force
    Write-Host "IE Enhanced Security Configuration (ESC) has been disabled." -ForegroundColor Green
}
12 |
13 |
function Enable-InternetExplorerESC {
    # Re-enable ESC by setting IsInstalled=1 on the admin and user components
    $escKeys = @(
        "HKLM:\SOFTWARE\Microsoft\Active Setup\Installed Components\{A509B1A7-37EF-4b3f-8CFC-4F3A74704073}",
        "HKLM:\SOFTWARE\Microsoft\Active Setup\Installed Components\{A509B1A8-37EF-4b3f-8CFC-4F3A74704073}"
    )
    foreach ($escKey in $escKeys) {
        Set-ItemProperty -Path $escKey -Name "IsInstalled" -Value 1 -Force
    }
    # restart explorer (no -Force here, matching the original behavior)
    Stop-Process -Name Explorer
    Write-Host "IE Enhanced Security Configuration (ESC) has been enabled." -ForegroundColor Green
}
22 |
function Disable-UserAccessControl {
    # ConsentPromptBehaviorAdmin=0 means admins elevate without any prompt,
    # effectively disabling UAC prompting
    $policyKey = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System"
    Set-ItemProperty $policyKey -Name "ConsentPromptBehaviorAdmin" -Value 0 -Force
    Write-Host "User Access Control (UAC) has been disabled." -ForegroundColor Green
}
27 |
28 |
29 |
######### MAIN ################################3
# disable UAC prompting first, then IE ESC (which also restarts explorer)
Disable-UserAccessControl
Disable-InternetExplorerESC
33 |
34 |
--------------------------------------------------------------------------------
/golang/sleepservice19/sleepservice.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "time"
5 | "log"
6 | "flag"
7 | "math/rand"
8 | "os"
9 | "os/signal"
10 | "syscall"
11 | )
12 |
// main prints "hello <name>" in an endless loop, sleeping a random
// 0-2999ms between iterations.  A background goroutine watches for OS
// signals and, for anything other than SIGURG, runs AppCleanup and
// exits the process with status 1.
func main() {

	// load command line arguments
	name := flag.String("name","world","name to print")
	flag.Parse()

	log.Printf("Starting sleepservice for %s",*name)

	// setup signal catching
	sigs := make(chan os.Signal, 1)

	// catch all signals since not explicitly listing
	signal.Notify(sigs)
	//signal.Notify(sigs,syscall.SIGQUIT)

	// method invoked upon seeing signal
	go func() {
		for sig := range sigs {
			log.Printf("RECEIVED SIGNAL: %s",sig)

			switch sig {
			// starting with golang 1.14, need to ignore SIGURG used for preemption
			// https://github.com/golang/go/issues/37942
			case syscall.SIGURG:
				log.Printf("ignoring sigurg")
			default:
				// any other signal: run cleanup hook, then exit non-zero
				AppCleanup()
				os.Exit(1)
			} // switch

		} // for

	}()

	// infinite print loop; only the signal goroutine above ends the process
	for {
		log.Printf("hello %s",*name)

		// wait random number of milliseconds
		// (rand is unseeded, so the sequence repeats per run on Go < 1.20)
		Nsecs := rand.Intn(3000)
		log.Printf("About to sleep %dms before looping again",Nsecs)
		time.Sleep(time.Millisecond * time.Duration(Nsecs))
	}

}
58 |
// AppCleanup is the hook for releasing resources before exit;
// currently it only logs that cleanup ran.
func AppCleanup() {
	log.Println("CLEANUP APP BEFORE EXIT!!!")
}
62 |
63 |
--------------------------------------------------------------------------------
/k8s/tiny-tools.yaml:
--------------------------------------------------------------------------------
---
# Deployment running a single "tiny-tools" pod: a small toolbox image
# (curl, dig, etc.) useful for debugging networking from inside the cluster.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tiny-tools
  # not going to specify namespace, so that 'default' is implied
  # it can be overridden at command line if necessary
  # kubectl apply -f tiny-tools-per-node.yaml -n kube-system
  #namespace: default
spec:
  selector:
    matchLabels:
      app: tiny-tools

  # can scale across all nodes later
  # kubectl scale --replicas=$(kubectl get nodes | tail -n+2 | wc -l) deployment/tiny-tools
  replicas: 1

  template:
    metadata:
      labels:
        app: tiny-tools
      annotations:
        # to avoid istio sidecar auto-injection (if applicable)
        sidecar.istio.io/inject: "false"
    spec:
      containers:
      - name: tiny-tools
        # https://github.com/giantswarm/tiny-tools
        # https://hub.docker.com/r/giantswarm/tiny-tools/
        #image: giantswarm/tiny-tools:3.12
        #
        # https://github.com/fabianlee/tiny-tools-multi-arch
        image: ghcr.io/fabianlee/tiny-tools-multi-arch:2.0.3
        args:
        - /bin/sh
        - -c
        # no long-running daemon in the image, so keep the container alive
        # with a sleep; /tmp/healthy is the sentinel file both probes check
        - touch /tmp/healthy; date; echo "initializing..."; sleep 30d
        livenessProbe:
          exec:
            command:
            - cat
            - /tmp/healthy
          initialDelaySeconds: 5
          periodSeconds: 10
        readinessProbe:
          exec:
            command:
            - cat
            - /tmp/healthy
          initialDelaySeconds: 5
          periodSeconds: 10
      # this is the only supported value for 'Deployment'
      restartPolicy: Always
55 |
--------------------------------------------------------------------------------
/windows/install-adfs-2019.ps1:
--------------------------------------------------------------------------------
1 |
# Installs the ADFS role and configures a federation farm on Win2019,
# reusing one machine certificate for service, token-signing, and
# token-decryption.
Install-windowsfeature adfs-federation -IncludeManagementTools

Import-Module ADFS

# NetBIOS domain and lowercase DNS domain of the current user
$theDomain="$env:USERDOMAIN"
$userDNSDomain="$env:USERDNSDOMAIN".ToLower()

# do NOT prompt manually
#$installationCredential = Get-Credential -Message "Enter credentials of user to perform config"
#$serviceAccountCredential = Get-Credential -Message "Enter credential for federation service acct"

# instead, construct PSCredential
# NOTE(review): password is hardcoded in plaintext -- acceptable only for a
# throwaway lab; read from a secure store or prompt for anything real
$password = ConvertTo-SecureString -string "ThisIsMyP4ss!" -AsPlainText -force
$installationCredential = New-Object System.Management.Automation.PSCredential("$theDomain\Administrator",$password)
$serviceAccountCredential = New-Object System.Management.Automation.PSCredential("$theDomain\Administrator",$password) # try 'adfs1' later

# locate the machine certificate issued to win2k19-adfs1
$leafThumbprint=(get-ChildItem -Path 'Cert:\LocalMachine\My' | where-object { $_.Subject -like 'CN=win2k19-adfs1*' }).Thumbprint
write-host "leafThumbprint is $leafThumbprint"

# going to use our custom cert for: service, token-decrypt, and token-signing
# otherwise we would need to fetch the generated tokens and add to 'Root' store
# get-adfscertificate -certificateType token-signing
# get-adfscertificate -certificateType token-decrypting
Install-AdfsFarm `
  -CertificateThumbprint "$leafThumbprint" `
  -SigningCertificateThumbprint "$leafThumbprint" `
  -DecryptionCertificateThumbprint "$leafThumbprint" `
  -Credential $installationCredential `
  -FederationServiceDisplayName "My win2k19-adfs1.$userDNSDomain" `
  -FederationServiceName "win2k19-adfs1.$userDNSDomain" `
  -ServiceAccountCredential $serviceAccountCredential `
  -OverwriteConfiguration `
  -Confirm:$false
35 |
--------------------------------------------------------------------------------
/sysprep/w2k12/Vagrantfile-powershell.ps1:
--------------------------------------------------------------------------------
1 |
# Vagrantfile (Ruby DSL) that brings up a Windows 2012 linked clone from a
# sysprep-ready base box, with a bridged static IP and WinRM communicator.
# for linked clones
Vagrant.require_version ">= 1.8"

vmname = 'clone1'
boxname = 'w2k12base-sysprep-ready'
staticIP = '192.168.1.46'
# NOTE(review): netbiosName/domainFQDN are not referenced below --
# presumably consumed by MakeDomainController.ps1; confirm
netbiosName = 'contoso'
domainFQDN = 'contoso.com'

Vagrant.configure(2) do |config|
  config.vm.hostname = "#{vmname}"
  config.vm.box = "#{boxname}"

  # must have for Windows to specify OS type
  config.vm.guest = :windows

  # bridged network with a static address
  config.vm.network "public_network", ip: "#{staticIP}", bridge: "eth0"

  # winrm | ssh
  config.vm.communicator = "winrm"
  #config.ssh.username = "vagrant"
  #config.ssh.password = "vagrant"
  config.winrm.username = "vagrant"
  config.winrm.password = "vagrant"
  config.ssh.insert_key = false

  config.vm.synced_folder "/home/fabian/Documents", 'c:\users\Administrator\Documents2'

  # put powershell files unto guest and then execute
  config.vm.provision "file", source: "test.ps1", destination: 'c:\users\public\Documents\test.ps1'
  config.vm.provision "file", source: "MakeDomainController.ps1", destination: 'c:\users\public\Documents'
  config.vm.provision "shell", path: "test.ps1", privileged: true, args: "'1' '2'"

  # virtualbox provider
  config.vm.provider "virtualbox" do |v|
    v.name = "#{vmname}"
    v.gui = true
    # use linked clone for faster spinup
    v.linked_clone = true
    v.customize ["modifyvm", :id, "--memory","1024" ]
    v.customize ["modifyvm", :id, "--cpus","1" ]
    # dynamically set properties that can be fetched inside guestOS
    v.customize ["guestproperty", "set", :id, "myid", :id ]
    v.customize ["guestproperty", "set", :id, "myname", "#{vmname}" ]
  end

end
49 |
50 |
51 |
--------------------------------------------------------------------------------
/bash/install-ansible-ubuntu.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 |
4 | sudo apt-get update
5 | sudo apt-get install software-properties-common -y
6 | sudo -E apt-add-repository --yes --update ppa:ansible/ansible
7 | sudo apt-get install ansible -y
8 |
9 | # quick ansible config
10 | cat << EOF > ansible.cfg
11 | [defaults]
12 | inventory = ansible-hosts
13 | host_key_checking = False
14 | EOF
15 |
16 | cat << EOF > ansible-hosts
17 | # aliases
18 | myhost ansible_host=127.0.0.1
19 |
20 | [my_group]
21 | myhost ansible_user=$(whoami)
22 | #myhost ansible_user=ubuntu ansible_password=xxxx ansible_become_pass=xxxxx
23 |
24 | [all:vars]
25 | ansible_python_interpreter=/usr/bin/python3
26 | EOF
27 |
28 | mkdir -p host_vars
29 | cat << EOF > host_vars/myhost
30 | myhostval1: one
31 | myhostval2: two
32 | host_myuser: World
33 | EOF
34 |
35 | cat << EOF > ansible-ping-playbook.yaml
36 | ---
37 | - hosts: all
38 | gather_facts: false
39 | vars:
40 | playbook_myuser: Foo
41 | vars_prompt:
42 | - name: ansible_ssh_pass
43 | prompt: "For the localhost test, what is the password for user $(whoami) ?"
44 | private: no
45 | tasks:
46 | - name: do ping
47 | action: ping
48 | - name: show variable
49 | debug:
50 | msg: hello, I'm pulling {{host_myuser}} from host level variables, and {{playbook_myuser}} from playbook level variables
51 | EOF
52 |
53 | touch run-ansible-playbook.sh
54 | chmod +x run-ansible-playbook.sh
55 | cat << EOF > run-ansible-playbook.sh
56 | ansible-playbook ansible-ping-playbook.yaml -v
57 | EOF
58 |
59 | touch run-ansible-ping.sh
60 | chmod +x run-ansible-ping.sh
61 | cat << EOF > run-ansible-ping.sh
62 | ansible -m ping all
63 | EOF
64 |
65 | echo ""
66 | echo ""
67 | echo "First run:"
68 | echo "./run-ansible-playbook.sh"
69 | echo ""
70 | echo "Then run:"
71 | echo "./run-ansible-ping.sh"
72 |
--------------------------------------------------------------------------------
/kubectl/kustomize-secret/my-tls.key:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCddiXx92uXW7qK
3 | JI2P3AzbvcbXpcSHe7psoe6+nQwWS0QlVo1b2fKZGoegUkjdSZ2u+VpkQSeBG//U
4 | StKZP807Yg4eEsRUXrJcJMSGmyv2K0EPt0VQerLf1rKqkA8oL7AgVno/ft06Q6nb
5 | QrWoZQIwEKHlFOHGtQvyU/EdHiQwnvg3J5aw806hkfWw0wACjnXVGC5T5H4Pb6gX
6 | mU3J38IGnwSD3p8ZGBmgaj0ciuMQDCXicdfBRqBkc24zYHOc1/CSAju9Vs8gn8j/
7 | lscBUrBcvRtx6HeB/Y/Kkv3JSbAHPMSnRJO5yJk0Rjv0b5kPgkD7rN8WkVl7eRZ2
8 | uX7UnaP7AgMBAAECggEASS6PJi0jAR6tzOR+Mp1IEJzcHH+7nmg/1ZtRNaAR7Q6g
9 | JmZanpTl7oho/ncfzFEZiyfy/eNWM+hKd2UrCfSgMvg4rXt1aez1ImQoPpNB1FB4
10 | FZDNE0FjfyrusNzxXnEDf5QRjw8sTJcEC5aujQwc/y/Lvod9A3hDjCW9SxYmJb4R
11 | 2L4quAO70/TSF4QCAe9uXTyhVaeAnxB0w8O0T28rGZFXuYpTr7iqe+3gbIak8tRV
12 | ZX9q/4QXOHPrXYaawsSSdpjsZmXXZfrzDJnvVbHHbU8nbmF5FNSZBQEfcFe5Tj/U
13 | AUDWomXevmTe/hCtwEY9bKXM623SbWz0r7uDx1DIwQKBgQDN+iINe84avtUVOsqS
14 | tDIesKhASJW+9Sc7Bmxi9s3d7rtmFGkis1ZQH8+Wbkp1VwSUVvGT7/JT3Sv2pbPa
15 | tPt6+uS7EF0Kwa7ONjZns7rui8wR09LSdKoLM6JTyU14mSFMH/1G5EznO2QhcB31
16 | HkFFY2z0qKRmWncBuQspa+KyGwKBgQDDs70MssAYT/XaJXKKdEd6MWFtR/lxyF6h
17 | /5BHCkbPp7uIwrL0z9JkphyGzndqtbuHaQSX6F2uS9TNetBw69V0zH+4cD2v/9BE
18 | pJJ9g0osAmPh0zHlyGbTKzttzSBzyw3g0nof4iUXcL8YNun8fJrBoM/tG8vv5kF6
19 | Hx2Ow7TzoQKBgEs9yBqs0wkqFKSAqWcVtAlJ9uwkl6791qrVGdq81U7w2jvb0NwA
20 | LtNuC2KkP0bF1sJpUC3+RMMl091xglWuLv8pj0xWGebIAZcKbq1LFkn1f0Iz4ptG
21 | Sqd04SNCMa6QIX2xYc/3ra/6HgWo8wiLWMSEIVTuFb/d5AMZGV60LkdpAoGBAIab
22 | 3M8dLFAMv9hapZxiOr/rs03NkeAXqC/Zb03N3cL7Zly9PwZw5BBiyYBYB3+YnPKY
23 | 45XxOG6orijf7vuc22fJOZiNa4DhfkmYwUIfyibXRgiQkeOxojhhpjDyRP4gjt0z
24 | KNlBXa3v2SE4tFz9AB1rFVnMIsxSHnt3Z4dJSe/BAoGBAI2t0eM3Xy7/8rT9cSiO
25 | wRhWqWfD2D4iXVf2RhaLg8oc6ihztK5Y/eOVJpPAozMU+w14le0NYwp8luAPXLL1
26 | hAjNUSRO9Ujxhws8kxfCJiQptM45pxiG20ZVZg8vSjEX8udHxe3DfRjMxqYFuYGe
27 | h7ebMfu06yuz0iEVNtzDmjzf
28 | -----END PRIVATE KEY-----
29 |
--------------------------------------------------------------------------------
/golang/echoservice/echoservice.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "strings"
5 | "fmt"
6 | "log"
7 | "net"
8 | "net/http"
9 | "github.com/gorilla/mux"
10 | )
11 |
// main registers the GET /hello/{name} route and serves HTTP on port 8080,
// bound to the first local IPv4 that is neither loopback nor on 10.x.x.x.
// ListenAndServe blocks forever; log.Fatal exits on listener error.
func main() {

	router := mux.NewRouter().StrictSlash(true)
	router.HandleFunc("/hello/{name}", hello).Methods("GET")

	// want to start server, BUT
	// not on loopback or internal "10.x.x.x" network
	DoesNotStartWith := "10."
	IP := GetLocalIP(DoesNotStartWith)

	// start listening server
	log.Printf("creating listener on %s:%d",IP,8080)
	log.Fatal(http.ListenAndServe(fmt.Sprintf("%s:8080",IP), router))
}
26 |
// hello handles GET /hello/{name}: logs the user agent, path variables and
// query string, then writes "Hello <name>" with a 200 status.
func hello(w http.ResponseWriter, r *http.Request) {
	log.Println("Responding to /hello request")
	log.Println(r.UserAgent())

	// path variables extracted by gorilla/mux
	vars := mux.Vars(r)
	log.Println("request:",vars)

	// query string parameters (logged only, not otherwise used)
	rvars := r.URL.Query()
	log.Println("query string",rvars)

	// NOTE(review): with the route pattern /hello/{name}, mux requires the
	// path segment, so this fallback likely never triggers -- confirm
	name := vars["name"]
	if name == "" {
		name = "world"
	}

	w.WriteHeader(http.StatusOK)
	fmt.Fprintf(w, "Hello %s\n", name)
}
47 |
// GetLocalIP returns the first IPv4 address of this host that is neither a
// loopback address nor prefixed by DoesNotStartWith; "" if none is found
// or the interfaces cannot be enumerated.
// http://stackoverflow.com/questions/23558425/how-do-i-get-the-local-ip-address-in-go
func GetLocalIP(DoesNotStartWith string) string {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return ""
	}
	for _, candidate := range addrs {
		ipnet, ok := candidate.(*net.IPNet)
		if !ok || ipnet.IP.IsLoopback() {
			continue
		}
		ipText := ipnet.IP.String()
		if strings.HasPrefix(ipText, DoesNotStartWith) {
			continue
		}
		// only accept IPv4 addresses
		if ipnet.IP.To4() != nil {
			return ipText
		}
	}
	return ""
}
65 |
--------------------------------------------------------------------------------
/k8s/tiny-tools-nodeselector-tolerations.yaml:
--------------------------------------------------------------------------------
---
# tiny-tools Deployment demonstrating node placement: the pod is pinned to
# nodes labeled purpose=batch and tolerates the
# processingtype=batch:NoSchedule taint.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: tiny-tools
  # not going to specify namespace, so that 'default' is implied
  # it can be overridden at command line if necessary
  # kubectl apply -f tiny-tools-per-node.yaml -n kube-system
  #namespace: default
spec:
  selector:
    matchLabels:
      app: tiny-tools

  # can scale across all nodes later
  replicas: 1

  template:
    metadata:
      labels:
        app: tiny-tools
      annotations:
        # to avoid istio sidecar auto-injection (if applicable)
        sidecar.istio.io/inject: "false"
    spec:

      # place on node with this label
      nodeSelector:
        purpose: batch

      # allow on node with this taint
      tolerations:
      - key: processingtype
        #operator: Exists
        operator: Equal
        value: batch
        effect: NoSchedule

      containers:
      - name: tiny-tools
        # https://github.com/giantswarm/tiny-tools
        # https://hub.docker.com/r/giantswarm/tiny-tools/
        image: giantswarm/tiny-tools:3.12
        args:
        - /bin/sh
        - -c
        # keep the container alive; /tmp/healthy is the probe sentinel
        - touch /tmp/healthy; date; echo "initializing..."; sleep 30d
        livenessProbe:
          exec:
            command:
            - cat
            - /tmp/healthy
          initialDelaySeconds: 5
          periodSeconds: 10
        readinessProbe:
          exec:
            command:
            - cat
            - /tmp/healthy
          initialDelaySeconds: 5
          periodSeconds: 10
      # this is the only supported value for 'Deployment'
      restartPolicy: Always
--------------------------------------------------------------------------------
/ruby-logstash.conf:
--------------------------------------------------------------------------------
# Reads log lines from stdin, rebuilds a full timestamp from a time-only
# field, enriches events with inline ruby, and prints them via rubydebug.
#
# NOTE(review): the event['field'] hash-style access used in the ruby
# filters below is the Logstash <= 2.x API; Logstash 5+ requires
# event.get/event.set -- confirm the target Logstash version.
input {
  stdin { }
}

filter {

  # split each line into time, source path, and the remaining message
  grok {
    match => { "message" => "%{DATA:justtime} %{DATA:logsource} %{GREEDYDATA:msg}" }
  } #

  # incorrectly autopopulates to first day of year
  date {
    match => [ "justtime", "HH:mm.ss" ]
    target => "incorrectfulldatetime"
    timezone => "America/Los_Angeles"
  } # date

  # use ruby to augment with current day
  ruby {
    code => "
      event['fulldatetime'] = Time.now.strftime('%Y-%m-%d') + ' ' + event['justtime']
    "
  }
  # re-parse the rebuilt 'YYYY-MM-dd HH:mm.ss' value into @timestamp
  date {
    match => [ "fulldatetime", "YYYY-MM-dd HH:mm.ss" ]
    # target => "correctfulldatetime"
    target => "@timestamp"
    timezone => "America/Los_Angeles"
  } # date



  # split apart log source to extract service name
  # (second-to-last path component, lowercased)
  ruby {
    code => "
      fpath = event['logsource'].split('/')
      event['serviceName'] = fpath[fpath.length-2].downcase
    "
  }


  # append msg field to disk
  ruby {
    code => "
      File.open('/tmp/mydebug.log','a') { |f| f.puts event['msg'] }
    "
  }


  # call out to REST based echo service
  # (blocking HTTP per event -- fine for a demo, slow for real traffic)
  ruby {
    init => "
      require 'net/http'
      require 'json'
    "
    code => "
      firstWord = event['msg'].split(' ')[0]
      uri = URI.parse('http://echo.jsontest.com/word/' + firstWord)
      response = Net::HTTP.get_response(uri)
      if response.code == '200'
        result = JSON.parse(response.body)
        returnWord = result['word']
        event['echo'] = firstWord + ' echoed back as: ' + returnWord
      else
        event['echo'] = 'ERROR reaching web service'
      end
    "
  }


} # filter

output {
  stdout { codec => rubydebug }
}
76 |
77 |
--------------------------------------------------------------------------------
/ansible/playbook-211-213-changed.yml:
--------------------------------------------------------------------------------
#
# Shows change in Ansible 2.13 in evaluation of loop expression
#
# Show it working in Ansible 2.11 (2.12 works also)
#mkdir ansible-test && cd $_
#python3 -m venv .
#. bin/activate
#pip install ansible-core==2.11.0
#ansible --version | head -n1
#ansible-playbook playbook-211-213-changed.yml
#
# Move to Ansible 2.13 and it breaks
#pip install ansible-core==2.13.9
#ansible --version | head -n1
#ansible-playbook playbook-211-213-changed.yml
#
#deactivate; cd ..
#rm -fr ansible-test
---

- hosts: localhost
  connection: local
  become: no
  gather_facts: no

  vars:
    myarray:
      - one
      - two
    myatomicvalue: three

  tasks:

    - set_fact:
        # if you using this type of syntax to add to an array, it will fail at Ansible 2.13
        group_list_211: "['{{myatomicvalue}}'] + {{myarray}}"
        # this syntax works for both 2.11 and 2.13
        group_list_213: "{{ [myatomicvalue] + myarray|default([]) }}"
    - debug:
        msg: |
          group_list_211 = {{group_list_211}}
          group_list_213 = {{group_list_213}}
    # waits for Enter at the console before continuing
    - pause:

    - name: loop works with ansible 2.11-2.13 using fact and loop
      debug:
        msg: "{{item}}"
      loop: "{{group_list_213}}"

    - name: loop works with ansible 2.11-2.13 using list evaluated in loop
      debug:
        msg: "{{item}}"
      loop: "{{ [myatomicvalue] + myarray|default([]) }}"


    # the two tasks below are EXPECTED to fail on ansible >= 2.13, because
    # the loop value evaluates to a string rather than a list
    - name: loop works only with ansible 2.11 using fact and loop
      debug:
        msg: "{{item}}"
      loop: "{{group_list_211}}"

    - name: loop works only with ansible 2.11 using list evaulated in loop
      debug:
        msg: "{{item}}"
      loop: "['{{myatomicvalue}}'] + {{myarray}}"
65 |
--------------------------------------------------------------------------------
/putty/createPuttySessions.ps1:
--------------------------------------------------------------------------------
1 | # Script to prepopulate the saved sessions for PuTTy on Windows
2 | #
3 | # 1. reads in text file where each line is: ,
4 | # 2. uses template registry file to create registry file specific to target host
5 | # 3. imports .reg
6 | #
# input file of hosts (one "<sessionName>,<fqdn>" per line) and the
# template .reg file used to stamp out each session
param($file="puttyhosts.txt",$templateFile="template.reg")

# ensure list of hosts exists
if( ! (Test-Path $file -PathType Leaf) ) {
    write-host "ERROR: did not find file $file"
    exit(1)
}
# ensure template registry file exists
if( ! (Test-Path $templateFile -PathType Leaf) ) {
    write-host "ERROR: did not find template file $templateFile"
    exit(1)
}
19 |
# take template file, merge with properties map
# (placeholders appear in the template as <<key>>)
function evalTemplate($file,$map) {
    $s = Get-Content $file
    foreach($key in $map.keys) {
        # fix: -replace treats its pattern as a REGEX and its replacement as a
        # substitution pattern, so regex metacharacters in a key, or '$&'/'$1'
        # style sequences in a value, were corrupting the output.  Escape both.
        $pattern = [regex]::Escape("<<${key}>>")
        $value = "$($map[$key])".Replace('$','$$')
        $s = $s -replace $pattern, $value
    }
    return $s
}
28 |
# PuTTY saved-session names cannot contain spaces; encode them as %20
function fixupName($name) {
    $encoded = $name -replace ' ','%20'
    return $encoded
}
33 |
#################### MAIN ############################

# for each "<sessionName>,<fqdn>" line: render a .reg file from the
# template and import it; abort the whole run on the first import failure
foreach($line in Get-Content $file) {
    # get putty session name and FQDN from line
    $name,$fqdn = $line.split(',',2)
    write-output "CREATING PuTTy session name: ${name} FQDN: ${fqdn}"

    # merge with template registry entry to produce one specific for this host
    $props = @{
        sessionName=fixupName $name
        actualHostName="$fqdn"
    }
    $output = evalTemplate $templateFile $props
    Set-Content "${name}.reg" $output

    # use reg.exe to import file into registry
    $argsArray = "IMPORT","${name}.reg"
    & reg.exe $argsArray
    write-host "Registry import on ${name}.reg finished with exit code $LASTEXITCODE"
    if($LASTEXITCODE -ne 0) {
        write-host "ERROR doing registry import on ${name}.reg"
        exit(2)
    }
}
58 |
59 |
60 |
61 |
62 |
63 |
--------------------------------------------------------------------------------
/windows/make-ip-static.ps1:
--------------------------------------------------------------------------------
#
# Sets IP address to static
#
# Reads the (possibly DHCP-assigned) IPv4 config of the 'Ethernet' adapter
# and, if it came from DHCP, re-applies the same address/gateway/DNS as static.
#
# https://adamtheautomator.com/powershell-get-ip-address/
# https://docs.microsoft.com/en-us/powershell/module/nettcpip/set-netipaddress?view=windowsserver2022-ps
# https://techexpert.tips/powershell/powershell-configure-static-ip-address/


# retrieve network information from what may be dynamically assigned IP
# if not static IP, then PrefixOrigin=Dhcp SuffixOrigin=Dhcp
# NOTE(review): assumes a single IPv4 address on 'Ethernet'; with multiple
# addresses these properties come back as arrays -- confirm on target hosts
$interfaceIndex=(Get-NetIPAddress -AddressFamily IPv4 -InterfaceAlias Ethernet).InterfaceIndex
$prefixLen=(Get-NetIPAddress -AddressFamily IPv4 -InterfaceAlias Ethernet).PrefixLength
$interfaceIndexAlternate=(get-netadapter).InterfaceIndex
$ipAddress=(Get-NetIPAddress -AddressFamily IPv4 -InterfaceAlias Ethernet).IPAddress
write-host "The interface index $interfaceIndex/$interfaceIndexAlternate has IP address of $ipAddress"

$gateway=(Get-NetIPConfiguration | select-object -first 1).IPv4DefaultGateway.NextHop
write-host "default gateway is $gateway"

$dnsServer=(Get-DnsClientServerAddress -InterfaceIndex $interfaceIndex).ServerAddresses
write-host "DNS server is $dnsServer"


$prefixOrigin=(Get-NetIPAddress -AddressFamily IPv4 -InterfaceAlias Ethernet).PrefixOrigin
if ($prefixOrigin -eq "dhcp") {

    # make IP address static
    # must remove first, then add new
    # (connectivity drops briefly between Remove and New)
    Remove-NetIPAddress -InterfaceIndex $interfaceIndex -Confirm:$false
    New-NetIPAddress -IPAddress $ipAddress -DefaultGateway $gateway -PrefixLength $prefixLen -InterfaceIndex $interfaceIndex

    Set-DNSClientServerAddress -InterfaceIndex $interfaceIndex -ServerAddress $dnsServer

    write-host "===NEW INFO======================================"
    Get-NetIPAddress -AddressFamily IPv4 -InterfaceAlias Ethernet

}else {
    write-host "SKIP prefix origin was not dhcp, it was $prefixOrigin so not going to make any changes"
}
40 |
41 |
42 |
--------------------------------------------------------------------------------
/gcloud/create_serviceaccount.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Uses gcloud to create service account, download key, assign IAM roles
#
# blog: https://fabianlee.org/2021/03/17/gcp-creating-gcp-service-account-with-iam-roles-using-gcloud/
#
# fix: quote all variable expansions so values are never word-split or
# glob-expanded, and so empty results fail loudly instead of silently

# require the GCP project name as the single argument
if [ $# -eq 0 ]; then
  echo "usage: gcpProjectName"
  exit 1
fi
project="$1"

newServiceAccount="mytest1-$project"

# resolve project name to project id (can be different)
projectId=$(gcloud projects list --filter="name=$project" --format='value(project_id)')
echo "project/projectId=$project/$projectId"
gcloud config set project "$projectId"

# check if service account already exists
alreadyExists=$(gcloud iam service-accounts list --filter="name ~ ${newServiceAccount}@" 2>/dev/null | wc -l)
[ "$alreadyExists" -eq 0 ] || { echo "ABORTING the service account $newServiceAccount already exists!"; exit 0; }

# create service account
gcloud iam service-accounts create "$newServiceAccount" --display-name "test account" --project="$projectId"
echo "sleeping 30 seconds to allow consistency..."
sleep 30

# get full email identifier for service account
accountEmail=$(gcloud iam service-accounts list --project="$projectId" --filter="$newServiceAccount" --format="value(email)")

# download key
gcloud iam service-accounts keys create "$newServiceAccount-$projectId.json" --iam-account "$accountEmail"

# assign IAM roles
for role in roles/storage.objectViewer roles/storage.objectCreator; do
  gcloud projects add-iam-policy-binding "$projectId" --member="serviceAccount:$accountEmail" --role="$role" > /dev/null
done

# show all roles for this service account
echo "**********************************************"
echo "Final roles for $newServiceAccount:"
gcloud projects get-iam-policy "$projectId" --flatten="bindings[].members" --filter="bindings.members=serviceAccount:$accountEmail" --format="value(bindings.role)"
46 |
--------------------------------------------------------------------------------