├── LICENSE
├── README.md
├── ansible
│   ├── .bashrc
│   ├── README.md
│   ├── ansible.cfg
│   ├── compose
│   │   └── ansible.yaml
│   ├── debian
│   │   ├── bookworm_install_steps.md
│   │   └── trixie_install_steps.md
│   ├── inventory.md
│   ├── playbooks
│   │   ├── ansible-alias.yaml
│   │   ├── ansible-essentials.yaml
│   │   ├── ansible-function.yaml
│   │   ├── ansible-kali.yaml
│   │   ├── ansible-sshd.yaml
│   │   ├── ansible-ssshd.yaml
│   │   ├── cache_logrotate.yaml
│   │   └── update-hosts.yaml
│   └── ssh.md
├── authentik
│   └── README.md
├── bind9
│   └── README.md
├── demo
│   ├── .keep
│   └── docker-compose.yaml
├── docker
│   ├── README.md
│   ├── compose
│   │   ├── .bashrc
│   │   ├── README.md
│   │   ├── Radarr.yaml
│   │   ├── Sonarr.yaml
│   │   ├── ansible.yaml
│   │   ├── cloudflared.yaml
│   │   ├── gitlab.yaml
│   │   ├── homepage.yaml
│   │   ├── install.md
│   │   ├── jackett.yaml
│   │   ├── kali-linux.yaml
│   │   ├── metasploit-debian.yaml
│   │   ├── metasploittable2.yaml
│   │   ├── obsidian.yaml
│   │   ├── portainer.yaml
│   │   ├── template.yaml
│   │   ├── tracker.yaml
│   │   └── twingate.yaml
│   ├── dockerfile
│   │   ├── cft.md
│   │   └── metasploit.md
│   ├── lxc.conf
│   ├── portainer
│   │   ├── README.md
│   │   └── docker-compose.yaml
│   ├── source_build.md
│   └── swarm
│       └── README.md
├── elastic-stack
│   └── README.md
├── evebox
│   └── README.md
├── gitlab
│   ├── README.md
│   ├── abstracted_decomposed_classification_pattern_gitlab_CICD_process_chain.md
│   ├── docker_compose.yaml
│   ├── gitlab.rb
│   └── sign-up_restrictions.md
├── grafana
│   └── README.md
├── homebrew
│   ├── README.md
│   └── source_build.md
├── homepage
│   ├── .env.md
│   ├── README.md
│   ├── bookmarks.yaml
│   ├── docker_compose.yaml
│   ├── other_projects.md
│   ├── services.yaml
│   ├── settings.yaml
│   └── widgets.yaml
├── htop
│   ├── README.md
│   ├── Screenshot_20240730_233312.png
│   └── source_build.md
├── hypervisor
│   ├── node-cluster
│   │   ├── README.md
│   │   ├── fio_script_testing.md
│   │   └── storage_hw_conf.md
│   └── proxmox
│       └── README.md
├── kestra
│   └── README.md
├── lm-sensor
│   ├── README.md
│   ├── format_sensors.sh
│   └── source_build.md
├── netbird
│   └── README.md
├── pi-hole
│   ├── Pi-Hole.png
│   ├── README.md
│   ├── docker
│   │   └── README.md
│   ├── install_steps.md
│   └── unbound
│       ├── README.md
│       ├── install_steps.md
│       └── unbound_config.md
├── prometheus
│   └── README.md
├── samba
│   ├── DFS-R
│   │   ├── README.md
│   │   ├── crone_rsync.md
│   │   ├── groups_shares.md
│   │   └── smb.conf
│   ├── README.md
│   ├── dhcpd.md
│   ├── domain_controller_source_build.md
│   ├── samba.service
│   ├── samba_enable.sh
│   └── selinux_iptables.md
├── semaphore
│   └── README.md
├── snort
│   └── README.md
├── sql
│   ├── README.md
│   ├── lite
│   │   └── README.md
│   ├── ms
│   │   └── README.md
│   └── postgres
│       └── README.md
├── suricata
│   └── README.md
├── terminal-editor
│   ├── README.md
│   ├── bashrc
│   │   └── .bashrc
│   ├── key_chain
│   │   ├── README.md
│   │   ├── clean_up.md
│   │   ├── remove_build.sh
│   │   ├── source_build.md
│   │   └── update_build.sh
│   ├── oh_my_posh
│   │   ├── README.md
│   │   ├── bashrc.md
│   │   ├── lin_source_build.sh
│   │   ├── theme_list.md
│   │   └── win_source_build.ps
│   ├── tmux_terminal
│   │   ├── .conf
│   │   ├── README.md
│   │   ├── cleanup_tmux.sh
│   │   ├── remove_build.md
│   │   └── source_build.md
│   ├── warp_terminal
│   │   ├── .config
│   │   │   └── starship.toml
│   │   ├── Night_City_Terminal.png
│   │   ├── dark_city_bg.jpg
│   │   └── night_city.yaml
│   ├── wave_terminal
│   │   └── README.md
│   └── zsh
│       └── README.md
├── traefik
│   ├── README.md
│   ├── dns_docker-compose.yaml
│   ├── dns_traefik.yaml
│   ├── http_docker_compose.yaml
│   ├── http_traefik.yaml
│   ├── tracker_labels_.yaml
│   └── traefik_labels.yaml
└── wazuh
    ├── README.md
    ├── docker-compose.yml
    └── generate-indexer-certs.yml
/README.md:
--------------------------------------------------------------------------------
1 |
2 | 🄷🄾🄼🄴-🄻🄰🄱
3 | 🄲🄾🄻🄻🄴🄲🅃🄸🄾🄽 🄾🄵 🅂🄾🅄🅁🄲🄴 🄲🄾🄼🄿🄸🄻🄴🄳 🄰🄿🄿🅂, 🄿🅁🄾🄹🄴🄲🅃🅂, 🄻🄰🄱🅂, & 🄳🄾🄲🅄🄼🄴🄽🅃🄰🅃🄸🄾🄽
4 |
5 |
--------------------------------------------------------------------------------
/ansible/.bashrc:
--------------------------------------------------------------------------------
1 | # ansible_collection
2 | alias venv='source /ansible/venv/bin/activate'
3 | alias versiona='ansible --version'
4 | alias pinga='ansible all -m ping'
5 | alias playa='ansible-playbook'
6 | alias listhostsa='ansible all --list-hosts'
7 | alias testsyntaxa='ansible-playbook --syntax-check'
8 | alias checkinva='ansible-inventory --list -y'
9 | alias cmdalla='ansible all -a'
10 | alias playbookv='ansible-playbook -v'
11 | alias clearssh='rm -rf /home/echo/.ssh/known_hosts'
12 | alias sshlist='ssh-add -l'
13 | alias restarta='sudo systemctl restart ansible'
14 |
15 |
16 | # ansible_function_collection
17 | function ansible_update() {
18 | ansible-playbook -i /ansible/venv/inventory /ansible/venv/playbooks/update-hosts.yaml
19 | }
20 |
21 | function ansible_kali() {
22 | ansible-playbook -i /ansible/venv/inventory /ansible/venv/playbooks/ansible-kali.yaml
23 | }
24 |
25 | function ansible_sshd() {
26 | ansible-playbook -i /ansible/venv/inventory /ansible/venv/playbooks/ansible-sshd.yaml
27 | }
28 |
--------------------------------------------------------------------------------
/ansible/README.md:
--------------------------------------------------------------------------------
1 | ## Ansible Wiki:
2 |
3 | **Ansible is an open-source automation tool that simplifies the management of IT infrastructure by automating tasks like configuration management, application deployment, and orchestration**. It’s agentless, meaning it operates over SSH without needing any special software installed on the managed nodes.
4 |
5 | **Ansible is written in Python and uses simple YAML-based playbooks to define tasks, making it accessible for both seasoned professionals and those new to automation**. Licensed under the GNU General Public License (`GPLv3`), Ansible has a strong community and is widely adopted in the DevOps world.
6 |
7 | **Ansible stands out due to its simplicity and powerful features like idempotency, which ensures that tasks are only executed when necessary, avoiding unnecessary changes**. Its modular architecture supports a wide range of integrations and allows users to create custom modules in various languages. Ansible’s extensive library of pre-built modules makes it highly adaptable for different environments, whether on-premises, in the cloud, or in hybrid setups.
8 |
9 | ## Security & Compliance:
10 |
11 | - ***Agentless Architecture*** - Since Ansible is agentless, it reduces the attack surface, as there’s no need to install additional software on managed nodes.
12 | - ***Role-Based Access Control (`RBAC`)*** - Ansible Tower, the enterprise version, provides RBAC to ensure that only authorized users can execute playbooks and make changes to the infrastructure.
13 | - ***Playbook Auditing*** - Every task executed by Ansible is logged, which is crucial for auditing and ensuring compliance with various security standards.
14 |
15 | ## Important Note:
16 |
17 | - (`Do NOT`) overlook the importance of testing playbooks in a controlled environment before deploying them in production. Unverified changes can lead to unintended disruptions. Always ensure that sensitive data, like passwords and API keys, are securely stored using Ansible Vault to avoid accidental exposure.
18 |
19 | ## Key Features:
20 |
21 | - ***Idempotency*** - Ansible ensures that tasks only apply changes when necessary, making it safe and predictable to run playbooks multiple times.
22 | - ***Playbooks*** - Use simple YAML files to define tasks, making automation scripts easy to write, read, and share (see the short sketch after this list).
23 | - ***Extensibility*** - Ansible's modular architecture allows for the creation of custom modules and plugins, supporting a wide range of environments.
24 | - ***Integration*** - Seamlessly integrates with popular cloud providers, CI/CD tools, and other DevOps platforms.
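
A minimal playbook sketch tying the first two points together (the `nginx` package is just an example): the first run may report `changed`, and every run after that reports `ok` because the package is already present.

```yaml
---
- name: Example of an idempotent task
  hosts: all
  become: true
  tasks:
    - name: Ensure nginx is installed   # re-runs report "ok", not "changed"
      ansible.builtin.package:
        name: nginx
        state: present
```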
25 |
26 | ## Best Practices:
27 |
28 | - ***Test in Staging*** - Always test playbooks in a staging environment before rolling them out to production.
29 | - ***Use Ansible Vault*** - Securely store sensitive information like passwords and API keys using Ansible Vault (see the example after this list).
30 | - ***Modular Playbooks*** - Break down complex tasks into smaller, reusable playbooks to enhance readability and maintainability.
31 | - ***Continuous Learning*** - Stay updated with the latest Ansible releases and best practices to ensure your automation processes are efficient and secure.
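
A rough Ansible Vault workflow, assuming secrets live in a `group_vars/all/vault.yaml` file (the path is only an example):

```sh
1) ansible-vault create group_vars/all/vault.yaml   **define secrets here**
2) ansible-vault edit group_vars/all/vault.yaml     **edit them later as needed**
3) ansible-playbook playbooks/update-hosts.yaml --ask-vault-pass   **supply the vault password at run time**
```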
32 |
33 |
34 | ##
35 | > One cool thing about Ansible is its agentless architecture. Unlike many other automation tools that require agents to be installed on each managed node, Ansible operates over SSH, requiring no special software on the remote machines. This greatly simplifies the setup and reduces the overhead, making Ansible a lightweight yet powerful solution for automating tasks across diverse environments. This feature is particularly valuable in environments with strict security requirements, where minimizing installed software is crucial.
36 |
--------------------------------------------------------------------------------
/ansible/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | error_on_undefined_vars = True
3 | retry_files_enabled = False
4 | host_key_checking = False
5 | log_path = ansible.log
6 | stdout_callback = yaml
7 | remote_user = ansible
8 | inventory = inventory
9 | roles_path = roles
10 | become_method = sudo
11 | become_user = root
12 | become = True
13 | forks = 10
14 |
15 | [ssh_connection]
16 | ssh_args = -o ControlMaster=auto -o ControlPersist=60s
17 | pipelining = True
18 | timeout = 10
19 |
--------------------------------------------------------------------------------
/ansible/compose/ansible.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | ansible:
4 | image: python:3.12-slim
5 | container_name: ansible
6 | volumes:
7 | - /compose/playbooks:/ansible/playbooks
8 | environment:
9 | - PUID=1000
10 | - PGID=1000
11 | - TZ=America/Calgary
12 | command: /bin/bash -c "pip install ansible && tail -f /dev/null"
13 | restart: unless-stopped
14 |
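# Rough usage sketch (assumes this file is saved as ansible.yaml and an inventory
# file is dropped into /compose/playbooks on the host -- both are assumptions):
#   docker compose -f ansible.yaml up -d
#   docker exec -it ansible ansible --version
#   docker exec -it ansible ansible-playbook -i /ansible/playbooks/inventory /ansible/playbooks/update-hosts.yaml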
--------------------------------------------------------------------------------
/ansible/debian/bookworm_install_steps.md:
--------------------------------------------------------------------------------
1 | ***Path + Permissions + Bashrc***
2 |
3 | ```sh
4 | 1) mkdir /ansible
5 | 2) sudo chown echo:echo /ansible **make sure the user is in the sudoers group**
6 | 3) export ANSIBLE_HOME=/ansible **set ANSIBLE_HOME environment variable**
7 | 4) chmod 740 /ansible **lock to user or group**
8 | 5) export PATH=$PATH:$ANSIBLE_HOME/bin
9 | ```
10 |
11 | ***Dependencies + Codename + PPA***
12 |
13 | ```sh
14 | 1) sudo apt install wget gpg
15 | 2) UBUNTU_CODENAME=jammy **on Trixie 13 this will have to do**
16 | 3) wget -O- "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0x6125E2A8C77F2818F7BD15B93C4A3FD7BB9C367" | sudo gpg --dearmor -o /usr/share/keyrings/ansible-archive-keyring.gpg **PPA Key**
17 | 4) echo "deb [signed-by=/usr/share/keyrings/ansible-archive-keyring.gpg] http://ppa.launchpad.net/ansible/ansible/ubuntu $UBUNTU_CODENAME main" | sudo tee /etc/apt/sources.list.d/ansible.list ** PPA Resource**
18 | 5) sudo apt update && sudo apt install ansible
19 | 6) ansible --version **[core 2.17.3]**
20 | ```
21 |
--------------------------------------------------------------------------------
/ansible/debian/trixie_install_steps.md:
--------------------------------------------------------------------------------
1 | ***Package Isolation + Python Virtual Environment + Source List***
2 | ```sh
3 | 1) sudo apt install python3 python3-pip -y **you can compile your own or use the packaged version**
4 | 2) sudo apt install python3-venv **Virtual Environment**
5 | 3) sudo mkdir /ansible ** or in /opt if you prefer**
6 | 4) chmod 740 /ansible **lock to user or group**
7 | 5) sudo chown -R echo:echo /ansible
8 | 6) python3 -m venv /ansible/venv **Create the env**
9 | 7) sudo chown -R echo:echo /ansible/venv **to install packages**
10 | 8) export PATH=$PATH:/ansible/bin
11 | 9) source /ansible/venv/bin/activate **Activate the env**
12 | 10) pip install ansible
13 | 11) pip install --upgrade pip setuptools **if not updated**
14 | 12) ansible --version **[core 2.17.3]**
15 | 13) python --version **Python 3.12.6**
16 | ```
17 |
18 | ***Updating***
19 | ```sh
20 | 1) source /ansible/venv/bin/activate **needed each time**
21 | 2) pip install --upgrade ansible
22 | 3) deactivate
23 | ```
24 |
--------------------------------------------------------------------------------
/ansible/inventory.md:
--------------------------------------------------------------------------------
1 | ***Host List***
2 |
3 | ```sh
4 | [master]
5 | HS01.alprojects.tech ansible_python_interpreter=/usr/bin/python3
6 |
7 | [node]
8 | HS02.alprojects.tech ansible_python_interpreter=/usr/bin/python3
9 |
10 | [master:vars]
11 | ansible_ssh_user=root
12 | ansible_ssh_private_key_file=~/.ssh/id_ed25519
13 |
14 | [node:vars]
15 | ansible_ssh_user=root
16 | ansible_ssh_private_key_file=~/.ssh/id_ed25519
17 | ```
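
***Quick check*** (assuming the list above is saved as `inventory` next to `ansible.cfg`)

```sh
1) ansible -i inventory all -m ping          **both hosts should return pong**
2) ansible-inventory -i inventory --list -y  **confirm groups & vars parsed correctly**
```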
18 |
--------------------------------------------------------------------------------
/ansible/playbooks/ansible-alias.yaml:
--------------------------------------------------------------------------------
1 | # I've been manually keeping a list and copy-pasting it into my .bashrc; it's about time this was automated
2 |
3 | ---
4 | - name: custom aliases
5 | hosts: all
6 | become: true # Ensure we are using root for this operation
7 | tasks:
8 | - name: Add alias block to .bashrc
9 | ansible.builtin.blockinfile:
10 | path: "{{ ansible_env.HOME }}/.bashrc"
11 | block: |
12 | # Custom Aliases
13 |
14 | # package_collection
15 | alias update='sudo apt-get update && sudo apt-get upgrade -y'
16 | alias clean='sudo apt autoremove && sudo apt autoclean -y'
17 |
18 | # docker_collection
19 | alias dcu='docker compose up -d'
20 | alias dcb='docker compose build'
21 | alias dcd='docker compose down'
22 | alias dcl='docker compose logs'
23 | alias dps='docker compose ps'
24 | alias dce='docker compose exec'
25 | alias dnet='docker network ls'
26 | alias dclf='docker compose logs -f'
27 | alias dneti='docker network inspect'
28 | alias swarmkill='docker kill $(docker ps -q)'
29 |
30 | # ansible_collection
31 | alias Ansible='source /ansible/venv/bin/activate'
32 | alias versiona='ansible --version'
33 | alias pinga='ansible all -m ping'
34 | alias playa='ansible-playbook'
35 | alias listhostsa='ansible all --list-hosts'
36 | alias testsyntaxa='ansible-playbook --syntax-check'
37 | alias checkinva='ansible-inventory --list -y'
38 | alias cmdalla='ansible all -a'
39 | alias playbookv='ansible-playbook -v'
40 | alias clearssh='rm -rf /home/echo/.ssh/known_hosts'
41 | alias sshlist='ssh-add -l'
42 | alias restarta='sudo systemctl restart ansible'
43 |
44 | # find_listing
45 | alias find='find . -name'
46 | alias findit='find . -name 2>/dev/null'
47 |
48 | # directory_change
49 | alias home='cd ~'
50 | alias ..='cd ..'
51 | alias ...='cd ../..'
52 | alias ....='cd ../../../../../../../../../../../../../..'
53 | alias compose='cd ../../../compose'
54 |
55 | # directory_list
56 | alias ll='ls -ali'
57 | alias la='ls -A'
58 | alias l='ls -CF'
59 |
60 | # misc
61 | alias rm='rm -i'
62 | alias rmd='sudo rm -rf'
63 | alias cls='clear'
64 |
65 | alias df='df -h'
66 | alias cms='free -m -l -t'
67 | alias dms='sudo ./scripts/clean_memory.sh'
68 |
69 | alias ping='ping -c 5'
70 | alias netstat='netstat -tuln'
71 |
72 | - name: Reload bash to apply aliases
73 | ansible.builtin.shell: "source {{ ansible_env.HOME }}/.bashrc"
74 | args:
75 | executable: /bin/bash
76 |
--------------------------------------------------------------------------------
/ansible/playbooks/ansible-essentials.yaml:
--------------------------------------------------------------------------------
1 | # Automation for installing my basics
2 | ---
3 | - name: Install essential tools and utilities
4 | hosts: all
5 | become: true
6 | tasks:
7 | - name: Install git, curl, net-tools, htop, vim, wget, zip, and unzip
8 | ansible.builtin.package:
9 | name:
10 | - git
11 | - curl
12 | - net-tools
13 | - htop
14 | - vim
15 | - wget
16 | - zip
17 | - unzip
18 | state: present
19 |
--------------------------------------------------------------------------------
/ansible/playbooks/ansible-function.yaml:
--------------------------------------------------------------------------------
1 | # Collection of handy functions to use
2 |
3 | ---
4 | - name: Custom functions on all hosts
5 | hosts: all
6 | become: true
7 | tasks:
8 | - name: Add function block to .bashrc
9 | ansible.builtin.blockinfile:
10 | path: "{{ ansible_env.HOME }}/.bashrc"
11 | block: |
12 | # Custom Functions
13 |
14 | # IT function collection:
15 | huntit() {
16 | sudo find / -name "$1*" 2>/dev/null
17 | }
18 |
19 | killit() {
20 | pkill -f "$1"
21 | }
22 |
23 | backit() {
24 | src="$1"
25 | dest="$2"
26 | filename=$(basename "$src")
27 |
28 | if [ -z "$src" ] || [ -z "$dest" ]; then
29 | echo "Usage: backit /path/to/source /path/to/backup/destination"
30 | return 1
31 | fi
32 |
33 | tar -czvf "$dest/$filename-$(date +%Y%m%d%H%M%S).tar.gz" "$src"
34 | }
35 |
36 | sendit() {
37 | src="$1"
38 | dest="$2"
39 |
40 | if [ -z "$src" ] || [ -z "$dest" ]; then
41 | echo "Usage: sendit /path/to/source user@remote:/path/to/destination"
42 | return 1
43 | fi
44 |
45 | rsync -avh --progress "$src" "$dest"
46 | }
47 |
48 | # Compose function collection
49 | function compose_ip() {
50 | container=$1
51 | docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $container
52 | }
53 |
54 | function compose_shell() {
55 | container=$1
56 | shell_command=${2:-/bin/sh} # Default to /bin/sh if no second argument is provided
57 | docker exec -it $(docker ps -qf "name=$container") $shell_command
58 | }
59 |
60 | function compose_inspect() {
61 | container=$1
62 | docker inspect $(docker ps -qf "name=$container")
63 | }
64 |
65 | # Ansible function collection
66 | function ansible_update() {
67 | ansible-playbook -i /ansible/venv/inventory /ansible/venv/playbooks/update-hosts.yaml
68 | }
69 |
70 | function ansible_kali() {
71 | ansible-playbook -i /ansible/venv/inventory /ansible/venv/playbooks/ansible-kali.yaml
72 | }
73 |
74 | function ansible_sshd() {
75 | ansible-playbook -i /ansible/venv/inventory /ansible/venv/playbooks/ansible-sshd.yaml
76 | }
77 |
78 | - name: Reload bash to apply functions
79 | ansible.builtin.shell: "source {{ ansible_env.HOME }}/.bashrc"
80 | args:
81 | executable: /bin/bash
82 |
--------------------------------------------------------------------------------
/ansible/playbooks/ansible-kali.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Deploy via Docker Compose
3 | hosts: localhost
4 | tasks:
5 | - name: Ensure the /compose/kali directory exists
6 | file:
7 | path: /compose/kali
8 | state: directory
9 | mode: '0755'
10 |
11 | - name: Create Compose file for Kali
12 | copy:
13 | dest: /compose/kali/docker-compose.yml
14 | content: |
15 | ---
16 | services:
17 | kali-linux:
18 | image: lscr.io/linuxserver/kali-linux:latest
19 | container_name: kali-linux
20 | ports:
21 | - 3004:3004
22 | - 3002:3001
23 | volumes:
24 | - /compose/kali/data:/config
25 | - /tmp/.X11-unix:/tmp/.X11-unix
26 | - /var/run/docker.sock:/var/run/docker.sock:ro
27 | devices:
28 | - /dev/dri:/dev/dri
29 | shm_size: "1gb"
30 | security_opt:
31 | - seccomp=unconfined
32 | - no-new-privileges=false
33 | environment:
34 | - PUID=1000
35 | - PGID=1000
36 | - TZ=America/Calgary
37 | #- SUBFOLDER=/kali
38 | - TITLE="Gizmo Linux"
39 | healthcheck:
40 | test: ["CMD-SHELL", "curl --fail http://localhost:3001 || exit 0"]
41 | interval: 40s
42 | timeout: 10s
43 | retries: 3
44 | restart: unless-stopped
45 |
46 | - name: Run docker-compose up
47 | shell: |
48 | docker-compose -f /compose/kali/docker-compose.yml up -d
49 | args:
50 | chdir: /compose/kali
51 |
52 |
--------------------------------------------------------------------------------
/ansible/playbooks/ansible-sshd.yaml:
--------------------------------------------------------------------------------
1 | # The best way I do this is to create multiple auto_sshd playbooks for different groups, and then write a function in .bashrc to act as a basic hotkey for the VERY long Ansible commands
2 |
3 | ---
4 | - name: Configure SSHD on all hosts
5 | hosts: all
6 | become: true
7 | tasks:
8 | - name: Ensure SSH is listening on the correct port
9 | ansible.builtin.lineinfile:
10 | path: /etc/ssh/sshd_config
11 | regexp: '^Port'
12 | line: 'Port {{ ansible_port | default(PORT) }}'
13 | state: present
14 |
15 | - name: Allow root login
16 | ansible.builtin.lineinfile:
17 | path: /etc/ssh/sshd_config
18 | regexp: '^PermitRootLogin'
19 | line: 'PermitRootLogin yes'
20 | state: present
21 |
22 | - name: Enable public key authentication
23 | ansible.builtin.lineinfile:
24 | path: /etc/ssh/sshd_config
25 | regexp: '^PubkeyAuthentication'
26 | line: 'PubkeyAuthentication yes'
27 | state: present
28 |
29 | - name: Disable password authentication
30 | ansible.builtin.lineinfile:
31 | path: /etc/ssh/sshd_config
32 | regexp: '^PasswordAuthentication'
33 | line: 'PasswordAuthentication no'
34 | state: present
35 |
36 | - name: Disable empty passwords
37 | ansible.builtin.lineinfile:
38 | path: /etc/ssh/sshd_config
39 | regexp: '^PermitEmptyPasswords'
40 | line: 'PermitEmptyPasswords no'
41 | state: present
42 |
43 | - name: Set MaxAuthTries to 1
44 | ansible.builtin.lineinfile:
45 | path: /etc/ssh/sshd_config
46 | regexp: '^MaxAuthTries'
47 | line: 'MaxAuthTries 1'
48 | state: present
49 |
50 | - name: Set MaxSessions to 1
51 | ansible.builtin.lineinfile:
52 | path: /etc/ssh/sshd_config
53 | regexp: '^MaxSessions'
54 | line: 'MaxSessions 1'
55 | state: present
56 |
57 | - name: Set LoginGraceTime to 1m
58 | ansible.builtin.lineinfile:
59 | path: /etc/ssh/sshd_config
60 | regexp: '^LoginGraceTime'
61 | line: 'LoginGraceTime 1m'
62 | state: present
63 |
64 | - name: Set AddressFamily to inet (IPv4 only)
65 | ansible.builtin.lineinfile:
66 | path: /etc/ssh/sshd_config
67 | regexp: '^AddressFamily'
68 | line: 'AddressFamily inet'
69 | state: present
70 |
71 | - name: Set UsePAM to yes
72 | ansible.builtin.lineinfile:
73 | path: /etc/ssh/sshd_config
74 | regexp: '^UsePAM'
75 | line: 'UsePAM yes'
76 | state: present
77 |
78 | - name: Restart SSH service to apply changes
79 | ansible.builtin.service:
80 | name: sshd
81 | state: restarted
82 |
--------------------------------------------------------------------------------
/ansible/playbooks/cache_logrotate.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Logrotate configuration for memory cache cleanup
3 | hosts: all
4 | become: yes
5 |
6 | tasks:
7 | - name: Ensure logrotate is installed
8 | apt:
9 | name: logrotate
10 | state: present
11 |
12 | - name: Ensure clear_mem_cache.log exists
13 | file:
14 | path: /var/log/clear_mem_cache.log
15 | state: touch
16 | owner: root
17 | group: root
18 | mode: '0640'
19 |
20 | - name: Ensure /scripts directory exists
21 | file:
22 | path: /scripts
23 | state: directory
24 | owner: root
25 | group: root
26 | mode: '0740'
27 |
28 | - name: Create logrotate configuration file for clear_mem_cache
29 | copy:
30 | dest: /etc/logrotate.d/clear_mem_cache
31 | content: |
32 | /var/log/clear_mem_cache.log {
33 | weekly
34 | rotate 4
35 | compress
36 | delaycompress
37 | missingok
38 | notifempty
39 | create 640 root root
40 | maxsize 50M
41 | }
42 | owner: root
43 | group: root
44 | mode: '0640'
45 |
46 | - name: Verify logrotate configuration is valid
47 | shell: logrotate -d /etc/logrotate.d/clear_mem_cache
48 | register: logrotate_debug_output
49 | failed_when: "'error' in logrotate_debug_output.stderr"
50 |
51 | - name: Print logrotate debug output
52 | debug:
53 | var: logrotate_debug_output.stdout
54 |
55 | - name: Set up mem-cache clearing script
56 | copy:
57 | dest: /scripts/clear_mem_cache.sh
58 | content: |
59 | #!/bin/bash
60 | LOG_FILE="/var/log/clear_mem_cache.log"
61 |
62 | # Ensure log file exists & has correct permissions
63 | if [ ! -f "$LOG_FILE" ]; then
64 | touch $LOG_FILE
65 | chmod 640 $LOG_FILE
66 | fi
67 |
68 | # Log action with mem-status before & after
69 | echo "$(date '+%Y-%m-%d %H:%M:%S') - Clearing mem cache. Free mem before: $(free -h | grep Mem | awk '{print $4}')" >> $LOG_FILE
70 | sync; echo 3 > /proc/sys/vm/drop_caches
71 | echo "$(date '+%Y-%m-%d %H:%M:%S') - Mem cache cleared successfully. Free mem after: $(free -h | grep Mem | awk '{print $4}')" >> $LOG_FILE
72 | owner: root
73 | group: root
74 | mode: '0740'
75 |
76 | - name: Create systemd service for clear_mem_cache
77 | copy:
78 | dest: /etc/systemd/system/clear_mem_cache.service
79 | content: |
80 | [Unit]
81 | Description=Run memory cache clearing script
82 | After=multi-user.target
83 |
84 | [Service]
85 | Type=oneshot
86 | ExecStart=/scripts/clear_mem_cache.sh
87 | owner: root
88 | group: root
89 | mode: '0640'
90 |
91 | - name: Create systemd timer for clear_mem_cache
92 | copy:
93 | dest: /etc/systemd/system/clear_mem_cache.timer
94 | content: |
95 | [Unit]
96 | Description=Run clear_mem_cache.service periodically
97 |
98 | [Timer]
99 | OnCalendar=weekly
100 | Persistent=true
101 |
102 | [Install]
103 | WantedBy=timers.target
104 | owner: root
105 | group: root
106 | mode: '0640'
107 |
108 | - name: Reload systemd daemon
109 | shell: systemctl daemon-reload
110 |
111 | - name: Enable & start the systemd timer
112 | systemd:
113 | name: clear_mem_cache.timer
114 | enabled: true
115 | state: started
116 |
--------------------------------------------------------------------------------
/ansible/playbooks/update-hosts.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Update Proxmox Hosts
3 | hosts: all
4 | become: true
5 | tasks:
6 | - name: Update package list
7 | apt:
8 | update_cache: yes
9 |
10 | - name: Upgrade packages and clean up
11 | apt:
12 | upgrade: dist
13 | autoremove: yes
14 | autoclean: yes
15 |
--------------------------------------------------------------------------------
/ansible/ssh.md:
--------------------------------------------------------------------------------
1 | ***ED-SSH-PUB***
2 |
3 | ```
4 | 1) ssh-keygen -t ed25519 -C "your_email@example.com"
5 | 2) chmod 700 ~/.ssh
6 | 3) chmod 600 ~/.ssh/id_ed25519
7 | 4) chmod 644 ~/.ssh/id_ed25519.pub
8 | 5) ssh-copy-id -i ~/.ssh/id_ed25519.pub root@HS01.alprojects.tech
9 | 6) ssh-copy-id -i ~/.ssh/id_ed25519.pub root@HS02.alprojects.tech
10 | ```
11 |
12 | ***Passphrase add***
13 | ```
14 | 1) eval $(ssh-agent)
15 | 2) ssh-add ~/.ssh/id_ed25519 **will be prompted to enter the passphrase**
16 | 3) ssh-add -l
17 | ```
18 |
--------------------------------------------------------------------------------
/authentik/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/bind9/README.md:
--------------------------------------------------------------------------------
1 | ## BIND9 Wiki:
2 |
3 | **BIND9 (`Berkeley Internet Name Domain`) is a widely-used, open-source Domain Name System (`DNS`) software that enables computers to translate human-friendly domain names into IP addresses**. Developed and maintained by the Internet Systems Consortium (`ISC`).
4 |
5 | **BIND9 has been the de facto standard for DNS services for decades. It supports both authoritative DNS server functions and recursive resolver capabilities, making it a versatile choice for managing DNS in various environments**. BIND9 is licensed under the ISC License, which allows for wide community contributions and customizations.
6 |
7 | BIND9 is particularly known for its robustness and flexibility, supporting advanced features like DNSSEC (`DNS Security Extensions`) to ensure the integrity and authenticity of DNS queries. It can be configured to serve as a primary or secondary server, allowing for redundancy and load balancing. Its modular architecture also allows for extensive customization to meet specific network requirements.
8 |
9 | ## Security & Compliance:
10 |
11 | - ***DNSSEC Support*** - BIND9 offers full support for DNSSEC, ensuring that DNS data is authenticated and tamper-resistant, which is crucial for maintaining trust in DNS responses.
12 | - ***Access Controls*** - BIND9 allows administrators to implement access control lists (`ACLs`) to restrict who can query or modify DNS records, enhancing security and compliance.
13 | - ***Logging & Auditing*** - BIND9 provides detailed logging options, enabling administrators to monitor and audit DNS queries and responses, which is essential for meeting compliance requirements.
14 |
15 | ## Important Note:
16 |
17 | - (`Do NOT`) forget to properly secure BIND9 configurations. Misconfigurations can expose your DNS infrastructure to cache poisoning, (`DDoS`) attacks, or unauthorized access. Always ensure that your DNS zones are signed with DNSSEC, and that access controls are strictly enforced to prevent unauthorized modifications or queries.
18 |
19 | ## Key Features:
20 |
21 | - ***Authoritative DNS Server*** - BIND9 can serve as an authoritative server, providing official answers to DNS queries for domain names it manages.
22 | - ***Recursive DNS Resolver*** - BIND9 can be configured to act as a recursive resolver, processing DNS queries and caching the results to improve performance.
23 | - ***Zone Transfers*** - BIND9 supports zone transfers (AXFR and IXFR) between primary and secondary servers, ensuring DNS data is synchronized and up-to-date across all servers.
24 | - ***Dynamic DNS (`DDNS`)*** - BIND9 allows for the automatic updating of DNS records, making it easier to manage environments with frequently changing IP addresses.
25 |
26 | ## Best Practices:
27 |
28 | - ***Secure Configuration*** - Always configure BIND9 with secure defaults, including enabling DNSSEC, implementing ACLs, and restricting zone transfers to trusted servers (see the sketch after this list).
29 | - ***Regular Updates*** - Keep BIND9 up-to-date with the latest security patches to protect against vulnerabilities.
30 | - ***Monitoring & Logging*** - Regularly monitor BIND9's logs and performance metrics to identify and address any potential issues early.
31 | - ***Backup Configurations*** - Regularly back up BIND9 configurations and DNS zone files to prevent data loss in case of hardware failures or misconfigurations.
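
A minimal `named.conf.options` sketch along those lines (the addresses are placeholders, not taken from this lab):

```
acl "trusted" { 192.168.1.0/24; 127.0.0.1; };

options {
    directory "/var/cache/bind";
    recursion yes;
    allow-query { trusted; };           // only the trusted ACL may query
    allow-transfer { 192.168.1.53; };   // restrict zone transfers to the secondary
    dnssec-validation auto;             // validate DNSSEC-signed answers
};
```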
32 |
33 | ##
34 | > One cool thing about BIND9 is its support for DNSSEC, which adds an extra layer of security by enabling DNS data to be cryptographically signed. This ensures that the DNS responses you receive are authentic and haven't been tampered with, which is critical in preventing attacks like DNS cache poisoning. DNSSEC is a game-changer in making the internet more secure by guaranteeing that users reach the legitimate websites they intend to visit.
35 |
--------------------------------------------------------------------------------
/demo/.keep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alprojects1/home-lab/cbc1381d77b0c56ccac6ddb8f21576e6af77427c/demo/.keep
--------------------------------------------------------------------------------
/demo/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | # this is just a demo
2 |
--------------------------------------------------------------------------------
/docker/README.md:
--------------------------------------------------------------------------------
1 | ## Docker Wiki:
2 | **Docker is an open-source platform designed to automate the deployment, scaling, and management of applications in containers.** Containers are lightweight, portable, and self-sufficient units that include everything needed to run a piece of software, including the code, runtime, system tools, libraries, and settings.
3 |
4 | Docker was originally developed by **Solomon Hykes as an internal project within dotCloud, released to the public in March 2013, and licensed under the Apache License Version 2.0.** It has since evolved and become the cornerstone of many modern `DevOps` and continuous integration/continuous deployment (`CI/CD`) processes.
5 |
6 | **Docker is open-source however, for commercial use of Docker Engine obtained via Docker Desktop within larger enterprises (`exceeding 250 employees OR with annual revenue surpassing $10 million USD`), a paid subscription is required.** The `CLI` uses Docker APIs to control or interact with the Docker daemon through scripting or direct commands. Many other Docker applications use the underlying `API` and `CLI`. The daemon creates and manages Docker objects, such as images, containers, networks, and volumes.
7 |
8 | ## Security & Compliance Requirements:
9 |
10 | - Ensuring the security of Docker containers involves multiple layers, including the host operating system, Docker daemon configuration, and container configurations. Some key practices are:
11 |
12 | - ***Host Security*** - The security of Docker containers is closely tied to the security of the host operating system. Use a minimal host OS, apply regular updates, and configure it according to security best practices.
13 | - ***Docker Daemon Configuration*** - Secure the Docker daemon by configuring it to use `TLS` for communication, limiting access to the Docker `API`, and running it with the least privileges necessary (see the sketch after this list).
14 | - ***Namespace Isolation*** - Docker uses namespaces to provide isolation between containers. Ensure that containers run with the appropriate namespaces to prevent unauthorized access to resources.
15 | - ***Immutable Containers & Regular Audits*** - Build containers to be immutable by minimizing the use of writable layers. This ensures that containers are not modified at runtime, also conduct regular security audits of Docker configurations and containers. Use tools like Docker Bench for Security to check for compliance with best practices.
16 | - ***Network Security*** - Use Docker's network features to create isolated networks for containers. Configure firewalls and network policies to control traffic between containers and external systems.
17 | - ***Compliance*** - Ensure that Docker configurations and practices comply with relevant regulatory requirements (`GDPR, HIPAA, SOC2`) by implementing appropriate security controls and maintaining thorough documentation and audit trails.
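
A hedged sketch of a TLS-protected daemon invocation (the certificate paths are placeholders); the equivalent keys can also be set in `/etc/docker/daemon.json`:

```sh
dockerd \
  --tlsverify \
  --tlscacert=/etc/docker/ca.pem \
  --tlscert=/etc/docker/server-cert.pem \
  --tlskey=/etc/docker/server-key.pem \
  -H tcp://0.0.0.0:2376 \
  -H unix:///var/run/docker.sock
```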
18 |
19 | ## Best Practices:
20 |
21 | - When working with Docker, it's crucial to follow best practices to ensure security, efficiency, and maintainability. Best practices are essential for maintaining a secure, efficient, and reliable containerized environment. IT professionals can ensure their Docker deployments are robust and manageable. Here are the key practices:
22 |
23 | - ***Use Official Images*** - Always start with official base images from trusted sources like Docker Hub. This reduces security risks and ensures you are working with well-maintained and regularly updated images. https://www.linuxserver.io/
24 | - ***Minimize Image Size*** - Keep Docker images small by using lightweight base images and multi-stage builds. Smaller images are easier to manage, deploy, and have a smaller attack surface.
25 | - ***Avoid Running as Root*** - Configure containers to run as non-root users. Running as root inside a container can pose significant security risks if the container is compromised (see the sketch after this list).
26 | - ***Regularly Update Images and Dependencies*** - Ensure that base images and dependencies are regularly updated to include the latest security patches and improvements. This helps in mitigating potential vulnerabilities.
27 | - ***Use Volumes for Persistent Storage*** - Use Docker volumes for persistent storage to ensure data is not lost when containers are stopped or removed. Volumes also make it easier to manage and back up data.
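
A short Dockerfile sketch illustrating the slim-base and non-root points above (`appuser` and the `curl` package are only examples):

```sh
FROM debian:12-slim

# create an unprivileged user and keep the apt layer small
RUN useradd --create-home --shell /usr/sbin/nologin appuser \
    && apt-get update \
    && apt-get install -y --no-install-recommends curl \
    && rm -rf /var/lib/apt/lists/*

# drop root for everything that runs in the container
USER appuser
WORKDIR /home/appuser

CMD ["bash"]
```
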
28 | ##
29 | > By taking advantage of Docker’s methodologies for shipping, testing, and deploying code quickly, you can significantly reduce the delay between writing code and running it in production.
30 |
--------------------------------------------------------------------------------
/docker/compose/.bashrc:
--------------------------------------------------------------------------------
1 | # compose_collection
2 | alias dcu='docker compose up -d'
3 | alias dcb='docker compose build'
4 | alias dcd='docker compose down'
5 | alias dcl='docker compose logs'
6 | alias dps='docker compose ps'
7 | alias dce='docker compose exec'
8 | alias dnet='docker network ls'
9 | alias dclf='docker compose logs -f'
10 | alias dneti='docker network inspect'
11 | alias swarmkill='docker kill $(docker ps -q)'
12 |
13 | # function_collection
14 | function compose_ip() {
15 | container=$1
16 | docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $container
17 | }
18 |
19 | function compose_shell() {
20 | container=$1
21 | shell_command=${2:-/bin/sh} # Default to /bin/sh if no second argument is provided
22 | docker exec -it $(docker ps -qf "name=$container") $shell_command
23 | }
24 |
25 | function compose_inspect() {
26 | container=$1
27 | docker inspect $(docker ps -qf "name=$container")
28 | }
29 |
30 |
31 |
--------------------------------------------------------------------------------
/docker/compose/README.md:
--------------------------------------------------------------------------------
1 | ## Docker Compose Wiki:
2 |
3 | **Docker Compose is a tool that simplifies the management of multi-container Docker applications. It uses a YAML file to define the services, networks, and volumes needed for your application, allowing you to manage complex environments with a single command**. Docker Compose is written in Go and integrates seamlessly with Docker, making it an essential tool for developers and system administrators who need to manage multiple services in a consistent and repeatable way.
4 |
5 | **Docker Compose allows you to define your entire application stack in a single docker-compose.yml file, which includes the configuration for all the containers, networks, and storage volumes your app needs**. This file can be version-controlled, shared, and reused, making it an excellent tool for both development and production environments.
6 |
7 | ## Security & Compliance:
8 |
9 | - ***Isolated Environments*** - Docker Compose enables you to isolate your services in separate containers, minimizing the risk of cross-service vulnerabilities.
10 | - ***Encrypted Communication*** - Docker Compose supports configuring encrypted communication between services using TLS, enhancing the security of your application stack.
11 | - ***Secret Management*** - Securely manage sensitive data like API keys and passwords by using Docker secrets in your Compose file, ensuring that sensitive information is not exposed.
12 |
13 | ## Important Note:
14 |
15 | - (`Do NOT`) forget to clean up unused containers, networks, and volumes after scaling down or updating your services. Leftover resources can clutter your environment and lead to unexpected behavior. Always use docker-compose down with appropriate flags to ensure a clean shutdown and removal of all related resources.
16 |
17 |
18 | ## Key Features:
19 |
20 | - ***Multi-Container Orchestration*** - Define and manage multi-container Docker applications with a single YAML file.
21 | - ***Service Scaling*** - Easily scale services up or down with a single command, ideal for handling varying workloads.
22 | - ***Network Management*** - Define custom networks to control how containers communicate with each other.
23 | - ***Volume Management*** - Use Docker volumes to persist data, ensuring that it remains intact even when containers are updated or restarted.
24 | - ***Environment Configuration*** - Define environment variables in the Compose file to configure services dynamically.
25 |
26 | ## Best Practices:
27 |
28 | - ***Use Named Volumes*** - Always use named volumes for persistent data to ensure consistency across container restarts.
29 | - ***Leverage Docker Secrets*** - Store sensitive information like passwords and API keys securely using Docker secrets.
30 | - ***Modularize Your Compose Files*** - Split complex configurations into multiple Compose files using the extends keyword to enhance readability and maintainability.
31 | - ***Monitor Resource Usage*** - Regularly monitor the resource usage of your containers to avoid performance bottlenecks.
32 | ##
33 | > One cool feature of Docker Compose is its Service Dependency Management. With Docker Compose, you can define service dependencies, ensuring that certain containers wait for others to start before they do. This is particularly useful for applications with complex startup sequences, like databases that need to be ready before the application server starts. Docker Compose’s ability to manage these dependencies automatically makes it an indispensable tool for running reliable, multi-container Docker applications.
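
A small sketch of that dependency handling (service and image names are placeholders): the app container is held back until the database healthcheck passes.

```yaml
---
services:
  db:
    image: postgres:16-alpine
    environment:
      - POSTGRES_PASSWORD=example
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5

  app:
    image: ghcr.io/example/app:latest   # placeholder image
    depends_on:
      db:
        condition: service_healthy
    restart: unless-stopped
```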
34 |
35 |
36 |
--------------------------------------------------------------------------------
/docker/compose/Radarr.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | radarr:
4 | image: lscr.io/linuxserver/radarr:5.17.2.9580-ls255
5 | container_name: radarr
6 | ports:
7 | - 7878:7878
8 | volumes:
9 | - /compose/radarr/data:/config
10 | - /mnt/iscsi/Movies:/movies
11 | - /mnt/iscsi:/mnt/iscsi
12 | - /mnt/iscsi/Anime-Movies:/anime-movies
13 | environment:
14 | - PUID=1000
15 | - PGID=1000
16 | - TZ=Etc/UTC
17 | healthcheck:
18 | test: ["CMD-SHELL", "curl --fail http://**************:7878 || exit 0"]
19 | interval: 40s
20 | timeout: 10s
21 | retries: 3
22 | restart: unless-stopped
23 |
--------------------------------------------------------------------------------
/docker/compose/Sonarr.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | sonarr:
4 | image: lscr.io/linuxserver/sonarr:4.0.12.2823-ls267
5 | container_name: sonarr
6 | ports:
7 | - 8989:8989
8 | volumes:
9 | - /compose/sonarr/data:/config
10 | - /mnt/iscsi/Shows:/shows
11 | - /mnt/iscsi:/mnt/iscsi
12 | - /mnt/iscsi/Anime-Shows:/anime-shows
13 | environment:
14 | - PUID=1000
15 | - PGID=1000
16 | - TZ=Etc/UTC
17 | healthcheck:
18 | test: ["CMD-SHELL", "curl --fail http://**************:8989 || exit 0"]
19 | interval: 40s
20 | timeout: 10s
21 | retries: 3
22 | restart: unless-stopped
23 |
--------------------------------------------------------------------------------
/docker/compose/ansible.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | ansible:
4 | image: python:3.12-slim
5 | container_name: ansible
6 | volumes:
7 | - /compose/playbooks:/ansible/playbooks
8 | environment:
9 | - PUID=1000
10 | - PGID=1000
11 | - TZ=America/Calgary
12 | command: /bin/bash -c "pip install ansible && tail -f /dev/null"
13 | restart: unless-stopped
14 |
--------------------------------------------------------------------------------
/docker/compose/cloudflared.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | argo_tunnel_1:
4 | image: cloudflare/cloudflared:latest
5 | container_name: _argo
6 | command: tunnel --no-autoupdate run --token
7 | environment:
8 | - PUID=1000
9 | - PGID=1000
10 | - TZ=America/Calgary
11 | restart: unless-stopped
12 |
13 | argo_tunnel_2:
14 | image: cloudflare/cloudflared:latest
15 | container_name: _argo
16 | command: tunnel --no-autoupdate run --token
17 | environment:
18 | - PUID=1000
19 | - PGID=1000
20 | - TZ=America/Calgary
21 | restart: unless-stopped
22 |
23 | argo_tunnel_3:
24 | image: cloudflare/cloudflared:latest
25 | container_name: _argo
26 | command: tunnel --no-autoupdate run --token
27 | environment:
28 | - PUID=1000
29 | - PGID=1000
30 | - TZ=America/Calgary
31 | restart: unless-stopped
32 |
33 | argo_tunnel_4:
34 | image: cloudflare/cloudflared:latest
35 | container_name: _argo
36 | command: tunnel --no-autoupdate run --token
37 | environment:
38 | - PUID=1000
39 | - PGID=1000
40 | - TZ=America/Calgary
41 | restart: unless-stopped
42 |
43 | argo_tunnel_5:
44 | image: cloudflare/cloudflared:latest
45 | container_name: _argo
46 | command: tunnel --no-autoupdate run --token
47 | environment:
48 | - PUID=1000
49 | - PGID=1000
50 | - TZ=America/Calgary
51 | restart: unless-stopped
52 |
53 | argo_tunnel_6:
54 | image: cloudflare/cloudflared:latest
55 | container_name: _argo
56 | command: tunnel --no-autoupdate run --token
57 | environment:
58 | - PUID=1000
59 | - PGID=1000
60 | - TZ=America/Calgary
61 | restart: unless-stopped
62 |
63 | argo_tunnel_7:
64 | image: cloudflare/cloudflared:latest
65 | container_name: _argo
66 | command: tunnel --no-autoupdate run --token
67 | environment:
68 | - PUID=1000
69 | - PGID=1000
70 | - TZ=America/Calgary
71 | restart: unless-stopped
72 |
73 | argo_tunnel_8:
74 | image: cloudflare/cloudflared:latest
75 | container_name: _argo
76 | command: tunnel --no-autoupdate run --token
77 | environment:
78 | - PUID=1000
79 | - PGID=1000
80 | - TZ=America/Calgary
81 | restart: unless-stopped
82 |
--------------------------------------------------------------------------------
/docker/compose/gitlab.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | gitlab:
4 | image: gitlab/gitlab-ce:17.5.5-ce.0
5 | container_name: gitlab
6 | ports:
7 | - "3200:22"
8 | - "8929:8929" #Managed by Traefik
9 | - "4435:443" #Managed by Traefik
10 | volumes:
11 | - /compose/gitlab/config:/etc/gitlab
12 | - /compose/gitlab/logs:/var/log/gitlab
13 | - /compose/gitlab/data:/var/opt/gitlab
14 | shm_size: "256m"
15 | healthcheck:
16 | test: ["CMD-SHELL", "curl --fail http://IP_ADDRESS || exit 0"]
17 | interval: 40s
18 | timeout: 10s
19 | retries: 3
20 | restart: unless-stopped
21 | #labels:
22 | # - "traefik.enable=true"
23 | # - "traefik.http.routers.gitlab-https.rule=Host('gitlab.alprojects.org')"
24 | # - "traefik.http.routers.gitlab-https.entrypoints=asgard"
25 | # - "traefik.http.routers.gitlab-https.tls.certresolver=production"
26 | # - "traefik.http.services.gitlab-service.loadbalancer.server.port=80"
27 | #networks:
28 | # - traefik_default
29 |
30 | #networks:
31 | #traefik_default:
32 | #external: true
33 |
--------------------------------------------------------------------------------
/docker/compose/homepage.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | homepage:
4 | image: ghcr.io/gethomepage/homepage:latest
5 | env_file:
6 | - .env
7 | container_name: homepage
8 | ports:
9 | - 3000:3000
10 | volumes:
11 | - /compose/homepage:/app/config
12 | - /compose/homepage:/app/public/images
13 | - /compose/homepage:/app/public/icons
14 | #- /var/run/docker.sock:/var/run/docker.sock
15 | environment:
16 | PUID: $PUID
17 | PGID: $PGID
18 | restart: unless-stopped
19 |
--------------------------------------------------------------------------------
/docker/compose/install.md:
--------------------------------------------------------------------------------
1 | ### Install docker-compose once Docker Engine is installed
2 |
3 | ```sh
4 | 1) curl -L "https://github.com/docker/compose/releases/download/$(curl -s https://api.github.com/repos/docker/compose/releases/latest | grep -Po '"tag_name": "\K.*?(?=")')/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
5 | 2) chmod +x /usr/local/bin/docker-compose **make the binary executable**
6 | 3) docker-compose --version **Docker Compose version v2.29.1**
7 | ```
--------------------------------------------------------------------------------
/docker/compose/jackett.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | jackett:
4 | image: lscr.io/linuxserver/jackett:latest
5 | container_name: jackett
6 | ports:
7 | - 9117:9117
8 | volumes:
9 | - /compose/jackett/data:/config
10 | - /compose/jackett/blackhole:/downloads
11 | environment:
12 | - PUID=1000
13 | - PGID=1000
14 | - TZ=America/Toronto
15 | healthcheck:
16 | test: ["CMD-SHELL", "curl --fail http://IP_ADDRESS:8989 || exit 0"]
17 | interval: 40s
18 | timeout: 10s
19 | retries: 3
20 | restart: unless-stopped
21 |
--------------------------------------------------------------------------------
/docker/compose/kali-linux.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | kali-linux:
4 | image: lscr.io/linuxserver/kali-linux:latest
5 | container_name: kali-linux
6 | hostname: KALI01
7 | ports:
8 | - 3004:3004
9 | - 3002:3001
10 | - 5901:5901
11 | volumes:
12 | - /compose/kali/data:/config
13 | - /compose/kali/home:/home/echo
14 | - /tmp/.X11-unix:/tmp/.X11-unix
15 | - /etc/localtime:/etc/localtime:ro
16 | - /compose/kali/etc/hosts:/etc/hosts
17 | - /compose/kali/var/lib/apt:/var/lib/apt
18 | - /compose/kali/var/lib/dpkg:/var/lib/dpkg
19 | - /compose/kali/var/cache/apt:/var/cache/apt
20 | - /compose/kali/etc/hostname:/etc/hostname
21 | - /var/run/docker.sock:/var/run/docker.sock:ro
22 | - /compose/kali/etc/resolv.conf:/etc/resolv.conf
23 | - /compose/kali/data/ssl/cert.pem:/etc/nginx/cert.pem
24 | - /compose/kali/data/ssl/cert.key:/etc/nginx/cert.key
25 | devices:
26 | - /dev/dri:/dev/dri
27 | shm_size: "1gb"
28 | security_opt:
29 | - seccomp=unconfined
30 | - no-new-privileges=false
31 | environment:
32 | - PUID=1000
33 | - PGID=1000
34 | - TZ=America/Calgary
35 | #- SUBFOLDER=/kali
36 | - TITLE="Gizmo_Linux"
37 | healthcheck:
38 | test: ["CMD-SHELL", "curl --fail http://localhost:3001 || exit 0"]
39 | interval: 40s
40 | timeout: 10s
41 | retries: 3
42 | restart: unless-stopped
43 |
--------------------------------------------------------------------------------
/docker/compose/metasploit-debian.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | metasploit:
3 | build: .
4 | container_name: metas2-debian
5 | ports:
6 | - 5432:5432
7 | volumes:
8 | - /compose/metas-debian/data:/data
9 | - /compose/metas-debian/config:/opt/metasploit-framework/config
10 | environment:
11 | - PUID=1000
12 | - PGID=1000
13 | - TZ=America/Calgary
14 | stdin_open: true
15 | tty: true
16 | healthcheck:
17 | test: ["CMD-SHELL", "curl --fail http://127.0.0.1:5432 || exit 0"]
18 | interval: 40s
19 | timeout: 10s
20 | retries: 3
21 | restart: unless-stopped
22 | networks:
23 | - kali_default
24 |
25 | networks:
26 | kali_default:
27 | external: true
28 |
--------------------------------------------------------------------------------
/docker/compose/metasploittable2.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | metasploitable2:
4 | image: tleemcjr/metasploitable2
5 | container_name: metas2
6 | ports:
7 | - 8888:8888
8 | - 8787:80
9 | - 4422:22
10 | volumes:
11 | - /compose/metas/data:/data
12 | - /compose/metas/certs:/usr/local/share/ca-certificates
13 | environment:
14 | - PUID=1000
15 | - PGID=1000
16 | - TZ=America/Calgary
17 | healthcheck:
18 | test: ["CMD-SHELL", "curl --fail http://IP_ADDRESS:80 || exit 0"]
19 | interval: 40s
20 | timeout: 10s
21 | retries: 3
22 | restart: unless-stopped
23 | networks:
24 | - kali_default
25 |
26 | networks:
27 | kali_default:
28 | external: true
29 |
--------------------------------------------------------------------------------
/docker/compose/obsidian.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | obsidian:
4 | image: lscr.io/linuxserver/obsidian:latest
5 | container_name: obsidian
6 | security_opt:
7 | - seccomp:unconfined
8 | ports:
9 | - "33000:3000"
10 | - "33001:3001"
11 | volumes:
12 | - /compose/obsidian/config:/config
13 | - /compose/obsidian/dir:/dir
14 | environment:
15 | - PUID=1000
16 | - PGID=1000
17 | - TZ=America/Calgary
18 | - CUSTOM_PORT=3000
19 | - CUSTOM_HTTPS_PORT=3001
20 | - CUSTOM_USER=USER_NAME_HERE
21 | - PASSWORD=PASSWORD_HERE
22 | - TITLE="Gizmo's Obsidian Secure Notes"
23 | devices:
24 | - /dev/dri:/dev/dri
25 | shm_size: "1gb"
26 | healthcheck:
27 | test: ["CMD-SHELL", "curl --fail http://IP_ADDRESS:33000 || exit 0"]
28 | interval: 40s
29 | timeout: 10s
30 | retries: 3
31 | restart: unless-stopped
32 |
--------------------------------------------------------------------------------
/docker/compose/portainer.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | portainer:
4 | image: portainer/portainer-ce:2.21.5
5 | container_name: portainer
6 | ports:
7 | - 8000:8000
8 | - 9443:9443
9 | # - 9000:9000 # Optional: Open port 9000 for HTTP access
10 | volumes:
11 | - /var/run/docker.sock:/var/run/docker.sock:ro
12 | - /compose/portainer/portainer_data:/data
13 | #healthcheck:
14 | #test: ["CMD-SHELL", "curl --fail http://IP_ADDRESS:9000 || exit 0"]
15 | #interval: 40s
16 | #timeout: 10s
17 | #retries: 3
18 | restart: unless-stopped
19 |
--------------------------------------------------------------------------------
/docker/compose/template.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | SERVICE_NAME:
4 | image:
5 | container_name:
6 | ports:
7 | - *:*
8 | volumes:
9 | - /*/*/*:/*
10 | environment:
11 | - PUID=*
12 | - PGID=*
13 | - TZ=America/Calgary
14 | healthcheck:
15 | test: ["CMD-SHELL", "curl --fail http://IP_ADDRESS:8989 || exit 0"]
16 | interval: 40s
17 | timeout: 10s
18 | retries: 3
19 | restart: unless-stopped
20 |
--------------------------------------------------------------------------------
/docker/compose/tracker.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | tracker:
4 | image: lscr.io/linuxserver/speedtest-tracker:0.21.2
5 | container_name: speedtest-tracker
6 | ports:
7 | - 8686:80
8 | - 8443:443
9 | volumes:
10 | - /compose/tracker/config:/config
11 | - /compose/tracker/ssl-keys:/config/keys
12 | environment:
13 | - PUID=1000
14 | - PGID=1000
15 | - APP_KEY=KEY
16 | - DB_CONNECTION=sqlite
17 | - SPEEDTEST_SCHEDULE=0 */1 * * *
18 | - PRUNE_RESULTS_OLDER_THAN=30
19 | - CHART_DATETIME_FORMAT=Y-m-d H:i #corrected format from wiki link
20 | - DATETIME_FORMAT=Y-m-d H:i:s #corrected standard format from wiki link
21 | - APP_TIMEZONE=America/New_York
22 | - APP_NAME=MySpeedTracker
23 | - APP_URL=https://speedtest.net
24 | - DISPLAY_TIMEZONE=America/New_York
25 | - CONTENT_WIDTH=7xl
26 | - PUBLIC_DASHBOARD=false
27 | - DASHBOARD_POLLING=60s
28 | - NOTIFICATION_POLLING=60s
29 | - RESULTS_POLLING=false
30 | healthcheck:
31 | test: ["CMD-SHELL", "curl -f http://10.100.100.234:8080 || exit 1"]
32 | interval: 1m30s
33 | timeout: 10s
34 | retries: 3
35 | restart: unless-stopped
36 |
37 |
38 |
--------------------------------------------------------------------------------
/docker/compose/twingate.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | twingate_connector:
4 | image: twingate/connector:1
5 | container_name: twingate_calm-perch
6 | environment:
7 | - TZ=America/Calgary
8 | - TWINGATE_NETWORK=cl0ckwerk
9 | - TWINGATE_ACCESS_TOKEN=
10 | - TWINGATE_REFRESH_TOKEN=
11 | - TWINGATE_LABEL_HOSTNAME=DK03
12 | - TWINGATE_LABEL_DEPLOYED_BY=docker
13 | restart: unless-stopped
14 |
--------------------------------------------------------------------------------
/docker/dockerfile/cft.md:
--------------------------------------------------------------------------------
1 | ```sh
2 | # debian slim base, lightweight
3 | FROM debian:bullseye-slim
4 |
5 | # Install dependencies & packages
6 | RUN apt-get update && apt-get install -y curl dpkg git bash \
7 | && rm -rf /var/lib/apt/lists/*
8 |
9 | # install Cloudflared
10 | RUN curl -L --output cloudflared.deb https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64.deb \
11 | && dpkg -i cloudflared.deb
12 |
13 | # CMD + variable secret (shell form so ${CLOUDFLARED_TOKEN} is expanded at run time)
14 | CMD ["sh", "-c", "cloudflared tunnel --no-autoupdate run --token ${CLOUDFLARED_TOKEN}"]
15 | ```
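16 |
17 | A minimal build-and-run sketch for the Dockerfile above; the `cloudflared-custom` tag and `YOUR_TUNNEL_TOKEN` are placeholders, not values from this repo:
18 |
19 | ```sh
20 | # build the image from the directory containing this Dockerfile
21 | docker build -t cloudflared-custom .
22 |
23 | # pass the tunnel token in at run time; it is read by the CMD via ${CLOUDFLARED_TOKEN}
24 | docker run -d --name cloudflared -e CLOUDFLARED_TOKEN=YOUR_TUNNEL_TOKEN cloudflared-custom
25 | ```
26 |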
--------------------------------------------------------------------------------
/docker/dockerfile/metasploit.md:
--------------------------------------------------------------------------------
1 | ```sh
2 | FROM debian:12-slim
3 |
4 | # Set environment variables
5 | ENV USER msf_user
6 | ENV HOME /home/${USER}
7 | ENV SHELL /bin/bash
8 | ENV TZ America/Calgary
9 |
10 | # Update system & install necessary packages
11 | RUN apt-get update && apt-get upgrade -y && \
12 | apt-get install -y \
13 | build-essential \
14 | zlib1g zlib1g-dev \
15 | libxml2 libxml2-dev \
16 | libxslt-dev \
17 | libreadline-dev \
18 | libcurl4-openssl-dev \
19 | git-core \
20 | libssl-dev \
21 | libyaml-dev \
22 | autoconf \
23 | libtool \
24 | ncurses-dev \
25 | bison \
26 | wget \
27 | vim \
28 | curl \
29 | net-tools \
30 | iputils-ping \
31 | nmap \
32 | python3-pip \
33 | postgresql \
34 | postgresql-contrib \
35 | sudo \
36 | tzdata && \
37 | apt-get clean
38 |
39 | # Ensure necessary directories exist & have correct ownership
40 | RUN mkdir -p /etc/sudoers.d /var/run/postgresql /var/lib/postgresql/15/main /opt/logs && \
41 | chown -R postgres:postgres /var/run/postgresql /var/lib/postgresql /opt/logs
42 |
43 | # Add non-root user
44 | RUN useradd -m ${USER} && \
45 | gpasswd -a ${USER} sudo && \
46 | echo "${USER}:msf_pass" | chpasswd && \
47 | echo "${USER} ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/${USER} && \
48 | chmod 440 /etc/sudoers.d/${USER}
49 |
50 | # Initialize PostgreSQL database
51 | USER postgres
52 | RUN rm -rf /var/lib/postgresql/15/main/* && \
53 | /usr/lib/postgresql/15/bin/initdb -D /var/lib/postgresql/15/main && \
54 | /usr/lib/postgresql/15/bin/pg_ctl -D /var/lib/postgresql/15/main -l /opt/logs/postgres.log start && \
55 | psql --command "CREATE USER msf_user WITH SUPERUSER PASSWORD 'msf_pass';" && \
56 | createdb --owner=msf_user msf_database && \
57 | /usr/lib/postgresql/15/bin/pg_ctl -D /var/lib/postgresql/15/main stop
58 |
59 | # Configure PostgreSQL to allow remote connections
60 | USER root
61 | RUN echo "host all all 0.0.0.0/0 md5" >> /var/lib/postgresql/15/main/pg_hba.conf && \
62 | echo "listen_addresses='*'" >> /var/lib/postgresql/15/main/postgresql.conf
63 |
64 | # Install Metasploit Framework
65 | RUN curl https://raw.githubusercontent.com/rapid7/metasploit-omnibus/master/config/templates/metasploit-framework-wrappers/msfupdate.erb -o /tmp/msfinstall && \
66 | chmod +x /tmp/msfinstall && \
67 | /tmp/msfinstall && \
68 | rm -rf /tmp/msfinstall && \
69 | chown -R ${USER}:${USER} /opt/metasploit-framework
70 |
71 | # Expose necessary ports
72 | EXPOSE 5432
73 |
74 | # Set working directory
75 | WORKDIR /opt/metasploit-framework/
76 |
77 | # Default command
78 | CMD ["bash"]
79 | ```
80 |
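81 | A minimal build-and-run sketch for the image above; the `msf-debian` tag and container name are placeholders, not names used elsewhere in this repo:
82 |
83 | ```sh
84 | # build the image from the directory containing this Dockerfile
85 | docker build -t msf-debian .
86 |
87 | # start an interactive shell (the default CMD); PostgreSQL port 5432 is exposed
88 | docker run -it --name metasploit -p 5432:5432 msf-debian
89 | ```
90 |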
--------------------------------------------------------------------------------
/docker/lxc.conf:
--------------------------------------------------------------------------------
1 |
2 | lxc.apparmor.profile: unconfined
3 | lxc.cgroup.devices.allow: a
4 | features: nesting=1
5 | lxc.cap.drop:
6 |
7 |
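8 | # Context (assumption): the options above are appended to the container's config on the
9 | # Proxmox host (e.g. /etc/pve/lxc/<CTID>.conf) so Docker can run nested inside the LXC:
10 | #   lxc.apparmor.profile: unconfined   -> run the container without an AppArmor profile
11 | #   lxc.cgroup.devices.allow: a        -> allow access to all devices
12 | #   features: nesting=1                -> enable nested containers (Docker-in-LXC)
13 | #   lxc.cap.drop:                      -> drop no capabilities (empty list)
14 |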
--------------------------------------------------------------------------------
/docker/portainer/README.md:
--------------------------------------------------------------------------------
1 | ## Portainer Wiki:
2 |
3 | **Portainer is a powerful, open-source container management tool designed to simplify the process of managing Docker and Kubernetes environments through an intuitive graphical user interface (`GUI`)**. Built to reduce the complexity of container orchestration, Portainer offers an easy-to-use dashboard where users **can deploy, manage, and monitor their containers, stacks, and services. It's perfect for both beginners and advanced users, allowing them to manage their containerized applications with ease and efficiency**. Portainer supports Docker Swarm, Kubernetes, and standalone Docker environments, making it highly versatile.
4 |
5 | ## Security & Compliance:
6 |
7 | - ***Role-Based Access Control (`RBAC`)*** - Portainer allows administrators to define roles and permissions, ensuring that only authorized users can access and manage specific resources.
8 | - ***Secure Endpoint Management*** - Portainer manages access to Docker and Kubernetes endpoints securely, using encrypted communication channels.
9 | - ***Audit Logs*** - Portainer provides detailed logs of user actions and changes, helping organizations meet compliance requirements and maintain a secure environment.
10 |
11 | ## Important Note:
12 |
13 | - (`Do NOT`) neglect the importance of securing your Portainer instance. Ensure that your Portainer UI is protected with strong authentication mechanisms and that access is limited to trusted users. Exposing your Portainer dashboard to the internet without proper security measures can lead to unauthorized access and potential compromise of your container environments.
14 |
15 | ## Key Features:
16 |
17 | - ***User-Friendly Interface*** - Manage your Docker and Kubernetes environments through a simple and intuitive web-based UI.
18 | - ***Multi-Environment Support*** - Seamlessly switch between Docker, Docker Swarm, and Kubernetes environments.
19 | - ***Container Management*** - Easily deploy, start, stop, and monitor your containers with just a few clicks.
20 | - ***Stack Management*** - Deploy and manage multi-container applications using Docker Compose or Kubernetes manifests.
21 | - ***Resource Monitoring*** - Get real-time insights into the resource usage of your containers and nodes.
22 |
23 | ## Best Practices:
24 |
25 | - ***Enable RBAC*** - Implement role-based access control to restrict access and define clear user roles within your Portainer environment.
26 | - ***Regular Backups*** - Regularly back up your Portainer configuration and settings to avoid data loss in case of a failure.
27 | - ***Use SSL*** - Always secure your Portainer instance with SSL to protect communication between your browser and the Portainer server.
28 | - ***Update Regularly*** - Keep Portainer up to date with the latest releases to benefit from new features and security patches.
29 | - ***Monitor Logs*** - Regularly review audit logs to track user activity and detect any unauthorized access or changes.
30 |
31 | ##
32 | > One of the coolest features of Portainer is its Stack Management capability. With Portainer, you can deploy and manage entire application stacks using Docker Compose files or Kubernetes manifests directly from the UI, making it incredibly fast to spin up complex, multi-container applications with minimal effort. Even better, Portainer’s visual editor allows you to tweak and adjust your stack configurations on the fly, making it a powerful tool for both development and production environments. This feature bridges the gap between simplicity and power, making container orchestration accessible to everyone.
33 |
--------------------------------------------------------------------------------
/docker/portainer/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | portainer:
4 | image: portainer/portainer-ce:2.21.5
5 | container_name: portainer
6 | ports:
7 | - 8000:8000
8 | - 9443:9443
9 | # - 9000:9000 # Optional: Open port 9000 for HTTP access
10 | volumes:
11 | - /var/run/docker.sock:/var/run/docker.sock:ro
12 | - /compose/portainer/portainer_data:/data
13 | #healthcheck:
14 | #test: ["CMD-SHELL", "curl --fail http://IP_ADDRESS:9000 || exit 0"]
15 | #interval: 40s
16 | #timeout: 10s
17 | #retries: 3
18 | restart: unless-stopped
19 |
--------------------------------------------------------------------------------
/docker/source_build.md:
--------------------------------------------------------------------------------
1 | ***Install + Permissions + Lxc***
2 |
3 | #### OS: Debian 13 (Trixie)
4 | #### environment: su
5 | #### GPG Keys for different distro: https://download.docker.com/linux/
6 |
7 | ```sh
8 | 0) apt-get update && apt-get upgrade -y **good habit not a step**
9 | 1) apt-get install apt-transport-https ca-certificates curl git gnupg lsb-release -y **All tools needed**
10 | 2) install -m 0755 -d /etc/apt/keyrings **keyrings directory**
11 | 3) curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg **official key**
12 | 4) echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null **in my case, since Docker doesn't support 13 yet, I had to add the Bookworm repo: "echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian bookworm stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null"**
13 | 5) chmod a+r /etc/apt/keyrings/docker.gpg **permissions**
14 | 6) apt-get update -y
15 | 7) apt-get install docker-ce docker-ce-cli containerd.io docker-ce-rootless-extras -y
16 | 8) systemctl status docker **green**
17 | 9) docker --version **Docker version 27.1.1, build 6312585**
18 | 10) docker run --privileged hello-world **on Proxmox this will work without adding "lxc.apparmor.profile: unconfined", "lxc.cgroup.devices.allow: a" and "lxc.cap.drop:" to the .conf file of the lxc**
19 | 11) docker network ls
20 | 12) docker ps
21 | ```
22 |
23 | ***Uninstall Docker***
24 | ```sh
25 | 1) sudo apt-get purge docker-ce docker-ce-cli containerd.io docker-ce-rootless-extras
26 | ```
27 |
--------------------------------------------------------------------------------
/docker/swarm/README.md:
--------------------------------------------------------------------------------
1 | ## Docker Swarm Wiki:
2 |
3 | **Docker Swarm is Docker’s native clustering and orchestration tool that enables you to manage a cluster of Docker engines, transforming them into a single virtual Docker Engine**. Swarm allows for the deployment, management, and scaling of services across multiple Docker hosts. **It integrates seamlessly with the Docker CLI and API**, making it easy to transition from single-host setups to multi-host environments.
4 |
5 | **Swarm provides high availability, load balancing, and service discovery, ensuring that your containerized applications are resilient and scalable. Unlike Docker Compose, which is primarily used for managing multi-container applications on a single host**, Docker Swarm is designed for managing distributed applications across multiple hosts, providing built-in orchestration and clustering capabilities.
6 |
7 | ## Comparison to Docker Compose:
8 |
9 | - ***Scope*** - Docker Compose is ideal for single-host, multi-container applications, while Docker Swarm is built for managing distributed applications across multiple hosts.
10 | - ***Scalability*** - Docker Swarm offers automatic scaling and load balancing across clusters, whereas Docker Compose scales services on a single host.
11 | - ***Orchestration*** - Docker Swarm provides built-in orchestration features like auto-healing and rolling updates, which are not available in Docker Compose.
12 |
13 | ## Security & Compliance:
14 |
15 | - ***Encrypted Overlay Networks*** - Docker Swarm supports encrypted communication between containers across different nodes using overlay networks, ensuring that data transmitted between containers remains secure.
16 | - ***Role-Based Access Control (`RBAC`)*** - Swarm mode integrates with Docker's RBAC, enabling fine-grained control over who can perform operations on the cluster, which is crucial for compliance in larger organizations.
17 | - ***Automatic Certificate Management*** - Docker Swarm manages certificates automatically, ensuring secure communication between Swarm nodes without manual intervention.
18 |
19 | ## Important Note:
20 |
21 | - (`Do NOT`) overlook Swarm’s auto-healing capabilities. If a node fails, Docker Swarm automatically reschedules services on available nodes to maintain the desired state, but this requires proper node labeling and resource allocation to function optimally. Always ensure your cluster is adequately prepared for failover scenarios.
22 |
23 | ## Key Features:
24 |
25 | - ***Cluster Management*** - Easily manage multiple Docker hosts as a single entity, enabling seamless orchestration across a cluster.
26 | - ***Service Scaling*** - Automatically scale services up or down across the cluster based on demand, with load balancing ensuring even distribution of workloads.
27 | - ***Service Discovery*** - Integrated service discovery allows containers to communicate with each other using simple DNS names, without needing to know the specific IP addresses.
28 | - ***Rolling Updates*** - Deploy updates to services without downtime by rolling out changes incrementally across the cluster.
29 |
30 | ## Best Practices:
31 |
32 | - ***Label Nodes Wisely*** - Use node labels to control where services are deployed, ensuring that the right workloads run on the appropriate hardware.
33 | - ***Use Overlay Networks*** - Secure your multi-host network communication by leveraging encrypted overlay networks.
34 | - ***Monitor Swarm Health*** - Regularly check the health of your nodes and services using Docker’s monitoring tools to prevent and respond to failures quickly.
35 | - ***Backup Cluster State*** - Regularly back up the cluster state to ensure you can recover from any catastrophic failures without data loss.
36 |
37 | ##
38 | > One of the coolest features of Docker Swarm is its Declarative Service Model. With Docker Swarm, you define the desired state of your services, and Swarm ensures that the cluster maintains this state. For instance, if you specify that you need five replicas of a service running, Swarm automatically handles scaling and rescheduling tasks to maintain exactly five replicas, even if some nodes go down. This self-managing capability makes Swarm a powerful tool for maintaining high availability and service reliability in production environments.
39 |
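40 | A quick sketch of that declarative model; the service name `web`, the `nginx` image, and the replica counts are example values, not anything deployed in this lab:
41 |
42 | ```sh
43 | # initialize a single-node swarm (run once on the manager)
44 | docker swarm init
45 |
46 | # declare the desired state: five replicas of an nginx service
47 | docker service create --name web --replicas 5 -p 80:80 nginx
48 |
49 | # change the desired state; Swarm reconciles the cluster to match
50 | docker service scale web=10
51 |
52 | # inspect what Swarm is maintaining
53 | docker service ls
54 | ```
55 |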
--------------------------------------------------------------------------------
/elastic-stack/README.md:
--------------------------------------------------------------------------------
1 | ## Elastic Stack Wiki:
2 |
3 | **Elastic Stack (`formerly ELK Stack`) is an open-source suite of tools developed by Elastic, designed for search, analysis, and visualization of log and event data in real-time**. It includes Elasticsearch for search and analytics, Logstash for log processing, and Kibana for visualization. This powerful stack is widely adopted for monitoring, troubleshooting, and securing IT environments.
4 |
5 | **Elastic Stack is licensed under the Elastic License, which encourages community contributions while allowing extensive customization. Additionally, Elastic offers a commercial version with advanced features for enterprises**.
6 |
7 | **The Elastic Stack excels in providing real-time data insights, enabling organizations to efficiently manage and analyze large volumes of data**. It is particularly favored for its scalability, making it ideal for both small and large-scale deployments.
8 |
9 | ## Security & Compliance:
10 |
11 | - ***Centralized Logging*** - Elastic Stack provides a centralized platform for collecting, indexing, and storing logs from multiple sources, making it easier to track and audit system activities.
12 | - ***Data Encryption*** - Elastic Stack supports encryption of data both in transit and at rest, ensuring that sensitive information is protected throughout its lifecycle.
13 | - ***Compliance Monitoring*** - With Elastic Stack, you can create custom dashboards to monitor compliance metrics and generate detailed reports to meet regulatory standards such as (`GDPR`), (`HIPAA`), and (`PCI-DSS`).
14 | - ***Role-Based Access Control (`RBAC`)*** - Elastic Stack allows fine-grained access control, enabling organizations to restrict access to sensitive data and functions based on user roles, which is crucial for maintaining compliance and security.
15 |
16 | ## Important Note:
17 |
18 | - (`Do NOT`) overlook the importance of data retention policies in Elastic Stack. Storing large volumes of log data without proper retention policies can lead to storage bloat and potential performance issues. Implement data retention and lifecycle management strategies to ensure that your Elastic Stack deployment remains efficient and scalable. Regularly review and update your index lifecycle management (`ILM`) policies to align with your organization's data retention requirements.
19 |
20 | ## Key Features:
21 |
22 | - ***Elasticsearch*** - The core of Elastic Stack, Elasticsearch provides powerful full-text search and analytics capabilities, allowing you to query and analyze massive datasets in real-time.
23 | - ***Logstash*** - A versatile log processing pipeline, Logstash collects, parses, and transforms log data from various sources before feeding it into Elasticsearch.
24 | - ***Kibana*** - The visualization layer of Elastic Stack, Kibana offers interactive dashboards, data exploration, and reporting features, making it easy to visualize and analyze your data.
25 | - ***Beats*** - Lightweight data shippers that send data from hundreds of machines to Logstash or Elasticsearch, enabling you to gather and centralize data from a diverse array of sources.
26 |
27 | ## Best Practices:
28 |
29 | - ***Optimize Index Management*** - Use index lifecycle management (ILM) policies to automate index creation, rollover, and deletion based on your data retention needs (a minimal policy sketch is included at the end of this page).
30 | - ***Secure Your Deployment*** - Enable encryption, authentication, and RBAC to protect your Elastic Stack environment from unauthorized access.
31 | - ***Regularly Update*** - Keep your Elastic Stack components up-to-date with the latest versions to take advantage of new features, performance improvements, and security patches.
32 | - ***Monitor Resource Usage*** - Continuously monitor the resource usage of your Elastic Stack deployment to ensure optimal performance, especially in large-scale environments.
33 |
34 | ##
35 | > One cool feature of Elastic Stack is Machine Learning (`ML`) in Elasticsearch, which allows you to automatically detect anomalies in your data. This feature helps you identify unusual patterns or deviations in real-time, making it an invaluable tool for proactive threat detection and system monitoring. Whether it’s spotting spikes in network traffic or unusual login attempts, the built-in ML capabilities help you stay ahead of potential issues before they escalate into major problems.
36 |
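37 | A minimal sketch of the ILM policy mentioned under Best Practices; the policy name, rollover sizes, and retention period are illustrative values only, and the call assumes an Elasticsearch endpoint on localhost:9200:
38 |
39 | ```sh
40 | curl -X PUT "http://localhost:9200/_ilm/policy/logs-30d" \
41 |   -H 'Content-Type: application/json' \
42 |   -d '{
43 |     "policy": {
44 |       "phases": {
45 |         "hot":    { "actions": { "rollover": { "max_age": "7d", "max_primary_shard_size": "50gb" } } },
46 |         "delete": { "min_age": "30d", "actions": { "delete": {} } }
47 |       }
48 |     }
49 |   }'
50 | ```
51 |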
--------------------------------------------------------------------------------
/evebox/README.md:
--------------------------------------------------------------------------------
1 | ## EveBox Wiki:
2 |
3 | **EveBox is an open-source web application designed for managing and analyzing security events generated by Suricata. It offers a user-friendly interface for both searching and managing alerts**, helping you stay on top of your network’s security status.
4 |
5 | **Built using the Go programming language, EveBox leverages Suricata’s JSON output for seamless event integration. Its open-source nature under GNU GPLv3** ensures continuous improvement through community contributions.
6 |
7 | **Whether you're a seasoned pro or new to security monitoring, EveBox provides powerful tools like alert management and event triage, making it easier to filter through noise and focus on what matters most**.
8 |
9 | ## Security & Compliance:
10 |
11 | - ***Advanced Event Triage*** - EveBox's event triage feature goes beyond basic alert handling. It allows for tagging, classifying, and prioritizing alerts, ensuring that your incident response team can focus on the most critical threats. This structured approach helps in reducing noise and improving the efficiency of your security operations.
12 |
13 | - ***Detailed Audit Logs*** - Every action within EveBox is meticulously logged, including who accessed what and when. These logs are timestamped and immutable, making them valuable for forensic investigations and ensuring compliance with rigorous security standards.
14 |
15 | - ***Data Integrity Assurance*** - EveBox employs cryptographic techniques to ensure that the integrity of security event data is maintained. This prevents tampering or unauthorized modifications, which is crucial for reliable security analytics and maintaining trust in your security systems.
16 |
17 | ## Important Note:
18 | - (`Do NOT`) skimp on setting up proper access controls in EveBox. Leaving it wide open could expose your sensitive security data to unauthorized eyes. Always make sure only the right people can access and manage your alerts.
19 |
20 | ## Key Features:
21 |
22 | - ***Event Search & Filtering*** - Find exactly what you're looking for with powerful search and filtering tools.
23 | - ***Alert Management*** - Easily categorize and manage alerts—escalate, archive, or resolve them with just a few clicks.
24 | - ***Integration with Suricata*** - Seamlessly pulls in Suricata’s event data, ensuring you get the full picture in a user-friendly interface.
25 | - ***User-Friendly Interface*** - Designed with simplicity in mind, making it easy to navigate and manage your security events.
26 |
27 | ## Best Practices:
28 | - ***Regular Updates*** - Keep EveBox up-to-date to stay ahead with the latest features and security fixes.
29 | - ***User Access Control*** - Lock down who can see and manage security events—only let the right folks in.
30 | - ***Regular Auditing*** - Check those EveBox logs regularly to catch any unauthorized access or funny business.
31 | - ***Backup Configurations*** - Don’t forget to back up your EveBox settings and archived events—better safe than sorry!
32 |
33 | ##
34 | > One cool thing about EveBox is its (`Inbox`) feature, which transforms how you manage Suricata alerts. The Inbox automatically collects new security events as they come in, allowing you to triage them in real-time. You can quickly categorize, escalate, or resolve alerts directly from the Inbox, streamlining your workflow and ensuring that no critical alerts slip through the cracks. This feature, combined with its user-friendly interface, makes EveBox a powerful tool for both proactive and reactive security operations.
35 |
36 |
--------------------------------------------------------------------------------
/gitlab/abstracted_decomposed_classification_pattern_gitlab_CICD_process_chain.md:
--------------------------------------------------------------------------------
1 | ## Abstraction:
2 |
3 | - Abstract the entire event planning process by focusing only on the core tasks. In the context of a GitLab CI/CD process, the focus will be on automating key tasks that ensure the event runs smoothly. This includes automating ticket sales, security management, schedule updates, and guest management, while leaving out non-essential tasks like decorations or minor logistical details.
4 | Abstracted Tasks:
5 |
6 | - Ticket Sales & Registration Automation
7 | - Security Protocol Automation
8 | - Event Schedule Deployment
9 | - VIP & Guest Management Automation
10 |
11 |
12 | ## Decomposition:
13 |
14 | - Break down each of the abstracted tasks into smaller components that can be automated using GitLab CI/CD pipelines. Each of these steps is essential to the success of the event but can be handled individually through a pipeline.
15 | Decomposed Tasks:
16 |
17 | - Ticket Sales & Registration:
18 | - Setup a ticketing platform using a pipeline to deploy the infrastructure (Docker, Kubernetes).
19 | - Automate attendee registration by deploying an online registration system.
20 | - Security & Safety Protocols:
21 | - Automate security scans and compliance checks through GitLab’s CI tools.
22 | - Set up an access control system that automates guest check-ins.
23 | - Event Schedule:
24 | - Automate the deployment of event schedules to the website or mobile app.
25 | - Set up automated reminders for important schedule updates and timing.
26 | - Guest & VIP Management:
27 | - Automate the process of sending invitations, tracking responses, and managing VIP accommodations.
28 |
29 |
30 | ## Classification Patterns:
31 |
32 | - Classify the different tasks into categories that share similarities in the GitLab CI/CD pipeline.
33 | Classified Patterns:
34 |
35 | - Infrastructure Setup (Ticketing Platform, Registration System)
36 | - Security and Compliance (Safety Checks, Guest Access Control)
37 | - Content Management (Event Schedule, Notifications)
38 | - Logistics and Guest Management (Invitations, VIP Handling)
39 | - Pattern Identification:
40 |
41 | - Identify patterns in the way different events (e.g., concerts, conferences, sports games) can be automated using similar GitLab CI/CD pipelines.
42 |
43 |
44 | ## Identified Patterns:
45 |
46 | - Crowd-centric Events (Concerts, Sports Games): These events focus heavily on crowd management, ticketing, and security, which can be automated through security and ticketing pipelines.
47 | - Speaker-centric Events (Conferences): These events require more focus on scheduling, speaker management, and logistics, which can be automated through schedule deployment and VIP handling pipelines.
48 |
49 |
50 | ## Example Table for Classification:
51 |
52 | | Event Type | Shared Organizational Tasks | CI/CD Patterns |
53 | |--------------|--------------------------------------------|-------------------------------------------------------- |
54 | | Concert | Ticketing, security, crowd control | Infrastructure, security, notifications |
55 | | Sports Game | Ticketing, security, vendor management | Infrastructure, security, vendor onboarding |
56 | | Conference | Scheduling, registration, VIP management | Content management, scheduling, guest logistics |
57 |
58 |
59 | ##
60 | > Through this chain of steps (abstraction, then decomposition, then pattern identification), the event planning process is greatly simplified and automated using GitLab CI/CD, leading to efficiency and error reduction in the event management process. A minimal pipeline sketch follows below. I'm open to questions :)
61 |
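62 | A minimal, hypothetical `.gitlab-ci.yml` sketch of the classified patterns above; every job name and script path is a placeholder, not something that exists in this repo:
63 |
64 | ```yaml
65 | stages:
66 |   - infrastructure
67 |   - security
68 |   - content
69 |   - guests
70 |
71 | deploy_ticketing:            # Infrastructure Setup pattern
72 |   stage: infrastructure
73 |   script:
74 |     - docker compose -f ticketing/docker-compose.yaml up -d
75 |
76 | compliance_scan:             # Security and Compliance pattern
77 |   stage: security
78 |   script:
79 |     - ./scripts/run_compliance_checks.sh
80 |
81 | publish_schedule:            # Content Management pattern
82 |   stage: content
83 |   script:
84 |     - ./scripts/deploy_schedule.sh
85 |
86 | send_invitations:            # Logistics and Guest Management pattern
87 |   stage: guests
88 |   script:
89 |     - ./scripts/send_invitations.sh
90 | ```
91 |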
--------------------------------------------------------------------------------
/gitlab/docker_compose.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | gitlab:
4 | image: gitlab/gitlab-ce:17.5.5-ce.0
5 | container_name: gitlab
6 | ports:
7 | - "3200:22"
8 | - "8929:8929" #Managed by Traefik
9 | - "4435:443" #Managed by Traefik
10 | volumes:
11 | - /compose/gitlab/config:/etc/gitlab
12 | - /compose/gitlab/logs:/var/log/gitlab
13 | - /compose/gitlab/data:/var/opt/gitlab
14 | shm_size: "256m"
15 | healthcheck:
16 |       test: ["CMD-SHELL", "curl --fail http://IP_ADDRESS || exit 1"]
17 | interval: 40s
18 | timeout: 10s
19 | retries: 3
20 | restart: unless-stopped
21 | #labels:
22 | # - "traefik.enable=true"
23 | # - "traefik.http.routers.gitlab-https.rule=Host('gitlab.alprojects.org')"
24 | # - "traefik.http.routers.gitlab-https.entrypoints=asgard"
25 | # - "traefik.http.routers.gitlab-https.tls.certresolver=production"
26 | # - "traefik.http.services.gitlab-service.loadbalancer.server.port=80"
27 | #networks:
28 | # - traefik_default
29 |
30 | #networks:
31 | #traefik_default:
32 | #external: true
33 |
--------------------------------------------------------------------------------
/gitlab/gitlab.rb:
--------------------------------------------------------------------------------
1 | # URL link
2 | external_url 'https://gitlabs.alprojects.org'
3 |
4 | # SSH port
5 | gitlab_rails['gitlab_shell_ssh_port'] = 3200
6 |
7 |
8 | # Disabling Gitlab Process
9 | letsencrypt['auto_renew'] = false
10 |
11 |
12 | # Traefik will terminate
13 | nginx['listen_port'] = 80
14 | nginx['listen_https'] = false
15 |
16 |
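17 | # Note (assumption about the deployment above): after editing this file in the mounted
18 | # /compose/gitlab/config volume, apply the changes from the host with:
19 | #   docker exec -it gitlab gitlab-ctl reconfigure
20 |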
--------------------------------------------------------------------------------
/gitlab/sign-up_restrictions.md:
--------------------------------------------------------------------------------
1 | ```sh
2 | ## denied domains for sign-ups
3 | mailinator.com
4 | yopmail.com
5 | guerrillamail.com
6 | 10minutemail.com
7 | trashmail.com
8 | getnada.com
9 | dispostable.com
10 | tempmail.com
11 | throwawaymail.com
12 | maildrop.cc
13 | ```
14 |
15 | ```sh
16 | ## email restrictions for sign-ups
17 | ^[a-zA-Z0-9._%+-]+@(gmail\.com|yahoo\.com|googlemail\.com|hotmail\.com|outlook\.com|aol\.com)$
18 | ```
19 |
--------------------------------------------------------------------------------
/grafana/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/homebrew/README.md:
--------------------------------------------------------------------------------
1 | ## Homebrew Wiki:
2 |
3 | **Homebrew is a free and open-source software package management system that simplifies the installation of software on Apple's macOS and Linux operating systems.** The name is intended to suggest the idea of building software on your own, in a similar way to brewing your own beer.
4 |
5 | **Homebrew primarily focuses on distributing command-line software, which can be installed from the terminal. It is also used to manage various libraries and dependencies that these tools may require.** The Homebrew package manager may be used on Linux and Windows Subsystem for Linux (`WSL-2`). Homebrew was formerly referred to as Linuxbrew when running on Linux or (`WSL`). Due to known issues with (`WSL-1`), you may experience issues running various executables installed by Homebrew. We recommend you always use (`WSL-2`) instead.
6 |
7 | ## Key Features:
8 |
9 | - ***Ease of Use*** - Homebrew simplifies the installation process for a wide variety of software. Users can install packages using simple commands in the terminal.
10 | - ***Large Repository*** - Homebrew provides access to a vast repository of software packages, known as formulae, which are maintained by the Homebrew community.
11 | - ***Dependency Management*** - Homebrew automatically handles dependencies, ensuring that all required libraries and packages are installed and up to date.
12 | - ***Customizability*** - Users can create their own Homebrew formulae for software not available in the main repository.
13 |
14 | ## Best Practice:
15 |
16 | - ***Clean Up*** - Regularly clean up old versions of installed packages to free up disk space.
17 | ■ `Command: brew cleanup`
18 | - ***Check Health*** - Regularly run diagnostics to ensure Homebrew is functioning correctly.
19 | ■ `Command: brew doctor`
20 | - ***Use Taps Sparingly*** - Only add necessary taps to avoid cluttering your Homebrew setup.
21 | ■ `Command: brew tap `
22 | - ***Secure Your Installation*** - Use Homebrew’s security features, like verifying GPG signatures of packages.
23 | - ***Automate with Scripts*** - Automate routine tasks and environment setups using shell scripts to maintain consistency across systems (a small maintenance-script sketch is included at the end of this page).
24 | - ***Document Custom Installations*** - Keep a record of custom and manual installations to streamline system recovery or replication.
25 |
26 | ##
27 | > Homebrew, the popular package manager for macOS and Linux, was inspired by a visit to the Google campus. Max Howell, the creator of Homebrew, was impressed by the ease with which Google engineers could install software on their systems. Determined to bring this level of simplicity to the broader macOS community, Howell developed Homebrew, which allows users to install, update, and manage software directly from the command line. Homebrew's name and logo reflect the idea of crafting software solutions in a straightforward, DIY manner, much like brewing your own beer at home.
28 |
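29 | A small maintenance-script sketch for the "Automate with Scripts" practice above; the file name is a placeholder and the command list just strings together standard `brew` commands, including the `cleanup` and `doctor` commands shown earlier:
30 |
31 | ```sh
32 | #!/usr/bin/env bash
33 | # brew-maintain.sh - routine Homebrew upkeep
34 | set -euo pipefail
35 |
36 | brew update        # refresh formulae
37 | brew upgrade       # upgrade installed packages
38 | brew cleanup       # remove old versions to free disk space
39 | brew doctor        # report potential problems
40 | ```
41 |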
--------------------------------------------------------------------------------
/homebrew/source_build.md:
--------------------------------------------------------------------------------
1 | ***Home-brew + Permission fix + bashrc***
2 |
3 | ```sh
4 | 1) cp ~/.bashrc ~/.bashrc.backup **always backup**
5 | 2) apt install build-essential curl file git
6 | 3) /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
7 | 4) chown -h ms01:ms01 /home/ms01/.bashrc
8 | 5) chown -R ms01:ms01 /home/linuxbrew/.linuxbrew
9 | 6) chmod 644 /home/ms01/.bashrc
10 | 7) su ms01
11 | 8) echo 'eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"' >>~/.bashrc **SYMLINK error, follow next 3 steps**
12 | 9) sudo rm /home/ms01/.bashrc
13 | 10) sudo touch /home/ms01/.bashrc
14 | 11) sudo chown ms01:ms01 /home/ms01/.bashrc **must be sudoer**
15 | 12) source ~/.bashrc
16 | 13) brew --version **4.3.10 for me**
17 | ```
18 |
--------------------------------------------------------------------------------
/homepage/.env.md:
--------------------------------------------------------------------------------
1 |
2 | ```sh
3 | 1) sudo docker exec -it CONTAINER_ID /bin/sh **container environment**
4 | 2) printenv ** should see HOMEPAGE_VAR_**
5 |
6 |
7 | # Permissions level
8 | PUID=1000
9 | PGID=1000
10 |
11 |
12 | # Plex
13 | HOMEPAGE_VAR_PLEX_HREF=https://**************:32400/web
14 | HOMEPAGE_VAR_PLEX_URL=https://**************:32400
15 | #
16 | HOMEPAGE_VAR_PLEX_API_KEY=VmbQ7nA_1oHErBH6e6rN
17 | #
18 | HOMEPAGE_VAR_PLEX_PING=http://**************
19 |
20 |
21 | # Sonarr
22 | HOMEPAGE_VAR_SONARR_HREF=http://**************:8989/
23 | HOMEPAGE_VAR_SONARR_URL=http://**************:8989/
24 | #
25 | HOMEPAGE_VAR_SONARR_API_KEY=****************************
26 | #
27 | HOMEPAGE_VAR_SONARR_PING=http://**************
28 |
29 |
30 | # Radarr
31 | HOMEPAGE_VAR_RADARR_HREF=http://**************:7878/
32 | HOMEPAGE_VAR_RADARR_URL=http://**************:7878/
33 | #
34 | HOMEPAGE_VAR_RADARR_API_KEY=****************************
35 | #
36 | HOMEPAGE_VAR_RADARR_PING=http://**************
37 |
38 |
39 | # qBittorrent
40 | HOMEPAGE_VAR_QBITTORRENT_HREF=http://**************:8080
41 | #
42 | HOMEPAGE_VAR_QBITTORRENT_PING=http://**************
43 | #
44 | HOMEPAGE_VAR_QBITTORRENT_URL=http://**************:8080
45 | #
46 | HOMEPAGE_VAR_QBITTORRENT_USERNAME=Hachimon
47 | #
48 | HOMEPAGE_VAR_QBITTORRENT_PASSWORD=Sanjuu!@#123
49 |
50 |
51 | # Proxmox
52 | HOMEPAGE_VAR_PROXMOX_HREF=https://**************:8006/
53 | HOMEPAGE_VAR_PROXMOX_HREF_2=https://**************:8006/
54 | #
55 | HOMEPAGE_VAR_PROXMOX_URL=http://**************:8006
56 | HOMEPAGE_VAR_PROXMOX_URL_2=http://**************:8006
57 | #
58 | HOMEPAGE_VAR_PROXMOX_USER=****************************
59 | HOMEPAGE_VAR_PROXMOX_USER_2=****************************
60 | HOMEPAGE_VAR_PROXMOX_API_KEY=************************************
61 | HOMEPAGE_VAR_PROXMOX_API_KEY_2=************************************
62 | #
63 | HOMEPAGE_VAR_PROXMOX_PING=http://**************
64 | HOMEPAGE_VAR_PROXMOX_PING_2=http://**************
65 |
66 |
67 | # TrueNAS
68 | HOMEPAGE_VAR_TRUENAS_HREF=https://*************/
69 | #
70 | HOMEPAGE_VAR_TRUENAS_URL=https://**************:52
71 | #
72 | HOMEPAGE_VAR_TRUENAS_API_KEY=****************************
73 | #
74 | HOMEPAGE_VAR_TRUENAS_USER=root
75 | HOMEPAGE_VAR_TRUENAS_PWD=sasasa
76 | #
77 | HOMEPAGE_VAR_TRUENAS_PING=http://*************
78 |
79 |
80 | # Qnap
81 | HOMEPAGE_VAR_QNAP_HREF=https://**************:1148/
82 | #
83 | HOMEPAGE_VAR_QNAP_URL=https://**************:1148
84 | #
85 | HOMEPAGE_VAR_QNAP_USER=****************************
86 | HOMEPAGE_VAR_QNAP_PWD=****************************
87 | #
88 | HOMEPAGE_VAR_QNAP_PING=http://**************
89 |
90 |
91 | # Pihole
92 | HOMEPAGE_VAR_PIHOLE_HREF=http://**************/admin/
93 | HOMEPAGE_VAR_PIHOLE_HREF_2=http://**************/admin/
94 | #
95 | HOMEPAGE_VAR_PIHOLE_URL=http://**************
96 | HOMEPAGE_VAR_PIHOLE_URL_2=http://**************
97 | #
98 | HOMEPAGE_VAR_PIHOLE_API_KEY=****************************
99 | HOMEPAGE_VAR_PIHOLE_API_KEY_2=****************************
100 | #
101 | HOMEPAGE_VAR_PIHOLE_PING=http://**************
102 | HOMEPAGE_VAR_PIHOLE_PING_2=http://**************
103 |
104 |
105 | # Twingate
106 | HOMEPAGE_VAR_TWINGATE_HREF=****************************
107 | #
108 | HOMEPAGE_VAR_TWINGATE_URL=****************************
109 | #
110 | HOMEPAGE_VAR_TWINGATE_API_KEY=****************************
111 | #
112 | HOMEPAGE_VAR_TWINGATE_PING=****************************
113 |
114 |
115 | # Cloudflare
116 | HOMEPAGE_VAR_CLOUDFLARE_HREF=https://dash.cloudflare.com/
117 | #
118 | HOMEPAGE_VAR_CLOUDFLARE_URL=https://dash.cloudflare.com
119 | #
120 | HOMEPAGE_VAR_CLOUDFLARE_API_KEY=****************************
121 | #
122 | HOMEPAGE_VAR_CLOUDFLARE_PING=https://**************
123 | #
124 | HOMEPAGE_VAR_CLOUDFLARE_ACCOUNT_ID=****************************
125 | #
126 | HOMEPAGE_VAR_CLOUDFLARE_TUNNEL_ID=************************************
127 | HOMEPAGE_VAR_CLOUDFLARE_TUNNEL_ID_2=************************************
128 |
129 |
130 | # Authentik
131 | HOMEPAGE_VAR_AUTHENTIK_HREF=****************************
132 | #
133 | HOMEPAGE_VAR_AUTHENTIK_URL=****************************
134 | #
135 | HOMEPAGE_VAR_AUTHENTIK_API_KEY=****************************
136 | #
137 | HOMEPAGE_VAR_AUTHENTIK_PING=****************************
138 |
139 |
140 | # Speed Test Tracker
141 | HOMEPAGE_VAR_TRACKER_HREF=https://**************:8443/
142 | #
143 | HOMEPAGE_VAR_TRACKER_URL=https://**************:8443
144 | #
145 | HOMEPAGE_VAR_TRACKER_API_KEY=****************************
146 | #
147 | HOMEPAGE_VAR_TRACKER_PING=http://**************
148 |
149 |
150 | # Bookmarks
151 | HOMEPAGE_VAR_CYBER_POWER_HREF=http://**************:3052/management/login
152 | #
153 | HOMEPAGE_VAR_CYBER_POWER_PING=http://**************
154 |
155 |
156 | HOMEPAGE_VAR_BROTHER_PRINTER_HREF=http://*************/printer/main.html
157 | #
158 | HOMEPAGE_VAR_BROTHER_PRINTER_PING=http://*************
159 |
160 |
161 | HOMEPAGE_VAR_CUPS_HREF=http://localhost:631/
162 | #
163 | HOMEPAGE_VAR_CUPS_PING=http://localhost:631
164 |
165 |
166 | HOMEPAGE_VAR_BITWARDEN_HREF=https://vault.bitwarden.com/#/login
167 | #
168 | HOMEPAGE_VAR_BITWARDEN_PING=https://**************
169 |
--------------------------------------------------------------------------------
/homepage/README.md:
--------------------------------------------------------------------------------
1 | ## Homepage Wiki:
2 |
3 | **Homepage.dev is a highly customizable personal homepage designed to boost productivity and provide easy access to various tools and widgets.** It allows users to configure their homepage with a variety of themes and segments to display information such as the current date, weather, news, and more. Users can integrate shortcuts, bookmarks, and widgets for quick access to frequently used services and information.
4 |
5 | **It's open-source and maintained by a community of developers. Contributions are welcomed via the project's GitHub repository, where users can report issues, request features, and contribute code.**
6 | The project is hosted on GitHub, where users can find the source code, documentation, and installation instructions. The community actively contributes to the project by reporting issues, requesting features, and submitting pull requests.
7 |
8 | **It uses YAML (`YAML Ain't Markup Language`) for configuration, which is a human-readable data serialization format. YAML is often used for configuration files because of its simplicity and ease of use.** It allows you to define widgets, themes, and layouts in a structured way without the complexity of (`XML`) or (`JSON`).
9 |
10 | ## Key Features:
11 |
12 | - ***Theming:*** - Customize the look and feel with various themes.
13 | - ***Widgets:*** - Add and arrange widgets to suit your needs.
14 | - ***Shortcuts:*** - Create shortcuts to frequently visited sites and services.
15 | - ***Bookmarks:*** - Easily manage and access bookmarks.
16 | - ***Search Bar:*** - Integrated search functionality for quick access to information.
17 |
18 | ## Benefits:
19 |
20 | - ***Productivity:*** - Enhances productivity by providing quick access to essential tools and information.
21 | - ***Customization:*** - High level of customization to match personal preferences.
22 | - ***Open Source:*** - Free to use and modify, with active community support.
23 |
24 | ##
25 | >The project is hosted on GitHub, where users can find the source code, documentation, and installation instructions. The community actively contributes to the project by reporting issues, requesting features, and submitting pull requests.
26 |
--------------------------------------------------------------------------------
/homepage/bookmarks.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - Cloud Services:
3 |
4 | - Cloudflare:
5 | - icon: mdi-cloud #cloudflare.png
6 | href: https://dash.cloudflare.com
7 | - Teleport:
8 | - icon: mdi-hub #teleport.png
9 | href: https://cl0ckwerk.teleport.sh
10 | - Twingate:
11 | - icon: mdi-gate #twingate.png
12 | href: https://cl0ckwerk.twingate.com
13 | - Netbird:
14 | - icon: mdi-bird #https://netbird.io/_next/static/media/netbird-icon.00225e97.svg
15 | href: https://app.netbird.io
16 |
17 | - Repositories:
18 | - Homelab:
19 | - icon: mdi-github #si-github-#FFFFFF
20 | href: https://github.com/alprojects1
21 | - Boilerplates:
22 | - icon: mdi-github #si-github-#FFFFFF
23 | href: https://github.com/alprojects1/home-lab
24 | - Cheat-Sheets:
25 | - icon: mdi-github #si-github-#FFFFFF
26 | href: https://github.com/alprojects1/boilerplates
27 | - Scripts:
28 | - icon: mdi-github #si-github-#FFFFFF
29 | href: https://github.com/alprojects1/cheat-sheet
30 |
31 |
32 | #- Developer:
33 | # - Github:
34 | # - abbr: GH
35 | # href: https://github.com/
36 | #
37 | #- Social:
38 | # - Reddit:
39 | # - abbr: RE
40 | # href: https://reddit.com/
41 | #
42 | #- Entertainment:
43 | # - YouTube:
44 | # - abbr: YT
45 | # href: https://youtube.com/
46 |
--------------------------------------------------------------------------------
/homepage/docker_compose.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | homepage:
4 | image: ghcr.io/gethomepage/homepage:latest
5 | env_file:
6 | - .env
7 | container_name: homepage
8 | ports:
9 | - 3000:3000
10 | volumes:
11 | - /compose/homepage:/app/config
12 | - /compose/homepage:/app/public/images
13 | - /compose/homepage:/app/public/icons
14 | #- /var/run/docker.sock:/var/run/docker.sock
15 | environment:
16 | PUID: $PUID
17 | PGID: $PGID
18 | restart: unless-stopped
19 |
20 |
--------------------------------------------------------------------------------
/homepage/other_projects.md:
--------------------------------------------------------------------------------
1 | ## Other Front-ends:
2 |
3 | - ***Dashy: https://github.com/lissy93/dashy***
4 |
5 | - ***Fenrus: https://github.com/revenz/Fenrus***
6 |
7 | - ***Flame: https://github.com/pawelmalak/flame***
8 |
9 | - ***Heimdall: https://github.com/linuxserver/Heimdall***
10 |
11 | - ***Homarr: https://github.com/ajnart/homarr***
12 |
13 | - ***Homepage: https://github.com/gethomepage/homepage***
14 |
15 | - ***Homer: https://github.com/bastienwirtz/homer***
16 |
17 | - ***Organizr:https://github.com/causefx/Organizr***
18 |
--------------------------------------------------------------------------------
/homepage/settings.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | title: alprojects_dashboard
3 | theme: dark
4 | color: violet
5 | hideVersion: true
6 |
7 | layout:
8 | Infrastructure & Data:
9 | icon: mdi-server-#FFFFFF
10 | style: row
11 | columns: 4
12 | Service & Host:
13 | icon: mdi-room-service-#FFFFFF
14 | style: row
15 | columns: 4
16 | Identity & security:
17 | icon: mdi-security-#FFFFFF
18 | style: row
19 | columns: 4
20 | Automation & Iot:
21 | icon: mdi-cog-outline-#FFFFFF
22 | style: row
23 | columns: 6
24 | Content & Media:
25 | icon: mdi-movie-cog-#FFFFFF
26 | style: row
27 | columns: 4
28 |
29 | background:
30 | #image: https://images.unsplash.com/photo-1557682250-33bd709cbe85?q=80&w=2629&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D
31 | image: https://images.unsplash.com/photo-1557683316-973673baf926?q=80&w=2629&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D
32 | #image: https://images.unsplash.com/photo-1557683311-eac922347aa1?q=80&w=2629&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D
33 | blur: sm # Options: sm, "", md, xl...
34 | saturate: 80
35 | brightness: 100
36 | opacity: 75
37 |
38 | providers:
39 | openweathermap: openweathermapapikey
40 | weatherapi: weatherapiapikey
41 |
--------------------------------------------------------------------------------
/homepage/widgets.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # System resources
3 | - resources:
4 | cpu: true
5 | expanded: true
6 | memory: true
7 | disk: /
8 | cputemp: true
9 | tempmin: 0
10 | tempmax: 100
11 | uptime: true
12 | units: metric # Options: imperial or metric
13 | refresh: 3000 # Optional: in ms
14 | diskUnits: bytes # Option: bytes or bbytes
15 |
16 | #Storage resources
17 | #- resources:
18 | # label: Mnt
19 | # disk:
20 | # - /mnt/nfs-lvm
21 |
22 | # Search resources
23 | - search:
24 | provider: duckduckgo
25 | target: _blank
26 |
27 |
28 | # Date&Time resources
29 | - datetime:
30 | text_size: sm
31 | format:
32 | timeStyle: long
33 | dateStyle: long
34 |
--------------------------------------------------------------------------------
/htop/README.md:
--------------------------------------------------------------------------------
1 | ## Htop Wiki:
2 |
3 | **Htop (`short for Hisham's top`) is an interactive process viewer for Unix Based Systems, developed as a more user-friendly alternative to the top command under the GNU General Public License (`GPL`).** It provides a real-time, text-mode interface, with a comprehensive overview of system processes in a tabular format, which includes columns for process ID (`PID`), user, priority, nice value, virtual memory size, resident set size, shared memory size, state, CPU percentage, memory percentage, time, and command.
4 |
5 | **It was written by Hisham Muhammad in 2004; he was motivated by the desire for a better process viewer than the traditional top command, particularly in terms of user interaction and visual presentation. The program is written in the C programming language and utilizes the ncurses library to handle its interface.** Htop has a large user base and an active development community, and contributions are welcomed via its GitHub repository, where users can submit issues, request features, and contribute code.
6 |
7 | ## Key Features:
8 |
9 | - ***Ease of Use*** - Htop provides a user-friendly interface that simplifies the process of monitoring system resources. Users can easily navigate through processes and system metrics using keyboard shortcuts.
10 |
11 | - ***Real-Time Monitoring*** - Htop offers real-time updates on CPU, memory, and swap usage, giving users an instant overview of their system's performance.
12 |
13 | - ***Process Management*** - Allows users to manage processes directly from the interface, including killing, renicing, and tracing processes without needing their PIDs.
14 |
15 | - ***Customization*** - Users can customize the display, including sorting processes, filtering results, and adjusting the layout to suit their preferences.
16 |
17 | ## Best Practices:
18 |
19 | - ***Monitor Critical Processes*** - Keep an eye on critical system processes to maintain optimal system performance and quickly address any issues.
20 | - ***Customize Layout*** - Tailor the Htop interface to highlight the most important metrics for your specific use case.
21 | - ***Use Keyboard Shortcuts*** - Familiarize yourself with Htop's keyboard shortcuts to efficiently navigate and manage processes.
22 | - ***Automate Monitoring*** - Integrate Htop with scripts or monitoring tools to automate the tracking of system performance metrics.
23 |
24 | ##
25 | > Because system monitoring interfaces are not standardized among Unix-like operating systems, much of htop's code must be rewritten for each operating system. Cross-platform OpenBSD, FreeBSD and Mac OS X support was added in htop 2.0. Solaris/Illumos/OpenIndiana support was added in 2.2.0. htop was forked by several developers as htop-dev, and with support from the original author, the homepage was later redirected to a new domain. I am on version 3.3.4 as of writing.
26 |
--------------------------------------------------------------------------------
/htop/Screenshot_20240730_233312.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alprojects1/home-lab/cbc1381d77b0c56ccac6ddb8f21576e6af77427c/htop/Screenshot_20240730_233312.png
--------------------------------------------------------------------------------
/htop/source_build.md:
--------------------------------------------------------------------------------
1 | ***htop/lm-sensors + Compile + Path + pkgs/dependencies***
2 |
3 | ```sh
4 | 1) apt-get install git libncursesw5-dev autotools-dev autoconf libtool pkg-config build-essential lm-sensors libsensors-dev -y **Debian based distros**
5 | 2) dnf install git lm_sensors-devel autoconf automake libtool ncurses-devel gcc make -y **RHEL based distros**
6 | 3) dnf groupinstall "Development Tools" -y **for RHEL, SUSE distros, comprehensive set of development tools and libraries**
7 | 4) git clone https://github.com/htop-dev/htop.git **Repo**
8 | 5) cd htop **ls -lah always, get used to structures**
9 | 6) ./autogen.sh **runs autotools commands, generates configuration scripts, checks dependencies**
10 | 7) ./configure **system Inspection, generate makefiles, set configuration options**
11 | 8) make -j$(nproc) **dynamically fetches cores**
12 | 9) make install
13 | 10) ls -l /usr/local/bin/ **should see htop**
14 | 11) echo $PATH **noticed /usr/local/bin/ missing**
15 | 12) echo 'export PATH=/usr/local/bin:$PATH' >> ~/.profile **add path**
16 | 13) source ~/.profile
17 | 14) htop --version **htop 3.4.0-dev-3.3.0-141-g58efa4e**
18 | 15) htop
19 | 16) cp ~/.config/htop/htoprc ~/.config/htop/htoprc.bak **backup config**
20 | ```
--------------------------------------------------------------------------------
/hypervisor/node-cluster/README.md:
--------------------------------------------------------------------------------
1 | ## Quorum/Ceph Wiki:
2 |
3 | **Setting up Ceph distributed storage typically requires a minimum of three nodes to achieve redundancy and fault tolerance. However, for a production-ready Ceph cluster, having more nodes is generally recommended to ensure better performance and higher availability.** With only three nodes, you can still set up a Ceph cluster, but it may not provide the optimal performance and fault tolerance that a larger cluster would offer. In a three-node setup, the failure of one node can still lead to a situation where the cluster is at risk if another node fails before the first one is restored.
4 |
5 | ***Corosync Quorum Device*** (`QDevice`) is a daemon which runs on each cluster node. It provides a configured number of votes to the cluster’s quorum subsystem, based on an externally running third-party arbitrator’s decision. **Its primary use is to allow a cluster to sustain more node failures than standard quorum rules allow.** This can be done safely as the external device can see all nodes and thus choose only one set of nodes to give its vote. This will only be done if said set of nodes can have quorum (`again`) after receiving the third-party vote. Currently, **only QDevice Net is supported as a third-party arbitrator. This is a daemon which provides a vote to a cluster partition, if it can reach the partition members over the network.** It will only give votes to one partition of a cluster at any time. It’s designed to support multiple clusters and is almost configuration and state free. New clusters are handled dynamically and no configuration file is needed on the host running a QDevice.
6 |
7 | ***Replication and Redundancy*** With three nodes, you can set the replication factor to 3 (each piece of data is stored on all three nodes), ensuring that you can lose one node and still have all your data available (`QNAP also does this`). However, this setup limits your usable storage capacity to the size of a single node since all data is replicated across all nodes. Quorum: Ceph relies on a quorum of monitors (`MONs`) to make cluster decisions. With three monitors (`one on each node`), the loss of one node still allows the remaining two to achieve quorum and maintain cluster operations.
8 |
9 | - ***Failure Scenarios*** - If one node fails, the remaining two nodes can still serve data, but the cluster will be in a degraded state. If another node fails before the first one is repaired and brought back online, you could potentially lose data. Performance may be impacted in a three-node setup due to the high level of replication and the limited number of nodes to handle the workload.
10 |
11 | ## HA Storage Solution: (Two Nodes)
12 |
13 | - ***Achieve a high-availability (`HA`) storage solution*** - With a two-node setup, we propose creating a custom storage architecture leveraging JBOD configurations with `SSDs` and `NFS/ISCSI` connections. This setup aims to deliver solid performance and reliability while maintaining high availability.
14 |
15 | - ***Configuration*** - Each node will be configured with a `JBOD` (`Just a Bunch of Disks`) setup utilizing SSDs to form a high-performance storage pool. Each node has an isolated path to the `SAN` via `NFS` 4.1, and the nodes have a 1G connection between each other. This configuration allows each node to access the other’s storage over NFS, ensuring data accessibility and redundancy. A separate network interface card (`NIC`) and IP subnet is dedicated to the traffic, as mentioned above. This dedicated link will isolate storage traffic from regular network operations, thereby optimizing performance and reliability.
16 |
17 | - ***Implementation*** - To achieve data synchronization between the nodes, we will implement block-level replication. `DRBD` (`Distributed Replicated Block Device`) will be used to mirror data in real-time, ensuring data consistency and availability. HA will be managed using Corosync and QDevice (`a daemon that acts as the 3rd vote`). These tools will oversee failover mechanisms and resource management, ensuring seamless operation even in the event of a node failure.
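A minimal DRBD resource sketch for the mirrored block device, assuming hostnames `node-a`/`node-b`, a backing disk `/dev/sdb1`, and the dedicated replication-link IPs from the example above; treat all of these as placeholders:

```sh
# /etc/drbd.d/r0.res (identical on both nodes; names, disks, and IPs are examples)
resource r0 {
    device    /dev/drbd0;
    disk      /dev/sdb1;
    meta-disk internal;
    on node-a { address 10.10.10.1:7788; }
    on node-b { address 10.10.10.2:7788; }
}

# initialize on both nodes, then promote one side
drbdadm create-md r0 && drbdadm up r0
drbdadm primary --force r0    # on the initial primary only
```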
18 |
19 | ##
20 | > You must meet the following requirements before you start with `HA`: 1. at least three cluster nodes (`to get reliable quorum`), 2. shared storage for `VMs` and containers, 3. hardware redundancy (`everywhere`), 4. reliable “server” components, 5. a hardware watchdog - if not available, we fall back to the Linux kernel software watchdog (`softdog`), and 6. optional hardware fencing devices. One last thing: the external host needs network access to the cluster and the corosync-qnetd package available.
21 |
--------------------------------------------------------------------------------
/hypervisor/node-cluster/storage_hw_conf.md:
--------------------------------------------------------------------------------
1 | ### P440-HW-RAID 0 on both NODES
2 |
3 | ```sh
4 | 3X - 300GB SAS 10K 60$ (Model:EG0300JFCKA - SN: 6C364910K4) - Raid 0 (P440ar) > Strip Size = 128KB/128KB > Sectors =63 > Caching enabled
5 | 2X 256GB m.SSD in RAID 1 - this will ensure data is protected in cache in case something happens while moving data on the RAID 0 volumes
6 | Created a QEMU VM, Bios SeaBIOS, Machine Type q35 > created a 52GB partition for the VM > loaded
7 | Partition 1: 1MB, BIOS boot area - Partition 2: 300MB - FAT32, /boot/efi - Partition 3: 3GB, swap - Partition 4: 1GB, ext4, /boot - Partition 5: 52GB, XFS, /
8 |
--------------------------------------------------------------------------------
/hypervisor/proxmox/README.md:
--------------------------------------------------------------------------------
1 | ## Proxmox Wiki:
2 |
3 | **Proxmox supports both KVM virtualization and LXC Linux containers,** giving you flexibility in workload types. The web-based management interface simplifies creating and monitoring VMs and containers. **Although not as mature as ESXi, Proxmox offers live migration, high availability, and good performance for the price.**
4 |
5 | **Being open source, Proxmox is free to use and modify. However, support and advanced features still require paid enterprise subscriptions. Proxmox may also require more hands-on management and has a smaller ecosystem than the VMware stack.** For some, the open-source flexibility outweighs these potential downsides.
6 |
7 | **To put it simply, ESXi and Proxmox have distinct approaches to virtualization that cater to different requirements. ESXi is a solution designed for large and complex deployments in an enterprise setting.** Meanwhile, Proxmox is an open-source alternative that suits smaller workloads but can also be configured for enterprise use. When determining which virtualization platform is right for your needs, it’s important to consider the system requirements for ESXi and Proxmox. Both platforms have minimum hardware requirements to function properly, but ESXi typically demands more robust hardware.
8 |
9 | ## ESXi vs Proxmox: System Requirements:
10 |
11 | - ***Esxi Requirements***
12 |
13 | - ESXi needs at least two CPU cores and a minimum of 8 GB of physical RAM for installation. For production use, VMware recommends using a host with at least four CPU cores and 32 GB of memory.
14 |
15 | - ESXi also requires a minimum of 5 GB of disk space for the boot partition, though at least 32 GB of disk space is recommended.
16 |
17 | - ESXi has extensive hardware compatibility lists that detail which components, like network cards, storage controllers and hard drives, are officially supported. Using unsupported hardware can lead to stability and performance issues.
18 |
19 | - ESXi is also designed to run on server-grade hardware, so it may not function properly on desktop hardware.
20 |
21 | - ***Proxmox Requirements***
22 |
23 | - Proxmox VE requires a minimum of 2 GB memory for the OS and services, with additional memory for guests. It recommends at least two CPU cores and 8 GB of RAM for testing and development, but for production, it suggests four CPU cores and 32 GB of memory or more for good performance.
24 |
25 | - Proxmox is open source and built on Debian Linux, so it has more lenient hardware requirements and works with a wider range of components. However, for the best performance and stability, Proxmox still recommends using components from its hardware compatibility list when possible.
26 |
27 | - Proxmox can run on both server and desktop hardware, providing more flexibility.
28 |
29 | ## Key Differences Between ESXi and Proxmox:
30 |
31 | - ***Cost and Licensing***
32 | - ESXi is a commercial solution developed by VMware that requires paid licensing for enterprise features and support. Licensing fees for ESXi can be quite significant, especially for large deployments.
33 | - Proxmox, on the other hand, is an open-source virtualization platform with no licensing costs. This makes Proxmox an attractive option if budget is a primary concern.
34 |
35 | - ***Virtualization Technologies***
36 |
37 | - ESXi only supports VMware’s proprietary virtualization format, .vmdk. Proxmox supports the open virtualization formats .qcow2 and .vdi in addition to .vmdk. More importantly, Proxmox allows you to choose between KVM virtualization and LXC containers, depending on your needs. KVM provides full virtualization for running multiple operating systems, while LXC offers lightweight virtualization ideal for Linux containers. This versatility makes Proxmox suitable for a wider range of workloads.
38 |
39 | - ***Virtualization Difference***
40 |
41 | - For many, the decision comes down to available resources and technical expertise. ESXi provides an extensive ecosystem but requires substantial investment. Proxmox offers a more lightweight, cost-effective solution with a learning curve suitable for smaller teams.
42 |
43 | - ***Scalability and High Availability***
44 | - ESXi edges out Proxmox when it comes to enterprise-level scalability and high availability. Features like vMotion, vSphere HA, and DRS allow you to seamlessly migrate VMs between hosts, provide automatic failover, and optimize resource allocation. While Proxmox does support live migration and basic high availability, its capabilities are more limited.
45 |
46 | - ***Management Interface***
47 | - Both ESXi and Proxmox offer web-based management interfaces to easily configure and monitor your virtual infrastructure. ESXi’s vSphere Client interface is more sophisticated but complex requiring a steeper learning curve. Proxmox’s interface is very intuitive, making it ideal if you prefer a simple, user-friendly management experience.
48 |
49 | ##
50 | > ESXi and Proxmox have some similar capabilities, but there are some key differences that you should consider based on your priorities and virtualization needs.
51 |
--------------------------------------------------------------------------------
/kestra/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/lm-sensor/README.md:
--------------------------------------------------------------------------------
1 | ## Lm-Sensors:
2 |
3 | **lm-sensors (`Linux-monitoring sensors`) is a free, open-source, powerful, and versatile software tool used for monitoring the temperature, voltage, humidity, chassis intrusions, and fan speeds of various hardware components in Linux systems.** It interacts with sensor chips built into motherboards and provides real-time data about system health, which can be crucial for diagnosing hardware issues and ensuring optimal performance.
4 |
5 | **lm-sensors was originally created by Rudolf Marek and has been maintained and developed by various contributors over time.** The project is now managed by the lm-sensors development team, which includes volunteers and contributors from the open-source community. The tool is part of the Hardware Monitoring (`hwmon`) subsystem in the Linux kernel, and its development has involved contributions from numerous individuals who have worked on sensor chip support, kernel modules, and user-space utilities.
6 |
7 | **The earlier ThinkPad issue (`see the note at the bottom of this page`) has since been dealt with, and the separate README file dedicated to ThinkPads was removed in 2007. In 2013, the sensors-detect command of lm-sensors began disrupting the gamma correction settings of some laptop display screens.** This occurs while it is probing the (`I2C/SMBus`) adapters for connected hardware monitoring devices. Probing of these devices was subsequently disabled by default.
8 |
9 | ## Key Features:
10 |
11 | - ***Hardware Monitoring*** - lm-sensors provides real-time monitoring of hardware sensors, including temperature, voltage, and fan speeds.
12 | - ***Compatibility*** - Supports a wide range of hardware, making it versatile for different systems.
13 | - ***Alerting*** - Allows users to set thresholds for sensors and receive alerts when these are breached.
14 | - ***Logging*** - Capable of logging sensor data for long-term monitoring and analysis.
15 | - ***Integration*** - Can be integrated with other monitoring tools like `Grafana` and `Prometheus` for enhanced visualization and analysis.
16 |
17 | ## Best Practice:
18 |
19 | - ***Regularly Update lm-sensors*** - Keep lm-sensors and its database updated to ensure compatibility with new hardware, unless compiled from source
20 | - ***Initial Configuration*** - Run the initial configuration script to detect all available sensors. ■ `Command: sudo sensors-detect`
21 | - ***Check Sensor Readings*** - Regularly check sensor readings to monitor hardware health. ■ `Command: sensors`
22 | - ***Set Alerts*** - Configure alert thresholds for critical sensors to receive notifications of potential issues (`see the sketch after this list`).
23 | - ***Automate Monitoring*** - Integrate lm-sensors with other monitoring tools to automate the tracking of hardware metrics.
24 | - ***Document Configuration*** - Keep a record of sensor configurations and any customizations for easy replication and troubleshooting.
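Following up on the ***Set Alerts*** item, a minimal sketch of nudging a limit through `/etc/sensors3.conf`; the chip name and threshold are examples (check `sensors -u` for yours), and not every driver exposes writable limits:

```sh
# append to /etc/sensors3.conf (chip name and value are examples)
chip "coretemp-isa-0000"
    set temp1_max 85

# apply the `set` statements from the config, then confirm the new limit
sensors -s
sensors
```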
25 |
26 |
27 | > During 2001/2004, the lm_sensors package was not recommended for use on IBM ThinkPads due to potential (`EEPROM`) corruption issues on some models when aggressively probing for (`I2C`) devices.
28 |
--------------------------------------------------------------------------------
/lm-sensor/format_sensors.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # lm sensor power draw & sensor info parsed data
4 |
5 | printf "%-15s" "Sensor" #keep it at 15, started from 20
6 |
7 | for i in {0..15}; do
8 |
9 | printf "%-8s" "Core $i" # Adjusted to 8 for spacing between each core header
10 |
11 | done
12 |
13 | echo
14 |
15 | # Initialize arrays to hold the temperature values and sensor details
16 |
17 | declare -A core_temp
18 |
19 | declare -A sensor_details
20 |
21 | # Parse the sensors output and fill the arrays
22 |
23 | while IFS= read -r line; do
24 |
25 | if [[ "$line" =~ ^Core ]]; then
26 |
27 | core_number=$(echo "$line" | awk '{print $2}' | cut -d":" -f1)
28 |
29 | temp=$(echo "$line" | awk '{print $3}')
30 |
31 | core_temp["Core$core_number"]=$temp
32 |
33 | elif [[ "$line" =~ ^Package ]]; then
34 |
35 | package_number=$(echo "$line" | awk '{print $3}' | cut -d":" -f1) # strip the trailing ":" from lines like "Package id 0:"
36 |
37 | temp=$(echo "$line" | awk '{print $4}')
38 |
39 | sensor_details["Package$package_number"]=$temp
40 |
41 | elif [[ "$line" =~ power1 ]]; then
42 |
43 | power=$(echo "$line" | awk '{print $2, $3}')
44 |
45 | sensor_details["Power Draw"]=$power
46 |
47 | elif [[ "$line" =~ ^Adapter ]]; then
48 |
49 | sensor_name=$(echo "$line" | awk '{print $2}')
50 |
51 | sensor_details["Adapter"]=$sensor_name
52 |
53 | fi
54 |
55 | done < <(sensors)
56 |
57 |
58 |
59 | # Ensure all cores from 0 to 15 are initialized
60 |
61 | for i in {0..15}; do
62 |
63 | if [ -z "${core_temp[Core$i]}" ]; then
64 |
65 | core_temp["Core$i"]="N/A"
66 |
67 | fi
68 |
69 | done
70 |
71 | # Print Core temperatures with proper formatting
72 |
73 | printf "%-15s" "Cores"
74 |
75 | for i in {0..15}; do
76 |
77 | printf "%-9s" "${core_temp[Core$i]}" # Adjusted to 9 for spacing between each core value
78 |
79 | done
80 |
81 | echo
82 |
83 | # Print sensor details with proper formatting
84 |
85 | for key in "${!sensor_details[@]}"; do
86 |
87 | printf "%-15s" "$key"
88 |
89 | printf "%s\n" "${sensor_details[$key]}"
90 |
91 | done
92 |
--------------------------------------------------------------------------------
/lm-sensor/source_build.md:
--------------------------------------------------------------------------------
1 | ***Lm-sensors Compile + Path + pkgs/dependencies***
2 |
3 | ```sh
4 | 1) apt install git build-essential libtool autotools-dev autoconf automake lm-sensors bison flex libc-bin -y **Debian distros**
5 | 2) dnf install git gcc make libtool autoconf automake lm_sensors -y **RHEL distros**
6 | 3) dnf groupinstall "Development Tools" -y **comprehensive but more for development**
7 | 4) git clone https://github.com/lm-sensors/lm-sensors.git **repo**
8 | 5) cd lm-sensors
9 | 6) export PATH=/usr/local/bin:$PATH
10 | 7) make -j6 **./autogen.sh or ./configure is not needed, project has a simplified build process compared to other projects**
11 | 8) make install
12 | 9) ldconfig **library cache update**
13 | 10) sensors-detect **select properly**
14 | 11) sensors -v
15 | 12) nano /scripts/format_sensors.sh **please see .sh script**
16 | 13) chmod +x /scripts/format_sensors.sh
17 | 14) watch -n2 /scripts/format_sensors.sh **should see landscape format**
18 |
--------------------------------------------------------------------------------
/netbird/README.md:
--------------------------------------------------------------------------------
1 | ## Netbird Wiki:
2 |
3 | **NetBird is an open-source, peer-to-peer Virtual Private Network (VPN) solution designed for easy deployment and secure networking across various environments. Built to enable secure, encrypted communication between devices, NetBird is ideal for both personal and enterprise use.**
4 |
5 | **It supports NAT traversal, ensuring seamless connectivity even in complex network environments.** NetBird is written in Go and is licensed under the **MIT License, which allows for wide customization and community contributions**.
6 |
7 | **NetBird offers the simplicity of managing secure, scalable VPN connections without requiring a central server, making it a robust option for modern networking needs**. Its decentralized architecture ensures that no single point of failure exists, enhancing the resilience and reliability of the network. **Additionally, NetBird's lightweight design allows for minimal resource usage, making it suitable for use on a wide range of devices, from high-performance servers to low-power edge devices**.
8 |
9 | ## Security & Compliance:
10 |
11 | - ***End-to-End Encryption*** - NetBird employs advanced encryption protocols, such as WireGuard, to ensure that all communication between devices is fully encrypted. This robust encryption standard protects data from being intercepted or tampered with during transmission, ensuring confidentiality and integrity across the network.
12 | - ***Compliance Assistance*** - NetBird’s secure communication framework aids organizations in meeting stringent compliance requirements, including (`GDPR`) and (`HIPAA`). By securing all data transfers and network communications, NetBird helps organizations prevent data breaches, thus avoiding potential regulatory fines and ensuring that sensitive information is handled in accordance with legal standards.
13 | - ***Decentralized Security*** - NetBird’s peer-to-peer architecture reduces reliance on centralized servers, minimizing potential vulnerabilities and attack vectors. This decentralized approach aligns with modern security best practices, ensuring that there is no single point of failure that could be exploited by attackers.
14 |
15 | ## Important Note:
16 | - (`Do NOT`) expose NetBird keys and configurations publicly. Ensure that all authentication keys are securely stored and that access is restricted to authorized users only. Misconfiguration or key leakage can lead to unauthorized access and compromise of secure communications.
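A minimal sketch of joining a peer with a setup key while keeping the key itself out of scripts and shell history; the install-script URL follows NetBird's published quick-start and should be verified against the current docs:

```sh
# install the agent (verify the URL against NetBird's current documentation)
curl -fsSL https://pkgs.netbird.io/install.sh | sh

# join the network with a setup key stored in an environment variable, not hard-coded
netbird up --setup-key "$NETBIRD_SETUP_KEY"
netbird status
```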
17 |
18 | ## Key Features:
19 |
20 | - ***Peer-to-Peer Connectivity*** - Ditch the central server—NetBird connects devices directly for streamlined communication.
21 | - ***NAT Traversal*** - Makes connecting across different network setups (even with firewalls and NAT) a breeze.
22 | - ***Easy Deployment*** - Quick setup that gets your secure VPN up and running fast.
23 | - ***Cross-Platform Support*** - Runs on Linux, Windows, and macOS, covering all your bases.
24 |
25 | ## Best Practices:
26 |
27 | - ***Lock Down Keys*** - Keep authentication keys on lockdown—only trusted users should have access.
28 | - ***Stay Updated*** - Regularly update NetBird to keep security tight and enjoy the latest features.
29 | - ***Segment Wisely*** - Pair NetBird with network segmentation to up your security game.
30 | - ***Keep an Eye Out*** - Regularly check logs and performance to catch and address any issues early.
31 |
32 | ##
33 | > One cool thing, like I mentioned earlier: NetBird’s NAT traversal feature lets devices behind firewalls or NAT connect smoothly without needing complicated setups. Even in tough network environments, it makes secure peer-to-peer connections a snap, making NetBird a seriously adaptable VPN solution.
34 |
--------------------------------------------------------------------------------
/pi-hole/Pi-Hole.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alprojects1/home-lab/cbc1381d77b0c56ccac6ddb8f21576e6af77427c/pi-hole/Pi-Hole.png
--------------------------------------------------------------------------------
/pi-hole/README.md:
--------------------------------------------------------------------------------
1 | ## Pi-hole Wiki:
2 |
3 | **Pi-hole is a network-wide ad blocker that operates as a DNS sinkhole to protect devices from unwanted content without the need to install client-side software. It was created by Jacob Salmela in 2014 as an open-source alternative to AdTrap**. The project quickly gained popularity and now has a large and active community of contributors.
4 |
5 | **It’s an advertising-aware DNS/Web server** that makes use of the following technologies: `dnsmasq` - a lightweight DNS and DHCP server, `curl` - a command-line tool for transferring data with URL syntax, `lighttpd` - a web server designed and optimized for high performance, `php` - a popular general-purpose web scripting language, `AdminLTE Dashboard` - a premium admin control panel based on Bootstrap 3, `sqlite3` - SQL database engine.
6 |
7 | **It functions by intercepting DNS requests and checking them against a list of known advertising and tracking domains. If a match is found, Pi-hole blocks the request, preventing ads from being downloaded.** This approach differs from traditional ad blockers, which typically run as browser extensions and only hide ads after they have been downloaded.
8 |
9 | Pi-hole is highly configurable and **supports custom block and allow lists. It can be installed on various platforms, including Raspberry Pi,** which makes it a popular choice for home networks. The software uses a modified version of dnsmasq called `FTLDNS` to handle DNS queries and blocking. **It can act as a forwarding DNS server (`by default`) or be configured to act as a recursive resolver.**
10 |
11 | ## Key Features:
12 |
13 | - ***Network-wide Ad Blocking*** - Pi-hole blocks ads for all devices on the network by acting as a `DNS` server. This includes ads on websites, as well as in apps and on smart `TVs`.
14 | - ***Custom Block/Allow Lists*** - Users can configure their own lists of domains to block or allow, providing fine-grained control over content.
15 | - ***Web Interface*** - Pi-hole includes a web-based interface for monitoring `DNS` queries, configuring settings, and viewing statistics.
16 | - ***Low Resource Usage*** - Designed to run on low-power devices like the Raspberry Pi, Pi-hole is efficient and requires minimal resources.
17 | - ***Privacy Protection*** - By blocking trackers and ad domains, Pi-hole enhances user privacy by preventing data collection from various sources.
18 |
19 | ## Best Practices:
20 |
21 | - When working with Pi-hole, it’s crucial to follow best practices to ensure security, efficiency, and maintainability. Best practices are essential for maintaining a secure, efficient, and reliable network-wide ad blocker environment. Here are the key practices:
22 |
23 | - ***Configure DNS Settings Properly*** -
24 | Ensure that your Pi-hole is configured to use reliable and privacy-respecting upstream DNS servers. Consider using `DNS` over `HTTPS` (DoH) or `DNS` over `TLS` (DoT) for added privacy.
25 |
26 | - ***Enable Query Logging and Privacy Levels*** -
27 | Enable query logging to monitor DNS requests and configure the appropriate privacy level to balance logging detail with user privacy. `Command: pihole -a logging`
28 |
29 | - ***Implement Custom Block and Allow Lists*** -
30 | Regularly update and customize your block lists to improve ad blocking effectiveness. Maintain a whitelist for domains that should always be allowed.
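A quick sketch of the CLI side of list management on a Pi-hole v5-style install (newer releases move to `pihole allow` / `pihole deny`); the domains are placeholders:

```sh
pihole -w cdn.example.com          # allow (whitelist) a domain that keeps breaking
pihole -b telemetry.example.net    # block (blacklist) a domain
pihole -g                          # rebuild gravity after changing adlists
```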
31 |
32 | ## Standard Pi-hole Process:
33 |
34 | - ***Client asks Pi-hole: Who is pi-hole.net?***
35 | - ***Pi-hole checks its cache and replies if the answer is already known.***
36 | - ***Pi-hole checks the blocking lists and replies if the domain is blocked.***
37 | - ***If it can't answer locally, Pi-hole forwards the request to the external upstream DNS server.***
38 | - ***Pi-hole will reply to the client and tell it the answer to its request once it has it from upstream.***
39 | - ***Pi-hole will save the answer in cache to be able to respond faster if any client queries the same domain again.***
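A hedged way to watch that flow from a client; replace the IP with your Pi-hole's address, and note the blocked answer depends on the blocking mode (the default NULL mode returns 0.0.0.0):

```sh
dig pi-hole.net @192.168.1.2 +short        # forwarded or cached: a real IP comes back
dig doubleclick.net @192.168.1.2 +short    # on a blocklist: typically 0.0.0.0
```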
40 |
41 | ##
42 | > Pi-hole is widely used in home networks to block ads and protect privacy across all devices. It is also employed in small to medium office environments to reduce bandwidth usage and improve browsing speeds by blocking unwanted content at the network level.
43 |
--------------------------------------------------------------------------------
/pi-hole/docker/README.md:
--------------------------------------------------------------------------------
1 | ## Docker DHCP & Network Modes:
2 |
3 | Docker runs in a **separate network by default called a docker bridge network, making DHCP serve addresses to that network instead of your LAN.** This guide explains why Docker Pi-hole DHCP differs from normal Pi-hole and how to address this issue.
4 |
5 | - Technical Details
6 |
7 | **Docker's bridge network mode is the default and is recommended as a more secure setting for containers because Docker is all about isolation: it isolates processes by default, and the bridge network isolates the networking by default too.** You gain access to the isolated container's service ports by using port forwards in your container's runtime config; for example -p 67:67 is DHCP. However, **the DHCP protocol operates through a network 'broadcast' which cannot span multiple networks (docker's bridge, and your LAN network).** In order to get DHCP on to your network there are a few approaches:
8 |
9 | - Working Network Modes
10 |
11 | - Host Networking Mode: Simple and fast setup. It makes the container be on your LAN network, allowing it to broadcast DHCP.
12 | - Macvlan Network: Advanced setup. This mode grabs a new IP address off your LAN network, solving the broadcast problem and avoiding port conflicts.
13 | - Bridge Networking: Requires a DHCP relay to spread the broadcast signal from an isolated docker bridge to your LAN network.
14 |
15 | ## Docker Pi-hole with Host Networking:
16 |
17 | - **Advantages**: Simple, easy, and fast setup
18 | - **Possibly the simplest way to get DHCP working with Docker Pi-hole is to use host networking which makes the container be on your LAN Network like a regular Raspberry Pi-hole would be, allowing it to broadcast DHCP.** It will have the same IP as your Docker host server in this mode so you may still have to deal with port conflicts.
19 | - **Inside your docker-compose.yml remove all ports and replace them with**:
20 | - `network_mode: host`
21 | - **If you don't use docker-compose**:
22 | - `docker run --net=host`
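For the non-compose route, a hedged `docker run` sketch using host networking; the image and the `TZ`/`WEBPASSWORD` variables follow the pihole/pihole image docs for v5-era tags (newer tags rename some variables), and the host paths are examples:

```sh
docker run -d --name pihole \
  --net=host \
  -e TZ=America/Toronto \
  -e WEBPASSWORD='changeme' \
  -v /opt/pihole/etc-pihole:/etc/pihole \
  -v /opt/pihole/etc-dnsmasq.d:/etc/dnsmasq.d \
  --restart unless-stopped \
  pihole/pihole:latest
```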
23 |
24 | ## Docker Pi-hole with Macvlan Networking:
25 | - Advantages: Works well with NAS devices or hard port conflicts
26 |
27 | - **A Macvlan network is the most advanced option since it requires more network knowledge and setup. This mode is similar to host network mode but instead of borrowing the IP of your docker host computer it grabs a new IP address off your LAN network.**
28 | Having the container get its own IP not only solves the broadcast problem but avoids port conflicts you might have on devices such as NAS devices with web interfaces. **Tony Lawrence detailed macvlan setup for Pi-hole first in the second part of his great blog series about Running Pi-hole on Synology Docker,** check it out here: Free your Synology ports with Macvlan
29 |
30 | ## Docker Pi-hole with Bridge Networking:
31 | - Advantages: Works well with container web reverse proxies like Nginx or Traefik
32 |
33 | - If you want to use docker's bridged network mode then you need to run a DHCP relay. **A relay points to your containers forwarded port 67 and spreads the broadcast signal from an isolated docker bridge onto your LAN network.** Relays are very simple software, **you just have to configure it to point to your Docker host's IP port 67. Although uncommon, if your router is an advanced enough router it may support a DHCP relay. Try googling for your router manufacturer + DHCP relay or looking in your router's configuration around the DHCP settings or advanced areas.** If your router doesn't support it, you can run a software/container based DHCP relay on your LAN instead. The author of dnsmasq made a very tiny simple one called DHCP-helper. DerFetzer kindly shared his great setup of a DHCP-helper container on the Pi-hole Discourse forums.
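A rough sketch of the relay approach using the dnsmasq author's DHCP-helper mentioned above; run it on a host that actually sits on the LAN, and treat the Docker host IP as a placeholder (check the flags against `dhcp-helper(8)`):

```sh
apt install dhcp-helper
dhcp-helper -s 192.168.1.10    # relay LAN DHCP broadcasts to the Docker host's forwarded port 67
```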
34 |
35 | ##
36 | >Warning about the Default Bridge Network
37 | The default bridge network has limitations that user-created bridge networks do not have. It is advisable to use a docker-compose setup to create a custom network automatically.
38 |
39 |
40 |
--------------------------------------------------------------------------------
/pi-hole/install_steps.md:
--------------------------------------------------------------------------------
1 | ***Pi-hole forwarding DNS***
2 |
3 | #### OS: Debian 12 (bookworm)
4 | #### environment: su
5 | #### Filter list site: https://firebog.net/
6 |
7 | ```sh
8 | 1) apt-get update && apt-get upgrade
9 | 2) curl -sSL https://install.pi-hole.net | bash **this will run you through a wizard**
10 | 3) configure to your needs **mine was more focused on privacy**
11 | 4) pihole -a -p **will get prompted to change password**
12 | 5) enter ip into browser >> gui >> adlist >> used the 2 most reasonable filters from firebog.net from each section
13 | 6) Save >> you now have a basic forwarding DNS server
14 |
--------------------------------------------------------------------------------
/pi-hole/unbound/README.md:
--------------------------------------------------------------------------------
1 | ## Unbound Wiki:
2 |
3 | **Pi-hole includes a caching and forwarding `DNS` server, now known as `FTLDNS`.** After applying the blocking lists, it forwards requests made by the clients to configured upstream DNS server. However, as has been mentioned by **several users in the past, this leads to some privacy concerns as it ultimately raises the question: Whom can you trust?** Recently, more and more small (`and not so small`) DNS upstream providers have appeared on the market, advertising free and private DNS service, but how can you know that they keep their promises? Right, you can't.
4 |
5 | **Furthermore, from the point of an attacker, the DNS servers of larger providers are very worthwhile targets, as they only need to poison one DNS server, but millions of users might be affected.** Instead of your bank's actual IP address, you could be sent to a phishing site hosted on some island. This scenario has already happened and it isn't unlikely to happen again. When you operate your own (tiny) recursive DNS server, then the likeliness of getting affected by such an attack is greatly reduced.
6 |
7 | ## What is a recursive DNS server:
8 |
9 | - **The first distinction we have to be aware of is whether a DNS server is authoritative or not.** If I'm the authoritative server for, e.g., pi-hole.net, then I know which IP is the correct answer for a query. Recursive name servers, in contrast, resolve any query they receive by consulting the servers authoritative for this query by traversing the domain. ***Example:*** We want to resolve pi-hole.net. On behalf of the client, the recursive DNS server will traverse the path of the domain across the Internet to deliver the answer to the question.
10 |
11 | - You can easily imagine even longer chains for subdomains as the query process continues until your recursive resolver reaches the authoritative server for the zone that contains the queried domain name. **It is obvious that the methods are very different and the own recursion is more involved than "just" asking some upstream server.** This has benefits and drawbacks:
12 |
13 | ■ ***Benefit*** - Privacy - as you're directly contacting the responsible servers, no server can fully log the exact paths you're going, as e.g. the Google DNS servers will only be asked if you want to visit a Google website, but not if you visit the website of your favorite newspaper, etc.
14 |
15 | ■ ***Drawback*** - Traversing the path may be slow, especially for the first time you visit a website - while the bigger DNS providers always have answers for commonly used domains in their cache, you will have to traverse the path if you visit a page for the first time. The first request to a formerly unknown TLD may take up to a second (or even more if you're also using DNSSEC). Subsequent requests to domains under the same TLD usually complete in < 0.1s. Fortunately, both your Pi-hole as well as your recursive server will be configured for efficient caching to minimize the number of queries that will actually have to be performed.
16 |
17 | ## Pihole & Unbound Process:
18 |
19 | - ***Client asks the Pi-hole: Who is pi-hole.net?***
20 | - ***Pi-hole checks its cache and replies if the answer is already known.***
21 | - ***Pi-hole checks the blocking lists and replies if the domain is blocked.***
22 | - ***If it can't answer locally, Pi-hole delegates the request to the (local) recursive DNS resolver.***
23 | - ***Recursive server will send a query to the DNS root servers: "Who is handling .net?"***
24 | - ***Root server answers with a referral to the TLD servers for .net.***
25 | - ***Recursive server will send a query to one of the TLD DNS servers for .net: "Who is handling pi-hole.net?"***
26 | - ***TLD server answers with a referral to the authoritative name servers for pi-hole.net.***
27 | - ***Recursive server will send a query to the authoritative name servers: "What is the IP of pi-hole.net?"***
28 | - ***The authoritative server will answer with the IP address of the domain pi-hole.net.***
29 | - ***Recursive server will send the reply to your Pi-hole which will, in turn, reply to your client and tell it the answer to its request.***
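To watch the recursion path yourself, a couple of hedged checks from the Pi-hole/unbound host (the port matches the unbound install steps in this repo):

```sh
dig +trace pi-hole.net                # root -> .net TLD -> authoritative servers
dig pi-hole.net @127.0.0.1 -p 5335    # same answer, now served by the local unbound
```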
30 |
--------------------------------------------------------------------------------
/pi-hole/unbound/install_steps.md:
--------------------------------------------------------------------------------
1 | ***Unbound recursive DNS***
2 |
3 | ```sh
4 | 1) apt install unbound
5 | 2) nano /etc/unbound/unbound.conf.d/pi-hole.conf **unbound_config.md**
6 | 3) service unbound restart
7 | 4) dig pi-hole.net @127.0.0.1 -p 5335
8 | 5) dig dnssec.works @127.0.0.1 -p 5335 **authority flag should be 1**
9 | 6) enter ip into browser >> gui >> settings >> dns >> in the "Upstream DNS Servers" check "Custom 1 (IPv4)"
10 | 7) entered 127.0.0.1#5335 into the empty box
11 | 8) below that section you will see "Potentially dangerous options" >> bind it to your nic >> in my case "Respond only on interface eth0"
12 |
--------------------------------------------------------------------------------
/pi-hole/unbound/unbound_config.md:
--------------------------------------------------------------------------------
1 | ```sh
2 | server:
3 | # If no logfile is specified, syslog is used
4 | # logfile: "/var/log/unbound/unbound.log"
5 | verbosity: 0
6 |
7 | interface: 127.0.0.1
8 | port: 5335
9 | do-ip4: yes
10 | do-udp: yes
11 | do-tcp: yes
12 |
13 | # May be set to yes if you have IPv6 connectivity
14 | do-ip6: no
15 |
16 | # You want to leave this to no unless you have *native* IPv6. With 6to4 and
17 | # Terredo tunnels your web browser should favor IPv4 for the same reasons
18 | prefer-ip6: no
19 |
20 | # Use this only when you downloaded the list of primary root servers!
21 | # If you use the default dns-root-data package, unbound will find it automatically
22 | #root-hints: "/var/lib/unbound/root.hints"
23 |
24 | # Trust glue only if it is within the server's authority
25 | harden-glue: yes
26 |
27 | # Require DNSSEC data for trust-anchored zones, if such data is absent, the zone becomes BOGUS
28 | harden-dnssec-stripped: yes
29 |
30 | # Don't use Capitalization randomization as it known to cause DNSSEC issues sometimes
31 | # see https://discourse.pi-hole.net/t/unbound-stubby-or-dnscrypt-proxy/9378 for further details
32 | use-caps-for-id: no
33 |
34 | # Reduce EDNS reassembly buffer size.
35 | # IP fragmentation is unreliable on the Internet today, and can cause
36 | # transmission failures when large DNS messages are sent via UDP. Even
37 | # when fragmentation does work, it may not be secure; it is theoretically
38 | # possible to spoof parts of a fragmented DNS message, without easy
39 | # detection at the receiving end. Recently, there was an excellent study
40 | # >>> Defragmenting DNS - Determining the optimal maximum UDP response size for DNS <<<
41 | # by Axel Koolhaas, and Tjeerd Slokker (https://indico.dns-oarc.net/event/36/contributions/776/)
42 | # in collaboration with NLnet Labs explored DNS using real world data from the
43 | # the RIPE Atlas probes and the researchers suggested different values for
44 | # IPv4 and IPv6 and in different scenarios. They advise that servers should
45 | # be configured to limit DNS messages sent over UDP to a size that will not
46 | # trigger fragmentation on typical network links. DNS servers can switch
47 | # from UDP to TCP when a DNS response is too big to fit in this limited
48 | # buffer size. This value has also been suggested in DNS Flag Day 2020.
49 | edns-buffer-size: 1232
50 |
51 | # Perform prefetching of close to expired message cache entries
52 | # This only applies to domains that have been frequently queried
53 | prefetch: yes
54 |
55 | # One thread should be sufficient, can be increased on beefy machines. In reality for most users running on small networks or on a single machine, it should be unnecessary to seek performance enhancement by increasing num-threads above 1.
56 | num-threads: 1
57 |
58 | # Ensure kernel buffer is large enough to not lose messages in traffic spikes
59 | so-rcvbuf: 1m
60 |
61 | # Ensure privacy of local IP ranges
62 | private-address: 192.168.0.0/16
63 | private-address: 169.254.0.0/16
64 | private-address: 172.16.0.0/12
65 | private-address: 10.0.0.0/8
66 | private-address: fd00::/8
67 | private-address: fe80::/10
68 |
--------------------------------------------------------------------------------
/prometheus/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/samba/DFS-R/README.md:
--------------------------------------------------------------------------------
1 | ## DFS-R Wiki:
2 |
3 | **Samba in its current state doesn't support SysVol replication via `DFS-R` (`Distributed File System Replication`) or the older `FRS` (`File Replication Service`) used in Windows Server 2000/2003 for Sysvol replication. You need to sync idmap.ldb from the DC holding the `PDC_Emulator` `FSMO` role to all other `DCs`.** This ensures that all DCs will use the same IDs. If you do not sync idmap.ldb, you can and will get different IDs on each DC. You need to sync idmap.ldb when you first join a new DC and then regularly, to ensure the IDs remain constant. You do not need to sync idmap.ldb every time you sync SysVol, but as stated, it should be done periodically.
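A minimal sketch of the idmap.ldb sync described above, assuming the compiled `/usr/local/samba` layout and the SSH port used elsewhere in this repo; the destination hostname is a placeholder:

```sh
# on the DC holding the PDC Emulator role: take a consistent copy of idmap.ldb
tdbbackup -s .bak /usr/local/samba/private/idmap.ldb

# push the copy to the other DC, then flush its cache
rsync -avz -e 'ssh -p 4193' /usr/local/samba/private/idmap.ldb.bak DC02:/usr/local/samba/private/idmap.ldb
ssh -p 4193 DC02 '/usr/local/samba/bin/net cache flush'
```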
4 |
5 | **Everything stored inside the `AD` is replicated between `DCs`. For example: users, groups, and DNS records. In the current state, Samba does not support the distributed file system replication (`DFS-R`) protocol used for Sysvol replication.** It’s often viewed as a subfunction of DFS, and both are often used together, but they can also be used completely separately: DFS with multiple targets can be used without DFS-R (`if the folder contents are updated with some other method`), and DFS-R can replicate shared folders that are not DFS-targets.
6 |
7 | **You can't use another distributed filesystem like `GlusterFS` or `Lustre` for `SysVol` replication, because a cluster file system with Samba requires `CTDB` to be able to do it safely, and `CTDB` and the AD DC are incompatible.**
8 |
9 |
10 |
11 | ## Key Features:
12 |
13 | - ***Data Synchronization*** - DFS-R (Distributed File System Replication) allows for efficient data replication between Samba servers, ensuring consistency across multiple servers.
14 | - ***Resilience and Redundancy*** - Provides fault tolerance by replicating data across different servers, enhancing data availability and reliability.
15 | - ***Bandwidth Optimization*** - Uses remote differential compression to minimize the amount of data sent over the network during replication, optimizing bandwidth usage.
16 | - ***Automatic Recovery*** - Capable of automatic recovery from interruptions, ensuring data integrity and continuity.
17 |
18 |
19 | ## Best Practice:
20 |
21 | - Samba doesn't have a direct way of performing DFS-R. The best practice here depends on the environment, use case, as well as security policies and framework set in place. Once you have that, you may perform SysVol replication via one of the following:
22 |
23 | - Rsync based SysVol replication workaround (Samba DCs only): Quick setup, easy to configure.
24 | - Bidirectional Rsync/Unison based SysVol replication workaround (Samba DCs only): Complex, requires a third-party script, each DC requires a cron job against each other DC.
25 | - Bidirectional Rsync/osync based SysVol replication workaround (Samba DCs only): Complex, requires a third-party script, each DC requires a cron job against each other DC.
26 | - Robocopy based SysVol replication workaround (Samba DCs -> Windows DCs): Quick setup, easy to configure, uses MS Robocopy.
27 |
28 |
29 |
30 | ##
31 | > `TDB` files and `LDB` files using TDB have a maximum size of 4 GB because the databases use 32-bit structures. Previously, there was a project called NTDB that should address the size limit and other problems. However, the project has been stopped because of problems migrating the databases. `LDB` files based on LMDB, specifically the sam.ldb on the AD DC, have a size specified by the --backend-store-size=SIZE parameter to samba-tool domain provision and samba-tool domain join which controls the maximum DB size. The default is 8GB. As LMDB is a true 64-bit database, the maximum is limited only by the storage available on the system.
32 |
--------------------------------------------------------------------------------
/samba/DFS-R/crone_rsync.md:
--------------------------------------------------------------------------------
1 | ### Cron + Rsync One-way Replication:
2 |
3 |
4 | ```sh
5 | 1) nano /usr/local/bin/sysvol-sync-from-dc01.sh (on DC02)
6 | #!/bin/bash
7 | PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
8 | export PATH
9 | 2) echo "Starting sysvol sync at $(date)" >> /tmp/sysvol-sync-debug.log
10 | 3) rsync -XAavz -e 'ssh -p 4193' --delete-after DC01:/usr/local/samba/var/locks/sysvol/ /usr/local/samba/var/locks/sysvol/ >> /tmp/sysvol-sync-debug.log 2>&1
11 | 4) rsync -XAavz -e 'ssh -p 4193' --delete-after DC01:/usr/local/samba/var/locks/sysvol/alprojects.tech/scripts/ /usr/local/samba/var/locks/sysvol/alprojects.tech/scripts/ >> /tmp/sysvol-sync-debug.log 2>&1
12 | 5) rsync -XAavz -e 'ssh -p 4193' --delete-after DC01:/usr/local/samba/var/locks/sysvol/alprojects.tech/SHARE/ /usr/local/samba/var/locks/sysvol/alprojects.tech/SHARE/ >> /tmp/sysvol-sync-debug.log 2>&1
13 | 6) rsync -XAavz -e 'ssh -p 4193' --delete-after DC01:/usr/local/samba/var/locks/sysvol/alprojects.tech/APPS/ /usr/local/samba/var/locks/sysvol/alprojects.tech/APPS/ >> /tmp/sysvol-sync-debug.log 2>&1
14 | 7) echo "Completed sysvol sync at $(date)" >> /tmp/sysvol-sync-debug.log
15 | 8) chmod +x /usr/local/bin/sysvol-sync-from-dc01.sh
16 | 9) mkdir -p /usr/local/samba/var/locks/sysvol/alprojects.tech/scripts/ **if not on DC02**
17 | 10) mkdir -p /usr/local/samba/var/locks/sysvol/alprojects.tech/SHARE/ **if not on DC02**
18 | 11) mkdir -p /usr/local/samba/var/locks/sysvol/alprojects.tech/APPS/ **if not on DC02**
19 | 12) chown -R root:root /usr/local/samba/var/locks/sysvol/ **if not done**
20 | 13) chmod -R 750 /usr/local/samba/var/locks/sysvol/ **both DCs should be locked down**
21 | 14) crontab -e
22 | 15) */5 * * * * /usr/local/bin/sysvol-sync-from-dc01.sh >> /tmp/sysvol-sync.log 2>&1
23 | 16) /usr/local/bin/sysvol-sync-from-dc01.sh **if you have tmux its beneficial here**
24 | 17) tail -f /var/log/cron **toss this in a tmux split screen**
25 | 18) tail -f /var/log/syslog
26 | 19) systemctl restart samba
27 | 20) samba-tool fsmo show
28 |
--------------------------------------------------------------------------------
/samba/DFS-R/groups_shares.md:
--------------------------------------------------------------------------------
1 | ### Samba Users & Groups + DB+ Shares
2 |
3 | ```sh
4 | 1) samba-tool user create echo Word1234 --given-name='Heffy' --surname='moef' --must-change-at-next-login
5 | 2) pdbedit -L **Samba user database**
6 | 3) samba-tool user list **basic view compared to pdbedit**
7 | 4) samba-tool group list **you should see the same in RSAT**
8 | 5) samba-tool group show Users **specific Samba group**
9 | 6) pdbedit -L -v echo **specific Samba user**
10 | 7) mkdir -p /usr/local/samba/var/locks/sysvol/alprojects.tech/APPS
11 | 8) mkdir -p /usr/local/samba/var/locks/sysvol/alprojects.tech/SHARES
12 | 9) chown root:3000000 /usr/local/samba/var/locks/sysvol/alprojects.tech/APPS
13 | 10) chown root:3000000 /usr/local/samba/var/locks/sysvol/alprojects.tech/SHARES
14 | 11) chmod 770 /usr/local/samba/var/locks/sysvol/alprojects.tech/APPS **If not already**
15 | 12) chmod 770 /usr/local/samba/var/locks/sysvol/alprojects.tech/SHARES **If not already**
16 |
--------------------------------------------------------------------------------
/samba/DFS-R/smb.conf:
--------------------------------------------------------------------------------
1 | Samba config for Audit & Compliance + Combing /etc/samba/smb.conf & /usr/local/samba/etc/smb.conf:
2 |
3 |
4 | 1) nano /usr/local/samba/etc/smb.conf **both DC's should match config files**
5 | # Global parameters
6 | [global]
7 |
8 | bind interfaces only = Yes
9 | dns forwarder = 0.0.0.0
10 | interfaces = lo ens18
11 | netbios name = DC01
12 | realm = ALPROJECTS.TECH
13 | server role = active directory domain controller
14 | workgroup = ALPROJECTS
15 | idmap_ldb:use rfc2307 = yes
16 | passdb backend = tdbsam
17 | log file = /var/log/samba/%m.log
18 | log level = 1 auth:5 winbind:5
19 | max log size = 10000
20 |
21 | # Additional settings from /etc/samba/smb.conf global settings or shares
22 |
23 | printcap name = cups
24 | load printers = yes
25 | cups options = raw
26 | workgroup = Alprojects
27 | min protocol = SMB3
28 | max protocol = SMB3
29 | server signing = mandatory
30 | smb encrypt = mandatory
31 | map to guest = bad user
32 |
33 | [sysvol]
34 |
35 | path = /usr/local/samba/var/locks/sysvol
36 | read only = No
37 | vfs objects = full_audit
38 | full_audit:prefix = %u|%I|%m|%S
39 | full_audit:success = open read write rename pwrite
40 | full_audit:failure = all
41 | full_audit:facility = local7
42 | full_audit:priority = NOTICE
43 |
44 | [netlogon]
45 |
46 | path = /usr/local/samba/var/locks/sysvol/alprojects.tech/scripts
47 | read only = No
48 | vfs objects = full_audit
49 | full_audit:prefix = %u|%I|%m|%S
50 | full_audit:success = open read write rename
51 | full_audit:failure = all
52 | full_audit:facility = local7
53 | full_audit:priority = NOTICE
54 | [SHARE]
55 |
56 | path = /usr/local/samba/var/locks/sysvol/alprojects.tech/SHARE
57 | read only = No
58 | vfs objects = full_audit
59 | full_audit:prefix = %u|%I|%m|%S
60 | full_audit:success = open read write rename
61 | full_audit:failure = all
62 | full_audit:facility = local7
63 | full_audit:priority = NOTICE
64 | [APPS]
65 |
66 | path = /usr/local/samba/var/locks/sysvol/alprojects.tech/APPS
67 | read only = yes
68 | vfs objects = full_audit
69 | full_audit:prefix = %u|%I|%m|%S
70 | full_audit:success = open read write rename
71 | full_audit:failure = all
72 | full_audit:facility = local7
73 | full_audit:priority = NOTICE
74 | [homes]
75 |
76 | comment = Home Directories
77 | valid users = %S, %D%w%S
78 | browseable = No
79 | read only = No
80 | inherit acls = Yes
81 |
82 | [printers]
83 |
84 | comment = All Printers
85 | path = /var/tmp
86 | printable = Yes
87 | create mask = 0600
88 | browseable = No
89 | [print$]
90 |
91 | comment = Printer Drivers
92 | path = /var/lib/samba/drivers
93 | write list = @printadmin root
94 | force group = @printadmin
95 | create mask = 0664
96 | directory mask = 0775
97 |
98 |
99 | 2) ls -lah /var/log/samba **make sure the log files are being made**
100 | 3) journalctl -u dhcpd -f
101 | 4) journalctl -u samba -f
102 | 5) tail -f /var/log/samba/*.log **GENSEC Mechanism, Samba Service, Winbind Service should all show no errors**
103 |
--------------------------------------------------------------------------------
/samba/README.md:
--------------------------------------------------------------------------------
1 | ## Samba Wiki:
2 |
3 | Starting from version 4.0 (`released in 2012`), Samba is able to serve as an Active Directory (AD) domain controller (DC). **Samba operates at the forest functional level of Windows Server 2008 R2 which is more than sufficient to manage sophisticated enterprises** that use Windows 10/11 with strict compliance requirements (`including NIST 800-171`).
4 |
5 | While `NIST 800-171` is designed specifically for non-Federal (`commercial`) enterprises, with a separate set of guidelines – `NIST 800-53` – developed to cover Federal systems and organizations, `ISO 27001` is a more general standard and can be applicable to organizations of all types.
6 |
7 | When installing `RSAT` tools to manage the DC and AD services frontend, it's worth mentioning that `DHCP` won't connect to the RSAT tools. **RSAT is designed primarily for managing Windows services.** While it can interact with some non-Windows services that use protocols compatible with Windows (like Samba for Active Directory), it generally does not support managing non-Windows implementations of DHCP directly. This is because the DHCP service in Linux (`commonly ISC DHCP Server`) does not communicate with the same management protocols as the Windows DHCP service.
8 |
9 | ## Security & Compliance:
10 |
11 | - Many regulatory frameworks require detailed logs of access to sensitive data. Given that `SYSVOL` and `NETLOGON` can contain sensitive information, auditing these shares can help meet such compliance needs. Ensure that your Samba configuration aligns with any relevant compliance requirements (`like GDPR, HIPAA, SOC2`) especially in handling logging and user data.
12 |
13 | ## Important Points:
14 |
15 | - (`Do not`) create a symbolic link to the generated krb5.conf file. In Samba 4.7 and later, the `/usr/local/samba/private/` directory is no longer accessible by other users than the root user. If the file is a symbolic link, other users are not able to read the file and, for example, dynamic DNS updates fail if you use the `BIND_DLZ` DNS back end. Make sure that you provision the AD using a DNS domain that will not need to be changed. Samba does not support renaming the AD DNS zone and Kerberos realm. Do not use `.local` for the TLD, this is used by `Avahi`.
16 |
17 |
18 | ## Key Features:
19 |
20 | - ***File and Print Services*** - Samba provides seamless file and print services for clients across various operating systems, including Windows, Linux, and macOS.
21 | - ***Active Directory Domain Controller*** - Capable of acting as an AD Domain Controller, enabling Linux servers to manage AD-based networks with authentication, directory services, and group policy management.
22 | - ***Interoperability*** - Ensures compatibility between Unix/Linux and Windows systems, allowing them to share files and printers effortlessly.
23 | - ***Security*** - Supports advanced security features like Kerberos, NTLM, and integration with existing security infrastructures.
24 | - ***Flexibility***- Can be configured for different roles, including standalone server, member server in a domain, or as a domain controller.
25 |
26 |
27 | ## Best Practice:
28 |
29 | - When you extract source files directly to the root directory (`/`), it can lead to files being scattered across the system if the archive does not contain a top-level directory. This can make cleanup and management more difficult. Typically, it's better to extract source archives into a dedicated directory to keep your system organized and to avoid any unintended overwriting of system files or directories. A common practice is to use a directory like `/usr/src` or `/opt` for compiling software from source. This approach has several benefits:
30 |
31 | - It keeps source files contained and easy to manage.
32 | - It avoids cluttering the root directory or accidentally overwriting important system files.
33 | - It makes it easier to delete or archive the source directory once you're done.
34 | - This method provides a clean and reversible way to manage software compiled from source, which is especially helpful in environments where you might be compiling different versions or multiple software packages.
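A small sketch of that practice using the same Samba tarball referenced in the build steps in this repo; `/usr/src` is one reasonable choice of staging directory:

```sh
mkdir -p /usr/src/samba-4.20.0
tar -zxf samba-4.20.0.tar.gz -C /usr/src/samba-4.20.0 --strip-components=1
cd /usr/src/samba-4.20.0
```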
35 |
36 | ##
37 | > Samba, an open-source software suite, has a unique history tied to its name. "Samba" was chosen because it contains the letters "S", "M", and "B" from the SMB (Server Message Block) protocol, which is fundamental to its function.
38 |
--------------------------------------------------------------------------------
/samba/dhcpd.md:
--------------------------------------------------------------------------------
1 | ### ISC DHCP Server + DHCP Reservation
2 |
3 | ```sh
4 | dnf install dhcp-server
5 | nano /etc/dhcp/dhcpd.conf
6 |
7 | default-lease-time 600;
8 | max-lease-time 7200;
9 | ddns-update-style none;
10 | authoritative;
11 |
12 | subnet 0.0.0.0 netmask 255.255.255.0 {
13 | range 0.0.0.0 0.0.0.0;
14 | option routers 0.0.0.0;
15 | option subnet-mask 0.0.0.0;
16 | option domain-name-servers 0.0.0.0;
17 | option domain-name "alprojects";
18 | option domain-search "alprojects";
19 | }
20 |
21 | host device1 {
22 | hardware ethernet #MAC#;
23 | fixed-address #ip#;
24 | }
25 |
26 | systemctl enable dhcpd
27 | systemctl start dhcpd
28 | systemctl status dhcpd **see clients leases**
29 |
--------------------------------------------------------------------------------
/samba/domain_controller_source_build.md:
--------------------------------------------------------------------------------
1 | ***Repo + smb.conf + krb5.conf + Updates***
2 |
3 | ```sh
4 | 1) mv /etc/samba/smb.conf /etc/samba/smb.conf.old
5 | 2) mv /etc/krb5.conf /etc/krb5.conf.old
6 | 3) mkdir -p /samba-4.20.0
7 | 4) cd ../../samba-4.20.0 **I was in a home user folder**
8 | 5) wget https://download.samba.org/pub/samba/stable/samba-4.20.0.tar.gz
9 | 6) tar -zxf samba-4.20.0.tar.gz
10 | 7) cd samba-4.20.0
11 | 8) dnf install epel-release -y **installing Enterprise Repo**
12 | 9) dnf update -y
13 | 10) dnf config-manager --set-enabled powertools **on Build 8.6**
14 | 11) dnf config-manager --set-enabled crb **on Build 9.3**
15 | 12) dnf repolist
16 | ```
17 | ***Packages + Variables + Compile Parameters***
18 |
19 | ```sh
20 | 1) dnf -y install docbook-style-xsl bison flex gcc gdb gnutls-devel jansson-devel keyutils-libs-devel krb5-workstation libacl-devel libaio-devel libattr-devel libblkid-devel libtasn1 libtasn1-tools libxml2-devel
21 | libxslt openldap-devel pam-devel perl perl-ExtUtils-MakeMaker perl-Parse-Yapp popt-devel python3-cryptography python3-dns python3-gpg python36-devel readline-devel systemd-devel tar zlib-devel json perl-JSON
22 | lmdb-devel gpgme-devel libarchive-devel dbus-devel python3-markdown python3-pyasn1 rpcgen libtirpc-devel screen python3-devel krb5-devel --skip-broken
23 | 2) find /usr -name "lmdb.h" **should show up**
24 | 3) find /usr/include -name "rpc.h" **should show up**
25 | 4) export CFLAGS="-I/path/to/lmdb -I/usr/include/tirpc $CFLAGS" **tested with/without**
26 | 5) export CPPFLAGS="-I/path/to/lmdb -I/usr/include/tirpc $CPPFLAGS" **good practice to include them**
27 | 6) dnf update -y
28 | 7) ./configure --enable-selftest \ --with-piddir=/usr/local/samba/var/run \ --with-privatedir=/usr/local/samba/private \ --with-sockets-dir=/usr/local/samba/var/run \ --with-lockdir=/usr/local/samba/var/locks \ --with-statedir=/usr/local/samba/var \ --with-cachedir=/usr/local/samba/var/cache
29 | 8) make -j6 **6=core count**
30 | 9) make install
31 | 10) ls /usr/local/samba/bin **lots of green entries**
32 | 11) ls /usr/local/samba/sbin **line of green entry**
33 | 12) samba -b | grep "CONFIGFILE" ** this will point to where conf is located**
34 | ```
35 | ***Provisioning + Bash Configuration + Resolv.conf***
36 |
37 | ```sh
38 | 1) export PATH=/usr/local/samba/bin:/usr/local/samba/sbin:$PATH
39 | 2) nano /etc/hosts **Made adjustments**
40 | 3) nano /etc/resolv.conf **Made adjustments**
41 | 4) chattr +i /etc/resolv.conf **Immutable status**
42 | 5) samba-tool domain provision --use-rfc2307 --interactive --option="interfaces= lo ens18" --option="bind interfaces only=yes"
43 | 6) samba **start it off**
44 | 7) ps -ax | grep samba **long list**
45 | 8) chown root:root /usr/local/samba/etc/smb.conf **if not already**
46 | 9) chmod 640 /usr/local/samba/etc/smb.conf **compiled location**
47 | ```
48 | ***Testing Configuration Aftermath***
49 |
50 | ```sh
51 | 1) cp /usr/local/samba/private/krb5.conf /etc/krb5.conf **dont forget to do this**
52 | 2) host -t SRV _kerberos._udp.alprojects.tech **passed**
53 | 3) host -t SRV _ldap._tcp.alprojects.tech **passed**
54 | 4) samba-tool dbcheck --cross-ncs **healthy**
55 | 5) host -t A dc01.alprojects.tech **passed**
56 | 6) ping -c3 www.google.ca **passed**
57 | 7) kinit administrator **logged in**
58 | 8) testparm **passed**
59 | 9) klist **generated ticket**
60 | ```
61 | ***Chrony + Syslog + Local-7***
62 |
63 | ```sh
64 | 1) dnf install chrony -y **if not installed**
65 | 2) systemctl enable chronyd --now
66 | 3) systemctl restart chronyd
67 | 4) systemctl status chronyd
68 | 5) chronyc sources
69 | 6) chronyc tracking
70 | 7) nano /etc/chrony.conf **for accuracy add more than one**
71 | pool 0.pool.ntp.org iburst
72 | pool 1.pool.ntp.org iburst
73 | pool 3.pool.ntp.org iburst
74 | 8) chronyc ntpdata **since the last or current time sync**
75 | 9) nano /etc/rsyslog.conf
76 | local7.* /var/log/samba-audit.log
77 | 10) nano /var/log/samba-audit.log **save and exit**
78 | 11) chmod 644 /var/log/samba-audit.log (if its not already)
79 | 12) systemctl restart rsyslog
80 | 13) tail -f /var/log/samba-audit.log
81 | 14) tail -f /var/log/samba/0.0.0.0.log **this is present because logging is working correctly**
82 | 15) tail -f /var/log/samba/%m.log
83 | 16) tail -f /var/log/samba/smbd.log
84 | 17) tail -f /var/log/samba/winbindd.log (all tail commands showing normal operation)
85 | ```
86 |
87 |
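***Samba Audit Hook (Optional)***

The local7 lines above only cover the rsyslog side; smb.conf also has to emit audit messages on that facility. A minimal sketch, assuming the compiled config at /usr/local/samba/etc/smb.conf and the vfs_full_audit module (the share name below is hypothetical); restart samba afterwards:

```sh
# appended to /usr/local/samba/etc/smb.conf -- hypothetical example, trim the audited operations to taste
[global]
    full_audit:facility = LOCAL7
    full_audit:priority = NOTICE
    full_audit:success = connect mkdir rmdir rename unlink write
    full_audit:failure = connect

[profiles]
    vfs objects = full_audit
```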
--------------------------------------------------------------------------------
/samba/samba.service:
--------------------------------------------------------------------------------
1 | nano /etc/systemd/system/samba.service
2 |
3 | [Unit]
4 | Description=Samba AD Daemon
5 | Wants=network-online.target
6 | After=network.target network-online.target
7 |
8 | [Service]
9 | Type=forking
10 | ExecStart=/usr/local/samba/sbin/samba
11 | PIDFile=/usr/local/samba/var/run/samba.pid
12 | ExecReload=/bin/kill -HUP $MAINPID
13 |
14 | [Install]
15 | WantedBy=multi-user.target
16 |
17 | systemctl daemon-reload
18 | systemctl enable samba.service
19 | restorecon -v /usr/local/samba/sbin/samba
20 | systemctl start samba.service
21 | systemctl status samba.service
22 | ps -ax | grep samba **passed**
23 | systemctl status samba.service
24 |
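journalctl -u samba.service -n 50 --no-pager **optional: recent service logs**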
--------------------------------------------------------------------------------
/samba/samba_enable.sh:
--------------------------------------------------------------------------------
1 | nano /etc/profile.d/samba_enable.sh
2 |
3 | #!/bin/bash
4 | # Add Samba to PATH
5 |
6 | if [[ ":$PATH:" != *":/usr/local/samba/sbin:"* ]]; then
7 |
8 | PATH="/usr/local/samba/sbin:$PATH"
9 |
10 | fi
11 |
12 | if [[ ":$PATH:" != *":/usr/local/samba/bin:"* ]]; then
13 |
14 | PATH="/usr/local/samba/bin:$PATH"
15 |
16 | fi
17 |
18 | export PATH
19 |
20 | chmod +x /etc/profile.d/samba_enable.sh
21 | reboot
22 | echo $PATH **verify**
23 |
--------------------------------------------------------------------------------
/samba/selinux_iptables.md:
--------------------------------------------------------------------------------
1 | Linux FW + SSH Configuration:
2 |
3 | ```sh
4 | 1) semanage port -a -t ssh_port_t -p tcp 4193
5 | 2) semanage port -l | grep ssh or semanage port -l | grep ssh_port_t
6 | 3) systemctl restart sshd
7 | 4) systemctl status sshd
8 | 5) systemctl stop firewalld
9 | 6) systemctl mask firewalld **logout/login + new port**
10 | 7) dnf install iptables-services -y
11 | 8) systemctl start iptables
12 | 9) systemctl enable iptables
13 | 10) nano /etc/sysconfig/iptables
14 | #
15 | # Allow Samba, DNS, DHCP Access
16 | -A INPUT -p tcp -s 0.0.0.0/24 -m state --state NEW -m multiport --dports 53,88,135,139,389,445,464,636,3268,49152:65535 -j ACCEPT
17 | -A INPUT -p udp -s 0.0.0.0/24 -m state --state NEW -m multiport --dports 53,123,137,138,389,636 -j ACCEPT
18 | #
19 | # Allow/Drop for SSH IP Addresses
20 | -A INPUT -p tcp -s 0.0.0.0/32 --dport 4193 -m state --state NEW -j ACCEPT
21 | #
22 | # DROP is more security-focused; REJECT gives away too much info
23 | -A INPUT -p tcp --dport 22 -j DROP
24 | #
25 | systemctl restart iptables
26 | systemctl restart sshd **logout/login**
27 | ```
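Rules added at runtime with the iptables command are not persistent on their own; a quick sketch for saving them with iptables-services (same file as edited above):

```sh
iptables -L -n --line-numbers    # review the active rule set
service iptables save            # writes the running rules to /etc/sysconfig/iptables
systemctl restart iptables       # reload from the saved file
```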
--------------------------------------------------------------------------------
/semaphore/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/snort/README.md:
--------------------------------------------------------------------------------
1 | ## Snort Wiki:
2 |
3 | **Snort is a widely used open-source Intrusion Prevention System (IPS) that detects and prevents network intrusions by analyzing network traffic in real-time. It was developed by Martin Roesch and is maintained by Cisco.** Snort is **written in C and is released under the GNU General Public License (`GPL`)**, enabling both personal and commercial use.
4 |
5 | **Snort utilizes a robust set of rules to identify malicious activities, generating alerts or blocking harmful packets. Snort is versatile, serving as a packet sniffer, logger, or a full-fledged IPS.** It supports various deployment options, including inline mode for active prevention.
6 |
7 | ## The two main rule set offerings are:
8 |
9 | - ***Community Ruleset*** - Freely available and developed by the community.
10 | - ***Snort Subscriber Ruleset*** - Maintained by Cisco Talos, providing real-time updates to subscribers.
11 |
12 |
13 | ## Security & Compliance:
14 |
15 | - Snort assists in achieving compliance with security standards like (`PCI-DSS`) by monitoring network traffic for policy violations and potential threats. It ensures that organizations adhere to regulatory requirements by providing detailed audit logs and real-time alerts, crucial for maintaining compliance.
16 |
17 | ## Important Note:
18 | - (`Do NOT`) overlook the importance of updating Snort rules regularly. Using outdated rules can lead to false positives or missed detections, compromising network security. Always ensure that your Snort rules are up-to-date to effectively identify and mitigate threats.
19 |
20 |
21 | ## Key Features:
22 | - ***Intrusion Detection and Prevention (IDS/IPS)*** - Monitors network traffic for suspicious activity and can block or alert on potential threats.
23 | - ***Packet Sniffer*** - Functions like tcpdump, capturing network packets for analysis.
24 | - ***Packet Logger*** - Logs network packets, useful for traffic debugging and analysis.
25 | - ***Custom Rule Creation*** - Allows users to define specific rules to detect unique threats (see the sketch after this list).
26 | - ***Scalability*** - Suitable for various network sizes, from small businesses to large enterprises.
27 |
28 |
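As a concrete illustration of custom rules, the sketch below appends a simple local rule and validates it; the paths assume a typical package install under /etc/snort, so adjust them to your layout:

```sh
# sid 1000001 is in the range reserved for local rules
echo 'alert tcp any any -> $HOME_NET 22 (msg:"Inbound SSH attempt"; sid:1000001; rev:1;)' | sudo tee -a /etc/snort/rules/local.rules
sudo snort -T -c /etc/snort/snort.conf    # test mode: validate config and rules before going live
```
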
29 | ## Best Practices:
30 | - ***Rule Management*** - Regularly update your Snort rules to ensure optimal security.
31 | - ***Network Segmentation*** - Use Snort with network segmentation to isolate and protect critical infrastructure.
32 | - ***Performance Tuning*** - Optimize Snort's performance by adjusting configurations for your specific network environment.
33 | - ***Monitoring & Logging*** - Continuously monitor Snort's performance and logs to stay ahead of threats.
34 | - ***Backup Configurations*** - Regularly back up Snort configurations to prevent data loss and simplify recovery.
35 |
36 |
37 | ##
38 | > One of the coolest aspects of Snort is its Community Ruleset, which is developed and maintained by a global community of security experts. This collaborative approach ensures that Snort stays updated with the latest threat intelligence, making it a powerful and dynamic tool for intrusion prevention.
39 |
--------------------------------------------------------------------------------
/sql/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/sql/lite/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/sql/ms/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/sql/postgres/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/suricata/README.md:
--------------------------------------------------------------------------------
1 | ## Suricata Wiki:
2 |
3 |
4 | **Suricata is an open-source network threat detection engine that performs real-time intrusion detection (IDS), intrusion prevention (IPS), network security monitoring (NSM), and offline packet capture (pcap) processing. Developed by the Open Information Security Foundation (OISF)**, Suricata is capable of **deep packet inspection, traffic logging, and protocol identification, making it an essential tool for cybersecurity professionals. It is written in the C programming language and released under the GPLv2 license**, allowing for wide-scale deployment and community-driven improvements.
5 |
6 | Suricata's versatility allows it to be integrated with various security frameworks and platforms, making it a robust solution for monitoring and defending networks against a wide range of threats. **Additionally, Suricata supports multi-threading, GPU acceleration, and is highly scalable, enabling it to handle the demands of modern, high-speed networks**.
7 |
8 | ## Security & Compliance:
9 | - Suricata helps in achieving compliance with security regulations like (`PCI-DSS`) and (`HIPAA`) by providing deep packet inspection, intrusion detection, and logging capabilities. It monitors network traffic for policy violations and potential threats, ensuring adherence to regulatory standards. Suricata’s extensive logging and alerting features allow for detailed audit trails and incident response, which are essential for maintaining compliance.
10 |
11 |
12 | ## Important Note:
13 |
14 | - (`Do NOT`) overlook the importance of regularly updating Suricata’s rule sets. Outdated or misconfigured rules can lead to false positives, missed detections, and overall reduced effectiveness in identifying and mitigating threats. Always ensure that the rules are optimized for your specific network environment to maintain high levels of security.
15 |
16 | ## Key Features:
17 | - ***Intrusion Detection and Prevention (IDS/IPS)*** - Monitors network traffic for suspicious activity and blocks or alerts on potential threats.
18 | - ***Deep Packet Inspection*** - Analyzes packet content for protocol anomalies, malware, and other indicators of compromise.
19 | - ***Protocol Identification*** - Automatically identifies application-layer protocols, aiding in the detection of malicious or anomalous traffic.
20 | - ***Custom Rule Engine*** - Supports a flexible rule syntax for creating tailored detection rules specific to your network environment.
21 | - ***Scalability*** - Designed to operate efficiently on networks of all sizes, from small businesses to large enterprise environments.
22 |
23 | ## Best Practices:
24 |
25 | - ***Rule Management*** - Regularly update and optimize your rulesets to ensure they are current and effective for your specific environment (see the sketch after this list).
26 | - ***Network Segmentation*** - Use Suricata in conjunction with network segmentation to limit the spread of potential intrusions.
27 | - ***Monitoring and Logging*** - Continuously monitor Suricata's alerts and logs to stay ahead of emerging threats and adjust configurations as needed.
28 | - ***Performance Tuning*** - Leverage multi-threading and GPU acceleration to optimize Suricata’s performance on high-traffic networks.
29 | - ***Backup Configurations*** - Regularly back up your Suricata configurations and rulesets to avoid data loss and simplify recovery in the event of an issue.
30 |
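A minimal rule-maintenance sketch, assuming the suricata-update tool is installed and the default config lives at /etc/suricata/suricata.yaml:

```sh
sudo suricata-update                                  # refresh the enabled rule sources
sudo suricata -T -c /etc/suricata/suricata.yaml -v    # test mode: validate config and rules
sudo systemctl restart suricata                       # or perform a live rule reload instead
```
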
31 | ##
32 | > One of the coolest features of Suricata is its multi-threading capability. Unlike many other IDS/IPS solutions, Suricata is designed to fully utilize modern multi-core processors. This means it can process multiple packets simultaneously, significantly increasing its throughput and making it highly effective for monitoring large, high-speed networks without dropping packets or missing critical threats.
33 |
--------------------------------------------------------------------------------
/terminal-editor/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/terminal-editor/bashrc/.bashrc:
--------------------------------------------------------------------------------
1 | # ssh_collection
2 | alias compose_now='ssh -o IdentitiesOnly=yes -i ~/.ssh/keys/compose/id_ed25519 user@address.com -p '
3 | alias compose_now2='ssh -o IdentitiesOnly=yes -i ~/.ssh/keys/compose/id_ed25519 user@address.com -p '
4 | alias compose_now3='ssh -o IdentitiesOnly=yes -i ~/.ssh/keys/compose/id_ed25519 user@address.com -p '
5 | alias sshaddk='ssh-add ~/.ssh/keys/compose/id_ed25519'
6 |
7 | # packages_collection
8 | alias update_dist='sudo apt-get update && sudo apt-get dist-upgrade -y'
9 | alias update_full='sudo apt-get update && sudo apt-get upgrade -y'
10 | alias clean='sudo apt autoremove && sudo apt autoclean -y'
11 | alias search_pkg='apt-cache search'
12 | alias update='sudo apt update -y'
13 |
14 | # go_to_collection
15 | alias gotow='cd /home/echo/.local/share/warp-terminal/themes'
16 | alias goton='sudo nano ~/.config/neofetch/config.conf'
17 | alias gotos='sudo nano ~/.config/starship.toml'
18 | alias gotoh='cd /repo/homelab/home-lab/'
19 | alias gotohtl='cd /var/www/html'
20 | alias gotologs='cd /var/log'
21 |
22 | # git_collection
23 | alias gitge='git config --global user.email alprojects1@proton.me'
24 | alias gshortlog='git log --oneline --decorate --graph --all'
25 | alias gitgu='git config --global user.name LR03'
26 | alias gplm='git pull --rebase origin main'
27 | alias gpld='git pull --rebase origin dev'
28 | alias gundo='git reset --soft HEAD~1'
29 | alias gphm='git push origin main'
30 | alias gphd='git push origin dev'
31 | alias gpl='git pull --rebase'
32 | alias gfo='git fetch origin'
33 | alias glog='git log -p -1'
34 | alias gcm='git commit -m'
35 | alias gco='git checkout'
36 | alias gst='git status'
37 | alias gcl='git clone'
38 | alias gaa='git add .'
39 |
40 | # directory_collection
41 | alias ....='cd ../../../../../../../../../../../../../..'
42 | alias ...='cd ../..'
43 | alias home='cd ~'
44 | alias ..='cd ..'
45 |
46 | # find_it_collection
47 | alias findext='find . -type f -name "*."'
48 | alias findit='find . -name 2>/dev/null'
49 | alias finddir='find . -type d -name'
50 | alias find='find . -name'
51 |
52 | # misc
53 | alias bkupbashrc='cp ~/.bashrc ~/.bashrc.$(date +%Y%m%d%H%M%S).bak'
54 | alias dms='sudo ./scripts/clean_memory.sh'
55 | alias cms='free -m -l -t'
56 | alias df='df -h'
57 | #
58 | alias ping='ping -c 5'
59 | alias netstat='netstat -tuln'
60 | alias nettop='sudo nethogs'
61 | #
62 | alias rm='rm -i'
63 | alias rmd='sudo rm -rf'
64 | alias cls='clear '
65 |
66 | # list_collection
67 | alias ll='ls -liha'
68 | alias lmd='ls -lt'
69 | alias la='ls -A'
70 | alias l='ls -CF'
71 |
72 | # function_it_collection
73 | huntit() {
74 | sudo find / -name "$1*" 2>/dev/null
75 | }
76 |
77 | killit() {
78 | echo "You sure you want to kill processes matching '$1'? (y/n)"
79 | read -r confirm
80 | if [[ "$confirm" == "y" ]]; then
81 | pkill -f "$1"
82 | else
83 | echo "Aborted."
84 | fi
85 | }
86 |
87 | backit() {
88 | src="$1"
89 | dest="$2"
90 | filename=$(basename "$src")
91 |
92 | if [ -z "$src" ] || [ -z "$dest" ]; then
93 | echo "Usage: backit /path/to/source /path/to/backup/destination"
94 | return 1
95 | fi
96 |
97 | if [ ! -e "$src" ]; then
98 | echo "Source path does not exist."
99 | return 1
100 | fi
101 |
102 | if [ ! -d "$dest" ]; then
103 | echo "Destination must be a directory."
104 | return 1
105 | fi
106 |
107 | tar -czvf "$dest/$filename-$(date +%Y%m%d%H%M%S).tar.gz" "$src"
108 | }
109 |
110 | sendit() {
111 | src="$1"
112 | dest="$2"
113 |
114 | if [ -z "$src" ] || [ -z "$dest" ]; then
115 | echo "Usage: sendit /path/to/source user@remote:/path/to/destination"
116 | return 1
117 | fi
118 |
119 | if [ ! -e "$src" ]; then
120 | echo "Source path does not exist."
121 | return 1
122 | fi
123 |
124 | if ! command -v rsync >/dev/null; then
125 | echo "rsync is not installed. Please install it first."
126 | return 1
127 | fi
128 |
129 | rsync -avh --progress "$src" "$dest"
130 | }
131 |
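# example usage (hypothetical paths):
#   backit /etc/samba /mnt/backups          -> /mnt/backups/samba-<timestamp>.tar.gz
#   sendit /compose/traefik user@10.0.0.5:/backups/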
--------------------------------------------------------------------------------
/terminal-editor/key_chain/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/terminal-editor/key_chain/clean_up.md:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Temp directory for Keychain updates
4 | TEMP_DIR="/tmp/keychain_update"
5 |
6 | # Remove Temp directory
7 | if [ -d "$TEMP_DIR" ]; then
8 | rm -rf "$TEMP_DIR"
9 | echo "Temporary files cleaned up from /tmp."
10 | else
11 | echo "No temporary files found."
12 | fi
13 |
14 | echo "it is done..."
15 |
--------------------------------------------------------------------------------
/terminal-editor/key_chain/remove_build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Directory where Keychain is installed
4 | INSTALL_DIR="/opt/keychain"
5 |
6 | # Remove the Keychain directory
7 | if [ -d "$INSTALL_DIR" ]; then
8 | sudo rm -rf "$INSTALL_DIR"
9 | echo "Keychain has been removed from /opt."
10 | else
11 | echo "Keychain not found in /opt."
12 | fi
13 |
14 | # Removing related entries from .bashrc
15 | if grep -q "keychain" ~/.bashrc; then
16 | sed -i '/keychain/d' ~/.bashrc
17 | echo "Removed Keychain references from .bashrc."
18 | fi
19 |
20 | # Re-source .bashrc
21 | source ~/.bashrc
22 |
23 | echo "It is done....."
24 |
--------------------------------------------------------------------------------
/terminal-editor/key_chain/source_build.md:
--------------------------------------------------------------------------------
1 | ***Packages + Dependencies + Permissions***
2 |
3 | ```sh
4 | 1) sudo wget https://github.com/funtoo/keychain/archive/refs/heads/master.zip -O /opt/keychain-master.zip **preferred location**
5 | 2) sudo unzip /opt/keychain-master.zip -d /opt/
6 | 3) sudo mv /opt/keychain-master /opt/keychain **rename for simplicity**
7 | 4) sudo chmod +x /opt/keychain/keychain
8 | 5) nano ~/.bashrc
9 | 6) eval $(/opt/keychain/keychain --eval --agents ssh id_ed25519) **persistent ssh**
10 | 7) source ~/.bashrc
11 | ```
12 |
13 | ***Symbolic Link***
14 |
15 | ```sh
16 | 1) sudo ln -s /opt/keychain/keychain /usr/local/bin/keychain **required so keychain is on PATH for terminal use**
17 | 2) keychain --version
18 | 3) which keychain **should return /usr/local/bin/keychain**
19 | ```
20 |
21 |
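***Optional Verification***

Keychain hands the key to ssh-agent, so after a fresh login the fingerprint should already be loaded (the GitHub check only applies if this key is registered with a remote there):

```sh
ssh-add -l                # should list the id_ed25519 fingerprint
ssh -T git@github.com     # optional end-to-end check against a remote
```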
--------------------------------------------------------------------------------
/terminal-editor/key_chain/update_build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Directory where Keychain is installed
4 | INSTALL_DIR="/opt/keychain"
5 |
6 | # Temp directory for download
7 | TEMP_DIR="/tmp/keychain_update"
8 |
9 | # Create a temporary directory for update
10 | rm -rf "$TEMP_DIR"
11 | mkdir -p "$TEMP_DIR"
12 | cd "$TEMP_DIR" || exit 1
13 |
14 | # Downloading latest version
15 | wget https://github.com/funtoo/keychain/archive/refs/heads/master.zip
16 |
17 | # Unzip downloaded
18 | unzip master.zip
19 |
20 | # Replace the old version with new one
21 | sudo rm -rf "$INSTALL_DIR"
22 | sudo mv keychain-master "$INSTALL_DIR"
23 |
24 | # Ensure the script is executable
25 | sudo chmod +x "$INSTALL_DIR/keychain"
26 |
27 | # Clean up the temporary directory
28 | rm -rf "$TEMP_DIR"
29 |
30 | echo "It is done....."
31 |
--------------------------------------------------------------------------------
/terminal-editor/oh_my_posh/README.md:
--------------------------------------------------------------------------------
1 | ## Posh Wiki:
2 |
3 |
4 | Oh My Posh is a highly customizable prompt theme engine for various shell environments, **including PowerShell, Bash, and Zsh. It allows users to configure their command prompt with various themes and segments to display information such as the current directory, git status, system load, and more.** The project is hosted on GitHub, where users can find the source code, documentation, and installation instructions. The community actively contributes to the project by reporting issues, requesting features, and submitting pull requests. I am on version 21.7.0 as of writing.
5 |
6 | **It was written by Jan De Dobbeleer and is actively developed and maintained by the community. The project is open-source and licensed under the MIT License, making it freely available for anyone to use and contribute to.** Oh My Posh is written in Go, a statically typed, compiled programming language designed for system programming. This choice of language ensures that Oh My Posh is efficient and performant across different platforms.
7 |
8 |
9 | ## Key Features:
10 |
11 | - ***Highly Customizable*** - Oh-My-Posh allows users to create personalized and visually appealing terminal prompts across various shell environments, including `PowerShell`, `Bash`, and `Zsh`.
12 | - ***Segmented Themes*** - Offers a variety of themes and segments that display useful information, such as the current directory, git status, system load, and more.
13 | - ***Cross-Platform Support*** - Compatible with multiple operating systems, making it versatile for different development environments.
14 | - ***Ease of Use*** - Simple configuration and setup, allowing users to quickly apply and switch between different themes.
15 |
16 | ## Best Practice:
17 |
18 | - ***Integrate with Git*** - Make use of Oh-My-Posh’s git segment to display your current branch, status, and other git-related information for easier version control management.
19 | - ***Optimize Performance*** - Customize your prompt to avoid displaying unnecessary information, which can slow down terminal performance.
20 | - ***Document Your Configuration*** - Keep a backup of your configuration files to easily replicate your setup across different environments or machines.
21 |
22 |
23 | >Traditionally, prompt tools work with custom scripts per theme (`just like Oh My Posh 2 did`) or a lot of `CLI` configuration switches to define what it looks like. With Posh, it starts from a single configuration file that could easily be shared anywhere, removing the need to really grasp what goes on underneath.
24 |
--------------------------------------------------------------------------------
/terminal-editor/oh_my_posh/bashrc.md:
--------------------------------------------------------------------------------
1 | ```sh
2 | 1) nano ~/.bashrc
3 | 2) scroll down to the bottom
4 | 3) copy and paste the script below
5 | 4) source ~/.bashrc
6 | 5) change_theme jandedobbeleer.omp.json
7 | 6) the theme should change
8 | ```
9 |
10 |
11 | ```sh
12 | eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"
13 | eval "$(oh-my-posh init bash --config ~/.poshthemes/jandedobbeleer.omp.json)"
14 |
15 | change_theme() {
16 | local theme=$1
17 | if [ -f ~/.poshthemes/$theme ]; then
18 | echo "Changing theme to $theme"
19 | echo "eval \"\$(oh-my-posh init bash --config ~/.poshthemes/$theme)\"" > ~/.oh-my-posh-current-theme
20 | source ~/.oh-my-posh-current-theme
21 | else
22 | echo "Theme $theme not found in ~/.poshthemes"
23 | fi
24 | }
25 |
26 | # Source the current theme on shell startup
27 | if [ -f ~/.oh-my-posh-current-theme ]; then
28 | source ~/.oh-my-posh-current-theme
29 | else
30 | echo "No current theme set"
31 | fi
32 | ```
33 |
--------------------------------------------------------------------------------
/terminal-editor/oh_my_posh/lin_source_build.sh:
--------------------------------------------------------------------------------
1 | brew install jandedobbeleer/oh-my-posh/oh-my-posh
2 | oh-my-posh --version
3 | oh-my-posh font install
4 | brew update && brew upgrade oh-my-posh **should be updated but just in case**
5 | mkdir -p ~/.config
6 | cp /home/linuxbrew/.linuxbrew/opt/oh-my-posh/themes/paradox.omp.json ~/.config/oh-my-posh.json
7 | echo 'eval "$(oh-my-posh init bash --config ~/.config/oh-my-posh.json)"' >> ~/.bashrc
8 | source ~/.bashrc **should see default theme**
9 | nano ~/.poshthemes/theme-name.json **needed to create it**
10 | cd ~/.poshthemes
11 | wget https://github.com/JanDeDobbeleer/oh-my-posh/releases/latest/download/themes.zip
12 | unzip themes.zip
13 | chmod u+rw ~/.poshthemes/*.json
14 | ls ~/.poshthemes **should see 100+ themes**
15 | nano ~/.bashrc (you should see "eval entry")
16 |
--------------------------------------------------------------------------------
/terminal-editor/oh_my_posh/theme_list.md:
--------------------------------------------------------------------------------
1 | ```sh
2 | change_theme atomic.omp.json
3 |
4 | change_theme blue-owl.omp.json
5 |
6 | change_theme jandedobbeleer.omp.json
7 |
8 | change_theme atomic.omp.json
9 |
10 | change_theme clean-detailed.omp.json
11 |
12 | change_theme easy-term.omp.json
13 |
14 | change_theme free-ukraine.omp.json
15 |
16 | change_theme slim.omp.json
17 |
18 | change_theme slimfat.omp.json
19 |
20 | change_theme sonicboom_dark.omp.json
21 |
22 | change_theme sonicboom_light.omp.json
23 |
24 | change_theme sonicboom_dark.omp.json
25 |
26 | sudo nano ~/.config/neofetch/config.conf
27 | ```
--------------------------------------------------------------------------------
/terminal-editor/oh_my_posh/win_source_build.ps:
--------------------------------------------------------------------------------
1 | Windows POSH + Path + Configuration:
2 |
3 | set-ExecutionPolicy -List (choose best options for you)
4 | set-ExecutionPolicy RemoteSigned -Scope CurrentUser (prefer this but feel free to go unrestricted if you prefer)
5 | winget install JanDeDobbeleer.OhMyPosh -s winget (installed POSH + Themes)
6 | get-Command -Name oh-my-posh -ErrorAction SilentlyContinue (installation directory)
7 | $env:Path += ";C:\Users\user\AppData\Local\Programs\oh-my-posh\bin" (add path)
8 | oh-my-posh --version (checks out)
9 | oh-my-posh font install (install from a select list. Beta as of writing)
10 | manually install fonts via Nerd Fonts https://www.nerdfonts.com/ (CascadiaCode, Cascadia Mono, Iosevka, and Hack are some of the ones I've used)
11 | winget upgrade JanDeDobbeleer.OhMyPosh -s winget
12 | remove-Item -Path $PROFILE -Force (If corrupted)
13 | new-Item -Path $PROFILE -ItemType File -Force (new profile)
14 | notepad $PROFILE (powershell profile)
15 | oh-my-posh init pwsh | Invoke-Expression (default theme)
16 | oh-my-posh init pwsh --config "$env:POSH_THEMES_PATH\jandedobbeleer.omp.json" | Invoke-Expression (pull from a themes folder)
17 | themes are located in C:\Users\Gizmo\AppData\Local\Programs\oh-my-posh\themes (for me)
18 | . $PROFILE (to reload profile which will error but still reload or just close and reopen)
19 |
--------------------------------------------------------------------------------
/terminal-editor/tmux_terminal/.conf:
--------------------------------------------------------------------------------
1 | nano ~/.tmux.conf (create it if you don't find it)
2 |
3 | # Enable mouse support
4 | set -g mouse on
5 |
6 | # Use vi keys in copy mode
7 | setw -g mode-keys vi
8 |
9 | # Set prefix key to Ctrl + a (optional)
10 | set -g prefix C-a
11 | unbind C-b
12 | bind C-a send-prefix
13 |
14 | # Reload configuration file
15 | bind r source-file ~/.tmux.conf \; display "Reloaded!"
16 |
17 | # Save Sessions
18 | set -g @plugin 'tmux-plugins/tpm'
19 | set -g @plugin 'tmux-plugins/tmux-resurrect'
20 |
21 | # Initialize TMUX plugin manager (keep this line at the very bottom of tmux.conf)
22 | run '~/.tmux/plugins/tpm/tpm'
23 |
24 | tmux source-file ~/.tmux.conf (reload if running)
25 | tmux new -s TW -n MON (new session + window)
26 |
--------------------------------------------------------------------------------
/terminal-editor/tmux_terminal/README.md:
--------------------------------------------------------------------------------
1 | ## Tmux Wiki:
2 |
3 | Tmux (`short for terminal multiplexer`) is a powerful open-source terminal multiplexer for Unix-like operating systems. **It's used to manage multiple terminal sessions, create, access, and control multiple terminals (`or panes`) within a single screen.** It can also be used for **detaching processes from their controlling terminals, allowing remote sessions to remain active without being visible, making it a valuable tool for system administrators, developers, and anyone who frequently** works with the command line.
4 |
5 | **It was originally written by Nicholas Marriott in 2007. It is distributed under the ISC license, which is a permissive free software license. The development of tmux was motivated by the need for a modern, feature-rich terminal multiplexer that could serve as an alternative to the older GNU Screen.** This introduced several improvements over GNU Screen, such as a more flexible and user-friendly configuration system, better handling of multiple sessions and windows, and the ability to split windows into multiple panes.
6 |
7 | **The project is hosted on GitHub, where users can report issues, request features, and contribute to the codebase. The ongoing development ensures that it remains up-to-date with modern terminal capabilities and user needs.** In 2009 the first public release was made available, and it quickly gained traction as a viable alternative to GNU Screen.
8 |
9 |
10 | ## Key Features:
11 |
12 | - ***Session Management*** - Allows users to create and manage multiple terminal sessions within a single terminal window. Sessions can be detached and reattached, enabling users to resume their work from where they left off (see the example after this list).
13 | - ***Window and Pane Management*** - Users can split terminal windows into multiple panes, either horizontally or vertically, and switch between them easily. This feature allows for efficient multitasking and organization of terminal-based workflows.
14 | - ***Customizable Key Bindings*** - Tmux provides customizable key bindings and commands, enabling users to tailor the interface to their preferences and optimize their workflow.
15 | - ***Persistence*** - Sessions remain active even if the user disconnects or logs out, allowing for persistent workflows that continue to run in the background.
16 | - ***Scripting and Automation*** - Supports scripting and automation, enabling users to create complex setups and automate repetitive tasks using configuration files and scripts.
17 |
18 |
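A quick illustration of the detach/reattach workflow from the Session Management point above (the session name is arbitrary; detach with prefix + d, which is Ctrl+b by default or Ctrl+a with the .conf in this folder):

```sh
tmux new -s lab       # start a named session
tmux ls               # list running sessions after detaching
tmux attach -t lab    # reattach and resume where you left off
```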
19 |
20 | ## Best Practices:
21 |
22 | - ***Organize Workspaces*** - Use Tmux to create and manage distinct workspaces for different tasks or projects. This helps keep your terminal environment organized and efficient.
23 | - ***Leverage Key Bindings*** - Customize Tmux key bindings to match your workflow and improve navigation speed. Familiarize yourself with default bindings and consider setting up your own.
24 | - ***Utilize Sessions*** - Make use of Tmux sessions to manage long-running processes or remote work. Detach and reattach sessions as needed to maintain a continuous workflow.
25 | - ***Automate Setup*** - Use Tmux configuration files to automate the setup of your terminal environment, including window and pane arrangements, to streamline your workflow.
26 | - ***Monitor Resources*** - Integrate Tmux with monitoring tools to keep track of system performance or running processes directly from your Tmux session.
27 |
28 | ##
29 | > By 2011 tmux was adopted by several Unix-like operating systems' package managers, making it more accessible to a broader user base. In 2014, major enhancements were made to improve performance, configuration flexibility, and compatibility with modern terminal features. By 2018, the project saw significant contributions from new developers, leading to the introduction of new features and improvements. I am on version 3.2a as of writing.
30 |
--------------------------------------------------------------------------------
/terminal-editor/tmux_terminal/cleanup_tmux.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Step 1: Remove the tmux binary from /usr/local/bin
4 | echo "Removing tmux binary from /usr/local/bin..."
5 | sudo rm -f /usr/local/bin/tmux
6 |
7 | # Step 2: Remove development tools and dependencies installed for tmux
8 | echo "Removing development tools and dependencies..."
9 | sudo apt-get remove --purge git autoconf automake libtool pkg-config libevent-dev ncurses-dev build-essential bison -y
10 | sudo apt-get autoremove -y
11 |
12 | # Step 3: Clean up the tmux source directory
13 | echo "Cleaning up the tmux source directory..."
14 | rm -rf ~/tmux
15 |
16 | # Step 4: Remove tmux plugins (TPM and tmux-resurrect)
17 | echo "Removing tmux plugins..."
18 | rm -rf ~/.tmux/plugins/tpm
19 | rm -rf ~/.tmux/plugins/tmux-resurrect
20 |
21 | # Step 5: Revert PATH changes in ~/.profile and reload the profile
22 | echo "Reverting PATH changes in ~/.profile..."
23 | sed -i '/export PATH=\/usr\/local\/bin:\$PATH/d' ~/.profile
24 | source ~/.profile
25 |
26 | # Step 6: Clear Bash history to remove tmux-related commands
27 | echo "Clearing Bash history..."
28 | history -c
29 |
30 | # Final Step: Confirm cleanup completion
31 | echo "Cleanup complete."
32 |
--------------------------------------------------------------------------------
/terminal-editor/tmux_terminal/remove_build.md:
--------------------------------------------------------------------------------
1 | ***Removal + cleanup + Tool removal***
2 |
3 | ```sh
4 | 1) rm -f /usr/local/bin/tmux **remove tmux binary**
5 | 2) apt-get remove --purge git autoconf automake libtool pkg-config libevent-dev ncurses-dev build-essential bison -y **remove tools on debian**
6 | 2) dnf remove git autoconf automake libtool pkgconf libevent-devel ncurses-devel gcc make -y **remove tools on rhel**
7 | 3) apt autoremove -y
8 | 3) dnf autoremove -y
9 | 4) rm -rf ~/tmux **clean up source /**
10 | 5) rm -rf ~/.tmux/plugins/tpm
11 | 6) rm -rf ~/.tmux/plugins/tmux-resurrect
12 | 7) sed -i '/export PATH=\/usr\/local\/bin:\$PATH/d' ~/.profile **revert PATH changes in ~/.profile**
13 | 8) source ~/.profile
14 | 9) history -c
15 | ```
--------------------------------------------------------------------------------
/terminal-editor/tmux_terminal/source_build.md:
--------------------------------------------------------------------------------
1 | ***tmux + Compile + Path + Sessions + pkgs/dependencies***
2 |
3 | ```sh
4 | 1) apt-get install git autoconf automake libtool pkg-config libevent-dev ncurses-dev build-essential -y **Debian distros**
5 | 2) dnf install git autoconf automake libtool pkgconf libevent-devel ncurses-devel gcc make -y **RHEL distros**
6 | 3) apt-get install bison **LXC/Docker needs yacc, which is a parser generator and is often required for building software from source**
7 | 4) git clone https://github.com/tmux/tmux.git **Repo**
8 | 5) cd tmux **ls -lah always, get use to structures**
9 | 6) ./autogen.sh **runs autotools commands, generates configuration scripts, checks dependencies**
10 | 7) ./configure **system Inspection, generate Makefiles, configuration options**
11 | 8) make -j6
12 | 9) make install
13 | 10) ls -l /usr/local/bin/ **htop tmux should be present, skip steps**
14 | 11) echo 'export PATH=/usr/local/bin:$PATH' >> ~/.profile
15 | 12) source ~/.profile
16 | 13) tmux -V
17 | 14) tmux **green banner session**
18 | 15) cd ~/tmux
19 | 16) git clone https://github.com/tmux-plugins/tpm ~/.tmux/plugins/tpm
20 | 17) git clone https://github.com/tmux-plugins/tmux-resurrect ~/.tmux/plugins/tmux-resurrect
21 | 18) Ctrl+a I **install plugins, terminal pointer should blink**
22 | 19) ls ~/.tmux/plugins/ **should get tpm & tmux-resurrect**
23 | 20) while read -r cmd; do history -s "$cmd"; done < ~/.bash_history **Bash history loop**
24 | ```
--------------------------------------------------------------------------------
/terminal-editor/warp_terminal/.config/starship.toml:
--------------------------------------------------------------------------------
1 | # ~/.config/starship.toml
2 |
3 | # Use custom format with lines
4 | format = '''
5 | [========================>](bold purple)
6 | [┌──────────────────>](bold green)
7 | [│](bold green)$os$directory$git_branch$git_status$rust$package$cmd_duration
8 | [└─────>](bold green)
9 | [===========>](bold purple)'''
10 |
11 | # Command Duration Module Configuration
12 | [cmd_duration]
13 | min_time = 2000 # Show duration if the command takes longer than 2 seconds
14 | show_milliseconds = false # Only show seconds
15 | format = "took [$duration]($style) " # Format for displaying the duration
16 | style = "bold yellow" # Style for the duration text
17 |
18 | # OS module configuration for displaying the Debian logo
19 | [os]
20 | format = "[$symbol](bold white) " # Display the OS symbol in bold white
21 | disabled = false
22 |
23 | [os.symbols]
24 | # Define symbols for various operating systems
25 | Windows = " " # Windows symbol
26 | Arch = " " # Arch Linux symbol
27 | Ubuntu = " " # Ubuntu symbol
28 | Macos = " " # macOS symbol
29 | Debian = " " # Debian symbol
30 |
31 | # Git Branch Module Configuration
32 | [git_branch]
33 | symbol = " " # This is the branch icon symbol
34 | format = "on [$symbol$branch]($style) " # Format for displaying the branch
35 | style = "bold blue" # Style for the branch text
36 |
37 | # Git Status Module Configuration
38 | [git_status]
39 | format = "[$all_status$ahead_behind]($style)" # Show all status icons
40 | style = "bold purple" # Style for the status text
41 |
--------------------------------------------------------------------------------
/terminal-editor/warp_terminal/Night_City_Terminal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alprojects1/home-lab/cbc1381d77b0c56ccac6ddb8f21576e6af77427c/terminal-editor/warp_terminal/Night_City_Terminal.png
--------------------------------------------------------------------------------
/terminal-editor/warp_terminal/dark_city_bg.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/alprojects1/home-lab/cbc1381d77b0c56ccac6ddb8f21576e6af77427c/terminal-editor/warp_terminal/dark_city_bg.jpg
--------------------------------------------------------------------------------
/terminal-editor/warp_terminal/night_city.yaml:
--------------------------------------------------------------------------------
1 | accent: '#28b9ff' # Accent color for UI elements
2 | background: '#191919' # Terminal background color
3 | background_image:
4 | path: LR03/dark_city_bg.jpg
5 | opacity: 20
6 | details: darker # Whether the theme is lighter or darker.
7 | foreground: '#f1f1f1' # The foreground color.
8 | terminal_colors: # Ansi escape colors.
9 | bright:
10 | black: '#666666'
11 | blue: '#5c78ff'
12 | cyan: '#5ac8ff'
13 | green: '#905aff'
14 | magenta: '#5ea2ff'
15 | red: '#ba5aff'
16 | white: '#ffffff'
17 | yellow: '#657b83'
18 | normal:
19 | black: '#121212'
20 | blue: '#2b4fff'
21 | cyan: '#28b9ff'
22 | green: '#7129ff'
23 | magenta: '#2883ff'
24 | red: '#a52aff'
25 | white: '#f1f1f1'
26 | yellow: '#3d2aff'
27 |
--------------------------------------------------------------------------------
/terminal-editor/wave_terminal/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/terminal-editor/zsh/README.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/traefik/README.md:
--------------------------------------------------------------------------------
1 | ## Traefik Wiki:
2 |
3 | **Traefik is a modern reverse proxy and load balancer designed to manage and route traffic for microservices. It was created by Emile Vauge and is written in the Go programming language.** Traefik is an open-source project **released under the MIT License, allowing for extensive customization and community contributions.**
4 |
5 | **Traefik automatically discovers services in dynamic environments by integrating with various orchestration frameworks like Docker, Kubernetes, and Mesos. It supports advanced features like (`SSL`) termination, (`HTTP/2`), WebSocket, and traffic routing based on custom rules**. Additionally, it provides observability through metrics, tracing, and access logs, enhancing the security and monitoring of application traffic.
6 |
7 | Traefik's popularity is due in part to its **ease of integration, lightweight nature, and support for a wide range of use cases**, making it an ideal choice for cloud-native applications.
8 |
9 |
10 | ## Security & Compliance:
11 |
12 | - Traefik assists in maintaining a secure environment by offering robust access control and (`SSL/TLS`) management. It ensures secure communication through (`SSL`) termination and supports Let's Encrypt integration for automated certificate management. Additionally, Traefik's logging and monitoring features aid in tracking access and detecting potential security threats, making it compliant with modern security standards.
13 |
14 |
15 | ## Important Note:
16 | - (`Do NOT`) misconfigure Traefik's access control settings. Incorrect settings can expose your services to unauthorized access. Always verify that your ACLs, firewall rules, and authentication methods are correctly configured to protect your services.
17 |
18 | ## Key Features:
19 |
20 | - ***Automatic Service Discovery*** - Automatically detects new services in dynamic environments (see the sketch after this list).
21 | - ***SSL Termination*** - Simplifies SSL certificate management and renewal with Let's Encrypt integration.
22 | - ***Traffic Routing*** - Advanced routing options based on HTTP headers, methods, hostnames, and more.
23 | - ***Observability*** - Built-in metrics, tracing, and logs for real-time monitoring and troubleshooting.
24 | - ***Integration*** - Supports Docker, Kubernetes, and other orchestration platforms out-of-the-box.
25 | - ***Scalability*** - Efficiently balances load across multiple instances, supporting high-availability setups.
26 |
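Because discovery happens at runtime, the resulting state can be inspected through Traefik's own API. A small sketch, assuming the dashboard/API is exposed insecurely on port 8080 as in the compose files in this folder:

```sh
curl -s http://localhost:8080/api/http/routers    # routers Traefik built from container labels
curl -s http://localhost:8080/api/rawdata         # full dynamic configuration snapshot
```
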
27 | ## Best Practices:
28 |
29 | - ***Use Secure Protocols*** - Ensure SSL/TLS is enabled for secure communication.
30 | - ***Regular Updates*** - Keep Traefik up-to-date to benefit from the latest features and security patches.
31 | - ***Monitor Traffic*** - Utilize Traefik’s metrics and logs for continuous monitoring of application traffic.
32 | - ***Leverage Middleware*** - Use Traefik's middleware features to implement rate-limiting, IP whitelisting, and custom authentication.
33 | - ***Backup Configurations*** - Regularly backup Traefik's configurations to prevent data loss.
34 |
35 | ##
36 | > Traefik’s most unique and powerful feature is its (`Auto-Discovery`) capability. Unlike traditional reverse proxies, Traefik automatically discovers services across your infrastructure through integrations with platforms like Docker, Kubernetes, and more. This means that as you deploy or remove services, Traefik dynamically adjusts routing without requiring any manual configuration changes. This level of automation significantly reduces the overhead for managing complex microservices architectures.
37 |
--------------------------------------------------------------------------------
/traefik/dns_docker-compose.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | traefik:
4 | image: docker.io/library/traefik:v3.1.2
5 | container_name: traefik
6 | ports:
7 | - 80:80
8 | - 443:443
9 | - 8080:8080 # Enable Dashboard, Protect in production
10 | volumes:
11 | - /compose/traefik/traefik.yaml:/etc/traefik/traefik.yaml:ro
12 | - /compose/traefik/conf/:/etc/traefik/conf/
13 | - /compose/traefik/certs/:/etc/traefik/certs/
14 | - /run/docker.sock:/run/docker.sock
15 | environment:
16 | - CF_API_EMAIL=EMAIL # or use secret
17 | - CF_DNS_API_TOKEN=KEY # or use secret
18 | - PUID=1000
19 | - PGID=1000
20 | healthcheck:
21 | test: ["CMD-SHELL", "curl --fail http://10.100.100.234:8686 || exit 0"]
22 | interval: 40s
23 | timeout: 10s
24 | retries: 3
25 | restart: unless-stopped
26 |
--------------------------------------------------------------------------------
/traefik/dns_traefik.yaml:
--------------------------------------------------------------------------------
1 | global:
2 | checkNewVersion: false
3 | sendAnonymousUsage: false
4 |
5 | log:
6 | level: ERROR
7 | format: common
8 | filePath: /compose/traefik/logs/traefik.log
9 |
10 | accesslog:
11 | format: common
12 | filePath: /compose/traefik/logs/access.log
13 |
14 | api:
15 | dashboard: true
16 | insecure: true
17 |
18 | entryPoints:
19 | midgard:
20 | address: ":80"
21 | http:
22 | redirections:
23 | entryPoint:
24 | to: asgard
25 | scheme: https
26 | asgard:
27 | address: ":443"
28 |
29 | providers:
30 | docker:
31 | exposedByDefault: false
32 | file:
33 | directory: /compose/traefik
34 | watch: true
35 |
36 | certificatesResolvers:
37 | staging:
38 | acme:
39 | email: alprojects1@proton.me
40 | storage: /compose/traefik/certs/acme-staging.json
41 | caServer: "https://acme-staging-v02.api.letsencrypt.org/directory"
42 | dnsChallenge:
43 | provider: cloudflare
44 | resolvers:
45 | - "RESOLVER:53"
46 | - "RESOLVER:53"
47 |
48 | production:
49 | acme:
50 | email: alprojects1@proton.me
51 | storage: /compose/traefik/certs/acme-production.json
52 | caServer: "https://acme-v02.api.letsencrypt.org/directory"
53 | dnsChallenge:
54 | provider: cloudflare
55 | resolvers:
56 | - "RESOLVER:53"
57 | - "RESOLVER:53"
58 |
59 | # Optional: Disable TLS cert verification
60 | serversTransport:
61 | insecureSkipVerify: true
62 |
63 | # Optional: Overwrite Default Certificates
64 | tls:
65 | stores:
66 | default:
67 | defaultCertificate:
68 | certFile: /compose/traefik/certs/cert.pem
69 | keyFile: /compose/traefik/certs/cert-key.pem
70 | options:
71 | default:
72 | minVersion: VersionTLS12
73 |
--------------------------------------------------------------------------------
/traefik/http_docker_compose.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | traefik:
4 | image: docker.io/library/traefik:v3.1.2
5 | container_name: traefik
6 | ports:
7 | - 80:80
8 | - 443:443
9 | - 8080:8080 # Enable Dashboard, Protect in production
10 | volumes:
11 | - /compose/traefik/traefik.yaml:/etc/traefik/traefik.yaml:ro
12 | - /compose/traefik/conf/:/etc/traefik/conf/
13 | - /compose/traefik/certs/:/etc/traefik/certs/
14 | - /run/docker.sock:/run/docker.sock
15 | environment:
16 | - PUID=1000
17 | - PGID=1000
18 | healthcheck:
19 | test: ["CMD-SHELL", "curl --fail http://10.100.100.234:8686 || exit 0"]
20 | interval: 40s
21 | timeout: 10s
22 | retries: 3
23 | restart: unless-stopped
24 |
--------------------------------------------------------------------------------
/traefik/http_traefik.yaml:
--------------------------------------------------------------------------------
1 | global:
2 | checkNewVersion: false
3 | sendAnonymousUsage: false
4 |
5 | log:
6 | level: ERROR
7 | format: common
8 | filePath: /compose/traefik/logs/traefik.log
9 |
10 | accesslog:
11 | format: common
12 | filePath: /compose/traefik/logs/access.log
13 |
14 | api:
15 | dashboard: true
16 | insecure: true
17 |
18 | entryPoints:
19 | midgard:
20 | address: ":80"
21 | http:
22 | redirections:
23 | entryPoint:
24 | to: asgard
25 | scheme: https
26 | asgard:
27 | address: ":443"
28 |
29 | providers:
30 | docker:
31 | exposedByDefault: false
32 | file:
33 | directory: /compose/traefik
34 | watch: true
35 |
36 | certificatesResolvers:
37 | staging:
38 | acme:
39 | email: alprojects1@proton.me
40 | storage: /compose/traefik/certs/acme-staging.json
41 | caServer: "https://acme-staging-v02.api.letsencrypt.org/directory"
42 | httpChallenge:
43 | entryPoint: midgard
44 |
45 | production:
46 | acme:
47 | email: alprojects1@proton.me
48 | storage: /compose/traefik/certs/acme-production.json
49 | caServer: "https://acme-v02.api.letsencrypt.org/directory"
50 | httpChallenge:
51 | entryPoint: midgard
52 |
53 | # Optional: Disable TLS cert verification
54 | serversTransport:
55 | insecureSkipVerify: true
56 |
57 | # Optional: Overwrite Default Certificates
58 | tls:
59 | stores:
60 | default:
61 | defaultCertificate:
62 | certFile: /compose/traefik/certs/cert.pem
63 | keyFile: /compose/traefik/certs/cert-key.pem
64 | options:
65 | default:
66 | minVersion: VersionTLS12
67 |
--------------------------------------------------------------------------------
/traefik/tracker_labels_.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | tracker:
4 | image: lscr.io/linuxserver/speedtest-tracker:0.21.2
5 | container_name: speedtest-tracker
6 | ports:
7 | - 8686:80
8 | - 8443:443
9 | volumes:
10 | - /compose/tracker/config:/config
11 | - /compose/tracker/ssl-keys:/config/keys
12 | environment:
13 | - PUID=1000
14 | - PGID=1000
15 | - APP_KEY=KEY
16 | - DB_CONNECTION=sqlite
17 | - SPEEDTEST_SCHEDULE=0 */1 * * *
18 | - PRUNE_RESULTS_OLDER_THAN=30
19 | - CHART_DATETIME_FORMAT=Y-m-d H:i
20 | - DATETIME_FORMAT=Y-m-d H:i:s
21 | - APP_TIMEZONE=America/New_York
22 | - APP_NAME=MySpeedTracker
23 | - APP_URL=https://speedtest.net
24 | - DISPLAY_TIMEZONE=America/New_York
25 | - CONTENT_WIDTH=7xl
26 | - PUBLIC_DASHBOARD=false
27 | - DASHBOARD_POLLING=60s
28 | - NOTIFICATION_POLLING=60s
29 | - RESULTS_POLLING=false
30 | healthcheck:
31 | test: ["CMD-SHELL", "curl -f http://IP_ADDRESS:8080 || exit 1"]
32 | interval: 1m30s
33 | timeout: 10s
34 | retries: 3
35 | restart: unless-stopped
36 |
37 | labels:
38 | - "traefik.enable=true"
39 | - "traefik.http.routers.speedtest.rule=Host(`speed.alprojects.org`)"
40 | - "traefik.http.routers.speedtest.entrypoints=asgard"
41 | - "traefik.http.routers.speedtest.tls.certresolver=production"
42 | - "traefik.http.services.speedtest.loadbalancer.server.port=80"
43 | networks:
44 | - traefik_default
45 |
46 | networks:
47 | traefik_default:
48 | external: true
49 |
--------------------------------------------------------------------------------
/traefik/traefik_labels.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | services:
3 | traefik:
4 | image: docker.io/library/traefik:v3.1.2
5 | container_name: traefik
6 | ports:
7 | - 80:80
8 | - 443:443
9 | volumes:
10 | - /compose/traefik/data/traefik.yaml:/etc/traefik/traefik.yaml:ro
11 | - /compose/traefik/data/acme.json:/acme.json
12 | - /compose/traefik/certs/:/etc/traefik/certs/
13 | - /compose/traefik/conf/:/etc/traefik/conf/
14 | - /run/docker.sock:/run/docker.sock:ro
15 | environment:
16 | - CF_API_EMAIL=EMAIL # or use secret
17 | - CF_DNS_API_TOKEN=KEY # or use secret
18 | - PUID=1000
19 | - PGID=1000
20 | healthcheck:
21 | test: ["CMD-SHELL", "curl --fail http://10.100.100.234:8686 || exit 0"]
22 | interval: 40s
23 | timeout: 10s
24 | retries: 3
25 | restart: unless-stopped
26 |
27 | labels:
28 | - "traefik.enable=true"
29 | - "traefik.http.routers.traefik.entrypoints=midgard"
30 | - "traefik.http.routers.traefik.rule=Host(`traefik.alprojects.org`)"
31 | - "traefik.http.middlewares.traefik-https-redirect.redirectscheme.scheme=https"
32 | - "traefik.http.middlewares.sslheader.headers.customrequestheaders.X-Forwarded-Proto=https"
33 | - "traefik.http.routers.traefik.middlewares=traefik-https-redirect"
34 | - "traefik.http.routers.traefik-secure.entrypoints=asgard"
35 | - "traefik.http.routers.traefik-secure.rule=Host(`traefik.alprojects.org`)"
36 | - "traefik.http.routers.traefik-secure.tls=true"
37 | - "traefik.http.routers.traefik-secure.tls.certresolver=cloudflare"
38 | - "traefik.http.routers.traefik-secure.service=api@internal"
39 | - "traefik.http.routers.traefik-secure.tls.domains[0].main=alprojects.org"
40 | - "traefik.http.routers.traefik-secure.tls.domains[0].sans=*.alprojects.org"
41 |
42 | networks:
43 | traefik_default:
44 | external: true
45 |
--------------------------------------------------------------------------------
/wazuh/README.md:
--------------------------------------------------------------------------------
1 | ## Wazuh Wiki:
2 |
3 | **Wazuh is an open-source security monitoring platform designed to provide comprehensive security visibility and compliance for your infrastructure. Wazuh helps organizations adhere to regulatory standards such as GDPR, HIPAA, and PCI DSS by offering robust tools for security monitoring and compliance management. It integrates a variety of security components and offers Extended Detection and Response (XDR) capabilities with Security Information and Event Management (SIEM) features, providing a comprehensive solution for threat detection and response,** with capabilities such as (`intrusion detection`), (`log data analysis`), (`vulnerability detection`), and security information and event management (`SIEM`). Wazuh comprises three main components: the **Wazuh Manager**, **Wazuh Agent**, and **Wazuh Indexer**.
4 |
5 | **A key strength of Wazuh is its integration flexibility. Wazuh can be seamlessly integrated with various cloud environments, containers, and on-premise infrastructures, making it highly adaptable to diverse IT ecosystems.** Additionally, Wazuh's **open-source nature allows for extensive customization, which can be crucial for organizations with unique security needs.** It's also worth noting that **Wazuh provides active community support and regular updates,** ensuring that the platform evolves with emerging security threats and compliance requirements.
6 |
7 | ## Security & Compliance Management:
8 |
9 | - ***Audit Logs*** - Wazuh continuously tracks and logs user access to sensitive data, as well as system changes, providing an audit trail that is crucial for regulatory compliance.
10 | - ***Policy Enforcement*** - It ensures that all endpoints are in alignment with security policies and industry standards by automatically checking and enforcing compliance rules across the network.
11 | - ***File Integrity Monitoring (FIM)*** - Wazuh monitors changes to critical files and directories, alerting administrators to unauthorized modifications, which is essential for maintaining compliance with regulations.
12 | - ***Vulnerability Detection*** - Wazuh identifies and reports vulnerabilities across the infrastructure, allowing organizations to take proactive steps to secure their systems and stay compliant with industry standards.
13 |
14 | ## Key Features:
15 |
16 | - ***Extended Detection and Response (`XDR`)***
17 | Wazuh’s XDR capabilities extend beyond traditional security tools by integrating data from multiple sources—endpoints, networks, and servers—into a unified threat detection and response system. This allows for the following:
18 |
19 | - ***Comprehensive Threat Detection*** - Wazuh correlates security events from various sources, providing in-depth threat analysis.
20 | - ***Automated Response*** - XDR allows Wazuh to automatically respond to detected threats, reducing the time between detection and mitigation.
21 | - ***Advanced Analytics*** - XDR in Wazuh leverages machine learning and behavioral analysis to detect sophisticated threats that might bypass traditional security measures.
22 |
23 | - ***Security Information and Event Management (`SIEM`)***
24 | Wazuh’s SIEM capabilities offer centralized monitoring, analysis, and management of security events across an organization's infrastructure. This includes:
25 |
26 | - ***Event Correlation*** - Aggregates data from various sources, identifying patterns and anomalies indicative of security threats.
27 | - ***Log Management*** - Collects, stores, and analyzes logs from across your network, enabling real-time detection of threats.
28 | - ***Compliance Reporting*** - Assists in meeting regulatory requirements by generating detailed reports based on security events and log data.
29 |
30 | ## Important Note:
31 |
32 | - (`Do NOT`) misconfigure agent enrollment settings. If agents are not correctly enrolled or if there's a mismatch in authentication tokens, it can lead to failed connections and gaps in security monitoring. Always verify that agent keys are securely stored and matched with the correct Wazuh Manager settings to ensure continuous and accurate data flow from all monitored endpoints. Additionally, avoid modifying system-level files or configurations without proper understanding, as this could disrupt critical security processes and compliance checks.
33 |
34 | ## Best Practices:
35 |
36 | - ***Keep Components Updated*** - Ensure Wazuh Manager, Agent, and Indexer are always running the latest stable versions to avoid compatibility issues.
37 | - ***Follow Official Documentation*** - Adhere strictly to the official Wazuh documentation during deployment and configuration for optimal security and functionality.
38 | - ***Network Segregation*** - Segregate network traffic between Wazuh components to minimize the attack surface.
39 | - ***Regular Policy Review*** - Regularly update and review security policies to adapt to evolving threats.
40 | - ***Log Management*** - Configure proper log rotation and storage to maintain long-term data availability and compliance.
41 |
42 | ##
43 | > A cool and fun fact about Wazuh is its roots in the OSSEC project. Wazuh started as a fork of OSSEC, an open-source intrusion detection system, and has since evolved into a comprehensive security monitoring platform. Unlike its predecessor, Wazuh has expanded significantly to include features like XDR and SIEM, making it a powerful tool for modern cybersecurity needs. Wazuh's open-source nature and active community support ensure it continuously evolves to tackle emerging threats, making it a favorite among security professionals.
44 |
45 |
46 |
--------------------------------------------------------------------------------
/wazuh/docker-compose.yml:
--------------------------------------------------------------------------------
1 | ---
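# Single-node Wazuh stack: manager, indexer, and dashboard.
# Based on the mounts below, the SSL certificates under ./config/wazuh_indexer_ssl_certs/ must exist
# before the first start (see generate-indexer-certs.yml), and every CHANGE_FROM_DEFAULT value
# should be replaced with your own credentials.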
2 | services:
3 | wazuh.manager:
4 | image: wazuh/wazuh-manager:4.9.2
5 | container_name: wazuh-manager
6 | hostname: WZMG01
7 | ulimits:
8 | memlock:
9 | soft: -1
10 | hard: -1
11 | nofile:
12 | soft: 655360
13 | hard: 655360
14 | ports:
15 |       - "1514:1514"       # agent connections
16 |       - "1515:1515"       # agent enrollment service
17 |       - "514:514/udp"     # syslog collection
18 |       - "55000:55000"     # Wazuh server API
19 | volumes:
20 | - wazuh_api_configuration:/var/ossec/api/configuration
21 | - wazuh_etc:/var/ossec/etc
22 | - wazuh_logs:/var/ossec/logs
23 | - wazuh_queue:/var/ossec/queue
24 | - wazuh_var_multigroups:/var/ossec/var/multigroups
25 | - wazuh_integrations:/var/ossec/integrations
26 | - wazuh_active_response:/var/ossec/active-response/bin
27 | - wazuh_agentless:/var/ossec/agentless
28 | - wazuh_wodles:/var/ossec/wodles
29 | - filebeat_etc:/etc/filebeat
30 | - filebeat_var:/var/lib/filebeat
31 | - ./config/wazuh_indexer_ssl_certs/root-ca-manager.pem:/etc/ssl/root-ca.pem
32 | - ./config/wazuh_indexer_ssl_certs/wazuh.manager.pem:/etc/ssl/filebeat.pem
33 | - ./config/wazuh_indexer_ssl_certs/wazuh.manager-key.pem:/etc/ssl/filebeat.key
34 | - ./config/wazuh_cluster/wazuh_manager.conf:/wazuh-config-mount/etc/ossec.conf
35 | environment:
36 | - INDEXER_URL=https://wazuh.indexer:9200
37 | - INDEXER_USERNAME=CHANGE_FROM_DEFAULT
38 | - INDEXER_PASSWORD=CHANGE_FROM_DEFAULT
39 | - FILEBEAT_SSL_VERIFICATION_MODE=full
40 | - SSL_CERTIFICATE_AUTHORITIES=/etc/ssl/root-ca.pem
41 | - SSL_CERTIFICATE=/etc/ssl/filebeat.pem
42 | - SSL_KEY=/etc/ssl/filebeat.key
43 | - API_USERNAME=CHANGE_FROM_DEFAULT
44 | - API_PASSWORD=CHANGE_FROM_DEFAULT
45 | - PUID=1000 #$PUID
46 | - PGID=1000 #$PGID
47 | healthcheck:
48 |       test: ["CMD-SHELL", "curl -sk https://localhost:55000 >/dev/null || exit 1"]  # API reachable; an unauthenticated 401 still counts as up
49 | interval: 40s
50 | timeout: 10s
51 | retries: 3
52 | restart: unless-stopped
53 |
54 | wazuh.indexer:
55 | image: wazuh/wazuh-indexer:4.9.2
56 | container_name: wazuh-indexer
57 | hostname: WZIN01
58 | ulimits:
59 | memlock:
60 | soft: -1
61 | hard: -1
62 | nofile:
63 | soft: 65536
64 | hard: 65536
65 | ports:
66 | - "9200:9200"
67 | volumes:
68 | - wazuh-indexer-data:/var/lib/wazuh-indexer
69 | - ./config/wazuh_indexer_ssl_certs/root-ca.pem:/usr/share/wazuh-indexer/certs/root-ca.pem
70 | - ./config/wazuh_indexer_ssl_certs/wazuh.indexer-key.pem:/usr/share/wazuh-indexer/certs/wazuh.indexer.key
71 | - ./config/wazuh_indexer_ssl_certs/wazuh.indexer.pem:/usr/share/wazuh-indexer/certs/wazuh.indexer.pem
72 | - ./config/wazuh_indexer_ssl_certs/admin.pem:/usr/share/wazuh-indexer/certs/admin.pem
73 | - ./config/wazuh_indexer_ssl_certs/admin-key.pem:/usr/share/wazuh-indexer/certs/admin-key.pem
74 | - ./config/wazuh_indexer/wazuh.indexer.yml:/usr/share/wazuh-indexer/opensearch.yml
75 | - ./config/wazuh_indexer/internal_users.yml:/usr/share/wazuh-indexer/opensearch-security/internal_users.yml
76 | environment:
77 | - "OPENSEARCH_JAVA_OPTS=-Xms1g -Xmx1g"
78 | - PUID=1000 #$PUID
79 | - PGID=1000 #$PGID
80 | healthcheck:
81 |       test: ["CMD-SHELL", "curl -sk https://localhost:9200 >/dev/null || exit 1"]  # indexer answers over TLS; auth errors still count as up
82 | interval: 40s
83 | timeout: 10s
84 | retries: 3
85 | restart: unless-stopped
86 |
87 | wazuh.dashboard:
88 | image: wazuh/wazuh-dashboard:4.9.2
89 | container_name: wazuh-dashboard
90 | hostname: WZDB01
91 | ports:
92 |       - 7443:5601   # web UI: host port 7443 -> container port 5601
93 | volumes:
94 | - ./config/wazuh_indexer_ssl_certs/wazuh.dashboard.pem:/usr/share/wazuh-dashboard/certs/wazuh-dashboard.pem
95 | - ./config/wazuh_indexer_ssl_certs/wazuh.dashboard-key.pem:/usr/share/wazuh-dashboard/certs/wazuh-dashboard-key.pem
96 | - ./config/wazuh_indexer_ssl_certs/root-ca.pem:/usr/share/wazuh-dashboard/certs/root-ca.pem
97 | - ./config/wazuh_dashboard/opensearch_dashboards.yml:/usr/share/wazuh-dashboard/config/opensearch_dashboards.yml
98 | - ./config/wazuh_dashboard/wazuh.yml:/usr/share/wazuh-dashboard/data/wazuh/config/wazuh.yml
99 | - wazuh-dashboard-config:/usr/share/wazuh-dashboard/data/wazuh/config
100 | - wazuh-dashboard-custom:/usr/share/wazuh-dashboard/plugins/wazuh/public/assets/custom
101 | environment:
102 | - INDEXER_USERNAME=CHANGE_FROM_DEFAULT
103 | - INDEXER_PASSWORD=CHANGE_FROM_DEFAULT
104 | - WAZUH_API_URL=https://wazuh.manager
105 | - DASHBOARD_USERNAME=CHANGE_FROM_DEFAULT
106 | - DASHBOARD_PASSWORD=CHANGE_FROM_DEFAULT
107 | - API_USERNAME=wazuh-CHANGE_FROM_DEFAULT
108 | - API_PASSWORD=CHANGE_FROM_DEFAULT
109 | - PUID=1000 #$PUID
110 | - PGID=1000 #$PGID
111 | healthcheck:
112 |       test: ["CMD-SHELL", "curl -sk https://localhost:5601 >/dev/null || exit 1"]  # dashboard listens on 5601 inside the container
113 | interval: 40s
114 | timeout: 10s
115 | retries: 3
116 | restart: unless-stopped
117 | depends_on:
118 | - wazuh.indexer
119 |
120 | volumes:
121 | wazuh_api_configuration:
122 | wazuh_etc:
123 | wazuh_logs:
124 | wazuh_queue:
125 | wazuh_var_multigroups:
126 | wazuh_integrations:
127 | wazuh_active_response:
128 | wazuh_agentless:
129 | wazuh_wodles:
130 | filebeat_etc:
131 | filebeat_var:
132 | wazuh-indexer-data:
133 | wazuh-dashboard-config:
134 | wazuh-dashboard-custom:
135 |
--------------------------------------------------------------------------------
/wazuh/generate-indexer-certs.yml:
--------------------------------------------------------------------------------
1 | ---
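# Usage sketch: run once to populate ./config/wazuh_indexer_ssl_certs/ before starting the main stack:
#   docker compose -f generate-indexer-certs.yml run --rm generator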
2 | services:
3 | generator:
4 | image: wazuh/wazuh-certs-generator:0.0.2
5 | hostname: wazuh-certs-generator
6 | volumes:
7 | - ./config/wazuh_indexer_ssl_certs/:/certificates/
8 | - ./config/certs.yml:/config/certs.yml
9 |
--------------------------------------------------------------------------------