├── .editorconfig ├── .github └── workflows │ ├── build-latest.yml │ ├── build-nightly.yml │ └── ci.yml ├── ARCHITECTURE.md ├── CONTRIBUTING.md ├── Dockerfile ├── README.md ├── test ├── containers │ ├── trafficjam_test │ │ └── Dockerfile │ └── whoami │ │ ├── Dockerfile │ │ ├── LICENSE │ │ ├── README.md │ │ └── whoami.go ├── docker-compose-dind-allowhost.yml ├── docker-compose-dind-swarm.yml ├── docker-compose-dind.yml ├── docker-compose-swarm.yml ├── docker-compose.yml ├── test-dind-swarm.bats ├── test-dind.bats └── test.bats ├── trafficjam-diagram.png ├── trafficjam-functions.sh └── trafficjam.sh /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | [*.sh] 3 | indent_style = tab 4 | 5 | shell_variant = bash 6 | binary_next_line = false 7 | switch_case_indent = true 8 | space_redirects = true 9 | keep_padding = true 10 | function_next_line = false -------------------------------------------------------------------------------- /.github/workflows/build-latest.yml: -------------------------------------------------------------------------------- 1 | name: Build - Latest 2 | 3 | on: 4 | push: 5 | tags: 6 | - "v*.*.*" 7 | 8 | jobs: 9 | build: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Check Out Repo 13 | uses: actions/checkout@v4.2.2 14 | 15 | - name: Prepare 16 | id: prep 17 | run: | 18 | DOCKER_IMAGE=kaysond/trafficjam 19 | VERSION=edge 20 | if [[ $GITHUB_REF == refs/tags/* ]]; then 21 | VERSION=${GITHUB_REF#refs/tags/} 22 | fi 23 | TAGS="${DOCKER_IMAGE}:${VERSION}" 24 | if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then 25 | TAGS="$TAGS,${DOCKER_IMAGE}:latest" 26 | fi 27 | echo ::set-output name=tags::${TAGS} 28 | 29 | - name: Cache Docker layers 30 | uses: actions/cache@v4.2.3 31 | with: 32 | path: /tmp/.buildx-cache 33 | key: ${{ runner.os }}-buildx-${{ github.sha }} 34 | restore-keys: | 35 | ${{ runner.os }}-buildx- 36 | 37 | - name: Login to Docker Hub 38 | if: github.event_name != 'pull_request' 39 | uses: docker/login-action@v3.4.0 40 | with: 41 | username: ${{ secrets.DOCKER_HUB_USERNAME }} 42 | password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} 43 | 44 | - name: Set up QEMU 45 | uses: docker/setup-qemu-action@v3.6.0 46 | 47 | - name: Set up Docker Buildx 48 | id: buildx 49 | uses: docker/setup-buildx-action@v3.10.0 50 | 51 | - name: Build and push 52 | id: docker_build 53 | uses: docker/build-push-action@v6.4 54 | with: 55 | builder: ${{ steps.buildx.outputs.name }} 56 | context: ./ 57 | file: ./Dockerfile 58 | push: true 59 | tags: ${{ steps.prep.outputs.tags }} 60 | platforms: linux/amd64,linux/arm64 61 | cache-from: type=local,src=/tmp/.buildx-cache 62 | cache-to: type=local,dest=/tmp/.buildx-cache 63 | 64 | - name: Image digest 65 | run: echo ${{ steps.docker_build.outputs.digest }} 66 | -------------------------------------------------------------------------------- /.github/workflows/build-nightly.yml: -------------------------------------------------------------------------------- 1 | name: Build - Nightly 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | 7 | jobs: 8 | nightly_build: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: Check Out Repo 12 | uses: actions/checkout@v4.2.2 13 | 14 | - name: Cache Docker layers 15 | uses: actions/cache@v4.2.3 16 | with: 17 | path: /tmp/.buildx-cache 18 | key: ${{ runner.os }}-buildx-${{ github.sha }} 19 | restore-keys: | 20 | ${{ runner.os }}-buildx- 21 | 22 | - name: Login to Docker Hub 23 | uses: docker/login-action@v3.4.0 24 | with: 25 | username: 
${{ secrets.DOCKER_HUB_USERNAME }} 26 | password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} 27 | 28 | - name: Set up QEMU 29 | uses: docker/setup-qemu-action@v3.6.0 30 | 31 | - name: Set up Docker Buildx 32 | id: buildx 33 | uses: docker/setup-buildx-action@v3.10.0 34 | 35 | - name: Build and push 36 | id: docker_build 37 | uses: docker/build-push-action@v6.4 38 | with: 39 | builder: ${{ steps.buildx.outputs.name }} 40 | context: ./ 41 | file: ./Dockerfile 42 | push: true 43 | tags: kaysond/trafficjam:nightly 44 | platforms: linux/amd64,linux/arm64 45 | cache-from: type=local,src=/tmp/.buildx-cache 46 | cache-to: type=local,dest=/tmp/.buildx-cache 47 | 48 | - name: Image digest 49 | run: echo ${{ steps.docker_build.outputs.digest }} 50 |
-------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | workflow_dispatch: 9 | 10 | jobs: 11 | ci: 12 | runs-on: ubuntu-24.04 13 | steps: 14 | - name: Check out code 15 | uses: actions/checkout@v2 16 | 17 | - name: Run shellcheck 18 | run: | 19 | shellcheck trafficjam-functions.sh 20 | shellcheck -x trafficjam.sh 21 | 22 | - name: Install shfmt 23 | shell: bash 24 | run: | 25 | sudo add-apt-repository ppa:longsleep/golang-backports 26 | sudo apt install golang-go 27 | export GOPATH=/usr/local 28 | sudo -E go install mvdan.cc/sh/v3/cmd/shfmt@latest 29 | 30 | - name: Run shfmt 31 | run: shfmt --diff trafficjam.sh trafficjam-functions.sh 32 | 33 | - name: Login to Docker Hub 34 | uses: docker/login-action@v3.4.0 35 | if: ${{ github.secret_source == 'Actions' }} 36 | with: 37 | username: ${{ secrets.DOCKER_HUB_USERNAME }} 38 | password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} 39 | 40 | - name: Install bats 41 | run: | 42 | sudo git clone --depth 1 --branch v1.8.0 https://github.com/bats-core/bats-core.git /opt/bats 43 | sudo /opt/bats/install.sh /usr/local 44 | 45 | - name: Run test suite 46 | run: bats test/test.bats 47 |
-------------------------------------------------------------------------------- /ARCHITECTURE.md: -------------------------------------------------------------------------------- 1 | # Architecture 2 | [`trafficjam.sh`](trafficjam.sh) is the main script that sets up configuration and runs commands in a loop. It is a series of well-named function calls so that it is easy to read and understand, improving security. All of the functions are defined in [`trafficjam-functions.sh`](trafficjam-functions.sh). If any function fails, it calls `log_error`, which prints a message to stderr and increments an error counter, then returns 1. The calls are all appended with `|| continue` so the loop restarts if any function fails. `tj_sleep` sets the loop interval, and will slow itself down as the error counter increases (successfully completed loops reset the counter). 3 | 4 | ## Principle of Operation 5 | TrafficJam limits traffic between containers by adding the necessary iptables rules on the host. When Docker Swarm is in use, TrafficJam acts as a daemon that spawns a global-mode service so that the rules are added to the correct network namespace on each host. This daemon-service method is also required because Docker Swarm employs a separate load balancer on each node whose IP address must be permitted to communicate with the subnet.
Since each node (even a manager) is only aware of its own load balancer's IP address, the daemon must start the service, collect the reported load balancer IP addresses of all nodes, then update the service. 6 | 7 | First, TrafficJam queries the docker daemon to determine the specified network's subnet and the IDs of whitelisted containers. If Docker Swarm is in use, TrafficJam also determines the correct network namespace and load balancer IP on the host. 8 | 9 | TrafficJam then adds its own chain in the `filter` table called `TRAFFICJAM`. It also adds a jump rule to the `DOCKER-USER` chain (or `FORWARD` for Docker Swarm) to jump to this chain: `iptables --table filter --insert DOCKER-USER --jump TRAFFICJAM` 10 | 11 | Then, TrafficJam inserts several rules into the `TRAFFICJAM` chain in the `filter` table, which are ultimately evaluated top to bottom: 12 | 1. Accept already-established traffic whose source and destination are the network subnet - `iptables --table filter --insert TRAFFICJAM --source $SUBNET --destination $SUBNET --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN` 13 | 2. Accept traffic from whitelisted containers destined for the network subnet (this requires one rule per container) - `iptables --table filter --insert TRAFFICJAM --source "$IP" --destination "$SUBNET" --jump RETURN` 14 | 3. (Docker Swarm only) Accept traffic from all load balancers (this requires one rule per node) - `iptables --table filter --insert TRAFFICJAM --source "$LOAD_BALANCER_IP" --destination "$SUBNET" --jump RETURN` 15 | 4. Drop traffic whose source and destination are the network subnet - `iptables --table filter --insert TRAFFICJAM --source "$SUBNET" --destination "$SUBNET" --jump DROP` 16 | (Note that the script inserts the rules in reverse order since they're inserted at the top of the chain) 17 | 18 | Thus all traffic on the relevant subnet hits the `DROP` of Rule 4 except traffic initiated by the whitelisted containers (usually the reverse proxy). 19 | 20 | This alone is not sufficient to prevent inter-container communication, however. If a container has a port mapped to the host, other containers are still able to access it via the host IP address and the mapped port. This is because Rule 4 above only drops traffic within the subnet, not traffic to the outside, to allow containers to have internet access. 21 | 22 | This is blocked by another chain and set of rules. First, TrafficJam adds another chain in the `filter` table: `TRAFFICJAM_INPUT`. Then it adds a jump rule to the `INPUT` chain: `iptables --table filter --insert INPUT --jump TRAFFICJAM_INPUT`. The `INPUT` chain is used here because the incoming packet is destined for an IP address assigned to the host and does not need to be forwarded. 23 | 24 | TrafficJam adds two rules to this new chain, again shown in final order: 25 | 1. Accept already-established traffic whose source is the network subnet - `iptables --table filter --insert TRAFFICJAM_INPUT --source $SUBNET --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN` 26 | 2. Drop traffic whose source is the network subnet - `iptables --table filter --insert TRAFFICJAM_INPUT --source "$SUBNET" --jump DROP` 27 | 28 | ## Testing 29 | The test suite uses `bats` for automation. In order to avoid issues with iptables version mismatches, docker-in-docker is used to test `trafficjam` inside a container. This also facilitates Docker Swarm testing by using two test containers connected to a docker network. The nesting is sketched below.
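A minimal sketch of the layered invocation, using the paths and container names from this repo's test files:

```
# Outer suite, run on the host/CI runner: builds and starts the
# docker-in-docker test container(s) from the compose files in test/
bats test/test.bats

# test.bats then runs the inner suites inside the dind containers, e.g.:
docker exec trafficjam_test bats /opt/trafficjam/test/test-dind.bats
docker exec swarm-manager bats /opt/trafficjam/test/test-dind-swarm.bats
```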
The test container (`trafficjam_test`) checks which version of `iptables` it should use, launches the docker daemon, then builds the necessary images. 30 | 31 | In the CI runner, `bats` is used to build the test image, deploy it, then run another `bats` test inside the container itself. The internal `bats` test (e.g. `test-dind.bats`) then deploys `trafficjam` and some `whoami` containers on the containerized docker host, waits for rules to be created, then checks that connectivity is present and absent where appropriate. 32 | 33 | There are three iterations of this procedure: one to check vanilla docker with legacy `iptables`, one to check vanilla docker with `nftables`, and another to check docker swarm (with legacy `iptables`).
-------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | Pull requests are welcome. Please submit them to the `develop` branch, and ensure your branch is rebased on the latest changes in the repo. 3 | 4 | Consider familiarizing yourself with trafficjam's [architecture](ARCHITECTURE.md) before getting started. 5 | 6 | Please run the tests before submitting a PR. There are two dependencies for testing: [`bats`](https://github.com/bats-core/bats-core) (v1.8.0 is used in CI) and [`shellcheck`](https://github.com/koalaman/shellcheck) (v0.7.0 is used in CI, but newer versions are fine too). The tests can be run with `bats test/test.bats`. 7 | 8 | For information on how `trafficjam` and its tests are structured, please see [ARCHITECTURE.md](ARCHITECTURE.md) 9 | 10 | ## Style 11 | Please use [`shfmt`](https://github.com/mvdan/sh) for formatting. For readability and ease of understanding, use long forms of arguments where possible.
-------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker:28.0.4 2 | 3 | RUN apk add --no-cache bash iproute2 iptables tzdata 4 | 5 | COPY trafficjam.sh /usr/local/bin/trafficjam.sh 6 | COPY trafficjam-functions.sh /usr/local/bin/trafficjam-functions.sh 7 | 8 | HEALTHCHECK --timeout=3s CMD ps aux | grep [t]rafficjam.sh 9 | 10 | ENTRYPOINT ["/usr/local/bin/trafficjam.sh"]
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TrafficJam (Beta) 2 | A Docker firewall for your reverse proxy network 3 | 4 | [![Build - Latest](https://github.com/kaysond/trafficjam/actions/workflows/build-latest.yml/badge.svg)](https://github.com/kaysond/trafficjam/actions/workflows/build-latest.yml) [![Build - Nightly](https://github.com/kaysond/trafficjam/actions/workflows/build-nightly.yml/badge.svg)](https://github.com/kaysond/trafficjam/actions/workflows/build-nightly.yml) [![CI](https://github.com/kaysond/trafficjam/actions/workflows/ci.yml/badge.svg)](https://github.com/kaysond/trafficjam/actions/workflows/ci.yml) 5 | ## Threat Model 6 | **Why do you need something like TrafficJam?** Reverse proxies are often used to authenticate external access to internal services, providing benefits such as centralized user management, access control, 2FA and SSO. In a typical Docker setup, multiple services are connected to the reverse proxy via a single network.
If a user authenticates to one service and is able to compromise that service (such as by using [this Pi-Hole vulnerability](https://natedotred.wordpress.com/2020/03/28/cve-2020-8816-pi-hole-remote-code-execution/ "this Pi-Hole vulnerability")), that user will gain access to the entire network *behind* the reverse proxy, and can access every service on the network whether they would normally have permission or not. 7 | 8 | Potential solutions include: 9 | * Use each service's own authentication 10 | * Not all services provide 2FA :( 11 | * Many services do not support centralized user management (LDAP) :( 12 | * Many services do not support SSO :( 13 | * Have each service on a unique network 14 | * Reverse proxy network connections must be updated every time a service is added or removed :( 15 | * Manually configuring every service and reverse proxy entry is painful and error-prone even with tools like Ansible :( 16 | * Use a reverse proxy with auto-discovery and a firewall to isolate services 17 | * Enables 2FA, LDAP, ACL, SSO, etc. regardless of service support :) 18 | * Routes are automatically discovered by the proxy without manual configuration :) 19 | * Every service only needs a connection to one network :) 20 | 21 | ## What TrafficJam Does 22 | TrafficJam allows you to safely and easily connect all of your backend containers to your reverse proxy using a single docker network by preventing the backend containers from communicating with each other. 23 | 24 | ![TrafficJam](./trafficjam-diagram.png) 25 | 26 | ## How TrafficJam Works 27 | TrafficJam works by adding some firewall (`iptables`) rules to the docker network you specify. First, it blocks all traffic on the network. Then it adds a rule that only allows traffic to/from the container(s) you specify in the whitelist. It continually monitors the docker network to make sure the rules stay up to date as you add or remove containers. 
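To verify what TrafficJam is doing on a host, you can list the chains it maintains. A minimal sketch (the chain names below are the ones trafficjam actually creates; the exact rules will vary with your subnet and whitelist):

```
# On the Docker host: show the rules trafficjam currently manages
sudo iptables --table filter --numeric --list TRAFFICJAM
sudo iptables --table filter --numeric --list TRAFFICJAM_INPUT
```

See [ARCHITECTURE.md](ARCHITECTURE.md) for a rule-by-rule walkthrough.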
28 | 29 | ## Setup Examples 30 | 31 | ### Vanilla Docker 32 | `docker-cli`: 33 | ``` 34 | docker run \ 35 | --name trafficjam \ 36 | --cap-add NET_ADMIN \ 37 | --network host \ 38 | --volume "/var/run/docker.sock:/var/run/docker.sock" \ 39 | --env NETWORK=traefik_public \ 40 | --env WHITELIST_FILTER="ancestor=traefik:latest" \ 41 | --env TZ="America/Los_Angeles" \ 42 | --detach \ 43 | kaysond/trafficjam 44 | ``` 45 | 46 | `docker-compose.yml`: 47 | ``` 48 | services: 49 | trafficjam: 50 | container_name: trafficjam 51 | image: kaysond/trafficjam 52 | cap_add: 53 | - NET_ADMIN 54 | network_mode: host 55 | volumes: 56 | - /var/run/docker.sock:/var/run/docker.sock 57 | environment: 58 | NETWORK: traefik_public 59 | WHITELIST_FILTER: ancestor=traefik:latest 60 | TZ: America/Los_Angeles 61 | 62 | traefik: 63 | container_name: traefik 64 | image: traefik:latest 65 | networks: 66 | traefik_public: 67 | 68 | whoami: 69 | container_name: whoami 70 | image: traefik/whoami 71 | networks: 72 | traefik_public: 73 | 74 | networks: 75 | traefik_public: 76 | ``` 77 | 78 | ### Docker Swarm 79 | `docker-cli`: 80 | ``` 81 | docker service create \ 82 | --name trafficjam \ 83 | --mount type=bind,source=/var/run/docker.sock,destination=/var/run/docker.sock \ 84 | --env NETWORK=traefik_public \ 85 | --env WHITELIST_FILTER=ancestor=traefik:v3.3.3@sha256:19884a9d0b922b321c9cff54cbfe43f3169893041b8dd4ea6100677afaddce46 \ 86 | --env SWARM_DAEMON=true \ 87 | --env TZ=America/Los_Angeles \ 88 | --replicas 1 \ 89 | --constraint node.role==manager \ 90 | kaysond/trafficjam 91 | ``` 92 | 93 | `docker-compose.yml`: 94 | ``` 95 | services: 96 | trafficjam: 97 | image: kaysond/trafficjam 98 | volumes: 99 | - /var/run/docker.sock:/var/run/docker.sock 100 | environment: 101 | NETWORK: traefik_public 102 | WHITELIST_FILTER: ancestor=traefik:v3.3.3@sha256:19884a9d0b922b321c9cff54cbfe43f3169893041b8dd4ea6100677afaddce46 103 | SWARM_DAEMON: "true" 104 | TZ: America/Los_Angeles 105 | deploy: 106 | replicas: 1 107 | placement: 108 | constraints: ['node.role==manager'] 109 | ``` 110 | 111 | ### Docker Socket Proxying 112 | The attack surface of trafficjam is very low because it is not exposed to any networks; it's nearly the same as running the bash scripts outside of docker. For this reason, bind-mounting the docker socket does not pose a significant security concern. Nonetheless, it is possible to use a docker socket proxy with some special setup. First, the proxy container must have a static IP address. Second, the environment variable `DOCKER_HOST` must be set on **trafficjam** to `tcp://<proxy-ip>:2375`. For more details, see #15; an example is sketched after the notes below. 113 | 114 | **Notes:** 115 | Docker Swarm services tag images with a sha256 hash to guarantee that every node runs the exact same container (since tags are mutable). When using the `ancestor` filter, ensure that the appropriate hash is included as shown in the examples. 116 | 117 | `trafficjam` requires the `NET_ADMIN` Linux capability in order to manipulate `iptables` rules. For Docker Swarm setups, `SYS_ADMIN` is also required in order to enter namespaces, though the setting of container capabilities is automatically handled by the `trafficjam` swarm daemon.
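For illustration, a hedged socket-proxy sketch in `docker-cli` form. The `tecnativa/docker-socket-proxy` image, the subnet, and the static IP below are assumptions for the example, not part of this repo:

```
# Create a network with a known subnet so the proxy can get a static IP
docker network create --subnet 172.30.0.0/24 socket_proxy

# Run the socket proxy with a fixed address; the env toggles grant
# read access to the API sections trafficjam queries
docker run --detach --name socket-proxy \
  --network socket_proxy --ip 172.30.0.2 \
  --volume /var/run/docker.sock:/var/run/docker.sock \
  --env CONTAINERS=1 --env NETWORKS=1 \
  tecnativa/docker-socket-proxy

# Point trafficjam at the proxy instead of the socket
docker run --detach --name trafficjam \
  --cap-add NET_ADMIN --network host \
  --env DOCKER_HOST=tcp://172.30.0.2:2375 \
  --env NETWORK=traefik_public \
  --env WHITELIST_FILTER="ancestor=traefik:latest" \
  kaysond/trafficjam
```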
118 | 119 | ## Configuration 120 | TrafficJam is configured via several environment variables: 121 | * **NETWORK** - The name of the Docker network this instance of TrafficJam should manage (multiple instances can be run for different networks) 122 | * **WHITELIST_FILTER** - A Docker `--filter` parameter that designates which containers should be permitted to openly access the network. See [Docker Docs - filtering](https://docs.docker.com/engine/reference/commandline/ps/#filtering) 123 | * **TZ** - Timezone (for logging) 124 | * **INSTANCE_ID** - A unique alphanumeric instance ID that is required to run multiple instances of trafficjam 125 | * **SWARM_DAEMON** - Setting this variable is required for Docker Swarm; it activates a daemon that determines network load balancer IP addresses and properly configures the trafficjam service 126 | * **SWARM_IMAGE** - The image the trafficjam swarm daemon should deploy (defaults to `kaysond/trafficjam`). The best practice is to pin this to a particular image hash (e.g. `kaysond/trafficjam:v1.0.0@sha256:8d41599fa564e058f7eb396016e229402730841fa43994124a8fb3a14f1a9122`) 127 | * **POLL_INTERVAL** - How often TrafficJam checks Docker for changes, in seconds (defaults to 5) 128 | * **ALLOW_HOST_TRAFFIC** - Allow containers to initiate communication with the docker host, and thus any port-mapped containers. Most users do not need this setting enabled. (See [ARCHITECTURE.md](ARCHITECTURE.md)). Note that if this setting is enabled while old rules exist, some will not be cleared automatically and must be removed manually (See [Clearing Rules](#clearing-rules)). 129 | * **DEBUG** - Setting this variable turns on debug logging 130 | 131 | ## Dependencies 132 | * Linux with iptables whose version is compatible with the iptables in TrafficJam (currently `1.8.10`) 133 | * **NOTE:** support for legacy iptables (non-nftables) is deprecated, not actively tested, and will be removed in a future release. 134 | * Modern version of Docker (trafficjam image and CI use 28.0.4) 135 | 136 | ## Known Limitations 137 | * IPv6 is currently unsupported 138 | * networks with multiple IPAM configurations (e.g. IPv4 and IPv6 subnets) are not supported 139 | 140 | ## Clearing Rules 141 | `trafficjam` can be run with the `--clear` argument to remove all rules that have been set. Note that the host docker socket must be mounted within the container. The rules can also be cleared by sending the `SIGUSR1` signal to the container; this will also cause `trafficjam` to exit. 142 | 143 | Examples: 144 | * `docker run --volume "/var/run/docker.sock:/var/run/docker.sock" --cap-add NET_ADMIN --network host kaysond/trafficjam --clear` 145 | * `docker kill --signal SIGUSR1 trafficjam` 146 |
-------------------------------------------------------------------------------- /test/containers/trafficjam_test/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker:28.0.4-dind 2 | 3 | ARG BATS_VER=v1.8.0 4 | 5 | #Install Testing Dependencies 6 | RUN apk add --no-cache bash curl && \ 7 | git clone --depth 1 --branch $BATS_VER https://github.com/bats-core/bats-core.git /opt/bats && \ 8 | /opt/bats/install.sh /usr/local 9 | 10 | #Copy Repo 11 | COPY .
/opt/trafficjam 12 | -------------------------------------------------------------------------------- /test/containers/whoami/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine3.21 AS binary 2 | ADD whoami.go /app/ 3 | WORKDIR /app 4 | RUN go mod init whoami && \ 5 | go build 6 | 7 | FROM alpine:3.21 8 | WORKDIR /app 9 | ENV PORT=8000 10 | EXPOSE 8000 11 | COPY --from=binary /app/whoami /app 12 | RUN apk add --no-cache curl 13 | CMD ["/app/whoami"] -------------------------------------------------------------------------------- /test/containers/whoami/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Jason Wilder 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /test/containers/whoami/README.md: -------------------------------------------------------------------------------- 1 | whoami 2 | ====== 3 | 4 | Simple HTTP docker service that prints its container ID 5 | 6 | $ docker run -d -p 8000:8000 --name whoami -t jwilder/whoami 7 | 736ab83847bb12dddd8b09969433f3a02d64d5b0be48f7a5c59a594e3a6a3541 8 | 9 | $ curl $(hostname --all-ip-addresses | awk '{print $1}'):8000 10 | I'm 736ab83847bb 11 |
-------------------------------------------------------------------------------- /test/containers/whoami/whoami.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | "fmt" 6 | "net/http" 7 | "log" 8 | ) 9 | 10 | func main() { 11 | port := os.Getenv("PORT") 12 | if port == "" { 13 | port = "8080" 14 | } 15 | 16 | fmt.Fprintf(os.Stdout, "Listening on :%s\n", port) 17 | hostname, _ := os.Hostname() 18 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { 19 | fmt.Fprintf(os.Stdout, "I'm %s\n", hostname) 20 | fmt.Fprintf(w, "I'm %s\n", hostname) 21 | }) 22 | 23 | 24 | log.Fatal(http.ListenAndServe(":" + port, nil)) 25 | } 26 | 27 |
-------------------------------------------------------------------------------- /test/docker-compose-dind-allowhost.yml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | trafficjam: 4 | container_name: trafficjam 5 | image: trafficjam 6 | network_mode: host 7 | volumes: 8 | - /var/run/docker.sock:/var/run/docker.sock 9 | environment: 10 | TZ: America/Los_Angeles 11 | POLL_INTERVAL: 1 12 | NETWORK: test_public 13 | WHITELIST_FILTER: ancestor=traefik:v3.3.3 14 | ALLOW_HOST_TRAFFIC: "true" 15 | DEBUG: "true" 16 | cap_add: 17 | - NET_ADMIN 18 | 19 | reverseproxy: 20 | container_name: traefik 21 | image: traefik:v3.3.3 22 | ports: 23 | - 80:80 24 | networks: 25 | public: 26 | private: 27 | 28 | private1: 29 | container_name: private1 30 | image: whoami 31 | ports: 32 | - "8000:8000" 33 | networks: 34 | private: 35 | 36 | public1: 37 | container_name: public1 38 | image: whoami 39 | ports: 40 | - "8001:8000" 41 | networks: 42 | public: 43 | 44 | public2: 45 | container_name: public2 46 | image: whoami 47 | ports: 48 | - "8002:8000" 49 | networks: 50 | public: 51 | 52 | networks: 53 | public: 54 | ipam: 55 | config: 56 | - subnet: "172.23.0.0/24" 57 | private: 58 | ipam: 59 | config: 60 | - subnet: "172.23.1.0/24"
-------------------------------------------------------------------------------- /test/docker-compose-dind-swarm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | trafficjam: 4 | image: trafficjam 5 | volumes: 6 | - /var/run/docker.sock:/var/run/docker.sock 7 | environment: 8 | TZ: America/Los_Angeles 9 | POLL_INTERVAL: 1 10 | NETWORK: test_public 11 | WHITELIST_FILTER: ancestor=traefik:v3.3.3@sha256:19884a9d0b922b321c9cff54cbfe43f3169893041b8dd4ea6100677afaddce46 12 | DEBUG: "true" 13 | SWARM_IMAGE: "trafficjam" 14 | SWARM_DAEMON: "true" 15 | deploy: 16 | replicas: 1 17 | placement: 18 | constraints: ['node.role==manager'] 19 | 20 | reverseproxy: 21 | hostname: "{{ .Service.Name }}.{{ .Task.Slot }}" 22 | image: traefik:v3.3.3@sha256:19884a9d0b922b321c9cff54cbfe43f3169893041b8dd4ea6100677afaddce46 23 | networks: 24 | public: 25 | private: 26 | deploy: 27 | mode: global 28 | 29 | private1: 30 | hostname: "{{ .Service.Name }}.{{ .Task.Slot }}"
31 | image: whoami 32 | ports: 33 | - "8000:8000" 34 | networks: 35 | private: 36 | deploy: 37 | replicas: 2 38 | placement: 39 | max_replicas_per_node: 1 40 | 41 | public1: 42 | hostname: "{{ .Service.Name }}.{{ .Task.Slot }}" 43 | image: whoami 44 | ports: 45 | - "8001:8000" 46 | networks: 47 | public: 48 | deploy: 49 | replicas: 2 50 | placement: 51 | max_replicas_per_node: 1 52 | 53 | public2: 54 | hostname: "{{ .Service.Name }}.{{ .Task.Slot }}" 55 | image: whoami 56 | ports: 57 | - "8002:8000" 58 | networks: 59 | public: 60 | deploy: 61 | replicas: 2 62 | placement: 63 | max_replicas_per_node: 1 64 | 65 | networks: 66 | public: 67 | ipam: 68 | config: 69 | - subnet: "172.23.0.0/24" 70 | private: 71 | ipam: 72 | config: 73 | - subnet: "172.23.1.0/24" -------------------------------------------------------------------------------- /test/docker-compose-dind.yml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | trafficjam: 4 | container_name: trafficjam 5 | image: trafficjam 6 | build: /opt/trafficjam 7 | network_mode: host 8 | volumes: 9 | - /var/run/docker.sock:/var/run/docker.sock 10 | environment: 11 | TZ: America/Los_Angeles 12 | POLL_INTERVAL: 1 13 | NETWORK: test_public 14 | WHITELIST_FILTER: ancestor=traefik:v3.3.3 15 | DEBUG: "true" 16 | cap_add: 17 | - NET_ADMIN 18 | 19 | reverseproxy: 20 | container_name: traefik 21 | image: traefik:v3.3.3 22 | networks: 23 | public: 24 | private: 25 | 26 | private1: 27 | container_name: private1 28 | image: whoami 29 | build: /opt/trafficjam/test/containers/whoami 30 | ports: 31 | - "8000:8000" 32 | networks: 33 | private: 34 | 35 | public1: 36 | container_name: public1 37 | image: whoami 38 | build: /opt/trafficjam/test/containers/whoami 39 | ports: 40 | - "8001:8000" 41 | networks: 42 | public: 43 | 44 | public2: 45 | container_name: public2 46 | image: whoami 47 | build: /opt/trafficjam/test/containers/whoami 48 | ports: 49 | - "8002:8000" 50 | networks: 51 | public: 52 | 53 | networks: 54 | public: 55 | ipam: 56 | config: 57 | - subnet: "172.23.0.0/24" 58 | private: 59 | ipam: 60 | config: 61 | - subnet: "172.23.1.0/24" -------------------------------------------------------------------------------- /test/docker-compose-swarm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | swarm-manager: 4 | image: trafficjam_test 5 | build: 6 | context: .. 7 | dockerfile: test/containers/trafficjam_test/Dockerfile 8 | container_name: swarm-manager 9 | privileged: true 10 | networks: 11 | - swarm 12 | volumes: 13 | - '$HOME/.docker/config.json:/root/.docker/config.json' 14 | 15 | swarm-worker: 16 | image: trafficjam_test 17 | build: 18 | context: .. 19 | dockerfile: test/containers/trafficjam_test/Dockerfile 20 | container_name: swarm-worker 21 | privileged: true 22 | networks: 23 | - swarm 24 | volumes: 25 | - '$HOME/.docker/config.json:/root/.docker/config.json' 26 | 27 | networks: 28 | swarm: -------------------------------------------------------------------------------- /test/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | trafficjam_test: 4 | image: trafficjam_test 5 | build: 6 | context: .. 
7 | dockerfile: test/containers/trafficjam_test/Dockerfile 8 | container_name: trafficjam_test 9 | privileged: true 10 | volumes: 11 | - '$HOME/.docker/config.json:/root/.docker/config.json'
-------------------------------------------------------------------------------- /test/test-dind-swarm.bats: -------------------------------------------------------------------------------- 1 | setup_file() { 2 | #Only run this on the manager 3 | if docker node ls &> /dev/null; then 4 | #Wait up to 3min for test swarm to reach desired state 5 | READY="" 6 | i=0 7 | LIMIT=36 8 | while [[ -z "$READY" ]]; do 9 | sleep 5 10 | READY="true" 11 | ERRORS=() 12 | 13 | #Images are built 14 | if ! docker image ls |& grep -q whoami; then 15 | READY="" 16 | ERRORS=("${ERRORS[@]}" "Images aren't built" "$(docker image ls)") 17 | fi 18 | 19 | #All containers are started 20 | if [[ "$(docker ps 2> /dev/null | wc -l)" != "7" ]]; then 21 | READY="" 22 | ERRORS=("${ERRORS[@]}" "Containers aren't started" "$(docker ps)") 23 | fi 24 | 25 | if docker service ls | grep -q trafficjam_DEFAULT; then 26 | #Two trafficjam tasks exist with ALLOWED_SWARM_IPS env vars set 27 | if [[ "$(docker inspect --format '{{ .Spec.ContainerSpec.Env }}' $(docker service ps --quiet --filter desired-state=running trafficjam_DEFAULT) | \ 28 | grep -cE 'ALLOWED_SWARM_IPS=172\.23\.0\.[[:digit:]]{1,3} 172\.23\.0\.[[:digit:]]{1,3} 172\.23\.0\.[[:digit:]]{1,3} 172\.23\.0\.[[:digit:]]{1,3}')" != "2" ]]; then 29 | READY="" 30 | ERRORS=("${ERRORS[@]}" "trafficjam tasks aren't ready" "$(docker inspect --format '{{ .Spec.ContainerSpec.Env }}' $(docker service ps --quiet --filter desired-state=running trafficjam_DEFAULT))") 31 | fi 32 | 33 | #All rules are added on both running trafficjam tasks 34 | for TASKID in $(docker service ps --quiet --filter desired-state=running trafficjam_DEFAULT); do 35 | if [[ "$(docker service logs --tail 30 trafficjam_DEFAULT | grep "${TASKID:0:9}" | awk -F']' '{ print $2 }' | grep -v Whitelisted | tail -n 11 | grep -c 'DEBUG: Error Count: 0')" != "2" ]]; then 36 | READY="" 37 | ERRORS=("${ERRORS[@]}" "rules are not added on task $TASKID" "$(docker logs $(docker ps --quiet --filter 'name=trafficjam_DEFAULT') | awk -F']' '{ print $2 }' | grep -v Whitelisted | tail -n 6)") 38 | fi 39 | done 40 | else 41 | READY="" 42 | ERRORS=("${ERRORS[@]}" "trafficjam service doesn't exist" "$(docker service ls)") 43 | fi 44 | 45 | #All whoami services are running 46 | if [[ "$(docker inspect --format '{{ .Status.State }}' $(docker service ps -q test_public1 | head -n1))" != "running" || \ 47 | "$(docker inspect --format '{{ .Status.State }}' $(docker service ps -q test_public2 | head -n1))" != "running" || \ 48 | "$(docker inspect --format '{{ .Status.State }}' $(docker service ps -q test_private1 | head -n1))" != "running" ]]; then 49 | READY="" 50 | ERRORS=("${ERRORS[@]}" "whoami services aren't ready" "$(docker service ls)" "$(docker service ps test_public1)" "$(docker service ps test_public2)" "$(docker service ps test_private1)" ) 51 | fi 52 | 53 | if (( ++i >= LIMIT )); then 54 | echo "Timed out waiting for swarm state to converge" >&2 55 | printf "%s\n" "${ERRORS[@]}" >&2 56 | exit 1 57 | fi 58 | done 59 | fi 60 | export RP_ID=$(docker ps --quiet --filter 'name=test_reverseproxy') 61 | export TJ_ID=$(docker ps --quiet --filter 'name=trafficjam_DEFAULT') 62 | export TPU1_ID=$(docker ps --quiet --filter 'name=test_public1') 63 | export TPU2_ID=$(docker ps --quiet --filter 'name=test_public2') 64 | export TPR1_ID=$(docker ps --quiet
--filter 'name=test_private1') 65 | docker exec "$RP_ID" apk add --no-cache curl 66 | } 67 | 68 | @test "whitelisted containers can communicate with all other containers on the specified network" { 69 | # Each is run twice to hit both nodes 70 | docker exec "$RP_ID" curl --verbose --max-time 5 test_public1:8000 71 | docker exec "$RP_ID" curl --verbose --max-time 5 test_public1:8000 72 | 73 | docker exec "$RP_ID" curl --verbose --max-time 5 test_public2:8000 74 | docker exec "$RP_ID" curl --verbose --max-time 5 test_public2:8000 75 | 76 | # Also hit the containers by interface IP 77 | # This only works on the manager 78 | if docker node ls &> /dev/null; then 79 | IPS="$(docker inspect --format '{{ (index (index .NetworksAttachments 1).Addresses 0) }}' $(docker service ps --quiet --filter desired-state=running test_public1 test_public2) | sed 's#/24$##')" 80 | for IP in $IPS; do 81 | docker exec "$RP_ID" curl --verbose --max-time 5 "$IP:8000" 82 | done 83 | fi 84 | } 85 | 86 | @test "containers on the specified network can not communicate with one another" { 87 | run docker exec "$TPU1_ID" ping -c 2 -w 10 test_public2 88 | [ "$status" -eq 1 ] 89 | 90 | run docker exec "$TPU1_ID" curl --verbose --max-time 5 test_public2:8000 91 | [ "$status" -eq 7 -o "$status" -eq 28 ] 92 | run docker exec "$TPU1_ID" curl --verbose --max-time 5 test_public2:8000 93 | [ "$status" -eq 7 -o "$status" -eq 28 ] 94 | } 95 | 96 | @test "containers on the specified network can not communicate with one another (opposite direction)" { 97 | run docker exec "$TPU2_ID" ping -c 2 -w 10 test_public1 98 | [ "$status" -eq 1 ] 99 | 100 | run docker exec "$TPU2_ID" curl --verbose --max-time 5 test_public1:8000 101 | [ "$status" -eq 7 -o "$status" -eq 28 ] 102 | run docker exec "$TPU2_ID" curl --verbose --max-time 5 test_public1:8000 103 | [ "$status" -eq 7 -o "$status" -eq 28 ] 104 | } 105 | 106 | @test "containers on the specified network can not communicate with others via host-mapped ports" { 107 | run docker exec "$TPU1_ID" sh -c "curl --verbose --max-time 5 `ip route | grep default | awk '{ print $3 }'`:8002" #get to host via default gateway 108 | [ "$status" -eq 7 -o "$status" -eq 28 ] 109 | 110 | run docker exec "$TPU1_ID" sh -c "curl --verbose --max-time 5 `ip route | grep default | awk '{ print $3 }'`:8002" #get to host via default gateway 111 | [ "$status" -eq 7 -o "$status" -eq 28 ] 112 | } 113 | 114 | @test "containers on non-specified networks can communicate" { 115 | docker exec "$TPR1_ID" curl --verbose --max-time 5 test_reverseproxy 116 | docker exec "$TPR1_ID" curl --verbose --max-time 5 test_reverseproxy 117 | 118 | docker exec "$RP_ID" curl --verbose --max-time 5 test_private1:8000 119 | docker exec "$RP_ID" curl --verbose --max-time 5 test_private1:8000 120 | } -------------------------------------------------------------------------------- /test/test-dind.bats: -------------------------------------------------------------------------------- 1 | setup_file() { 2 | if iptables -t filter -L | grep -q trafficjam; then 3 | echo "Found existing trafficjam rules" >&2 4 | exit 1 5 | fi 6 | docker compose -f /opt/trafficjam/test/docker-compose-dind.yml up -d 7 | docker exec traefik apk add --no-cache curl 8 | } 9 | 10 | @test "whoami containers are responsive" { 11 | curl --verbose --max-time 5 localhost:8000 12 | 13 | curl --verbose --max-time 5 localhost:8001 14 | 15 | curl --verbose --max-time 5 localhost:8002 16 | } 17 | 18 | @test "whitelisted containers can communicate with all other containers on the 
specified network" { 19 | #Sometimes this ping fails for no reason on github CI, so try it again 20 | docker exec traefik ping -c 2 -w 10 public1 || docker exec traefik ping -c 2 -w 10 public1 21 | 22 | docker exec traefik curl --verbose --max-time 5 public1:8000 23 | 24 | docker exec traefik ping -c 2 -w 10 public2 || docker exec traefik ping -c 2 -w 10 public2 25 | 26 | docker exec traefik curl --verbose --max-time 5 public2:8000 27 | } 28 | 29 | @test "containers on the specified network can not communicate with one another" { 30 | run docker exec public1 ping -c 2 -w 10 public2 31 | [ "$status" -eq 1 ] 32 | 33 | run docker exec public1 curl --verbose --max-time 5 public2:8000 34 | [ "$status" -eq 7 -o "$status" -eq 28 ] 35 | } 36 | 37 | @test "containers on the specified network can not communicate with one another (opposite direction)" { 38 | run docker exec public2 ping -c 2 -w 10 public1 39 | [ "$status" -eq 1 ] 40 | 41 | run docker exec public2 curl --verbose --max-time 5 public1:8000 42 | [ "$status" -eq 7 -o "$status" -eq 28 ] 43 | } 44 | 45 | @test "containers on the specified network can not communicate with others via host-mapped ports" { 46 | run docker exec public1 sh -c "curl --verbose --max-time 5 `ip route | grep default | awk '{ print $3 }'`:8002" #get to host via default gateway 47 | [ "$status" -eq 7 -o "$status" -eq 28 ] 48 | } 49 | 50 | @test "containers on non-specified networks can communicate" { 51 | docker exec private1 ping -c 2 -w 10 traefik 52 | docker exec traefik ping -c 2 -w 10 private1 53 | docker exec traefik curl --verbose --max-time 5 private1:8000 54 | } 55 | 56 | @test "clearing rules with SIGUSR1 works properly" { 57 | docker kill --signal SIGUSR1 trafficjam 58 | sleep 5 59 | run bash -c "docker ps | grep trafficjam" 60 | [ "$status" -eq 1 ] 61 | [ "$(iptables --numeric --list TRAFFICJAM | wc -l)" -eq 2 ] 62 | [ "$(iptables --numeric --list TRAFFICJAM_INPUT | wc -l)" -eq 2 ] 63 | } 64 | 65 | @test "deploy with ALLOW_HOST_TRAFFIC" { 66 | docker compose -f /opt/trafficjam/test/docker-compose-dind.yml down 67 | sleep 5 68 | docker compose -f /opt/trafficjam/test/docker-compose-dind-allowhost.yml up -d 69 | } 70 | 71 | @test "containers can communicate via host-mapped ports (public1)" { 72 | HOST_IP=$(ip route get 8.8.8.8 | awk -F"src " 'NR==1{split($2,a," ");print a[1]}') 73 | docker exec public1 ping -c 2 -w 10 "$HOST_IP" 74 | docker exec public1 curl --verbose --max-time 5 "$HOST_IP":80 75 | docker exec public1 curl --verbose --max-time 5 "$HOST_IP":8000 76 | docker exec public1 curl --verbose --max-time 5 "$HOST_IP":8002 77 | } 78 | 79 | @test "containers can communicate via host-mapped ports (public2)" { 80 | HOST_IP=$(ip route get 8.8.8.8 | awk -F"src " 'NR==1{split($2,a," ");print a[1]}') 81 | docker exec public2 ping -c 2 -w 10 "$HOST_IP" 82 | docker exec public2 curl --verbose --max-time 5 "$HOST_IP":80 83 | docker exec public2 curl --verbose --max-time 5 "$HOST_IP":8000 84 | docker exec public2 curl --verbose --max-time 5 "$HOST_IP":8001 85 | } 86 | 87 | @test "clearing rules with a command works properly" { 88 | docker run \ 89 | --volume "/var/run/docker.sock:/var/run/docker.sock" \ 90 | --cap-add NET_ADMIN \ 91 | --network host \ 92 | trafficjam --clear 93 | iptables --numeric --list 94 | [ "$(iptables --numeric --list TRAFFICJAM | wc -l)" -eq 2 ] 95 | [ "$(iptables --numeric --list TRAFFICJAM_INPUT | wc -l)" -eq 2 ] 96 | } -------------------------------------------------------------------------------- /test/test.bats: 
-------------------------------------------------------------------------------- 1 | @test "Deploy the non-swarm environment" { 2 | docker compose --file "$BATS_TEST_DIRNAME"/docker-compose.yml --project-name trafficjam_test up --detach 3 | while ! docker exec trafficjam_test docker info &> /dev/null; do 4 | if (( ++i > 24 )); then 5 | echo "Timed out waiting for docker in docker to start up. Logs:" >&2 6 | docker logs trafficjam_test >&2 7 | exit 1 8 | fi 9 | sleep 5 10 | done 11 | } 12 | 13 | @test "Test the non-swarm environment" { 14 | docker exec trafficjam_test bats /opt/trafficjam/test/test-dind.bats 15 | } 16 | 17 | @test "Deploy the swarm environment" { 18 | docker compose --file "$BATS_TEST_DIRNAME"/docker-compose-swarm.yml --project-name trafficjam_test_swarm up --detach 19 | while ! docker exec swarm-manager docker info &> /dev/null || ! docker exec swarm-worker docker info &> /dev/null; do 20 | if (( ++i > 24 )); then 21 | echo "Timed out waiting for docker in docker to start up. Logs:" >&2 22 | docker logs swarm-manager 23 | docker logs swarm-worker 24 | exit 1 25 | fi 26 | sleep 5 27 | done 28 | docker exec swarm-manager docker swarm init 29 | docker exec swarm-worker $(docker exec swarm-manager docker swarm join-token worker | grep "join --token") 30 | sleep 5 31 | docker exec swarm-manager docker build -t trafficjam /opt/trafficjam 32 | docker exec swarm-manager docker build -t whoami /opt/trafficjam/test/containers/whoami 33 | docker exec swarm-worker docker build -t trafficjam /opt/trafficjam 34 | docker exec swarm-worker docker build -t whoami /opt/trafficjam/test/containers/whoami 35 | docker exec swarm-manager docker stack deploy -c /opt/trafficjam/test/docker-compose-dind-swarm.yml test 36 | } 37 | 38 | @test "Test the swarm manager" { 39 | docker exec swarm-manager bats /opt/trafficjam/test/test-dind-swarm.bats 40 | } 41 | 42 | @test "Test the swarm worker" { 43 | docker exec swarm-worker bats /opt/trafficjam/test/test-dind-swarm.bats 44 | } 45 | 46 | @test "killing the swarm daemon removes the service" { 47 | docker exec swarm-manager docker service rm test_trafficjam 48 | sleep 5 49 | run bash -c "docker exec swarm-manager docker service ls | grep trafficjam_DEFAULT" 50 | [ "$status" -eq 1 ] 51 | } 52 | 53 | function teardown_file() { 54 | docker compose --file "$BATS_TEST_DIRNAME"/docker-compose.yml --project-name trafficjam_test down 55 | docker compose --file "$BATS_TEST_DIRNAME"/docker-compose-swarm.yml --project-name trafficjam_test_swarm down 56 | docker image rm --force trafficjam_bats trafficjam_test trafficjam_test_whoami 57 | } 58 | -------------------------------------------------------------------------------- /trafficjam-diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kaysond/trafficjam/0a9d75e18a187193c4142ce8e38c6ef14b12639a/trafficjam-diagram.png -------------------------------------------------------------------------------- /trafficjam-functions.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | function tj_trap() { 3 | log_debug "Trapping signal" 4 | if [[ -n "$SWARM_DAEMON" ]]; then 5 | remove_service || exit 1 6 | fi 7 | exit 0 8 | } 9 | 10 | function tj_sleep() { 11 | #Slow logging on errors 12 | log_debug "Error Count: $ERRCOUNT" 13 | if ((ERRCOUNT > 10)); then 14 | SLEEP_TIME=$((POLL_INTERVAL * 11)) 15 | else 16 | SLEEP_TIME=$((POLL_INTERVAL * (ERRCOUNT + 1))) 17 | fi 18 | 19 | # This pattern, along 
with the trap above, allows for quick script exits 20 | sleep "${SLEEP_TIME}s" & 21 | wait $! 22 | } 23 | 24 | function log() { 25 | echo "[$(date "+%Y-%m-%d %H:%M:%S")] $1" 26 | } 27 | 28 | function log_error() { 29 | echo "[$(date "+%Y-%m-%d %H:%M:%S")] ERROR: $1" >&2 30 | ERRCOUNT=$((ERRCOUNT + 1)) 31 | } 32 | 33 | function log_debug() { 34 | if [[ -n "$DEBUG" ]]; then 35 | echo "[$(date "+%Y-%m-%d %H:%M:%S")] DEBUG: $1" 36 | fi 37 | } 38 | 39 | function detect_iptables_version() { 40 | IPTABLES_CMD=iptables-nft 41 | if ! iptables-nft --numeric --list DOCKER-USER &> /dev/null; then 42 | IPTABLES_CMD=iptables-legacy 43 | log "DEPRECATION NOTICE: support for legacy iptables is deprecated and will be removed in a future release" 44 | fi 45 | } 46 | 47 | function clear_rules() { 48 | if [[ -z "${NETWORK_DRIVER:-}" ]]; then 49 | get_network_driver || NETWORK_DRIVER=local 50 | fi 51 | 52 | if [[ "$NETWORK_DRIVER" == "overlay" ]]; then 53 | get_netns 54 | fi 55 | 56 | DATE=$(date "+%Y-%m-%d %H:%M:%S") 57 | remove_old_rules TRAFFICJAM || true #this would normally fail if no rules exist but we don't want to exit 58 | remove_old_rules TRAFFICJAM_INPUT || true 59 | 60 | exit 0 61 | } 62 | 63 | function remove_service() { 64 | local ID 65 | if ID=$(docker service ls --quiet --filter "label=trafficjam.id=$INSTANCE_ID") && [[ -n "$ID" ]]; then 66 | local RESULT 67 | if ! RESULT=$(docker service rm "$ID" 2>&1); then 68 | log_error "Unexpected error while removing existing service: $RESULT" 69 | else 70 | log "Removed service $ID: $RESULT" 71 | fi 72 | else 73 | log_debug "No existing service found to remove" 74 | fi 75 | } 76 | 77 | function deploy_service() { 78 | if ! docker inspect "$(docker service ls --quiet --filter "label=trafficjam.id=$INSTANCE_ID")" &> /dev/null; then 79 | if ! SERVICE_ID=$( 80 | # rslave bind-propagation is needed so that any new mounts in the host docker/netns appear in the container 81 | docker service create \ 82 | --quiet \ 83 | --detach \ 84 | --name "trafficjam_$INSTANCE_ID" \ 85 | --mount type=bind,source=/var/run/docker.sock,destination=/var/run/docker.sock \ 86 | --mount type=bind,source=/var/run/docker/netns,destination=/var/run/netns,bind-propagation=rslave \ 87 | --env TZ="$TZ" \ 88 | --env POLL_INTERVAL="$POLL_INTERVAL" \ 89 | --env NETWORK="$NETWORK" \ 90 | --env WHITELIST_FILTER="$WHITELIST_FILTER" \ 91 | --env DEBUG="$DEBUG" \ 92 | --env SWARM_WORKER=true \ 93 | --cap-add NET_ADMIN \ 94 | --cap-add SYS_ADMIN \ 95 | --mode global \ 96 | --restart-condition on-failure \ 97 | --network host \ 98 | --label trafficjam.id="$INSTANCE_ID" \ 99 | "$SWARM_IMAGE" 2>&1 100 | ); then 101 | log_error "Unexpected error while deploying service: $SERVICE_ID" 102 | return 1 103 | else 104 | #docker service create may print warnings to stderr even if it succeeds 105 | #particularly due to the trafficjam image not being accessible in a registry during CI 106 | SERVICE_ID=$(printf '%s' "$SERVICE_ID" | tail -n1) 107 | log "Created service trafficjam_$INSTANCE_ID: $SERVICE_ID" 108 | fi 109 | else 110 | log_debug "Existing service found, not deploying" 111 | fi 112 | } 113 | 114 | function get_allowed_swarm_ips() { 115 | local RESULT 116 | if !
RESULT=$(docker service inspect --format '{{ if .UpdateStatus }}{{ .UpdateStatus.State }}{{ end }}' "$SERVICE_ID" 2>&1); then 117 | log_error "Unexpected error while getting service update state: $RESULT" 118 | return 1 119 | elif [[ "$RESULT" != "updating" ]]; then 120 | #Filter out any service container that is not running 121 | local CONT_IDS 122 | if ! CONT_IDS=$(docker service ps --quiet --filter desired-state=running "$SERVICE_ID" | cut -c -12 2>&1); then 123 | log_error "Unexpected error while determining service container IDs: $CONT_IDS" 124 | return 1 125 | fi 126 | local SERVICE_LOGS 127 | if ! SERVICE_LOGS=$(docker service logs --timestamps --since "$SERVICE_LOGS_SINCE" "$SERVICE_ID" 2>&1); then 128 | log_error "Unexpected error while retrieving service logs: $SERVICE_LOGS" 129 | return 1 130 | fi 131 | # We have to only grab the latest log entries because of https://github.com/moby/moby/issues/38640 132 | SERVICE_LOGS_SINCE=$(tail -n1 <<< "$SERVICE_LOGS" | cut -d ' ' -f 1) 133 | 134 | #This mess searches the service logs for running containers' "#WHITELIST_IPS#" output 135 | #and saves the most recent output from each container into the variable 136 | if ! ALLOWED_SWARM_IPS=$({ printf '%s' "$SERVICE_LOGS" | 137 | grep -E "$(printf '(%s)' "$CONT_IDS" | tr '\n' '|')" | 138 | grep -E "#WHITELIST_IPS#" | 139 | # reverse the lines 140 | tac | 141 | # only get the first (newest) log entry per container 142 | awk '!a[$1]++ { print }' | 143 | # delete everything up to and including the tag 144 | sed 's/^.*#WHITELIST_IPS#//' | 145 | # one IP per line 146 | tr ' ' '\n' | 147 | sort -t . -d | 148 | uniq | 149 | # back to one line for nicer debug log output 150 | tr '\n' ' '; } 2>&1); then 151 | log_debug "No swarm whitelist ips found" 152 | ALLOWED_SWARM_IPS="$OLD_ALLOWED_SWARM_IPS" 153 | else 154 | log_debug "Allowed Swarm IPs: $ALLOWED_SWARM_IPS" 155 | fi 156 | else 157 | log_debug "Skipping swarm ip check because service is still updating" 158 | fi 159 | } 160 | 161 | function update_service() { 162 | local RESULT 163 | if ! RESULT=$(docker service update --detach --env-add "ALLOWED_SWARM_IPS=$ALLOWED_SWARM_IPS" "$SERVICE_ID" 2>&1); then 164 | log_error "Unexpected error while updating service: $RESULT" 165 | else 166 | log "Updated service $SERVICE_ID" 167 | fi 168 | } 169 | 170 | function get_network_driver() { 171 | if ! NETWORK_DRIVER=$(docker network inspect --format="{{ .Driver }}" "$NETWORK" 2>&1) || [[ -z "$NETWORK_DRIVER" ]]; then 172 | if [[ -n "$SWARM_WORKER" ]]; then 173 | log_debug "Network was not found, but this is a swarm node" 174 | else 175 | log_error "Unexpected error while determining network driver: $NETWORK_DRIVER" 176 | fi 177 | return 1 178 | else 179 | log_debug "Network driver of $NETWORK is $NETWORK_DRIVER" 180 | fi 181 | } 182 | 183 | function get_network_subnet() { 184 | if ! SUBNET=$(docker network inspect --format="{{ (index .IPAM.Config 0).Subnet }}" "$NETWORK" 2>&1) || [[ -z "$SUBNET" ]]; then 185 | log_error "Unexpected error while determining network subnet: $SUBNET" 186 | return 1 187 | else 188 | log_debug "Subnet of $NETWORK is $SUBNET" 189 | fi 190 | } 191 | 192 | function get_whitelisted_container_ips() { 193 | local CONTAINER_IDS 194 | if ! 
CONTAINER_IDS=$(docker ps --filter "$WHITELIST_FILTER" --filter network="$NETWORK" --format="{{ .ID }}" 2>&1); then 195 | log_error "Unexpected error while getting whitelist container IDs: $CONTAINER_IDS" 196 | return 1 197 | fi 198 | 199 | if [[ -z "$CONTAINER_IDS" ]]; then 200 | WHITELIST_IPS="" 201 | log_debug "No containers matched the whitelist" 202 | return 0 203 | fi 204 | 205 | log_debug "Whitelisted containers: $CONTAINER_IDS" 206 | 207 | if ! WHITELIST_IPS=$(xargs docker inspect --format="{{ (index .NetworkSettings.Networks \"$NETWORK\").IPAddress }}" <<< "$CONTAINER_IDS" 2>&1) || [[ -z "$WHITELIST_IPS" ]]; then 208 | log_error "Unexpected error while getting whitelisted container IPs: ${WHITELIST_IPS}" 209 | return 1 210 | fi 211 | 212 | log_debug "Whitelisted container IPs: $WHITELIST_IPS" 213 | 214 | } 215 | 216 | function get_netns() { 217 | if ! NETWORK_ID=$(docker network inspect --format="{{.ID}}" "$NETWORK") || [ -z "$NETWORK_ID" ]; then 218 | log_error "Could not retrieve ID for network $NETWORK" 219 | return 1 220 | else 221 | log_debug "ID of network $NETWORK is $NETWORK_ID" 222 | fi 223 | 224 | for f in /var/run/netns/*; do 225 | case $(basename "$f") in 226 | lb_*) true ;; 227 | *"${NETWORK_ID:0:9}"*) NETNS="$f" ;; 228 | esac 229 | done 230 | if [[ -z "$NETNS" ]]; then 231 | if [[ -n "$SWARM_WORKER" ]]; then 232 | log_debug "No container on network $NETWORK on this node, skipping" 233 | else 234 | log_error "Could not retrieve network namespace for network ID $NETWORK_ID" 235 | return 1 236 | fi 237 | else 238 | log_debug "Network namespace of $NETWORK (ID: $NETWORK_ID) is $NETNS" 239 | fi 240 | } 241 | 242 | function get_local_load_balancer_ip() { 243 | if ! LOCAL_LOAD_BALANCER_IP=$(docker network inspect "$NETWORK" --format "{{ (index .Containers \"lb-$NETWORK\").IPv4Address }}" | awk -F/ '{ print $1 }') || { [ -z "$LOCAL_LOAD_BALANCER_IP" ] && [[ -z "$SWARM_WORKER" ]]; }; then 244 | log_error "Could not retrieve load balancer IP for network $NETWORK" 245 | return 1 246 | fi 247 | 248 | if [[ -z "$LOCAL_LOAD_BALANCER_IP" ]] && [[ -n "$SWARM_WORKER" ]]; then 249 | log_debug "No load balancer found on this node" 250 | else 251 | log_debug "Load balancer IP of $NETWORK is $LOCAL_LOAD_BALANCER_IP" 252 | fi 253 | } 254 | 255 | function iptables_tj() { 256 | if [[ "$NETWORK_DRIVER" == "overlay" ]]; then 257 | nsenter --net="$NETNS" -- "$IPTABLES_CMD" "$@" 258 | else 259 | $IPTABLES_CMD "$@" 260 | fi 261 | } 262 | 263 | function add_chain() { 264 | local RESULT 265 | if ! iptables_tj --table filter --numeric --list TRAFFICJAM >&/dev/null; then 266 | if ! RESULT=$(iptables_tj --new TRAFFICJAM 2>&1); then 267 | if [[ -z "$SWARM_WORKER" ]]; then 268 | log_error "Unexpected error while adding chain TRAFFICJAM: $RESULT" 269 | return 1 270 | else 271 | # Ugly workaround for nsenter: setns(): can't reassociate to namespace: Invalid argument 272 | log_error "Unexpected error while adding chain TRAFFICJAM: $RESULT." 273 | log_error "killing container to get access to the new network namespace (ugly workaround)" 274 | kill 1 275 | fi 276 | else 277 | log "Added chain: TRAFFICJAM" 278 | fi 279 | fi 280 | 281 | local CHAIN 282 | if [[ "$NETWORK_DRIVER" == "overlay" ]]; then 283 | CHAIN="FORWARD" 284 | else 285 | CHAIN="DOCKER-USER" 286 | fi 287 | 288 | if ! iptables_tj --table filter --numeric --list "$CHAIN" | grep "TRAFFICJAM" >&/dev/null; then 289 | if ! 
RESULT=$(iptables_tj --table filter --insert "$CHAIN" --jump TRAFFICJAM 2>&1); then 290 | log_error "Unexpected error while adding jump rule: $RESULT" 291 | return 1 292 | else 293 | log "Added rule: --table filter --insert $CHAIN --jump TRAFFICJAM" 294 | fi 295 | fi 296 | } 297 | 298 | function block_subnet_traffic() { 299 | local RESULT 300 | if ! RESULT=$(iptables_tj --table filter --insert TRAFFICJAM --source "$SUBNET" --destination "$SUBNET" --jump DROP --match comment --comment "trafficjam_$INSTANCE_ID $DATE" 2>&1); then 301 | log_error "Unexpected error while setting subnet blocking rule: $RESULT" 302 | return 1 303 | else 304 | log "Added rule: --table filter --insert TRAFFICJAM --source $SUBNET --destination $SUBNET --jump DROP" 305 | fi 306 | } 307 | 308 | function add_input_chain() { 309 | local RESULT 310 | if ! iptables_tj --table filter --numeric --list TRAFFICJAM_INPUT >&/dev/null; then 311 | if ! RESULT=$(iptables_tj --new TRAFFICJAM_INPUT); then 312 | log_error "Unexpected error while adding chain TRAFFICJAM_INPUT: $RESULT" 313 | return 1 314 | else 315 | log "Added chain: TRAFFICJAM_INPUT" 316 | fi 317 | fi 318 | if ! iptables_tj --table filter --numeric --list INPUT | grep "TRAFFICJAM_INPUT" >&/dev/null; then 319 | if ! RESULT=$(iptables_tj --table filter --insert INPUT --jump TRAFFICJAM_INPUT); then 320 | log_error "Unexpected error while adding jump rule: $RESULT" 321 | return 1 322 | else 323 | log "Added rule: --table filter --insert INPUT --jump TRAFFICJAM_INPUT" 324 | fi 325 | fi 326 | } 327 | 328 | function block_host_traffic() { 329 | local RESULT 330 | #Drop local socket-bound packets coming from the target subnet 331 | if ! RESULT=$(iptables_tj --table filter --insert TRAFFICJAM_INPUT --source "$SUBNET" --jump DROP --match comment --comment "trafficjam_$INSTANCE_ID $DATE" 2>&1); then 332 | log_error "Unexpected error while setting host blocking rules: $RESULT" 333 | return 1 334 | else 335 | log "Added rule: --table filter --insert TRAFFICJAM_INPUT --source $SUBNET --jump DROP" 336 | fi 337 | 338 | #But allow them if the connection was initiated by the host 339 | if ! RESULT=$(iptables_tj --table filter --insert TRAFFICJAM_INPUT --source "$SUBNET" --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN --match comment --comment "trafficjam_$INSTANCE_ID $DATE" 2>&1); then 340 | log_error "Unexpected error while setting host blocking rules: $RESULT" 341 | return 1 342 | else 343 | log "Added rule: --table filter --insert TRAFFICJAM_INPUT --source $SUBNET --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN" 344 | fi 345 | } 346 | 347 | function report_local_whitelist_ips() { 348 | log "#WHITELIST_IPS#$WHITELIST_IPS $LOCAL_LOAD_BALANCER_IP" 349 | } 350 | 351 | function allow_local_load_balancer_traffic() { 352 | if ! RESULT=$(iptables_tj --table filter --insert TRAFFICJAM --source "$LOCAL_LOAD_BALANCER_IP" --destination "$SUBNET" --jump RETURN --match comment --comment "trafficjam_$INSTANCE_ID $DATE" 2>&1); then 353 | log_error "Unexpected error while setting load balancer allow rule: $RESULT" 354 | return 1 355 | else 356 | log "Added rule: --table filter --insert TRAFFICJAM --source $LOCAL_LOAD_BALANCER_IP --destination $SUBNET --jump RETURN" 357 | fi 358 | } 359 | 360 | function allow_swarm_whitelist_traffic() { 361 | if [[ -n "$ALLOWED_SWARM_IPS" ]]; then 362 | for IP in $ALLOWED_SWARM_IPS; do 363 | if ! grep -q "$IP" <<< "$WHITELIST_IPS" && ! grep -q "$IP" <<< "$LOCAL_LOAD_BALANCER_IP"; then 364 | if ! 
347 | function report_local_whitelist_ips() {
348 | 	log "#WHITELIST_IPS#$WHITELIST_IPS $LOCAL_LOAD_BALANCER_IP"
349 | }
350 | 
351 | function allow_local_load_balancer_traffic() {
352 | 	if ! RESULT=$(iptables_tj --table filter --insert TRAFFICJAM --source "$LOCAL_LOAD_BALANCER_IP" --destination "$SUBNET" --jump RETURN --match comment --comment "trafficjam_$INSTANCE_ID $DATE" 2>&1); then
353 | 		log_error "Unexpected error while setting load balancer allow rule: $RESULT"
354 | 		return 1
355 | 	else
356 | 		log "Added rule: --table filter --insert TRAFFICJAM --source $LOCAL_LOAD_BALANCER_IP --destination $SUBNET --jump RETURN"
357 | 	fi
358 | }
359 | 
360 | function allow_swarm_whitelist_traffic() {
361 | 	if [[ -n "$ALLOWED_SWARM_IPS" ]]; then
362 | 		for IP in $ALLOWED_SWARM_IPS; do
363 | 			if ! grep -qwF "$IP" <<< "$WHITELIST_IPS" && ! grep -qwF "$IP" <<< "$LOCAL_LOAD_BALANCER_IP"; then
364 | 				if ! RESULT=$(iptables_tj --table filter --insert TRAFFICJAM --source "$IP" --destination "$SUBNET" --jump RETURN --match comment --comment "trafficjam_$INSTANCE_ID $DATE" 2>&1); then
365 | 					log_error "Unexpected error while setting allow swarm whitelist rule: $RESULT"
366 | 					return 1
367 | 				else
368 | 					log "Added rule: --table filter --insert TRAFFICJAM --source $IP --destination $SUBNET --jump RETURN"
369 | 				fi
370 | 			else
371 | 				log_debug "$IP is local; skipping in swarm whitelist rules"
372 | 			fi
373 | 		done
374 | 	fi
375 | }
376 | 
377 | function allow_local_whitelist_traffic() {
378 | 	local IP
379 | 	local RESULT
380 | 	for IP in $WHITELIST_IPS; do
381 | 		if ! RESULT=$(iptables_tj --table filter --insert TRAFFICJAM --source "$IP" --destination "$SUBNET" --jump RETURN --match comment --comment "trafficjam_$INSTANCE_ID $DATE" 2>&1); then
382 | 			log_error "Unexpected error while setting whitelist allow rule: $RESULT"
383 | 			return 1
384 | 		else
385 | 			log "Added rule: --table filter --insert TRAFFICJAM --source $IP --destination $SUBNET --jump RETURN"
386 | 		fi
387 | 	done
388 | 	if ! RESULT=$(iptables_tj --table filter --insert TRAFFICJAM --source "$SUBNET" --destination "$SUBNET" --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN --match comment --comment "trafficjam_$INSTANCE_ID $DATE" 2>&1); then
389 | 		log_error "Unexpected error while setting whitelist allow rule: $RESULT"
390 | 		return 1
391 | 	else
392 | 		log "Added rule: --table filter --insert TRAFFICJAM --source $SUBNET --destination $SUBNET --match conntrack --ctstate RELATED,ESTABLISHED --jump RETURN"
393 | 	fi
394 | }
395 | 
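# Illustrative sketch (editorial, not in the original source): every allow
# rule above uses --insert, so it lands above the DROP added by
# block_subnet_traffic. With a hypothetical SUBNET=10.0.1.0/24 and one
# whitelisted container at 10.0.1.5, the TRAFFICJAM chain is evaluated
# top-down roughly as:
#
#   RETURN  10.0.1.0/24 -> 10.0.1.0/24  ctstate RELATED,ESTABLISHED
#   RETURN  10.0.1.5    -> 10.0.1.0/24
#   DROP    10.0.1.0/24 -> 10.0.1.0/24
#
# so only intra-subnet traffic matching none of the RETURN rules is dropped.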
396 | function remove_old_rules() {
397 | 	local RULENUMS
398 | 	local RESULT
399 | 	local RULES
400 | 
401 | 	if ! RULES=$(iptables_tj --line-numbers --table filter --numeric --list "$1" 2>&1); then
402 | 		log_error "Could not get rules from chain '$1' for removal: $RULES"
403 | 		return 1
404 | 	fi
405 | 	#Make sure to reverse sort rule numbers, otherwise the numbers change as rules are deleted!
406 | 	if ! RULENUMS=$(echo "$RULES" | grep "trafficjam_$INSTANCE_ID" | grep -v "$DATE" | awk '{ print $1 }' | sort -nr); then
407 | 		log "No old rules to remove from chain '$1'"
408 | 	else
409 | 		for RULENUM in $RULENUMS; do
410 | 			RULE=$(iptables_tj --table filter --numeric --list "$1" "$RULENUM" 2> /dev/null) # Suppress warnings since it's just logging
411 | 			if ! RESULT=$(iptables_tj --table filter --delete "$1" "$RULENUM" 2>&1); then
412 | 				log_error "Could not remove $1 rule \"$RULE\": $RESULT"
413 | 			else
414 | 				log "Removed $1 rule: $RULE"
415 | 			fi
416 | 		done
417 | 	fi
418 | }
419 | 
--------------------------------------------------------------------------------
/trafficjam.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -Eeuo pipefail
3 | 
4 | if [[ "${1:-}" != "--clear" ]]; then
5 | 	if [[ -z "${NETWORK:-}" ]]; then
6 | 		echo "NETWORK is not set" >&2
7 | 		exit 1
8 | 	fi
9 | 
10 | 	if [[ -z "${WHITELIST_FILTER:-}" ]]; then
11 | 		echo "WHITELIST_FILTER is not set" >&2
12 | 		exit 1
13 | 	fi
14 | fi
15 | 
16 | #Initialize variables since we set -u
17 | : "${INSTANCE_ID:=DEFAULT}"
18 | : "${SWARM_DAEMON:=}"
19 | : "${SWARM_IMAGE:=kaysond/trafficjam}"
20 | : "${POLL_INTERVAL:=5}"
21 | : "${ALLOW_HOST_TRAFFIC:=}"
22 | : "${DEBUG:=}"
23 | : "${TZ:=}"
24 | : "${SWARM_WORKER:=}"
25 | NETNS=""
26 | OLD_SUBNET=""
27 | OLD_WHITELIST_IPS=""
28 | LOCAL_LOAD_BALANCER_IP=""
29 | OLD_LOCAL_LOAD_BALANCER_IP=""
30 | SERVICE_LOGS_SINCE=""
31 | : "${ALLOWED_SWARM_IPS:=}"
32 | OLD_ALLOWED_SWARM_IPS=""
33 | 
34 | if [[ -n "$TZ" ]]; then
35 | 	cp /usr/share/zoneinfo/"$TZ" /etc/localtime && echo "$TZ" > /etc/timezone
36 | fi
37 | 
38 | if [[ "$INSTANCE_ID" =~ [^a-zA-Z0-9_] ]]; then
39 | 	echo "INSTANCE_ID contains invalid characters" >&2
40 | 	exit 1
41 | fi
42 | 
43 | . trafficjam-functions.sh
44 | 
45 | trap tj_trap EXIT
46 | 
47 | ERRCOUNT=0
48 | 
49 | detect_iptables_version
50 | 
51 | trap clear_rules SIGUSR1
52 | 
53 | if [[ "${1:-}" == "--clear" ]]; then
54 | 	clear_rules
55 | fi
56 | 
57 | if [[ -n "$SWARM_DAEMON" ]]; then
58 | 	remove_service
59 | 
60 | 	while true; do
61 | 		tj_sleep
62 | 
63 | 		deploy_service || continue
64 | 
65 | 		get_allowed_swarm_ips || continue
66 | 
67 | 		if [[ "$ALLOWED_SWARM_IPS" != "$OLD_ALLOWED_SWARM_IPS" ]]; then
68 | 			update_service || continue
69 | 
70 | 			OLD_ALLOWED_SWARM_IPS="$ALLOWED_SWARM_IPS"
71 | 		fi
72 | 
73 | 		ERRCOUNT=0
74 | 	done
75 | else
76 | 	while true; do
77 | 		tj_sleep
78 | 
79 | 		if [[ -n "$SWARM_WORKER" ]]; then
80 | 			log_debug "Running in swarm mode"
81 | 		fi
82 | 
83 | 		get_network_driver || continue
84 | 
85 | 		get_network_subnet || continue
86 | 
87 | 		get_whitelisted_container_ips || continue
88 | 
89 | 		if [[ "$NETWORK_DRIVER" == "overlay" ]]; then
90 | 			get_netns || continue
91 | 			get_local_load_balancer_ip || continue
92 | 		fi
93 | 
94 | 		DATE=$(date "+%Y-%m-%d %H:%M:%S")
95 | 
96 | 		if [[
97 | 			"$SUBNET" != "$OLD_SUBNET" ||
98 | 			"$WHITELIST_IPS" != "$OLD_WHITELIST_IPS" ||
99 | 			"$LOCAL_LOAD_BALANCER_IP" != "$OLD_LOCAL_LOAD_BALANCER_IP" ]] \
100 | 		; then
101 | 
102 | 			if [[ -n "$SWARM_WORKER" && -z "$WHITELIST_IPS" && -z "$LOCAL_LOAD_BALANCER_IP" ]]; then
103 | 				log_debug "No load balancer or container running on this node, skipping"
104 | 				continue
105 | 			fi
106 | 
107 | 			add_chain || continue
108 | 
109 | 			block_subnet_traffic || continue
110 | 
111 | 			if [[ -z "$ALLOW_HOST_TRAFFIC" ]]; then
112 | 				add_input_chain || continue
113 | 				block_host_traffic || continue
114 | 			fi
115 | 
116 | 			if [[ "$NETWORK_DRIVER" == "overlay" ]]; then
117 | 				report_local_whitelist_ips || continue
118 | 				allow_local_load_balancer_traffic || continue
119 | 				allow_swarm_whitelist_traffic || continue
120 | 			fi
121 | 
122 | 			allow_local_whitelist_traffic || continue
123 | 
124 | 			remove_old_rules TRAFFICJAM || continue
125 | 
126 | 			if [[ -z "$ALLOW_HOST_TRAFFIC" ]]; then
127 | 				remove_old_rules TRAFFICJAM_INPUT || continue
128 | 			fi
129 | 
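# Remember the state that was just applied so the next poll iteration only
# reprograms iptables when the subnet, whitelist IPs, or local load balancer
# IP actually change (see the comparison at the top of this block).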
130 | 			OLD_SUBNET="$SUBNET"
131 | 			OLD_WHITELIST_IPS="$WHITELIST_IPS"
132 | 			OLD_LOCAL_LOAD_BALANCER_IP="$LOCAL_LOAD_BALANCER_IP"
133 | 		fi
134 | 
135 | 		ERRCOUNT=0
136 | 	done
137 | fi
138 | 
--------------------------------------------------------------------------------
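Usage sketch (editorial addition, not part of the repository): trafficjam is
driven entirely by the environment variables read at the top of trafficjam.sh.
Assuming the container needs the Docker socket plus NET_ADMIN on the host
network to manage iptables (an assumption based on the script's use of the
docker CLI, nsenter, and iptables), a minimal invocation might look like:

    # Hypothetical example; the network name and label are placeholders
    docker run -d --name trafficjam \
        --cap-add NET_ADMIN \
        --network host \
        -v /var/run/docker.sock:/var/run/docker.sock \
        -e NETWORK=my_bridge_net \
        -e WHITELIST_FILTER="label=trafficjam.whitelist=true" \
        -e POLL_INTERVAL=5 \
        kaysond/trafficjam

WHITELIST_FILTER is passed straight to "docker ps --filter" (see
get_whitelisted_container_ips), so any filter that command accepts (e.g.
label= or name=) should work here.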