├── .gitignore ├── psql-app ├── upgrade │ ├── upgrade_2.2.0.sh │ ├── upgrade_2.2.1.sh │ ├── upgrade_2.2.2.sh │ ├── upgrade_2.1.0.sh │ ├── upgrade_2.1.0.sql │ ├── upgrade_2.2.2.sql │ ├── upgrade_2.2.1.sql │ └── upgrade_2.2.0.sql ├── Dockerfile ├── README.md └── scripts │ └── run ├── dev-image ├── README.md └── Dockerfile ├── postgres ├── scripts │ ├── 005_obmp_init.sh │ └── 004_obmp_psql_cfg.sh ├── Dockerfile └── README.md ├── collector ├── scripts │ ├── install │ └── run ├── Dockerfile └── README.md ├── LICENSE ├── whois └── Dockerfile ├── README.md └── docker-compose.yml /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | *.log 3 | 4 | -------------------------------------------------------------------------------- /psql-app/upgrade/upgrade_2.2.0.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Upgrade script for L3VPN 3 | # 4 | # Copyright (c) 2022 Cisco Systems, Inc. and Tim Evens. All rights reserved. 5 | # 6 | # Author: Tim Evens 7 | # 8 | 9 | source /usr/local/openbmp/pg_profile 10 | 11 | echo "==> Upgrading to 2.2.0 SQL ==================================== " 12 | psql < /tmp/upgrade/upgrade_2.2.0.sql 13 | echo "==> Done upgrading to 2.2.0 SQL ================================== " 14 | 15 | 16 | -------------------------------------------------------------------------------- /psql-app/upgrade/upgrade_2.2.1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Upgrade script for L3VPN 3 | # 4 | # Copyright (c) 2022 Cisco Systems, Inc. and Tim Evens. All rights reserved. 
5 | # 6 | # Author: Tim Evens 7 | # 8 | 9 | source /usr/local/openbmp/pg_profile 10 | 11 | echo "==> Upgrading to 2.2.1 SQL ==================================== " 12 | psql < /tmp/upgrade/upgrade_2.2.1.sql 13 | echo "==> Done upgrading to 2.2.1 SQL ================================== " 14 | 15 | 16 | -------------------------------------------------------------------------------- /psql-app/upgrade/upgrade_2.2.2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Upgrade script for L3VPN 3 | # 4 | # Copyright (c) 2022 Cisco Systems, Inc. and Tim Evens. All rights reserved. 5 | # 6 | # Author: Tim Evens 7 | # 8 | 9 | source /usr/local/openbmp/pg_profile 10 | 11 | echo "==> Upgrading to 2.2.2 SQL ==================================== " 12 | psql < /tmp/upgrade/upgrade_2.2.2.sql 13 | echo "==> Done upgrading to 2.2.2 SQL ================================== " 14 | 15 | 16 | -------------------------------------------------------------------------------- /dev-image/README.md: -------------------------------------------------------------------------------- 1 | OpenBMP Development Image 2 | ---------------------------- 3 | This image is the base development image used to build all the OpenBMP 4 | components. It has all the needed dependencies included. 5 | 6 | ### Build Image 7 | 8 | ``` 9 | docker build -t openbmp/dev-image:build-NNN . 
10 | ``` 11 | 12 | ### Publish Image to dockerhub 13 | 14 | ``` 15 | # Login to docker 16 | docker login 17 | 18 | # Tag the image 19 | docker tag openbmp/dev-image:build-NNN openbmp/dev-image:latest 20 | 21 | # Upload the image 22 | docker push openbmp/dev-image:build-NNN 23 | docker push openbmp/dev-image:latest 24 | ``` 25 | 26 | ### Running 27 | 28 | ``` 29 | docker run --rm -v $(PWD):/ws -it openbmp/dev-image /bin/bash 30 | ``` -------------------------------------------------------------------------------- /postgres/scripts/005_obmp_init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # postgres: Init script 4 | # 5 | # Copyright (c) 2021-2022 Cisco Systems, Inc. and Tim Evens. All rights reserved. 6 | # 7 | 8 | # >> NOTE, before adding extensions, required preload/config should be done first in 004_obmp_psql_cfg.sh 9 | 10 | # Add extensions 11 | psql -U $POSTGRES_USER -c "CREATE EXTENSION IF NOT EXISTS postgis CASCADE;" $POSTGRES_DB 12 | psql -U $POSTGRES_USER -c "CREATE EXTENSION IF NOT EXISTS pgrouting CASCADE;" $POSTGRES_DB 13 | 14 | # Add cron extension and config 15 | psql -U $POSTGRES_USER -c "CREATE EXTENSION IF NOT EXISTS pg_cron;" $POSTGRES_DB 16 | psql -U $POSTGRES_USER -c "GRANT USAGE ON SCHEMA cron TO $POSTGRES_USER;" $POSTGRES_DB 17 | 18 | -------------------------------------------------------------------------------- /psql-app/upgrade/upgrade_2.1.0.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Upgrade script for L3VPN 3 | # 4 | # Copyright (c) 2022 Cisco Systems, Inc. and Tim Evens. All rights reserved. 5 | # 6 | # Author: Tim Evens 7 | # 8 | 9 | source /usr/local/openbmp/pg_profile 10 | 11 | psql -c "select * from l3vpn_rib limit 1" > /dev/null 2>&1 12 | 13 | if [[ $? 
-ne 0 ]]; then 14 | echo "==> Upgrading L3VPN SQL ======================================= " 15 | psql < /usr/local/openbmp/database/10_l3vpn.sql 16 | echo "==> Done upgrading L3VPN SQL ================================== " 17 | 18 | echo "==> Upgrading to 2.1.0 SQL ==================================== " 19 | psql < /tmp/upgrade/upgrade_2.1.0.sql 20 | echo "==> Done upgrading to 2.1.0 SQL ================================== " 21 | fi 22 | 23 | 24 | -------------------------------------------------------------------------------- /collector/scripts/install: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Collector install script 3 | # 4 | # Copyright (c) 2021 Cisco Systems, Inc. and Tim Evens. All rights reserved. 5 | # 6 | # Author: Tim Evens 7 | 8 | # Add build details 9 | touch /usr/local/version-${VERSION} 10 | 11 | # 12 | # Defaults 13 | # 14 | 15 | 16 | # Disable interactive 17 | export DEBIAN_FRONTEND=noninteractive 18 | 19 | # Install base packages 20 | apt-get update 21 | 22 | # Fix ubuntu docker install 23 | #sed -i 's/exit 101/exit 0/' /usr/sbin/policy-rc.d 24 | 25 | # General depend install 26 | apt-get install -y iproute2 wget zlib1g libssl1.1 libsasl2-2 27 | 28 | # -- 29 | # -- Add host entries for reverse PTR lookups 30 | # -- 31 | if [[ -f /config/hosts ]]; then 32 | cat /config/hosts >> /etc/hosts 33 | fi 34 | 35 | # -- 36 | # -- Clean up 37 | # -- 38 | apt-get clean 39 | rm -rf /var/lib/apt/lists/* /var/tmp/* 40 | rm -f /tmp/install 41 | -------------------------------------------------------------------------------- /postgres/scripts/004_obmp_psql_cfg.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # OpenBMP Postgres configuration 4 | # 5 | # Copyright (c) 2022 Cisco Systems, Inc. and Tim Evens. All rights reserved. 
6 | # 7 | 8 | 9 | # Create SSL cert 10 | openssl req -x509 -newkey rsa:4096 -nodes -subj "/C=US/ST=CA/L=Seattle/O=OpenBMP/CN=localhost" \ 11 | -keyout $PGDATA/psql_server.key -out $PGDATA/psql_server.crt -days 2048 \ 12 | 13 | # Init timeseries location 14 | mkdir -p $PGDATA_TS 15 | chmod 0700 $PGDATA_TS 16 | psql -U $POSTGRES_USER -c "CREATE TABLESPACE timeseries LOCATION '$PGDATA_TS';" $POSTGRES_DB 17 | 18 | # Update postgres conf 19 | sed -i -e "s/^\#*listen_addresses.*=.*/listen_addresses = '*'/" $PGDATA/postgresql.conf 20 | sed -i -e "s/^\#*ssl[ ]*=.*/ssl = on/" $PGDATA/postgresql.conf 21 | sed -i -e "s/^\#*ssl_cert_file.*=.*/ssl_cert_file = '${PGDATA//\//\\\/}\/psql_server.crt'/" $PGDATA/postgresql.conf 22 | sed -i -e "s/^\#*ssl_key_file.*=.*/ssl_key_file = '${PGDATA//\//\\\/}\/psql_server.key'/" $PGDATA/postgresql.conf 23 | 24 | sed -i -e "s/^shared_preload_libraries.*/shared_preload_libraries = 'timescaledb,pg_cron'/g" $PGDATA/postgresql.conf 25 | 26 | echo "cron.database_name = 'openbmp'" >> $PGDATA/postgresql.conf 27 | 28 | egrep -q -e '^hostssl( |\t)+all' $PGDATA/pg_hba.conf 29 | if [[ $? ]]; then 30 | echo 'hostssl all all 0.0.0.0/0 md5' >> $PGDATA/pg_hba.conf 31 | fi 32 | 33 | 34 | pg_ctl -D "$PGDATA" -m fast -w restart -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2021 Cisco Systems, Inc. 2 | 3 | Redistribution and use in source and binary forms, with or without modification, 4 | are permitted provided that the following conditions are met: 5 | 6 | 1. Redistributions of source code must retain the above copyright notice, this 7 | list of conditions and the following disclaimer. 8 | 9 | 2. Redistributions in binary form must reproduce the above copyright notice, this 10 | list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | 3. 
Neither the name of the copyright holder nor the names of its contributors 14 | may be used to endorse or promote products derived from this software 15 | without specific prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 | IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 21 | INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 22 | BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 24 | OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 25 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, 26 | EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | -------------------------------------------------------------------------------- /whois/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1.3 2 | # Postgres Backend: openbmp/whois 3 | # 4 | # Copyright (c) 2022 Cisco Systems, Inc. and others. All rights reserved. 
5 | # 6 | # BUILD: 7 | # tar -c -C ../../ ./obmp-whois ./obmp-docker/whois \ 8 | # | docker buildx build --platform linux/amd64 --progress=plain \ 9 | # -f obmp-docker/whois/Dockerfile -t openbmp/whois:2.2.0 - 10 | 11 | # ----------------------------------------------- 12 | # stage: Build 13 | # ----------------------------------------------- 14 | FROM golang:alpine AS build 15 | 16 | WORKDIR /ws 17 | 18 | COPY obmp-whois/ /ws 19 | 20 | ENV GO111MODULE=on 21 | ENV GCGO_ENABLED=0 22 | ENV GOARCH=amd64 23 | ENV GOOS=linux 24 | 25 | RUN apk add --no-cache bash git 26 | RUN cd /ws && go mod tidy 27 | 28 | # Build the app 29 | RUN cd /ws && go build -o obmp-whoisd . 30 | 31 | 32 | # ----------------------------------------------- 33 | # stage: Final container 34 | # ----------------------------------------------- 35 | FROM alpine 36 | 37 | COPY --from=build /ws/obmp-whoisd /usr/local/bin/ 38 | 39 | WORKDIR /tmp 40 | 41 | # Environment variables that should be set when running container 42 | # Defaults are given below 43 | ENV DB_MAX_THREADS=10 44 | ENV WHOIS_PORT=43 45 | 46 | ENV PGHOST=obmp-psql 47 | ENV PGDATABASE=openbmp 48 | ENV PGUSER=openbmp 49 | ENV PGPASSWORD=openbmp 50 | ENV PGPORT=5432 51 | 52 | EXPOSE 43 53 | 54 | # -- general depends 55 | RUN apk add --no-cache bash 56 | 57 | # -- set entry point to bash and act as login (load profile) 58 | ENTRYPOINT /usr/local/bin/obmp-whoisd --logfile /dev/stdout -p $WHOIS_PORT -t $DB_MAX_THREADS -------------------------------------------------------------------------------- /postgres/Dockerfile: -------------------------------------------------------------------------------- 1 | # Postgres Container 2 | # 3 | # Copyright (c) 2021-2022 Cisco Systems, Inc. and others. All rights reserved. 4 | # 5 | # Build: 6 | # DOCKER_BUILDKIT=1 docker build --platform linux/amd64 \ 7 | # --build-arg VERSION=2.2.1 \ 8 | # --squash \ 9 | # -t openbmp/postgres:2.2.1 . 
10 | # 11 | # Run: 12 | # docker run --rm -it -p 5432:5432 \ 13 | # -e POSTGRES_PASSWORD=openbmp \ 14 | # -e POSTGRES_USER=openbmp \ 15 | # -e POSTGRES_DB=openbmp \ 16 | # openbmp/postgres:2.2.1 17 | 18 | # ----------------------------------------------- 19 | # stage: Build 20 | # ----------------------------------------------- 21 | 22 | 23 | # ----------------------------------------------- 24 | # stage: Final container 25 | # ----------------------------------------------- 26 | FROM timescale/timescaledb-ha:pg14-ts2.8-latest 27 | 28 | ARG VERSION=0.0.0 29 | 30 | ENV PGDATA=/var/lib/postgresql/data 31 | ENV PGDATA_TS=/var/lib/postgresql/ts/data 32 | 33 | # Expected data locations for base tables and timeseries 34 | # 35 | VOLUME ["/var/lib/postgresql/data"] 36 | VOLUME ["/var/lib/postgresql/ts"] 37 | 38 | ADD --chmod=755 scripts/004_obmp_psql_cfg.sh /docker-entrypoint-initdb.d/004_obmp_psql_cfg.sh 39 | ADD --chmod=755 scripts/005_obmp_init.sh /docker-entrypoint-initdb.d/005_obmp_init.sh 40 | 41 | USER root 42 | 43 | RUN rm -rf /usr/lib/postgresql/12 /usr/lib/postgresql/13 \ 44 | && mkdir -p /var/lib/postgresql/data /var/lib/postgresql/ts/data \ 45 | && chown -R postgres /var/lib/postgresql/data /var/lib/postgresql/ts/data \ 46 | && mkdir -p /usr/local/openbmp \ 47 | && touch /usr/local/openbmp/version-${VERSION} 48 | 49 | USER postgres 50 | -------------------------------------------------------------------------------- /dev-image/Dockerfile: -------------------------------------------------------------------------------- 1 | # Development build container: openbmp/dev-image 2 | # 3 | # Copyright (c) 2021-2022 Cisco Systems, Inc. and others. All rights reserved. 4 | # 5 | # Container used to build the OpenBMP components 6 | # 7 | # Author: Tim Evens 8 | # 9 | # BUILD: docker buildx build --platform linux/amd64 --progress=plain -t openbmp/dev-image:latest . 
10 | 11 | FROM debian:stable-slim AS build 12 | 13 | ENV DEBIAN_FRONTEND=noninteractive 14 | 15 | WORKDIR /ws 16 | 17 | # Install the various depends 18 | RUN apt-get update 19 | RUN apt-get install -y openjdk-17-jdk-headless maven 20 | RUN mkdir -p /usr/share/man/man1/ \ 21 | && apt-get -y install git gcc g++ libboost-dev cmake libssl-dev libsasl2-dev \ 22 | curl wget libgss-dev liblz4-dev libzstd-dev 23 | # zlib1g-dev 24 | 25 | # Build/install zlib - zlib1g-dev does not work for static builds of librdkafka 26 | RUN cd /tmp && git clone https://github.com/madler/zlib.git \ 27 | && cd zlib \ 28 | && git checkout v1.2.12 \ 29 | && CFLAGS=-fPIC ./configure --static \ 30 | && make install 31 | 32 | # Build/install librdkafka 33 | RUN cd /tmp && git clone https://github.com/edenhill/librdkafka.git \ 34 | && cd librdkafka \ 35 | && git checkout v1.9.2 \ 36 | && ./configure --enable-static --disable-curl \ 37 | && make \ 38 | && make install 39 | 40 | # Installed under /usr/local/lib 41 | 42 | # Build/install yaml-cpp 43 | RUN cd /tmp && git clone https://github.com/jbeder/yaml-cpp.git \ 44 | && cd yaml-cpp \ 45 | && git checkout yaml-cpp-0.7.0 \ 46 | && mkdir build && cd build \ 47 | && cmake -DYAML_BUILD_SHARED_LIB=OFF .. \ 48 | && make && make install \ 49 | && cd /tmp 50 | 51 | # Clean up 52 | RUN rm -rf /tmp/* && apt-get clean 53 | 54 | -------------------------------------------------------------------------------- /collector/scripts/run: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # All-in-One run script 3 | # 4 | # Copyright (c) 2021 Cisco Systems, Inc. and Tim Evens. All rights reserved. 5 | # 6 | # Author: Tim Evens 7 | # 8 | ADMIN_ID=${ADMIN_ID:="collector"} 9 | 10 | DOCKER_HOST_IP=$(ip route | grep default | head -1 | awk '{ print $3}') 11 | 12 | if [[ ${KAFKA_FQDN:-""} == "" ]]; then 13 | echo "ERROR: Missing ENV KAFKA_FQDN. 
Cannot proceed until you add that in docker run -e KAFKA_FQDN=<...>" 14 | exit 1 15 | else 16 | if [[ ${KAFKA_FQDN} == "localhost" ]]; then 17 | KAFKA_FQDN="docker-localhost" 18 | 19 | elif [[ ${KAFKA_FQDN} == "127.0.0.1" ]]; then 20 | KAFKA_FQDN="docker-localhost" 21 | 22 | elif [[ ${KAFKA_FQDN} == "::1" ]]; then 23 | KAFKA_FQDN="docker-localhost" 24 | fi 25 | fi 26 | 27 | 28 | if [[ -f /config/openbmpd ]]; then 29 | source /config/openbmpd 30 | else 31 | source /etc/default/openbmpd 32 | fi 33 | 34 | # 35 | # System info 36 | # 37 | if [[ ${MEM:-""} = "" ]]; then 38 | SYS_TOTAL_MEM=$(grep MemTotal /proc/meminfo | awk '{print int($2 / 1000)}') 39 | else 40 | SYS_TOTAL_MEM=$(($MEM * 1024)) 41 | fi 42 | 43 | SYS_NUM_CPU=$(grep processor /proc/cpuinfo | wc -l) 44 | 45 | # Update the hosts file 46 | echo "$DOCKER_HOST_IP docker-localhost" >> /etc/hosts 47 | 48 | # Update the etc hosts file 49 | if [[ -f /config/hosts ]]; then 50 | cat /config/hosts >> /etc/hosts 51 | fi 52 | 53 | 54 | # Update openbmpd config file 55 | OPENBMP_CFG_FILE=/usr/etc/openbmp/openbmpd.conf 56 | sed -r -i "s/admin_id:.*/admin_id: ${ADMIN_ID}/" /usr/etc/openbmp/openbmpd.conf 57 | sed -r -i "s/localhost:9092/${KAFKA_FQDN}/" /usr/etc/openbmp/openbmpd.conf 58 | 59 | if [[ -f /config/openbmpd.conf ]]; then 60 | OPENBMP_CFG_FILE=/config/openbmpd.conf 61 | fi 62 | 63 | # Startup delay to allow for Kafka to start if not already running 64 | echo "Waiting 30 seconds to allow for Kafka and other containers to startup." 
65 | sleep 30 66 | 67 | # Start openbmpd and wait - openbmpd runs in foreground 68 | 69 | echo "Running openbmpd collector, see /var/log/openbmpd.log " 70 | /usr/bin/openbmpd -f -c ${OPENBMP_CFG_FILE} 71 | -------------------------------------------------------------------------------- /collector/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1.3 2 | # Collector: openbmp/collector 3 | # 4 | # Copyright (c) 2021-2022 Cisco Systems, Inc. and others. All rights reserved. 5 | # 6 | # Author: Tim Evens 7 | # 8 | # BUILD: 9 | # 10 | # Docker context does not support multiple paths or mounting volumes for builds. 11 | # In effort to build the container from local git clones, we dynamically build a context 12 | # 13 | # Clone the obmp-collector and obmp-docker repos into the same directory. 14 | # Change directories to obmp-docker/collector and run the below from that 15 | # directory. 16 | # 17 | # Use buildkit - either via export DOCKER_BUILDKIT=1 or by using buildx cli 18 | # 19 | # Example docker build: 20 | # tar -c -C ../../ ./obmp-collector ./obmp-docker/collector \ 21 | # | docker buildx build --platform linux/amd64 --progress=plain \ 22 | # --build-arg VERSION=2.2.3 \ 23 | # -f obmp-docker/collector/Dockerfile -t openbmp/collector:2.2.3 - 24 | # 25 | 26 | # ----------------------------------------------- 27 | # stage: Build collector 28 | # ----------------------------------------------- 29 | FROM openbmp/dev-image:latest AS build 30 | 31 | COPY obmp-collector/ /ws 32 | WORKDIR /ws 33 | 34 | RUN rm -rf build && mkdir -p build && cd build \ 35 | && cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX:PATH=/usr ../ \ 36 | && make \ 37 | && make install 38 | 39 | 40 | # ----------------------------------------------- 41 | # stage: Final container 42 | # ----------------------------------------------- 43 | # Pull base image. 44 | FROM debian:stable-slim 45 | 46 | # Add files. 
47 | ADD --chmod=755 obmp-docker/collector/scripts/install /tmp/ 48 | ADD --chmod=755 obmp-docker/collector/scripts/run /usr/sbin/ 49 | 50 | ARG VERSION=0 51 | 52 | # Copy files from previous stages 53 | COPY --chmod=755 --from=build /usr/bin/openbmpd /usr/bin/ 54 | COPY --from=build /usr/etc/openbmp/openbmpd.conf /usr/etc/openbmp/openbmpd.conf 55 | 56 | # Proxy servers 57 | #ENV http_proxy http://proxy:80 58 | #ENV https_proxy http://proxy:80 59 | #ENV no_proxy "domain.com" 60 | 61 | # Run Install script 62 | RUN /tmp/install 63 | 64 | # Define mount points. 65 | VOLUME ["/config"] 66 | 67 | # Define working directory. 68 | WORKDIR /tmp 69 | 70 | # Define default command. 71 | CMD ["/usr/sbin/run"] 72 | 73 | # Expose ports. 74 | # openbmpd/collector 75 | EXPOSE 5000 76 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OpenBMP docker files 2 | Docker files for OpenBMP. 3 | 4 | ## (Prerequisite) Platform Docker Install 5 | 6 | > Ignore this step if you already have a current docker install 7 | 8 | > **NOTE** 9 | > You should use the latest docker version, documented in this section. 10 | 11 | Follow the instructions on https://docs.docker.com/get-docker/ 12 | 13 | ### Optionally add a non-root user to run docker as 14 | usermod -aG docker ubuntu 15 | 16 | # Logout and log back so the group takes affect. 17 | 18 | 19 | ### Optionally configure **/etc/default/docker** (e.g. for proxy config) 20 | 21 | export http_proxy="http://proxy:80/" 22 | export https_proxy="http://proxy:80/" 23 | export no_proxy="127.0.0.1,openbmp.org,/var/run/docker.sock" 24 | 25 | Make sure you can run '**docker run hello-world**' successfully. 
26 | 27 | 28 | ## OpenBMP Docker Files 29 | Each docker file contains a readme file, see below: 30 | 31 | * [Collector](collector/README.md) 32 | * [PostgreSQL](postgres/README.md) 33 | * [PSQL Consumer](psql-app/README.md) 34 | 35 | 36 | ## Using Docker Compose to run everything 37 | 38 | ### Install Docker Compose 39 | You will need docker-compose. You can install that via [Docker Compose](https://docs.docker.com/compose/install/) 40 | instructions. Docker compose will run everything, including handling restarts of containers. 41 | 42 | #### (1) Mount/Make persistent directories 43 | Create expected directories. You can choose to mount these as well or update the compose file to change them. 44 | 45 | > **NOTE** 46 | > If you are using OSX/Mac, then you will need to update your docker preferences to allow ```/var/openbmp``` 47 | 48 | Make sure to create the **OBMP_DATA_ROOT** directory first. 49 | ``` 50 | export OBMP_DATA_ROOT=/var/openbmp 51 | sudo mkdir -p $OBMP_DATA_ROOT 52 | ``` 53 | 54 | Create sub directories 55 | ``` 56 | mkdir -p ${OBMP_DATA_ROOT}/config 57 | mkdir -p ${OBMP_DATA_ROOT}/kafka-data 58 | mkdir -p ${OBMP_DATA_ROOT}/zk-data 59 | mkdir -p ${OBMP_DATA_ROOT}/zk-log 60 | mkdir -p ${OBMP_DATA_ROOT}/postgres/data 61 | mkdir -p ${OBMP_DATA_ROOT}/postgres/ts 62 | mkdir -p ${OBMP_DATA_ROOT}/grafana 63 | mkdir -p ${OBMP_DATA_ROOT}/grafana/dashboards 64 | 65 | sudo chmod -R 7777 $OBMP_DATA_ROOT 66 | ``` 67 | 68 | > In order to init the DB tables, you must create the file ```${OBMP_DATA_ROOT}/config/init_db```. This should 69 | > only be done once or whenever you want to completely wipe out the DB and start over. 70 | 71 | Change ```OBMP_DATA_ROOT=``` to where you created the directories above. 
The default is ```/var/openbmp``` 72 | 73 | ``` 74 | OBMP_DATA_ROOT=/var/openbmp docker-compose -p obmp up -d 75 | ``` 76 | 77 | -------------------------------------------------------------------------------- /collector/README.md: -------------------------------------------------------------------------------- 1 | # OpenBMP Collector Container Image 2 | Collector is the container for collecting BMP messages from BMP senders, e.g. routers. 3 | 4 | ## Container Includes 5 | * The Latest collector (listening port is TCP 5000) 6 | 7 | ## Building the container 8 | See [Dockerfile] notes. 9 | 10 | ## Steps to Use the container 11 | 12 | ### 1) Install docker 13 | Follow the [Docker Instructions](https://docs.docker.com/installation/) to install docker. 14 | 15 | ### 2) Download the docker image 16 | 17 | docker pull openbmp/collector 18 | 19 | ### 3) [OPTIONAL] Add persistent configs 20 | 21 | mkdir -p /var/openbmp/config 22 | chmod 777 /var/openbmp/config 23 | 24 | #### config/hosts 25 | You can add custom host entries so that the collector will reverse lookup IP addresses 26 | using a persistent hosts file. 27 | 28 | Run docker with ```-v /var/openbmp/config:/config``` to make use of the persistent config files. 29 | 30 | #### config/openbmpd.conf 31 | You can provide a customized **openbmpd.conf**. See [Config Example](https://github.com/OpenBMP/obmp-collector/blob/main/Server/openbmpd.conf) 32 | 33 | ### 4) Run docker container 34 | 35 | #### Environment Variables 36 | Below table lists the environment variables that can be used with ``docker run -e `` 37 | 38 | NAME | Value | Details 39 | :---- | ----- |: ------- 40 | KAFKA\_FQDN | hostanme or IP | Kafka broker hostname[:port]. Hostname can be an IP address 41 | OPENBMP\_ADMIN\_ID | name or IP | Name or IP of the collector, default is the docker hostname 42 | OPENBMP\_BUFFER | Size in MB | Defines the openbmpd buffer per router for BMP messages. Default is 16 MB. 
43 | 44 | #### Run normally 45 | 46 | > ##### IMPORTANT 47 | > You must define the **KAFKA_FQDN** as a 'hostname'. If all containers are running on the same node, this 48 | > hostname can be local specific, such as 'localhost' or 'myhost'. If Kafka is running on a different server, 49 | > than the consumers and producers, then the KAFKA_FQDN should be a valid hostname that can be resolved using DNS. 50 | > This can be internal DNS or manually done by updating the /etc/hosts file on each machine. 51 | 52 | docker run -d --name=obmp_collector -e KAFKA_FQDN=localhost \ 53 | --sysctl net.ipv4.tcp_keepalive_intvl=30 \ 54 | --sysctl net.ipv4.tcp_keepalive_probes=5 \ 55 | --sysctl net.ipv4.tcp_keepalive_time=180 \ 56 | -v /var/openbmp/config:/config \ 57 | -p 5000:5000 \ 58 | openbmp/collector 59 | 60 | 61 | ### Monitoring/Troubleshooting 62 | 63 | You can use standard docker exec commands to monitor the log files. To monitor 64 | openbmp, use ```docker exec obmp_collector tail -f /var/log/openbmpd.log``` 65 | 66 | Alternatively, it can be easier at times to navigate all the log files from within the container. You can do so using: 67 | 68 | docker exec -it obmp_collector bash 69 | 70 | 71 | #### docker logs 72 | You can use ```docker logs obmp_collector``` to get the console logs. This is useful if the container exits due to 73 | invalid start or for another reason. 74 | 75 | 76 | 77 | 78 | 79 | -------------------------------------------------------------------------------- /psql-app/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1.3 2 | # Postgres Backend: openbmp/psql-app 3 | # 4 | # Copyright (c) 2021-2022 Cisco Systems, Inc. and others. All rights reserved. 5 | # 6 | # Author: Tim Evens 7 | # 8 | # Docker context does not support multiple paths or mounting volumes for builds. 
9 | # In effort to build the container from local git clones, we dynamically build a context 10 | # 11 | # Clone the obmp-psql, obmp-java-api-message, and obmp-docker repos into the same directory. 12 | # Change directories to obmp-docker/psql-app and run the below from that 13 | # directory. 14 | # 15 | # Use buildkit - either via export DOCKER_BUILDKIT=1 or by using buildx cli 16 | # 17 | # Example docker build: 18 | # tar -c -C ../../ ./obmp-psql ./obmp-docker/psql-app ./obmp-java-api-message \ 19 | # | docker buildx build --platform linux/amd64 --progress=plain \ 20 | # --build-arg VERSION=2.2.2 \ 21 | # -f obmp-docker/psql-app/Dockerfile -t openbmp/psql-app:2.2.2 - 22 | 23 | # ----------------------------------------------- 24 | # stage: Build 25 | # ----------------------------------------------- 26 | FROM openbmp/dev-image:latest AS build 27 | 28 | ARG VERSION=0.0.0 29 | 30 | # Proxy servers 31 | #ENV http_proxy http://proxy:80 32 | #ENV https_proxy http://proxy:80 33 | #ENV no_proxy "domain.com" 34 | 35 | COPY obmp-psql/ /ws 36 | COPY obmp-java-api-message/ /tmp/obmp-java-api-message 37 | WORKDIR /ws 38 | 39 | RUN cd /tmp/obmp-java-api-message \ 40 | && mvn clean install \ 41 | && cd /ws \ 42 | && mvn clean package 43 | 44 | # ----------------------------------------------- 45 | # stage: Final container 46 | # ----------------------------------------------- 47 | FROM openjdk:17-slim 48 | 49 | # Copy files from previous stages 50 | COPY --from=build /ws/target/obmp-psql-consumer-0.1.0-SNAPSHOT.jar /usr/local/openbmp/obmp-psql-consumer.jar 51 | COPY --from=build /ws/database/ /usr/local/openbmp/database 52 | COPY --chmod=755 --from=build /ws/cron_scripts/gen-whois/*.py /usr/local/openbmp/ 53 | COPY --chmod=755 --from=build /ws/cron_scripts/peeringdb/*.py /usr/local/openbmp/ 54 | COPY --chmod=755 --from=build /ws/cron_scripts/rpki/*.py /usr/local/openbmp/ 55 | COPY --chmod=755 --from=build /ws/scripts/geo-csv-to-psql.py /usr/local/openbmp/ 56 | COPY 
--chmod=755 --from=build /ws/scripts/db-ip-import.sh /usr/local/openbmp/ 57 | 58 | # Add files 59 | ADD --chmod=755 obmp-docker/psql-app/scripts/run /usr/sbin/ 60 | COPY --chmod=755 obmp-docker/psql-app/upgrade /tmp/upgrade 61 | 62 | #---------------------------------- 63 | # Define persistent data volumes 64 | VOLUME ["/config"] 65 | 66 | #---------------------------------- 67 | # Expose ports. 68 | 69 | # Consumer JMX console 70 | EXPOSE 9005 71 | 72 | #---------------------------------- 73 | # Define working directory. 74 | WORKDIR /tmp 75 | 76 | # Base setup tasks 77 | RUN touch /usr/local/version-${VERSION} \ 78 | && chmod 755 /usr/local/openbmp/*.py 79 | 80 | #---------------------------------- 81 | # Install depends 82 | RUN apt-get update \ 83 | && apt-get install --allow-unauthenticated -y unzip curl wget whois vim rsyslog cron rsync kafkacat \ 84 | procps python3-minimal python3-distutils python3-psycopg2 python3-dnspython postgresql-client \ 85 | && ln -s /usr/bin/python3 /usr/bin/python 86 | 87 | RUN cd /tmp && curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py \ 88 | && python3 get-pip.py 89 | 90 | RUN pip install ipaddr pykafka click netaddr 91 | 92 | RUN pip3 install urllib3 requests 93 | 94 | # Cleanup 95 | RUN apt-get autoremove && apt-get clean 96 | 97 | # Define default command. 98 | CMD ["/usr/sbin/run"] 99 | 100 | -------------------------------------------------------------------------------- /postgres/README.md: -------------------------------------------------------------------------------- 1 | # OpenBMP Postgres 2 | The postgres container is a plain postgres/timescaleDB container with 3 | some modifications to support OpenBMP. Any postgres install will work as long as 4 | they have similar changes as shown in [Dockerfile](Dockerfile). 5 | 6 | ## Building 7 | See the [Dockerfile](Dockerfile) notes for build instructions. 
8 | 9 | ## Running 10 | ``` 11 | docker run --rm -it -p 5432:5432 \ 12 | -e POSTGRES_PASSWORD=openbmp \ 13 | -e POSTGRES_USER=openbmp \ 14 | -e POSTGRES_DB=openbmp \ 15 | openbmp/postgres: 16 | ``` 17 | 18 | ### Configuration/Environment Variables 19 | See both [Postgres](https://hub.docker.com/_/postgres) and 20 | [TimescaleDB](https://hub.docker.com/r/timescale/timescaledb) documentation for more 21 | information on how to configure/run the docker container. 22 | 23 | ### PostgreSQL Related 24 | 25 | #### Postgres can be killed by the Linux OOM-Killer 26 | This is very bad as it causes Postgres to restart. This will happen because postgres uses a large shared buffer, 27 | which causes the OOM to believe it's using a lot of VM. 28 | 29 | It is suggested to run the postgres server with the following Linux settings: 30 | 31 | # Update runtime 32 | sysctl -w vm.vfs_cache_pressure=500 33 | sysctl -w vm.swappiness=10 34 | sysctl -w vm.min_free_kbytes=1000000 35 | sysctl -w vm.overcommit_memory=2 36 | sysctl -w vm.overcommit_ratio=95 37 | 38 | # Update startup 39 | echo "vm.vfs_cache_pressure=500" >> /etc/sysctl.conf 40 | echo "vm.min_free_kbytes=1000000" >> /etc/sysctl.conf 41 | echo "vm.swappiness=10" >> /etc/sysctl.conf 42 | echo "vm.overcommit_memory=2" >> /etc/sysctl.conf 43 | echo "vm.overcommit_ratio=95" >> /etc/sysctl.conf 44 | 45 | 46 | See Postgres [hugepages](https://www.postgresql.org/docs/current/static/kernel-resources.html#LINUX-HUGE-PAGES) for 47 | details on how to enable and use hugepages. Some Linux distributions enable **transparent hugepages** which 48 | will prevent the ability to configure ```vm.nr_hugepages```. 
If you find that you cannot set ```vm.nr_hugepages```, 49 | then try the below: 50 | 51 | echo never > /sys/kernel/mm/transparent_hugepage/enabled 52 | echo never > /sys/kernel/mm/transparent_hugepage/defrag 53 | sync && echo 3 > /proc/sys/vm/drop_caches 54 | 55 | 56 | #### Postgres Vacuum (reclaim disk space) 57 | Postgres reclaims deleted/updated records using the vacuum process. You can run this manually/cron via the 58 | ```VACUUM``` command. **autovacuum** is used to do this periodically. Careful tuning of this 59 | is required. Checkout [autovacuum-tuning-basics](https://blog.2ndquadrant.com/autovacuum-tuning-basics/), 60 | [Routine Vacuuming](https://www.postgresql.org/docs/current/static/routine-vacuuming.html), and 61 | [VACUUM](https://www.postgresql.org/docs/current/static/sql-vacuum.html) for more details. 62 | 63 | #### Create persistent postgres locations 64 | 65 | *You should use fast SSD and/or ZFS.* Size of these locations/mount points are directly related to the 66 | number of NLRI's maintained and number of changes/updates per second. 67 | 68 | > TODO: Will post numbers of how to determine the disk size needed. For now, if you have less 69 | > than 50,000,000 prefixes, then you can use 1TB. If you have more than that, you should consider 70 | > multiple disks. ZFS can make your life easier as you can easily add disks and it supports compression. 71 | 72 | - **postgres/main** - This location will be used for the main postgres data 73 | files and tables. 
74 | 75 | > This really should be a mount point to a dedicated filesystem 76 | 77 | ``` 78 | mkdir -p /var/openbmp/postgres/main 79 | chmod 7777 /var/openbmp/postgres/main 80 | ``` 81 | 82 | - **postgres/ts** - This location will be used for the time series postgres tables 83 | 84 | > This really should be a mount point to a dedicated filesystem 85 | 86 | ``` 87 | mkdir -p /var/openbmp/postgres/ts 88 | chmod 7777 /var/openbmp/postgres/ts 89 | ``` 90 | -------------------------------------------------------------------------------- /psql-app/upgrade/upgrade_2.1.0.sql: -------------------------------------------------------------------------------- 1 | -- ----------------------------------------------------------------------- 2 | -- Copyright (c) 2022 Cisco Systems, Inc. and others. All rights reserved. 3 | -- 4 | -- Ugrade from 2.0.3 to 2.1.0 changes 5 | -- ----------------------------------------------------------------------- 6 | ALTER TABLE base_attrs SET (autovacuum_vacuum_cost_limit = 1000); 7 | ALTER TABLE base_attrs SET (autovacuum_vacuum_cost_delay = 5); 8 | 9 | alter table global_ip_rib 10 | add column advertising_peers int DEFAULT 0, 11 | add column withdrawn_peers int DEFAULT 0; 12 | 13 | CREATE OR REPLACE FUNCTION update_global_ip_rib( 14 | int_time interval DEFAULT '15 minutes' 15 | ) 16 | RETURNS void AS $$ 17 | DECLARE 18 | execution_start timestamptz := clock_timestamp(); 19 | insert_count int; 20 | BEGIN 21 | raise INFO 'Start time : %', now(); 22 | raise INFO 'Interval Time : %', int_time; 23 | raise INFO '-> Inserting rows in global_ip_rib ...'; 24 | 25 | lock table global_ip_rib IN SHARE ROW EXCLUSIVE MODE; 26 | 27 | -- Load changed prefixes only - First time will load every prefix. Expect in that case it'll take a little while. 
28 | INSERT INTO global_ip_rib (prefix,prefix_len,recv_origin_as, 29 | iswithdrawn,timestamp,first_added_timestamp,num_peers,advertising_peers,withdrawn_peers) 30 | SELECT r.prefix, 31 | max(r.prefix_len), 32 | r.origin_as, 33 | bool_and(r.iswithdrawn) as isWithdrawn, 34 | max(r.timestamp), 35 | min(r.first_added_timestamp), 36 | count(distinct r.peer_hash_id) as total_peers, 37 | count(distinct r.peer_hash_id) FILTER (WHERE r.iswithdrawn = False) as advertising_peers, 38 | count(distinct r.peer_hash_id) FILTER (WHERE r.iswithdrawn = True) as withdrawn_peers 39 | FROM ip_rib r 40 | WHERE prefix in ( 41 | SELECT prefix 42 | FROM ip_rib_log 43 | WHERE 44 | timestamp >= now() - int_time AND 45 | origin_as != 23456 46 | GROUP BY prefix 47 | ) 48 | GROUP BY r.prefix, r.origin_as 49 | ON CONFLICT (prefix,recv_origin_as) 50 | DO UPDATE SET timestamp=excluded.timestamp, 51 | first_added_timestamp=excluded.timestamp, 52 | iswithdrawn=excluded.iswithdrawn, 53 | num_peers=excluded.num_peers, 54 | advertising_peers=excluded.advertising_peers, 55 | withdrawn_peers=excluded.withdrawn_peers; 56 | 57 | GET DIAGNOSTICS insert_count = row_count; 58 | raise INFO 'Rows updated : %', insert_count; 59 | raise INFO 'Execution time : %', clock_timestamp() - execution_start; 60 | raise INFO 'Completion time: %', now(); 61 | 62 | -- Update IRR 63 | raise INFO '-> Updating IRR info'; 64 | UPDATE global_ip_rib r SET 65 | irr_origin_as=i.origin_as, 66 | irr_source=i.source, 67 | irr_descr=i.descr 68 | FROM info_route i 69 | WHERE r.timestamp >= now() - (int_time * 3) and i.prefix = r.prefix; 70 | 71 | -- Update RPKI entries - Limit query to only update what has changed in interval time 72 | -- NOTE: The global_ip_rib table should have current times when first run (new table). 73 | -- This will result in this query taking a while. After first run, it shouldn't take 74 | -- as long. 
75 | raise INFO '-> Updating RPKI info'; 76 | UPDATE global_ip_rib r SET rpki_origin_as=p.origin_as 77 | FROM rpki_validator p 78 | WHERE r.timestamp >= now() - (int_time * 3) 79 | AND p.prefix >>= r.prefix 80 | AND r.prefix_len >= p.prefix_len 81 | AND r.prefix_len <= p.prefix_len_max; 82 | 83 | END; 84 | $$ LANGUAGE plpgsql; 85 | 86 | drop index IF EXISTS global_ip_rib_iswithdrawn_timestamp_idx; 87 | CREATE INDEX ON global_ip_rib (iswithdrawn,timestamp DESC); 88 | 89 | drop index IF EXISTS ip_rib_timestamp_idx; 90 | CREATE INDEX ON ip_rib (timestamp DESC); 91 | 92 | drop index IF EXISTS global_ip_rib_iswithdrawn_idx; 93 | drop index IF EXISTS global_ip_rib_timestamp_idx; 94 | drop index IF EXISTS global_ip_rib_timestamp_prefix_idx; 95 | 96 | CREATE INDEX ON global_ip_rib (timestamp DESC); 97 | 98 | -------------------------------------------------------------------------------- /psql-app/README.md: -------------------------------------------------------------------------------- 1 | # OpenBMP Postgres Application Container 2 | This container is the main application container for OpenBMP and PostgreSQL. 3 | 4 | It provides: 5 | 6 | * PostgreSQL consumer 7 | * RPKI validator improt/sync 8 | * IRR and peering DB import/sync 9 | * Schedules and runs the metric DB functions 10 | * Schedules and runs the DB timescale DB chunk drops 11 | 12 | ## Building 13 | See the [Dockerfile](Dockerfile) notes for build instructions. 14 | 15 | ## Running 16 | 17 | ### Kafka Validation Testing 18 | The Kafka setup can be tricky due to docker networking between containers and remote systems. Kafka clustering 19 | makes use of a bootstrap server which will advertise each broker ```hostname:port``` that the consumer/producer 20 | will use. Each consumer/producer will connect to the brokers using these **advertised** hostnames and ports. The 21 | setting in Kafka to configure the broker hostname is ```advertised.listeners```. 
22 | 23 | The postgres container (**this container**) uses the **KAFKA_FQDN** as the bootstrap server, 24 | syntax is ```hostname:port```. This will work with an 25 | IP or hostname. When using a hostname, the hostname *MUST* resolve within the container. While this may work for 26 | bootstrap server connection, the advertised hostnames need to also resolve in the container. 27 | 28 | **Kafka Validation is a 3 step process** 29 | 30 | 1. Successfully connect to the bootstrap server and retrieve metadata (e.g. broker hostname:port) 31 | 2. Successfully produce a test message to ```openbmp.parsed.test``` topic 32 | 3. Successfully consume a test message from ```openbmp.parsed.test``` topic 33 | 34 | > **IMPORTANT** 35 | > If using your own Kafka install, make sure you allow producing/consuming to/from **openbmp.parsed.test** 36 | > for the consumer validation. 37 | 38 | ### Hostnames in Container 39 | You can map the Kafka hostname and each broker if they are different using two methods: 40 | 41 | 1. add ```--add-host HOSTNAME:IP``` to **docker run** command. Make sure to add one for the bootstrap and each broker. 42 | 2. Create a **/var/openbmp/config/hosts** file and add the Kafka bootstrap and broker hostname to IP mappings. 43 | 44 | ### VM Specifications 45 | 46 | #### Storage 47 | 48 | You will need to dedicate space for the postgres instance. Normally two partitions are used. A good 49 | starting size for postgres main is 500GB and postgres ts (timescaleDB) is 1TB. Both disks 50 | should be fast SSD. ZFS can be used on either of them to add compression. The size you need will depend 51 | on the number of NLRI's and updates per second. 52 | 53 | #### Memory & CPU 54 | 55 | The size of memory will depend on the type of queries and number of NLRI's. A good starting point for 56 | memory is a server with more than 48GB RAM. You can run on as little as 4GB RAM but that will only 57 | scale to about 10,000,000 NLRI's. 64GB of RAM should scale to 150,000,000 NLRI's. 
58 | 59 | The number of vCPU's also varies by the number of concurrent connections and how many threads you use for 60 | the postgres consumer. A good starting point is at least 8 vCPU's. 61 | 62 | 63 | ### 1) Install docker 64 | Follow the [Docker Instructions](https://docs.docker.com/install) to install docker CE. 65 | 66 | ### 2) Add persistent volumes 67 | 68 | Persistent volumes make it possible for upgrades without losing any data. 69 | 70 | #### (a) Create persistent config location 71 | 72 | mkdir -p /var/openbmp/config 73 | chmod 777 /var/openbmp/config 74 | 75 | ##### config/hosts 76 | You can add custom host entries so that the collector will reverse lookup IP addresses 77 | using a persistent hosts file. 78 | 79 | Run docker with ```-v /var/openbmp/config:/config``` to make use of the persistent config files. 80 | 81 | ##### config/obmp-psql.yml 82 | If the [obmp-psql.yml](https://github.com/OpenBMP/obmp-postgres/blob/master/src/main/resources/obmp-psql.yml) file 83 | does not exist, a default one will be created. You should update this based on your settings. This file 84 | is inline documented. 85 | 86 | 87 | ### 3) Run docker container 88 | 89 | > Running the docker container for the first time will download the container image. 90 | 91 | #### Environment Variables 92 | Below table lists the environment variables that can be used with ``docker run -e `` 93 | 94 | NAME | Value | Details 95 | :---- | ----- |:------- 96 | KAFKA\_FQDN | hostname or IP | Kafka broker hostname. Hostname can be an IP address. 97 | ENABLE_RPKI | 1 | Set to 1 to enable RPKI. RPKI is disabled by default 98 | ENABLE_IRR | 1 | Set to 1 to enable IRR. IRR is disabled by default 99 | MEM | number | Number value in GB to allocate to Postgres. This will be the shared_buffers value. 
100 | PGUSER | username | Postgres username, default is **openbmp** 101 | PGPASSWORD | password | Postgres password, default is **openbmp** 102 | PGDATABASE | database | Name of postgres database, default is **openbmp** 103 | 104 | #### Docker Run obmp-psql-app 105 | > **NOTE:** 106 | > If the container fails to start, it's likely due to the configuration. Check using 107 | > ```docker logs obmp-psql-app``` 108 | 109 | ``` 110 | docker run --rm -d --name obmp-psql-app \ 111 | -h obmp-psql-app \ 112 | -e ENABLE_RPKI=1 \ 113 | -e ENABLE_IRR=1 \ 114 | -e KAFKA_FQDN=kafka \ 115 | -e MEM=16 \ 116 | -v /var/openbmp/config:/config \ 117 | -p 9005:9005 -p 8080:8080 \ 118 | openbmp/psql-app:build-50 119 | ``` 120 | 121 | ### Monitoring/Troubleshooting 122 | 123 | Useful commands: 124 | 125 | - docker logs obmp-psql-app 126 | - docker exec obmp-psql-app tail -f /var/log/obmp-psql.log 127 | - docker exec obmp-psql-app tail -f /var/log/postgresql/postgresql-10-main.log 128 | - docker exec -it obmp-psql-app bash 129 | 130 | -------------------------------------------------------------------------------- /psql-app/upgrade/upgrade_2.2.2.sql: -------------------------------------------------------------------------------- 1 | -- ----------------------------------------------------------------------- 2 | -- Copyright (c) 2022 Cisco Systems, Inc. and others. All rights reserved. 
3 | -- 4 | -- Upgrade form 2.2.1 to 2.2.2 5 | -- ----------------------------------------------------------------------- 6 | CREATE INDEX IF NOT EXISTS base_attrs_next_hop_idx ON base_attrs (next_hop); 7 | 8 | 9 | CREATE OR REPLACE FUNCTION update_global_ip_rib(max_interval interval DEFAULT '2 hour') 10 | RETURNS void AS $$ 11 | DECLARE 12 | execution_start timestamptz := clock_timestamp(); 13 | insert_count int; 14 | start_time timestamptz := now(); 15 | BEGIN 16 | 17 | select time_bucket('5 minutes', timestamp - interval '5 minute') INTO start_time 18 | FROM global_ip_rib order by timestamp desc limit 1; 19 | 20 | IF start_time is null THEN 21 | start_time = time_bucket('5 minutes', now() - max_interval); 22 | raise INFO '-> Last query time is null, setting last query time within %', max_interval; 23 | ELSIF start_time < now() - max_interval THEN 24 | start_time = time_bucket('5 minutes', now() - max_interval); 25 | raise INFO '-> Last query time is greater than max % time, setting last query time', max_interval; 26 | ELSIF start_time > now() THEN 27 | start_time = time_bucket('5 minutes', now() - interval '15 minutes'); 28 | raise INFO '-> Last query time is greater than current time, setting last query time to past 15 minutes'; 29 | END IF; 30 | 31 | raise INFO 'Start time : %', execution_start; 32 | raise INFO 'Last Query Time : %', start_time; 33 | 34 | raise INFO '-> Updating changed prefixes ...'; 35 | 36 | insert_count = 0; 37 | 38 | INSERT INTO global_ip_rib (prefix,prefix_len,recv_origin_as, 39 | iswithdrawn,timestamp,first_added_timestamp,num_peers,advertising_peers,withdrawn_peers) 40 | 41 | SELECT r.prefix, 42 | max(r.prefix_len), 43 | r.origin_as, 44 | bool_and(r.iswithdrawn) as isWithdrawn, 45 | max(r.timestamp), 46 | min(r.first_added_timestamp), 47 | count(distinct r.peer_hash_id) as total_peers, 48 | count(distinct r.peer_hash_id) FILTER (WHERE r.iswithdrawn = False) as advertising_peers, 49 | count(distinct r.peer_hash_id) FILTER (WHERE 
r.iswithdrawn = True) as withdrawn_peers 50 | FROM ip_rib r 51 | WHERE 52 | (timestamp >= start_time OR first_added_timestamp >= start_time) 53 | AND origin_as != 23456 54 | GROUP BY r.prefix, r.origin_as 55 | ON CONFLICT (prefix,recv_origin_as) 56 | DO UPDATE SET timestamp=excluded.timestamp, 57 | first_added_timestamp=excluded.first_added_timestamp, 58 | iswithdrawn=excluded.iswithdrawn, 59 | num_peers=excluded.num_peers, 60 | advertising_peers=excluded.advertising_peers, 61 | withdrawn_peers=excluded.withdrawn_peers; 62 | 63 | GET DIAGNOSTICS insert_count = row_count; 64 | raise INFO 'Rows updated : %', insert_count; 65 | raise INFO 'Duration : %', clock_timestamp() - execution_start; 66 | raise INFO 'Completion time: %', clock_timestamp(); 67 | 68 | -- Update IRR 69 | raise INFO '-> Updating IRR info'; 70 | UPDATE global_ip_rib r SET 71 | irr_origin_as=i.origin_as, 72 | irr_source=i.source, 73 | irr_descr=i.descr 74 | FROM info_route i 75 | WHERE r.timestamp >= start_time and i.prefix = r.prefix; 76 | 77 | GET DIAGNOSTICS insert_count = row_count; 78 | raise INFO 'Rows updated : %', insert_count; 79 | raise INFO 'Duration : %', clock_timestamp() - execution_start; 80 | raise INFO 'Completion time: %', clock_timestamp(); 81 | 82 | 83 | -- Update RPKI entries - Limit query to only update what has changed in interval time 84 | -- NOTE: The global_ip_rib table should have current times when first run (new table). 85 | -- This will result in this query taking a while. After first run, it shouldn't take 86 | -- as long. 
87 | raise INFO '-> Updating RPKI info'; 88 | UPDATE global_ip_rib r SET rpki_origin_as=p.origin_as 89 | FROM rpki_validator p 90 | WHERE r.timestamp >= start_time 91 | AND p.prefix >>= r.prefix 92 | AND r.prefix_len >= p.prefix_len 93 | AND r.prefix_len <= p.prefix_len_max; 94 | 95 | GET DIAGNOSTICS insert_count = row_count; 96 | raise INFO 'Rows updated : %', insert_count; 97 | raise INFO 'Duration : %', clock_timestamp() - execution_start; 98 | 99 | 100 | raise INFO 'Completion time: %', clock_timestamp(); 101 | 102 | END; 103 | $$ LANGUAGE plpgsql; 104 | 105 | drop view IF EXISTS v_ls_links CASCADE; 106 | 107 | ALTER TABLE ls_links 108 | ALTER COLUMN admin_group TYPE bigint, 109 | ALTER COLUMN unreserved_bw TYPE varchar(128); 110 | ALTER TABLE ls_links_log 111 | ALTER COLUMN admin_group TYPE bigint, 112 | ALTER COLUMN unreserved_bw TYPE varchar(128); 113 | 114 | 115 | CREATE VIEW v_ls_links AS 116 | SELECT localn.name as Local_Router_Name,remoten.name as Remote_Router_Name, 117 | localn.igp_router_id as Local_IGP_RouterId,localn.router_id as Local_RouterId, 118 | remoten.igp_router_id Remote_IGP_RouterId, remoten.router_id as Remote_RouterId, 119 | localn.seq, localn.bgp_ls_id as bgpls_id, 120 | CASE WHEN ln.protocol in ('OSPFv2', 'OSPFv3') THEN localn.ospf_area_id ELSE localn.isis_area_id END as AreaId, 121 | ln.mt_id as MT_ID,interface_addr as InterfaceIP,neighbor_addr as NeighborIP, 122 | ln.isIPv4,ln.protocol,igp_metric,local_link_id,remote_link_id,admin_group,max_link_bw,max_resv_bw, 123 | unreserved_bw,te_def_metric,mpls_proto_mask,srlg,ln.name,ln.timestamp,local_node_hash_id,remote_node_hash_id, 124 | localn.igp_router_id as localn_igp_router_id,remoten.igp_router_id as remoten_igp_router_id, 125 | ln.base_attr_hash_id as base_attr_hash_id, ln.peer_hash_id as peer_hash_id, 126 | CASE WHEN ln.iswithdrawn THEN 'WITHDRAWN' ELSE 'ACTIVE' END as state 127 | FROM ls_links ln 128 | JOIN ls_nodes localn ON (ln.local_node_hash_id = localn.hash_id 129 | AND 
ln.peer_hash_id = localn.peer_hash_id) 130 | JOIN ls_nodes remoten ON (ln.remote_node_hash_id = remoten.hash_id 131 | AND ln.peer_hash_id = remoten.peer_hash_id); 132 | -------------------------------------------------------------------------------- /psql-app/upgrade/upgrade_2.2.1.sql: -------------------------------------------------------------------------------- 1 | -- ----------------------------------------------------------------------- 2 | -- Copyright (c) 2022 Cisco Systems, Inc. and others. All rights reserved. 3 | -- 4 | -- Ugrade from 2.2.0 to 2.2.1 changes 5 | -- ----------------------------------------------------------------------- 6 | CREATE INDEX IF NOT EXISTS ip_rib_first_added_timestamp_idx ON ip_rib (first_added_timestamp DESC); 7 | 8 | CREATE OR REPLACE FUNCTION update_global_ip_rib(max_interval interval DEFAULT '2 hour') 9 | RETURNS void AS $$ 10 | DECLARE 11 | execution_start timestamptz := clock_timestamp(); 12 | insert_count int; 13 | start_time timestamptz := now(); 14 | chg_prefix inet; 15 | BEGIN 16 | 17 | select time_bucket('5 minutes', timestamp - interval '5 minute') INTO start_time 18 | FROM global_ip_rib order by timestamp desc limit 1; 19 | 20 | IF start_time is null THEN 21 | start_time = time_bucket('5 minutes', now() - max_interval); 22 | raise INFO '-> Last query time is null, setting last query time within %', max_interval; 23 | ELSIF start_time < now() - max_interval THEN 24 | start_time = time_bucket('5 minutes', now() - max_interval); 25 | raise INFO '-> Last query time is greater than max % time, setting last query time', max_interval; 26 | ELSIF start_time > now() THEN 27 | start_time = time_bucket('5 minutes', now() - interval '15 minutes'); 28 | raise INFO '-> Last query time is greater than current time, setting last query time to past 15 minutes'; 29 | END IF; 30 | 31 | raise INFO 'Start time : %', execution_start; 32 | raise INFO 'Last Query Time : %', start_time; 33 | 34 | raise INFO '-> Looping through changed 
prefixes ...'; 35 | 36 | insert_count = 0; 37 | 38 | FOR chg_prefix IN 39 | SELECT prefix 40 | FROM ip_rib_log WHERE timestamp >= start_time AND origin_as != 23456 41 | UNION SELECT prefix FROM ip_rib where first_added_timestamp >= start_time 42 | GROUP BY prefix 43 | 44 | LOOP 45 | insert_count = insert_count + 1; 46 | 47 | INSERT INTO global_ip_rib (prefix,prefix_len,recv_origin_as, 48 | iswithdrawn,timestamp,first_added_timestamp,num_peers,advertising_peers,withdrawn_peers) 49 | 50 | SELECT r.prefix, 51 | max(r.prefix_len), 52 | r.origin_as, 53 | bool_and(r.iswithdrawn) as isWithdrawn, 54 | max(r.timestamp), 55 | min(r.first_added_timestamp), 56 | count(distinct r.peer_hash_id) as total_peers, 57 | count(distinct r.peer_hash_id) FILTER (WHERE r.iswithdrawn = False) as advertising_peers, 58 | count(distinct r.peer_hash_id) FILTER (WHERE r.iswithdrawn = True) as withdrawn_peers 59 | FROM ip_rib r 60 | WHERE r.prefix = chg_prefix 61 | AND origin_as != 23456 62 | GROUP BY r.prefix, r.origin_as 63 | ON CONFLICT (prefix,recv_origin_as) 64 | DO UPDATE SET timestamp=excluded.timestamp, 65 | first_added_timestamp=excluded.first_added_timestamp, 66 | iswithdrawn=excluded.iswithdrawn, 67 | num_peers=excluded.num_peers, 68 | advertising_peers=excluded.advertising_peers, 69 | withdrawn_peers=excluded.withdrawn_peers; 70 | 71 | END LOOP; 72 | 73 | raise INFO 'Rows updated : %', insert_count; 74 | raise INFO 'Duration : %', clock_timestamp() - execution_start; 75 | raise INFO 'Completion time: %', clock_timestamp(); 76 | 77 | -- Update IRR 78 | raise INFO '-> Updating IRR info'; 79 | UPDATE global_ip_rib r SET 80 | irr_origin_as=i.origin_as, 81 | irr_source=i.source, 82 | irr_descr=i.descr 83 | FROM info_route i 84 | WHERE r.timestamp >= start_time and i.prefix = r.prefix; 85 | 86 | GET DIAGNOSTICS insert_count = row_count; 87 | raise INFO 'Rows updated : %', insert_count; 88 | raise INFO 'Duration : %', clock_timestamp() - execution_start; 89 | raise INFO 'Completion time: 
%', clock_timestamp(); 90 | 91 | 92 | -- Update RPKI entries - Limit query to only update what has changed in interval time 93 | -- NOTE: The global_ip_rib table should have current times when first run (new table). 94 | -- This will result in this query taking a while. After first run, it shouldn't take 95 | -- as long. 96 | raise INFO '-> Updating RPKI info'; 97 | UPDATE global_ip_rib r SET rpki_origin_as=p.origin_as 98 | FROM rpki_validator p 99 | WHERE r.timestamp >= start_time 100 | AND p.prefix >>= r.prefix 101 | AND r.prefix_len >= p.prefix_len 102 | AND r.prefix_len <= p.prefix_len_max; 103 | 104 | GET DIAGNOSTICS insert_count = row_count; 105 | raise INFO 'Rows updated : %', insert_count; 106 | raise INFO 'Duration : %', clock_timestamp() - execution_start; 107 | 108 | 109 | raise INFO 'Completion time: %', clock_timestamp(); 110 | 111 | END; 112 | $$ LANGUAGE plpgsql; 113 | 114 | CREATE OR REPLACE FUNCTION sync_global_ip_rib() 115 | RETURNS void AS $$ 116 | DECLARE 117 | execution_start timestamptz := clock_timestamp(); 118 | insert_count int; 119 | start_time timestamptz := now(); 120 | BEGIN 121 | 122 | raise INFO 'Start time : %', execution_start; 123 | 124 | INSERT INTO global_ip_rib (prefix,prefix_len,recv_origin_as, 125 | iswithdrawn,timestamp,first_added_timestamp,num_peers,advertising_peers,withdrawn_peers) 126 | 127 | SELECT r.prefix, 128 | max(r.prefix_len), 129 | r.origin_as, 130 | bool_and(r.iswithdrawn) as isWithdrawn, 131 | max(r.timestamp), 132 | min(r.first_added_timestamp), 133 | count(distinct r.peer_hash_id) as total_peers, 134 | count(distinct r.peer_hash_id) FILTER (WHERE r.iswithdrawn = False) as advertising_peers, 135 | count(distinct r.peer_hash_id) FILTER (WHERE r.iswithdrawn = True) as withdrawn_peers 136 | FROM ip_rib r 137 | WHERE origin_as != 23456 138 | GROUP BY r.prefix, r.origin_as 139 | ON CONFLICT (prefix,recv_origin_as) 140 | DO UPDATE SET timestamp=excluded.timestamp, 141 | 
first_added_timestamp=excluded.first_added_timestamp, 142 | iswithdrawn=excluded.iswithdrawn, 143 | num_peers=excluded.num_peers, 144 | advertising_peers=excluded.advertising_peers, 145 | withdrawn_peers=excluded.withdrawn_peers; 146 | 147 | GET DIAGNOSTICS insert_count = row_count; 148 | raise INFO 'Rows updated : %', insert_count; 149 | raise INFO 'Duration : %', clock_timestamp() - execution_start; 150 | raise INFO 'Completion time: %', clock_timestamp(); 151 | 152 | -- Update IRR 153 | raise INFO '-> Updating IRR info'; 154 | UPDATE global_ip_rib r SET 155 | irr_origin_as=i.origin_as, 156 | irr_source=i.source, 157 | irr_descr=i.descr 158 | FROM info_route i 159 | WHERE i.prefix = r.prefix; 160 | 161 | GET DIAGNOSTICS insert_count = row_count; 162 | raise INFO 'Rows updated : %', insert_count; 163 | raise INFO 'Duration : %', clock_timestamp() - execution_start; 164 | raise INFO 'Completion time: %', clock_timestamp(); 165 | 166 | 167 | -- Update RPKI entries - Limit query to only update what has changed in interval time 168 | -- NOTE: The global_ip_rib table should have current times when first run (new table). 169 | -- This will result in this query taking a while. After first run, it shouldn't take 170 | -- as long. 
171 | raise INFO '-> Updating RPKI info'; 172 | UPDATE global_ip_rib r SET rpki_origin_as=p.origin_as 173 | FROM rpki_validator p 174 | WHERE 175 | p.prefix >>= r.prefix 176 | AND r.prefix_len >= p.prefix_len 177 | AND r.prefix_len <= p.prefix_len_max; 178 | 179 | GET DIAGNOSTICS insert_count = row_count; 180 | raise INFO 'Rows updated : %', insert_count; 181 | raise INFO 'Duration : %', clock_timestamp() - execution_start; 182 | 183 | 184 | raise INFO 'Completion time: %', clock_timestamp(); 185 | 186 | END; 187 | $$ LANGUAGE plpgsql; 188 | 189 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3' 3 | volumes: 4 | data-volume: 5 | driver_opts: 6 | type: none 7 | device: ${OBMP_DATA_ROOT}/postgres/data 8 | o: bind 9 | ts-volume: 10 | driver_opts: 11 | type: none 12 | device: ${OBMP_DATA_ROOT}/postgres/ts 13 | o: bind 14 | 15 | services: 16 | 17 | zookeeper: 18 | restart: unless-stopped 19 | container_name: obmp-zookeeper 20 | image: confluentinc/cp-zookeeper:7.1.1 21 | volumes: 22 | - ${OBMP_DATA_ROOT}/zk-data:/var/lib/zookeeper/data 23 | - ${OBMP_DATA_ROOT}/zk-log:/var/lib/zookeeper/log 24 | environment: 25 | ZOOKEEPER_CLIENT_PORT: 2181 26 | ZOOKEEPER_TICK_TIME: 2000 27 | 28 | kafka: 29 | restart: unless-stopped 30 | container_name: obmp-kafka 31 | image: confluentinc/cp-kafka:7.1.1 32 | 33 | # Change the mount point to where you want to store Kafka data. 34 | # Normally 80GB or more 35 | volumes: 36 | - ${OBMP_DATA_ROOT}/kafka-data:/var/lib/kafka/data 37 | depends_on: 38 | - zookeeper 39 | ports: 40 | - "9092:9092" 41 | environment: 42 | KAFKA_BROKER_ID: 1 43 | KAFKA_ZOOKEEPER_CONNECT: obmp-zookeeper:2181 44 | 45 | # Change/add listeners based on your FQDN that the host and other containers can access. You can use 46 | # an IP address as well. 
By default, only within the compose/containers can Kafka be accessed 47 | # using port 29092. Outside access can be enabled, but you should use an FQDN listener. 48 | #KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://:9092 49 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://obmp-kafka:29092 50 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT 51 | KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT 52 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 53 | KAFKA_NUM_PARTITIONS: 8 54 | KAFKA_LOG_RETENTION_MINUTES: 90 55 | KAFKA_LOG_ROLL_MS: 3600000 56 | KAFKA_LOG_SEGMENT_BYTES: 1073741824 57 | KAFKA_MESSAGE_MAX_BYTES: 100000000 58 | KAFKA_LOG_CLEANER_THREADS: 2 59 | 60 | # TODO: Move comment to documentation 61 | # You can use SWAG and Let's Encrypt to front the Grafana HTTP port 3000 instance. Below is an example 62 | # Create the SWAG directory using "sudo mkdir -m 777 ${OBMP_DATA_ROOT}/swag" 63 | # swag: 64 | # image: linuxserver/letsencrypt:version-1.11.0 65 | # container_name: obmp-swag 66 | # cap_add: 67 | # - NET_ADMIN 68 | # environment: 69 | # - PUID=1000 70 | # - PGID=1000 71 | # - TZ=UTC 72 | # - URL= 73 | # - SUBDOMAINS=, 74 | # - VALIDATION=http 75 | # - EMAIL= 76 | # - ONLY_SUBDOMAINS=true 77 | # - STAGING=false 78 | # volumes: 79 | # - /var/openbmp/swag:/config 80 | # ports: 81 | # - 443:443 82 | # - 80:80 #optional 83 | # restart: unless-stopped 84 | 85 | grafana: 86 | restart: unless-stopped 87 | container_name: obmp-grafana 88 | image: grafana/grafana:9.1.7 89 | ports: 90 | - "3000:3000" 91 | volumes: 92 | - ${OBMP_DATA_ROOT}/grafana:/var/lib/grafana 93 | - ${OBMP_DATA_ROOT}/grafana/provisioning:/etc/grafana/provisioning/ 94 | environment: 95 | - GF_SECURITY_ADMIN_PASSWORD=openbmp 96 | - GF_AUTH_ANONYMOUS_ENABLED=true 97 | - GF_USERS_HOME_PAGE=d/obmp-home/obmp-home 98 | - GF_INSTALL_PLUGINS=agenty-flowcharting-panel,grafana-piechart-panel,grafana-worldmap-panel,grafana-simple-json-datasource,vonage-status-panel 99 
| 100 | # TODO: Move comment block to documentation 101 | # The below is an example of how to enable OAuth 102 | #- GF_AUTH_GENERIC_OAUTH_NAME=MyCompany SSO 103 | #- GF_AUTH_GENERIC_OAUTH_ICON=signin 104 | #- GF_AUTH_GENERIC_OAUTH_ENABLED=true 105 | #- GF_AUTH_GENERIC_OAUTH_CLIENT_ID= 106 | #- GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET= 107 | #- GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP=true 108 | #- GF_AUTH_GENERIC_OAUTH_AUTH_URL= 109 | #- GF_AUTH_GENERIC_OAUTH_TOKEN_URL= 117 | #- GF_SERVER_ROOT_URL=%(protocol)s://%(domain)s:%(http_port)s/ 118 | 119 | psql: 120 | restart: unless-stopped 121 | container_name: obmp-psql 122 | image: openbmp/postgres:2.2.1 123 | privileged: true 124 | shm_size: 1536m 125 | sysctls: 126 | - net.ipv4.tcp_keepalive_intvl=30 127 | - net.ipv4.tcp_keepalive_probes=5 128 | - net.ipv4.tcp_keepalive_time=180 129 | ports: 130 | - "5432:5432" 131 | volumes: 132 | - data-volume:/var/lib/postgresql/data 133 | - ts-volume:/var/lib/postgresql/ts 134 | # alter_job max_runtime in _timescaledb_config.bgw_job ( https://docs.timescale.com/latest/api#alter_job ) 135 | command: > 136 | -c max_wal_size=10GB 137 | environment: 138 | - POSTGRES_PASSWORD=openbmp 139 | - POSTGRES_USER=openbmp 140 | - POSTGRES_DB=openbmp 141 | 142 | collector: 143 | restart: unless-stopped 144 | container_name: obmp-collector 145 | image: openbmp/collector:2.2.3 146 | sysctls: 147 | - net.ipv4.tcp_keepalive_intvl=30 148 | - net.ipv4.tcp_keepalive_probes=5 149 | - net.ipv4.tcp_keepalive_time=180 150 | ports: 151 | - "5000:5000" 152 | volumes: 153 | - ${OBMP_DATA_ROOT}/config:/config 154 | environment: 155 | - KAFKA_FQDN=obmp-kafka:29092 156 | 157 | psql-app: 158 | restart: unless-stopped 159 | container_name: obmp-psql-app 160 | image: openbmp/psql-app:2.2.2 161 | sysctls: 162 | - net.ipv4.tcp_keepalive_intvl=30 163 | - net.ipv4.tcp_keepalive_probes=5 164 | - net.ipv4.tcp_keepalive_time=180 165 | ports: 166 | - "9005:9005" 167 | 168 | volumes: 169 | - ${OBMP_DATA_ROOT}/config:/config 170 | 
environment: 171 | - MEM=3 # Set memory to at least 2GB but ideally 4GB 172 | - KAFKA_FQDN=obmp-kafka:29092 173 | - RPKI_URL=https://rpki.cloudflare.com/rpki.json # define the URL to retrieve json encoded RPKI data 174 | - RPKI_PASS=None 175 | - RPKI_USER=None 176 | - ENABLE_RPKI=1 # 1 enables, 0 disables RPKI sync 177 | - ENABLE_IRR=1 # 1 enables, 0 disables IRR sync 178 | - ENABLE_DBIP=1 # 1 enables, 0 disables DBIP import 179 | - POSTGRES_REPORT_WINDOW='8 minute' # default POSTGRES window to select when building 180 | # summary tables. For deployments that absorb large 181 | # bursts increase the value, ex 60 minute 182 | - POSTGRES_PASSWORD=openbmp 183 | - POSTGRES_USER=openbmp 184 | - POSTGRES_DB=openbmp 185 | - POSTGRES_HOST=obmp-psql 186 | - POSTGRES_PORT=5432 187 | - POSTGRES_DROP_peer_event_log='1 year' 188 | - POSTGRES_DROP_stat_reports='4 weeks' 189 | - POSTGRES_DROP_ip_rib_log='4 weeks' 190 | - POSTGRES_DROP_alerts='4 weeks' 191 | - POSTGRES_DROP_ls_nodes_log='4 months' 192 | - POSTGRES_DROP_ls_links_log='4 months' 193 | - POSTGRES_DROP_ls_prefixes_log='4 months' 194 | - POSTGRES_DROP_stats_chg_byprefix='4 weeks' 195 | - POSTGRES_DROP_stats_chg_byasn='4 weeks' 196 | - POSTGRES_DROP_stats_chg_bypeer='4 weeks' 197 | - POSTGRES_DROP_stats_ip_origins='4 weeks' 198 | - POSTGRES_DROP_stats_peer_rib='4 weeks' 199 | - POSTGRES_DROP_stats_peer_update_counts='4 weeks' 200 | 201 | whois: 202 | restart: unless-stopped 203 | container_name: obmp-whois 204 | image: openbmp/whois:2.2.0 205 | sysctls: 206 | - net.ipv4.tcp_keepalive_intvl=30 207 | - net.ipv4.tcp_keepalive_probes=5 208 | - net.ipv4.tcp_keepalive_time=180 209 | ports: 210 | - "4300:43" 211 | # volumes: 212 | # - ${OBMP_DATA_ROOT}/config:/config 213 | environment: 214 | - POSTGRES_PASSWORD=openbmp 215 | - POSTGRES_USER=openbmp 216 | - POSTGRES_DB=openbmp 217 | - POSTGRES_HOST=obmp-psql 218 | - POSTGRES_PORT=5432 -------------------------------------------------------------------------------- 
/psql-app/upgrade/upgrade_2.2.0.sql:
--------------------------------------------------------------------------------
-- -----------------------------------------------------------------------
-- Copyright (c) 2022 Cisco Systems, Inc. and others. All rights reserved.
--
-- Upgrade from 2.1.0 to 2.2.0 changes
-- -----------------------------------------------------------------------
CREATE INDEX IF NOT EXISTS ip_rib_prefix_idx ON ip_rib (prefix);

-- Incrementally rebuild the global_ip_rib aggregation table from ip_rib /
-- ip_rib_log changes seen since the last run, then refresh IRR and RPKI
-- annotations for the touched rows.  max_interval caps how far back the
-- incremental window may reach.
CREATE OR REPLACE FUNCTION update_global_ip_rib(max_interval interval DEFAULT '2 hour')
    RETURNS void AS $$
DECLARE
    execution_start timestamptz := clock_timestamp();
    insert_count    int;
    start_time      timestamptz := now();
    chg_prefix      inet;
BEGIN
    -- Resume from the most recent aggregation bucket (5-minute buckets,
    -- backed off by one bucket), clamped to [now() - max_interval, now()].
    select time_bucket('5 minutes', timestamp - interval '5 minute') INTO start_time
      FROM global_ip_rib order by timestamp desc limit 1;

    IF start_time is null THEN
        start_time = time_bucket('5 minutes', now() - max_interval);
        raise INFO '-> Last query time is null, setting last query time within %', max_interval;
    ELSIF start_time < now() - max_interval THEN
        start_time = time_bucket('5 minutes', now() - max_interval);
        raise INFO '-> Last query time is greater than max % time, setting last query time', max_interval;
    ELSIF start_time > now() THEN
        start_time = time_bucket('5 minutes', now() - interval '15 minutes');
        raise INFO '-> Last query time is greater than current time, setting last query time to past 15 minutes';
    END IF;

    raise INFO 'Start time : %', execution_start;
    raise INFO 'Last Query Time : %', start_time;

    raise INFO '-> Looping through changed prefixes ...';

    insert_count = 0;

    -- Candidate set: prefixes that logged a change, or were newly added,
    -- since start_time.  Origin AS 23456 (AS_TRANS) is excluded.
    FOR chg_prefix IN
        SELECT prefix
          FROM ip_rib_log WHERE timestamp >= start_time AND origin_as != 23456
         UNION SELECT prefix FROM ip_rib where first_added_timestamp >= start_time
         GROUP BY prefix
    LOOP
        insert_count = insert_count + 1;

        -- Upsert per (prefix, origin_as) with peer counts split by
        -- advertised vs. withdrawn state.
        INSERT INTO global_ip_rib (prefix,prefix_len,recv_origin_as,
                                   iswithdrawn,timestamp,first_added_timestamp,num_peers,advertising_peers,withdrawn_peers)
            SELECT r.prefix,
                   max(r.prefix_len),
                   r.origin_as,
                   bool_and(r.iswithdrawn) as isWithdrawn,
                   max(r.timestamp),
                   min(r.first_added_timestamp),
                   count(distinct r.peer_hash_id) as total_peers,
                   count(distinct r.peer_hash_id) FILTER (WHERE r.iswithdrawn = False) as advertising_peers,
                   count(distinct r.peer_hash_id) FILTER (WHERE r.iswithdrawn = True) as withdrawn_peers
              FROM ip_rib r
             WHERE r.prefix = chg_prefix
               AND origin_as != 23456
             GROUP BY r.prefix, r.origin_as
        ON CONFLICT (prefix,recv_origin_as)
            DO UPDATE SET timestamp=excluded.timestamp,
                          first_added_timestamp=excluded.first_added_timestamp,
                          iswithdrawn=excluded.iswithdrawn,
                          num_peers=excluded.num_peers,
                          advertising_peers=excluded.advertising_peers,
                          withdrawn_peers=excluded.withdrawn_peers;
    END LOOP;

    raise INFO 'Rows updated : %', insert_count;
    raise INFO 'Duration : %', clock_timestamp() - execution_start;
    raise INFO 'Completion time: %', clock_timestamp();

    -- Update IRR
    raise INFO '-> Updating IRR info';
    UPDATE global_ip_rib r SET
           irr_origin_as=i.origin_as,
           irr_source=i.source,
           irr_descr=i.descr
      FROM info_route i
     WHERE r.timestamp >= start_time and i.prefix = r.prefix;

    GET DIAGNOSTICS insert_count = row_count;
    raise INFO 'Rows updated : %', insert_count;
    raise INFO 'Duration : %', clock_timestamp() - execution_start;
    raise INFO 'Completion time: %', clock_timestamp();

    -- Update RPKI entries - Limit query to only update what has changed in interval time
    -- NOTE: The global_ip_rib table should have current times when first run (new table).
    --       This will result in this query taking a while. After first run, it shouldn't take
    --       as long.
    raise INFO '-> Updating RPKI info';
    UPDATE global_ip_rib r SET rpki_origin_as=p.origin_as
      FROM rpki_validator p
     WHERE r.timestamp >= start_time
       AND p.prefix >>= r.prefix
       AND r.prefix_len >= p.prefix_len
       AND r.prefix_len <= p.prefix_len_max;

    GET DIAGNOSTICS insert_count = row_count;
    raise INFO 'Rows updated : %', insert_count;
    raise INFO 'Duration : %', clock_timestamp() - execution_start;

    raise INFO 'Completion time: %', clock_timestamp();

END;
$$ LANGUAGE plpgsql;

-- Flat per-peer view of the IPv4/IPv6 RIB with attributes and router/peer names.
drop view IF EXISTS v_ip_routes CASCADE;
CREATE VIEW v_ip_routes AS
  SELECT CASE WHEN length(rtr.name) > 0 THEN rtr.name ELSE host(rtr.ip_address) END AS RouterName,
         CASE WHEN length(p.name) > 0 THEN p.name ELSE host(p.peer_addr) END AS PeerName,
         r.prefix AS Prefix, r.prefix_len AS PrefixLen,
         attr.origin AS Origin, r.origin_as AS Origin_AS, attr.med AS MED,
         attr.local_pref AS LocalPref, attr.next_hop AS NH, attr.as_path AS AS_Path,
         attr.as_path_count AS ASPath_Count, attr.community_list AS Communities,
         attr.ext_community_list AS ExtCommunities, attr.large_community_list AS LargeCommunities,
         attr.cluster_list AS ClusterList,
         attr.originator_id as Originator, attr.aggregator AS Aggregator,
         p.peer_addr AS PeerAddress, p.peer_as AS PeerASN, r.isIPv4 as isIPv4,
         p.isIPv4 as isPeerIPv4, p.isL3VPNpeer as isPeerVPN,
         r.timestamp AS LastModified, r.first_added_timestamp as FirstAddedTimestamp,
         r.path_id, r.labels,
         r.hash_id as rib_hash_id,
         r.base_attr_hash_id as base_hash_id, r.peer_hash_id, rtr.hash_id as router_hash_id,
         r.isWithdrawn,
         r.isPrePolicy, r.isAdjRibIn
    FROM ip_rib r
         JOIN bgp_peers p ON (r.peer_hash_id = p.hash_id)
         JOIN base_attrs attr ON (attr.hash_id = r.base_attr_hash_id and attr.peer_hash_id = r.peer_hash_id)
         JOIN routers rtr ON (p.router_hash_id = rtr.hash_id);

-- Same as v_ip_routes but joined with geo_ip location data; withdrawn
-- routes are excluded.
drop view IF EXISTS v_ip_routes_geo CASCADE;
CREATE VIEW v_ip_routes_geo AS
  SELECT CASE WHEN length(rtr.name) > 0 THEN rtr.name ELSE host(rtr.ip_address) END AS RouterName,
         CASE WHEN length(p.name) > 0 THEN p.name ELSE host(p.peer_addr) END AS PeerName,
         r.prefix AS Prefix, r.prefix_len AS PrefixLen,
         attr.origin AS Origin, r.origin_as AS Origin_AS, attr.med AS MED,
         attr.local_pref AS LocalPref, attr.next_hop AS NH, attr.as_path AS AS_Path,
         attr.as_path_count AS ASPath_Count, attr.community_list AS Communities,
         attr.ext_community_list AS ExtCommunities, attr.large_community_list AS LargeCommunities,
         attr.cluster_list AS ClusterList, attr.originator_id as Originator,
         attr.aggregator AS Aggregator, p.peer_addr AS PeerAddress, p.peer_as AS PeerASN, r.isIPv4 as isIPv4,
         p.isIPv4 as isPeerIPv4, p.isL3VPNpeer as isPeerVPN,
         r.timestamp AS LastModified, r.first_added_timestamp as FirstAddedTimestamp,
         r.path_id, r.labels,
         r.hash_id as rib_hash_id,
         r.base_attr_hash_id as base_hash_id, r.peer_hash_id, rtr.hash_id as router_hash_id,
         r.isWithdrawn,
         r.isPrePolicy, r.isAdjRibIn,
         g.ip as geo_ip, g.city as City, g.stateprov as stateprov, g.country as country,
         g.latitude as latitude, g.longitude as longitude
    FROM ip_rib r
         JOIN bgp_peers p ON (r.peer_hash_id = p.hash_id)
         JOIN base_attrs attr ON (attr.hash_id = r.base_attr_hash_id and attr.peer_hash_id = r.peer_hash_id)
         JOIN routers rtr ON (p.router_hash_id = rtr.hash_id)
         LEFT JOIN geo_ip g ON (g.ip && host(r.prefix)::inet)
   WHERE r.isWithdrawn = false;

-- Historical advertise/withdraw events from ip_rib_log with attributes.
drop view IF EXISTS v_ip_routes_history CASCADE;
CREATE VIEW v_ip_routes_history AS
  SELECT CASE WHEN length(rtr.name) > 0 THEN rtr.name ELSE host(rtr.ip_address) END AS RouterName,
         rtr.ip_address as RouterAddress,
         CASE WHEN length(p.name) > 0 THEN p.name ELSE host(p.peer_addr) END AS PeerName,
         log.prefix AS Prefix, log.prefix_len AS PrefixLen,
         attr.origin AS Origin, log.origin_as AS Origin_AS,
         attr.med AS MED, attr.local_pref AS LocalPref, attr.next_hop AS NH,
         attr.as_path AS AS_Path, attr.as_path_count AS ASPath_Count, attr.community_list AS Communities,
         attr.ext_community_list AS ExtCommunities, attr.large_community_list AS LargeCommunities,
         attr.cluster_list AS ClusterList, attr.originator_id as Originator,
         attr.aggregator AS Aggregator, p.peer_addr AS PeerIp,
         p.peer_as AS PeerASN, p.isIPv4 as isPeerIPv4, p.isL3VPNpeer as isPeerVPN,
         log.id, log.timestamp AS LastModified,
         CASE WHEN log.iswithdrawn THEN 'Withdrawn' ELSE 'Advertised' END as event,
         log.base_attr_hash_id as base_attr_hash_id, log.peer_hash_id, rtr.hash_id as router_hash_id
    FROM ip_rib_log log
         JOIN base_attrs attr
              ON (log.base_attr_hash_id = attr.hash_id AND
                  log.peer_hash_id = attr.peer_hash_id)
         JOIN bgp_peers p ON (log.peer_hash_id = p.hash_id)
         JOIN routers rtr ON (p.router_hash_id = rtr.hash_id);

--------------------------------------------------------------------------------
/psql-app/scripts/run:
--------------------------------------------------------------------------------
#!/bin/bash
# Postgres Backend: Run script
#
# Copyright (c) 2021-2022 Cisco Systems, Inc. and others. All rights reserved.
#
# Author: Tim Evens
#

# Postgres details - Can be set using docker -e
export POSTGRES_USER=${POSTGRES_USER:="openbmp"}
export POSTGRES_PASSWORD=${POSTGRES_PASSWORD:="openbmp"}
export POSTGRES_HOST=${POSTGRES_HOST:="127.0.0.1"}
export POSTGRES_PORT=${POSTGRES_PORT:="5432"}
export POSTGRES_DB=${POSTGRES_DB:="openbmp"}
export POSTGRES_SSL_ENABLE=${POSTGRES_SSL_ENABLE:="true"}
export POSTGRES_SSL_MODE=${POSTGRES_SSL_MODE:="require"}
export MEM=${MEM:="1"}                 # Java consumer heap size, in gigabytes
export PGCONNECT_TIMEOUT=15

# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Functions
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

# -----------------------------------------------
# Check Kafka to make sure it's valid
# -----------------------------------------------
check_kafka() {
    echo "===> Performing Kafka check"

    if [[ ${KAFKA_FQDN:-""} == "" ]]; then
        echo "ERROR: Missing ENV KAFKA_FQDN. Cannot proceed until you add that in docker run -e KAFKA_FQDN=<...>"
        exit 1
    fi

    echo "===> Checking Kafka bootstrap server connection"
    if ! kafkacat -u -b "$KAFKA_FQDN" -L | grep broker; then
        echo "ERROR: Failed to connect to Kafka at $KAFKA_FQDN, check the docker run -e KAFKA_FQDN= value"
        exit 1
    fi

    # Produce a test message, then verify it can be consumed.  This catches
    # brokers whose 'advertised.listeners' hostname is not reachable from
    # inside this container even though the bootstrap connection worked.
    echo "testing" | timeout 5 kafkacat -b "$KAFKA_FQDN" -P -t openbmp.parsed.test
    echo "===> Checking if we can successfully consume messages"
    if ! timeout 5 kafkacat -u -b "$KAFKA_FQDN" -C -c 1 -o beginning -t openbmp.parsed.test > /dev/null; then
        echo "ERROR: Failed to connect to Kafka broker, check the Kafka 'advertised.listeners' configuration."
        echo "       Advertised hostname must be reachable within the container. You can run this container"
        echo "       with --add-host <hostname>:<ip> to map the ip address within the container."
        echo "       You can also add/update the persistent /config/hosts file with the broker hostname/ip."
        exit 1
    fi
}

# -----------------------------------------------
# Configure Postgres shell profile
# -----------------------------------------------
# Writes /usr/local/openbmp/pg_profile, which is sourced by this script and
# by every cron job so psql picks up connection settings from the environment.
config_postgres_profile() {
    echo "===> Configuring PostgreSQL Shell Profile"

    {
        echo "export PGUSER=$POSTGRES_USER"
        echo "export PGPASSWORD=$POSTGRES_PASSWORD"
        echo "export PGHOST=$POSTGRES_HOST"
        echo "export PGPORT=$POSTGRES_PORT"
        echo "export PGDATABASE=$POSTGRES_DB"
    } > /usr/local/openbmp/pg_profile
}

# -----------------------------------------------
# Initdb Postgres
# -----------------------------------------------
# Loads the schema files once; /config/do_not_init_db marks completion so
# container restarts do not re-initialize the DB.
initdb_postgres() {
    if [[ ! -f /config/do_not_init_db ]]; then
        echo " ===> Initializing the DB"

        echo "Waiting for postgres to start..."
        until psql -c "select 1;" > /dev/null 2>&1; do
            echo "    postgres not running, sleeping for 20 seconds..."
            sleep 20
        done

        # Load the schema files in version order (sort -V replaces the
        # previous unsafe 'for f in $(ls -v ...)' parsing of ls output)
        echo " ===> Loading Schemas"

        echo "------" > /var/log/db_schema_load.log
        while IFS= read -r file; do
            echo " ===[ $file ] ========================================" >> /var/log/db_schema_load.log
            psql < "$file" >> /var/log/db_schema_load.log 2>&1
        done < <(printf '%s\n' /usr/local/openbmp/database/*.sql | sort -V)

        touch /config/do_not_init_db
    fi
}

# -----------------------------------------------
# Update hosts file
# -----------------------------------------------
# Appends user-provided host mappings (e.g. Kafka broker names) to /etc/hosts.
update_hosts() {
    echo "===> Updating /etc/hosts"

    # Update the etc hosts file
    if [[ -f /config/hosts ]]; then
        cat /config/hosts >> /etc/hosts
    fi
}

# -----------------------------------------------
# Enable RPKI
# -----------------------------------------------
enable_rpki() {
    echo "===> Enabling RPKI"

    # NOTE(review): the interior of this heredoc (the actual cron entry) was
    # lost in the source extraction; only the '<<SETVAR' framing and the
    # trailing '> /var/log/cron-rpki-import.log' redirect survived.  The
    # entry below is a reconstruction - confirm against upstream obmp-docker
    # before relying on the schedule or script path.
    cat > /etc/cron.d/openbmp-rpki <<SETVAR
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin

# Sync RPKI data (RPKI_URL/RPKI_USER/RPKI_PASS come from the container env)
31 */2 * * * root . /usr/local/openbmp/pg_profile && flock -n /tmp/locks/rpki.lock /usr/local/openbmp/rpki/rpki-import.sh > /var/log/cron-rpki-import.log 2>&1

SETVAR
}

# -----------------------------------------------
# Enable DB-IP import
# -----------------------------------------------
enable_dbip() {
    echo "===> Enabling DB-IP Import"

    # NOTE(review): heredoc interior lost in extraction; reconstructed.
    cat > /etc/cron.d/openbmp-dbip <<SETVAR
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin

# Import DB-IP geo data
10 2 1 * * root . /usr/local/openbmp/pg_profile && flock -n /tmp/locks/dbip.lock /usr/local/openbmp/db-ip-import.sh > /var/log/cron-dbip-import.log 2>&1

SETVAR

    # Load DB-IP on start.
    # Fix: original opened the same log file twice ('2>file >file'), which
    # races/truncates; '> file 2>&1' merges both streams correctly.
    echo "Running DB-IP Import"
    /usr/local/openbmp/db-ip-import.sh > /var/log/cron-dbip-import.log 2>&1 &
}

# -----------------------------------------------
# Enable IRR
# -----------------------------------------------
enable_irr() {
    echo "===> Enabling IRR"

    # NOTE(review): heredoc interior lost in extraction; reconstructed.
    cat > /etc/cron.d/openbmp-irr <<SETVAR
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin

# Refresh IRR route objects
1 1 * * * root . /usr/local/openbmp/pg_profile && flock -n /tmp/locks/irr.lock /usr/local/openbmp/gen_whois_route.py -u $PGUSER -p $PGPASSWORD $PGHOST > /var/log/irr_load.log 2>&1

SETVAR

    # Load IRR data now, in the background.
    # Fix: original used '2>/file >/file' (double-open) - see enable_dbip.
    echo "Loading IRR data"
    /usr/local/openbmp/gen_whois_route.py -u "$PGUSER" -p "$PGPASSWORD" "$PGHOST" > /var/log/irr_load.log 2>&1 &
}

# -----------------------------------------------
# config_cron
# -----------------------------------------------
# Installs the recurring maintenance jobs (stats aggregation, global RIB
# refresh, origin stats).  All jobs source pg_profile and serialize via flock.
config_cron() {
    # NOTE(review): the heredoc header and the first (ASN load) entry were
    # partially lost in extraction; their schedule/script path below is a
    # reconstruction - confirm against upstream obmp-docker.
    cat > /etc/cron.d/openbmp <<SETVAR
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin

# Nightly reference-data loads
0 1 * * * root . /usr/local/openbmp/pg_profile && flock -n /tmp/locks/asn_load.lock /usr/local/openbmp/gen-asn-distribution.py > /var/log/asn_load.log 2>&1
5 1,12 * * * root . /usr/local/openbmp/pg_profile && flock -n /tmp/locks/peeringdb.lock /usr/local/openbmp/peeringdb.py > /var/log/cron-peeringdb.log 2>&1

# Update aggregation table stats
*/5 * * * * root . /usr/local/openbmp/pg_profile && flock -n /tmp/locks/update_chg_stats.lock psql -c "select update_chg_stats('5 minute')" > /var/log/cron-update_chg_stats.log 2>&1
*/5 * * * * root . /usr/local/openbmp/pg_profile && flock -n /tmp/locks/update_l3vpn_chg_stats.lock psql -c "select update_l3vpn_chg_stats('5 minute')" > /var/log/cron-update_l3vpn_chg_stats.log 2>&1

# Update peer rib counts
*/15 * * * * root . /usr/local/openbmp/pg_profile && flock -n /tmp/locks/update_peer_rib_counts.lock psql -c "select update_peer_rib_counts()" > /var/log/cron-update_peer_rib_counts.log 2>&1

# Update peer update counts
*/30 * * * * root . /usr/local/openbmp/pg_profile && flock -n /tmp/locks/update_peer_counts.lock psql -c "select update_peer_update_counts(1800)" > /var/log/cron-update_peer_counts.log 2>&1

# Update global rib
*/5 * * * * root . /usr/local/openbmp/pg_profile && flock -n /tmp/locks/global_ip_rib.lock psql -c "select update_global_ip_rib();" > /var/log/cron-update_global_ip_rib.log 2>&1
5 */4 * * * root . /usr/local/openbmp/pg_profile && flock -n /tmp/locks/global_ip_rib.lock psql -c "select purge_global_ip_rib('6 hour');" > /var/log/cron-purge_global_ip_rib.log 2>&1

# Update origin stats
21 * * * * root . /usr/local/openbmp/pg_profile && flock -n /tmp/locks/update_origin_stats.lock psql -c "select update_origin_stats('1 hour');" > /var/log/cron-update_origin_stats.log 2>&1

SETVAR
}

# -----------------------------------------------
# Upgrade SQL
# -----------------------------------------------
# On an existing DB, applies any not-yet-applied per-version upgrade script;
# on a fresh install (schema already current) just records all versions as
# applied.  Marker files live in the persistent /config volume.
upgrade() {
    local ver

    if [[ -f /config/do_not_init_db ]]; then
        for ver in 2.1.0 2.2.0 2.2.1 2.2.2; do
            if [[ ! -f /config/psql-app-upgraded.$ver ]]; then
                echo "===> Upgrading to $ver"
                /tmp/upgrade/upgrade_$ver.sh
                touch /config/psql-app-upgraded.$ver
                echo "===> Done with upgrade"
            fi
        done
    else
        # Fresh install: schema files already contain all changes
        for ver in 2.1.0 2.2.0 2.2.1 2.2.2; do
            touch /config/psql-app-upgraded.$ver
        done
    fi
}

# -----------------------------------------------
# run_consumer
# -----------------------------------------------
# Seeds /config/obmp-psql.yml from the jar on first run, patches it with the
# container's Kafka/Postgres settings, then starts the consumer in background.
run_consumer() {
    echo "===> Starting consumer"

    if [[ ! -f /config/obmp-psql.yml ]]; then
        cd /config
        unzip /usr/local/openbmp/obmp-psql-consumer.jar obmp-psql.yml

        if [[ ! -f /config/obmp-psql.yml ]]; then
            echo "ERROR: Cannot create /config/obmp-psql.yml"
            echo "       Update permissions on /config volume to 7777 OR add configuration file to /config volume"
            exit 1
        fi

        # Update configuration
        sed -i -e "s/\([ ]*bootstrap.servers:\)\(.*\)/\1 \"${KAFKA_FQDN}\"/" /config/obmp-psql.yml
        sed -i -e "s/\([ ]*host[ ]*:\)\(.*\)/\1 \"${POSTGRES_HOST}:${POSTGRES_PORT}\"/" /config/obmp-psql.yml
        sed -i -e "s/\([ ]*username[ ]*:\)\(.*\)/\1 \"${POSTGRES_USER}\"/" /config/obmp-psql.yml
        sed -i -e "s/\([ ]*password[ ]*:\)\(.*\)/\1 \"${POSTGRES_PASSWORD}\"/" /config/obmp-psql.yml
        sed -i -e "s/\([ ]*db_name[ ]*:\)\(.*\)/\1 \"${POSTGRES_DB}\"/" /config/obmp-psql.yml
        sed -i -e "s/\([ ]*ssl_enable[ ]*:\)\(.*\)/\1 \"${POSTGRES_SSL_ENABLE}\"/" /config/obmp-psql.yml
        sed -i -e "s/\([ ]*ssl_mode[ ]*:\)\(.*\)/\1 \"${POSTGRES_SSL_MODE}\"/" /config/obmp-psql.yml
    fi

    heap_mem=${MEM}G

    # Run from /var/log so JVM dumps/GC artifacts land next to the logs
    cd /var/log
    java -Xmx${heap_mem} -Xms128m -XX:+UseG1GC -XX:+UnlockExperimentalVMOptions \
         -XX:InitiatingHeapOccupancyPercent=30 -XX:G1MixedGCLiveThresholdPercent=30 \
         -XX:MaxGCPauseMillis=200 -XX:ParallelGCThreads=20 -XX:ConcGCThreads=5 \
         -XX:+ExitOnOutOfMemoryError \
         -Duser.timezone=UTC \
         -jar /usr/local/openbmp/obmp-psql-consumer.jar \
         -cf /config/obmp-psql.yml > /var/log/psql-console.log &

    cd /tmp
}

# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Run
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
SYS_NUM_CPU=$(grep -c processor /proc/cpuinfo)

# Clear locks left over from a previous run
if [[ ! -d /tmp/locks ]]; then
    mkdir /tmp/locks
else
    rm -rf /tmp/locks/*
fi

update_hosts

check_kafka

config_postgres_profile

source /usr/local/openbmp/pg_profile

config_cron

rm -f /etc/cron.d/openbmp-rpki
if [[ ${ENABLE_RPKI:-""} != "" && $ENABLE_RPKI == 1 ]]; then
    enable_rpki
fi

rm -f /etc/cron.d/openbmp-irr
if [[ ${ENABLE_IRR:-""} != "" && $ENABLE_IRR == 1 ]]; then
    enable_irr
fi

rm -f /etc/cron.d/openbmp-dbip
if [[ ${ENABLE_DBIP:-""} != "" && $ENABLE_DBIP == 1 ]]; then
    enable_dbip
fi

initdb_postgres

# Get rid of previous rsyslogd pid
rm -f /var/run/rsyslogd.pid

service cron start
service rsyslog start

upgrade

run_consumer

echo "===> Now running!!!"

# Watchdog: restart the consumer if it dies
while true; do
    sleep 300
    if ! pgrep -f obmp-psql-consumer.jar >/dev/null 2>&1; then
        echo "PSQL consumer is not running, restarting in 30 seconds"
        cat /var/log/psql-console.log
        sleep 30
        run_consumer
    fi
done